void init_callin_functable(void) { unsigned char *env_top, *address_top; uint4 address_len; /* The address of the vector table containing pointers * to gt_timers functions is of type unsigned int which * is o.k in current GT.M implementations, however when * GT.M migrates to a fully 64 port this part of the code * might have be re-visited. */ assert ( 64 > sizeof(gtmvectortable_address)); address_top = i2asc(gtmvectortable_address, (uint4 )&callintogtm_vectortable[0]); *address_top = '\0'; address_len = (uint4 )(address_top - >mvectortable_address[0]); env_top = >mvectortable_env[0]; memcpy(env_top, GTM_CALLIN_START_ENV, strlen(GTM_CALLIN_START_ENV)); memcpy((env_top + strlen(GTM_CALLIN_START_ENV)), gtmvectortable_address, address_len); *(env_top + strlen(GTM_CALLIN_START_ENV) + address_len) = '\0'; if (PUTENV((char *)gtmvectortable_env)) { rts_error(VARLSTCNT(1) errno); } }
boolean_t cli_disallow_mupip_replicate(void) { int disallow_return_value = 0; boolean_t p1, p2, p3, p4, p5, p6; *cli_err_str_ptr = 0; p1 = d_c_cli_present("EDITINSTANCE"); p2 = d_c_cli_present("INSTANCE_CREATE"); p3 = d_c_cli_present("RECEIVER"); p4 = d_c_cli_present("SOURCE"); p5 = d_c_cli_present("UPDATEPROC"); p6 = d_c_cli_present("UPDHELPER"); /* any MUPIP REPLIC command should contain at LEAST one of the above qualifiers */ disallow_return_value = !(p1 || p2 || p3 || p4 || p5 || p6); CLI_DIS_CHECK; /* Note CLI_DIS_CHECK_N_RESET is not used as we want to reuse the computed error string (cli_err_str_ptr) * for the next check as well in case it fails. Note that this can be done only if both checks use * exactly the same set of qualifiers (which is TRUE in this case). */ /* any MUPIP REPLIC command should contain at MOST one of the above qualifiers */ disallow_return_value = cli_check_any2(VARLSTCNT(6) p1, p2, p3, p4, p5, p6); CLI_DIS_CHECK_N_RESET; return FALSE; }
/* Initialize a shared-memory mutex structure to a clean state: both the waiting-process queue
 * and the free queue are emptied, their latches released, and n queue-entry slots are prepared
 * and inserted onto the free queue.
 *   addr  - base of the mutex structure to initialize
 *   n     - number of queue-entry slots to place on the free queue (asserted > 0)
 *   crash - NOTE(review): not referenced in the portion of this routine visible here; confirm
 *           its use against the full source.
 * NOTE(review): this routine appears truncated in this view — the INSQTI-failure handling and
 * the remainder of the loop are not visible.
 */
static void clean_initialize(mutex_struct_ptr_t addr, int n, bool crash)
{
	mutex_que_entry_ptr_t	q_free_entry;
#	if defined(MUTEX_MSEM_WAKE) && !defined(POSIX_MSEM)
	msemaphore		*status;
#	endif

	assert(n > 0);
	addr->queslots = n;
	/* Initialize the waiting process queue to be empty */
	addr->prochead.que.fl = addr->prochead.que.bl = 0;
	SET_LATCH_GLOBAL(&addr->prochead.latch, LOCK_AVAILABLE);
	/* Initialize the free queue to be empty */
	addr->freehead.que.fl = addr->freehead.que.bl = 0;
	SET_LATCH_GLOBAL(&addr->freehead.latch, LOCK_AVAILABLE);
	/* Clear the first free entry (located immediately after the free-queue head) */
	q_free_entry = (mutex_que_entry_ptr_t)((sm_uc_ptr_t)&addr->freehead + SIZEOF(mutex_que_head));
	q_free_entry->que.fl = q_free_entry->que.bl = 0;
	q_free_entry->pid = 0;
	q_free_entry->super_crit = (void *)NULL;
	q_free_entry->mutex_wake_instance = 0;
	while (n--)
	{
#		ifdef MUTEX_MSEM_WAKE
#		ifdef POSIX_MSEM
		if (-1 == sem_init(&q_free_entry->mutex_wake_msem, TRUE, 0))	/* Shared lock with no initial resources (locked) */
#		else
		if ((NULL == (status = msem_init(&q_free_entry->mutex_wake_msem, MSEM_LOCKED)))
				|| ((msemaphore *)-1 == status))
#		endif
			rts_error(VARLSTCNT(7) ERR_MUTEXERR, 0, ERR_TEXT, 2,
				RTS_ERROR_TEXT("Error with mutex wait memory semaphore initialization"), errno);
#		endif
		/* Initialize fl,bl links to 0 before INSQTI as it (gtm_insqti in relqueopi.c) asserts this */
		DEBUG_ONLY(((que_ent_ptr_t)q_free_entry)->fl = 0;)
		DEBUG_ONLY(((que_ent_ptr_t)q_free_entry)->bl = 0;)
		if (INTERLOCK_FAIL == INSQTI((que_ent_ptr_t)q_free_entry++, (que_head_ptr_t)&addr->freehead))
/* Resolve the code address of <label>+<offset> within routine <rtn>, ZLINKing the routine
 * first if it is not already part of this image.
 *   rtn     - routine name to locate (auto-ZLINKed on first reference)
 *   label   - label within the routine
 *   offset  - line offset from the label
 *   hdr     - out: receives the routine header address
 *   labaddr - out: receives the address of the resolved line-number entry
 * Raises ERR_JOBLABOFF when the label/offset cannot be resolved (including a non-zero
 * offset into a label-only routine).
 */
void job_addr(mstr *rtn, mstr *label, int4 offset, char **hdr, char **labaddr)
{
	rhdtyp	*routine_hdr;
	int4	*line_ptr;
	mval	routine_mval;

	error_def(ERR_JOBLABOFF);

	routine_hdr = find_rtn_hdr(rtn);
	if (0 == routine_hdr)
	{	/* Routine not yet linked into this image: ZLINK it, after which the lookup must succeed */
		routine_mval.mvtype = MV_STR;
		routine_mval.str = *rtn;
		op_zlink(&routine_mval, 0);
		routine_hdr = find_rtn_hdr(rtn);
		if (0 == routine_hdr)
			GTMASSERT;
	}
	/* A label-only routine admits no non-zero offset; leaving line_ptr NULL raises the error below */
	line_ptr = NULL;
	if (!routine_hdr->label_only || (0 == offset))
		line_ptr = find_line_addr(routine_hdr, label, offset);
	if (!line_ptr)
		rts_error(VARLSTCNT(1) ERR_JOBLABOFF);
	*labaddr = (char *)LINE_NUMBER_ADDR(routine_hdr, line_ptr);
	*hdr = (char *)routine_hdr;
}
void bin_load(uint4 begin, uint4 end) { unsigned char *ptr, *cp1, *cp2, *btop, *gvkey_char_ptr, *tmp_ptr, *tmp_key_ptr, *c, *ctop; unsigned char hdr_lvl, src_buff[MAX_KEY_SZ + 1], dest_buff[MAX_ZWR_KEY_SZ], cmpc_str[MAX_KEY_SZ + 1], dup_key_str[MAX_KEY_SZ + 1]; unsigned char *end_buff; unsigned short rec_len, next_cmpc; int len; int current, last, length, max_blk_siz, max_key, status; uint4 iter, max_data_len, max_subsc_len, key_count; ssize_t rec_count, global_key_count, subsc_len,extr_std_null_coll; boolean_t need_xlation, new_gvn, utf8_extract; rec_hdr *rp, *next_rp; mval v, tmp_mval; mstr mstr_src, mstr_dest; collseq *extr_collseq, *db_collseq, *save_gv_target_collseq; coll_hdr extr_collhdr, db_collhdr; gv_key *tmp_gvkey = NULL; /* null-initialize at start, will be malloced later */ char std_null_coll[BIN_HEADER_NUMSZ + 1]; # ifdef GTM_CRYPT gtmcrypt_key_t *encr_key_handles; char *inbuf; int4 index; int req_dec_blk_size, init_status, crypt_status; muext_hash_hdr_ptr_t hash_array = NULL; # endif DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; assert(4 == SIZEOF(coll_hdr)); gvinit(); v.mvtype = MV_STR; len = file_input_bin_get((char **)&ptr); hdr_lvl = EXTR_HEADER_LEVEL(ptr); if (!(((('4' == hdr_lvl) || ('5' == hdr_lvl)) && (BIN_HEADER_SZ == len)) || (('4' > hdr_lvl) && (V3_BIN_HEADER_SZ == len)))) { rts_error(VARLSTCNT(1) ERR_LDBINFMT); mupip_exit(ERR_LDBINFMT); } /* expecting the level in a single character */ assert(' ' == *(ptr + SIZEOF(BIN_HEADER_LABEL) - 3)); if (0 != memcmp(ptr, BIN_HEADER_LABEL, SIZEOF(BIN_HEADER_LABEL) - 2) || ('2' > hdr_lvl) || *(BIN_HEADER_VERSION) < hdr_lvl) { /* ignore the level check */ rts_error(VARLSTCNT(1) ERR_LDBINFMT); mupip_exit(ERR_LDBINFMT); } /* check if extract was generated in UTF-8 mode */ utf8_extract = (0 == MEMCMP_LIT(&ptr[len - BIN_HEADER_LABELSZ], UTF8_NAME)) ? 
TRUE : FALSE; if ((utf8_extract && !gtm_utf8_mode) || (!utf8_extract && gtm_utf8_mode)) { /* extract CHSET doesn't match $ZCHSET */ if (utf8_extract) rts_error(VARLSTCNT(4) ERR_LOADINVCHSET, 2, LEN_AND_LIT("UTF-8")); else rts_error(VARLSTCNT(4) ERR_LOADINVCHSET, 2, LEN_AND_LIT("M")); mupip_exit(ERR_LDBINFMT); } if ('4' >= hdr_lvl) { /* Binary extracts in V50000-to-V52000 (label=4) and pre-V50000 (label=3) could have a '\0' byte (NULL byte) * in the middle of the string. Replace it with ' ' (space) like it would be in V52000 binary extracts and above. */ for (c = ptr, ctop = c + len; c < ctop; c++) { if ('\0' == *c) *c = ' '; } } util_out_print("Label = !AD\n", TRUE, len, ptr); new_gvn = FALSE; if (hdr_lvl > '3') { memcpy(std_null_coll, ptr + BIN_HEADER_NULLCOLLOFFSET, BIN_HEADER_NUMSZ); std_null_coll[BIN_HEADER_NUMSZ] = '\0'; extr_std_null_coll = STRTOUL(std_null_coll, NULL, 10); if (0 != extr_std_null_coll && 1!= extr_std_null_coll) { rts_error(VARLSTCNT(5) ERR_TEXT, 2, RTS_ERROR_TEXT("Corrupted null collation field in header"), ERR_LDBINFMT); mupip_exit(ERR_LDBINFMT); } } else extr_std_null_coll = 0; # ifdef GTM_CRYPT if ('5' <= hdr_lvl) { int i, num_indexes; len = file_input_bin_get((char **)&ptr); hash_array = (muext_hash_hdr *)malloc(len); /* store hashes of all the files used during extract into muext_hash_hdr structure */ memcpy((char *)hash_array, ptr, len); num_indexes = len / GTMCRYPT_HASH_LEN; encr_key_handles = (gtmcrypt_key_t *)malloc(SIZEOF(gtmcrypt_key_t) * num_indexes); INIT_PROC_ENCRYPTION(crypt_status); GC_BIN_LOAD_ERR(crypt_status); for (index = 0; index < num_indexes; index++) { if (0 == memcmp(hash_array[index].gtmcrypt_hash, EMPTY_GTMCRYPT_HASH, GTMCRYPT_HASH_LEN)) continue; GTMCRYPT_GETKEY(hash_array[index].gtmcrypt_hash, encr_key_handles[index], crypt_status); GC_BIN_LOAD_ERR(crypt_status); } } # endif if ('2' < hdr_lvl) { len = file_input_bin_get((char **)&ptr); if (SIZEOF(coll_hdr) != len) { rts_error(VARLSTCNT(5) ERR_TEXT, 2, 
RTS_ERROR_TEXT("Corrupt collation header"), ERR_LDBINFMT); mupip_exit(ERR_LDBINFMT); } extr_collhdr = *((coll_hdr *)(ptr)); new_gvn = TRUE; } else gtm_putmsg(VARLSTCNT(3) ERR_OLDBINEXTRACT, 1, hdr_lvl - '0'); if (begin < 2) begin = 2; for (iter = 2; iter < begin; iter++) { if (!(len = file_input_bin_get((char **)&ptr))) { gtm_putmsg(VARLSTCNT(3) ERR_LOADEOF, 1, begin); util_out_print("Error reading record number: !UL\n", TRUE, iter); mupip_error_occurred = TRUE; return; } else if (len == SIZEOF(coll_hdr)) { extr_collhdr = *((coll_hdr *)(ptr)); assert(hdr_lvl > '2'); iter--; } } assert(iter == begin); util_out_print("Beginning LOAD at record number: !UL\n", TRUE, begin); max_data_len = 0; max_subsc_len = 0; global_key_count = key_count = 0; rec_count = begin - 1; extr_collseq = db_collseq = NULL; need_xlation = FALSE; assert(NULL == tmp_gvkey); /* GVKEY_INIT macro relies on this */ GVKEY_INIT(tmp_gvkey, DBKEYSIZE(MAX_KEY_SZ)); /* tmp_gvkey will point to malloced memory after this */ for (; !mupip_DB_full ;) { if (++rec_count > end) break; next_cmpc = 0; mupip_error_occurred = FALSE; if (mu_ctrly_occurred) break; if (mu_ctrlc_occurred) { util_out_print("!AD:!_ Key cnt: !UL max subsc len: !UL max data len: !UL", TRUE, LEN_AND_LIT(gt_lit), key_count, max_subsc_len, max_data_len); util_out_print("Last LOAD record number: !UL", TRUE, key_count ? 
(rec_count - 1) : 0); mu_gvis(); util_out_print(0, TRUE); mu_ctrlc_occurred = FALSE; } /* reset the stringpool for every record in order to avoid garbage collection */ stringpool.free = stringpool.base; if (!(len = file_input_bin_get((char **)&ptr)) || mupip_error_occurred) break; else if (len == SIZEOF(coll_hdr)) { extr_collhdr = *((coll_hdr *)(ptr)); assert(hdr_lvl > '2'); new_gvn = TRUE; /* next record will contain a new gvn */ rec_count--; /* Decrement as this record does not count as a record for loading purposes */ continue; } rp = (rec_hdr*)(ptr); # ifdef GTM_CRYPT if ('5' <= hdr_lvl) { /* Getting index value from the extracted file. It indicates which database file this record belongs to */ GET_LONG(index, ptr); if (-1 != index) /* Indicates that the record is encrypted. */ { req_dec_blk_size = len - SIZEOF(int4); inbuf = (char *)(ptr + SIZEOF(int4)); GTMCRYPT_DECODE_FAST(encr_key_handles[index], inbuf, req_dec_blk_size, NULL, crypt_status); GC_BIN_LOAD_ERR(crypt_status); } rp = (rec_hdr*)(ptr + SIZEOF(int4)); } # endif btop = ptr + len; cp1 = (unsigned char*)(rp + 1); v.str.addr = (char*)cp1; while (*cp1++) ; v.str.len =INTCAST((char*)cp1 - v.str.addr - 1); if (('2' >= hdr_lvl) || new_gvn) { if ((HASHT_GBLNAME_LEN == v.str.len) && (0 == memcmp(v.str.addr, HASHT_GBLNAME, HASHT_GBLNAME_LEN))) continue; bin_call_db(BIN_BIND, (INTPTR_T)gd_header, (INTPTR_T)&v.str); max_key = gv_cur_region->max_key_size; db_collhdr.act = gv_target->act; db_collhdr.ver = gv_target->ver; db_collhdr.nct = gv_target->nct; } GET_USHORT(rec_len, &rp->rsiz); if (rp->cmpc != 0 || v.str.len > rec_len || mupip_error_occurred) { bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count); mu_gvis(); util_out_print(0, TRUE); continue; } if (new_gvn) { global_key_count = 1; if ((db_collhdr.act != extr_collhdr.act || db_collhdr.ver != extr_collhdr.ver || db_collhdr.nct != extr_collhdr.nct || gv_cur_region->std_null_coll != extr_std_null_coll)) { if (extr_collhdr.act) { if 
(extr_collseq = ready_collseq((int)extr_collhdr.act)) { if (!do_verify(extr_collseq, extr_collhdr.act, extr_collhdr.ver)) { gtm_putmsg(VARLSTCNT(8) ERR_COLLTYPVERSION, 2, extr_collhdr.act, extr_collhdr.ver, ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base); mupip_exit(ERR_COLLTYPVERSION); } } else { gtm_putmsg(VARLSTCNT(7) ERR_COLLATIONUNDEF, 1, extr_collhdr.act, ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base); mupip_exit(ERR_COLLATIONUNDEF); } } if (db_collhdr.act) { if (db_collseq = ready_collseq((int)db_collhdr.act)) { if (!do_verify(db_collseq, db_collhdr.act, db_collhdr.ver)) { gtm_putmsg(VARLSTCNT(8) ERR_COLLTYPVERSION, 2, db_collhdr.act, db_collhdr.ver, ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base); mupip_exit(ERR_COLLTYPVERSION); } } else { gtm_putmsg(VARLSTCNT(7) ERR_COLLATIONUNDEF, 1, db_collhdr.act, ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base); mupip_exit(ERR_COLLATIONUNDEF); } } need_xlation = TRUE; } else need_xlation = FALSE; } new_gvn = FALSE; for (; rp < (rec_hdr*)btop; rp = (rec_hdr*)((unsigned char *)rp + rec_len)) { GET_USHORT(rec_len, &rp->rsiz); if (rec_len + (unsigned char *)rp > btop) { bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count); mu_gvis(); util_out_print(0, TRUE); break; } cp1 = (unsigned char*)(rp + 1); cp2 = gv_currkey->base + rp->cmpc; current = 1; for (;;) { last = current; current = *cp2++ = *cp1++; if (0 == last && 0 == current) break; if (cp1 > (unsigned char *)rp + rec_len || cp2 > (unsigned char *)gv_currkey + gv_currkey->top) { bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count); mu_gvis(); util_out_print(0, TRUE); break; } } if (mupip_error_occurred) break; gv_currkey->end = cp2 - gv_currkey->base - 1; if (need_xlation) { assert(hdr_lvl >= '3'); assert(extr_collhdr.act || db_collhdr.act || extr_collhdr.nct || db_collhdr.nct || extr_std_null_coll != gv_cur_region->std_null_coll); /* gv_currkey would have been modified/translated in the earlier put */ memcpy(gv_currkey->base, 
cmpc_str, next_cmpc); next_rp = (rec_hdr *)((unsigned char*)rp + rec_len); if ((unsigned char*)next_rp < btop) { next_cmpc = next_rp->cmpc; assert(next_cmpc <= gv_currkey->end); memcpy(cmpc_str, gv_currkey->base, next_cmpc); } else next_cmpc = 0; /* length of the key might change (due to nct variation), * so get a copy of the original key from the extract */ memcpy(dup_key_str, gv_currkey->base, gv_currkey->end + 1); gvkey_char_ptr = dup_key_str; while (*gvkey_char_ptr++) ; gv_currkey->prev = 0; gv_currkey->end = gvkey_char_ptr - dup_key_str; assert(gv_keysize <= tmp_gvkey->top); while (*gvkey_char_ptr) { /* get next subscript (in GT.M internal subsc format) */ subsc_len = 0; tmp_ptr = src_buff; while (*gvkey_char_ptr) *tmp_ptr++ = *gvkey_char_ptr++; subsc_len = tmp_ptr - src_buff; src_buff[subsc_len] = '\0'; if (extr_collseq) { /* undo the extract time collation */ TREF(transform) = TRUE; save_gv_target_collseq = gv_target->collseq; gv_target->collseq = extr_collseq; } else TREF(transform) = FALSE; /* convert the subscript to string format */ end_buff = gvsub2str(src_buff, dest_buff, FALSE); /* transform the string to the current subsc format */ TREF(transform) = TRUE; tmp_mval.mvtype = MV_STR; tmp_mval.str.addr = (char *)dest_buff; tmp_mval.str.len = INTCAST(end_buff - dest_buff); tmp_gvkey->prev = 0; tmp_gvkey->end = 0; if (extr_collseq) gv_target->collseq = save_gv_target_collseq; mval2subsc(&tmp_mval, tmp_gvkey); /* we now have the correctly transformed subscript */ tmp_key_ptr = gv_currkey->base + gv_currkey->end; memcpy(tmp_key_ptr, tmp_gvkey->base, tmp_gvkey->end + 1); gv_currkey->prev = gv_currkey->end; gv_currkey->end += tmp_gvkey->end; gvkey_char_ptr++; } if ( gv_cur_region->std_null_coll != extr_std_null_coll && gv_currkey->prev) { if (extr_std_null_coll == 0) { GTM2STDNULLCOLL(gv_currkey->base, gv_currkey->end); } else { STD2GTMNULLCOLL(gv_currkey->base, gv_currkey->end); } } } if (gv_currkey->end >= max_key) { bin_call_db(ERR_COR, (INTPTR_T)rec_count, 
(INTPTR_T)global_key_count); mu_gvis(); util_out_print(0, TRUE); continue; } if (max_subsc_len < (gv_currkey->end + 1)) max_subsc_len = gv_currkey->end + 1; v.str.addr = (char*)cp1; v.str.len =INTCAST(rec_len - (cp1 - (unsigned char *)rp)); if (max_data_len < v.str.len) max_data_len = v.str.len; bin_call_db(BIN_PUT, (INTPTR_T)&v, 0); if (mupip_error_occurred) { if (!mupip_DB_full) { bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count); util_out_print(0, TRUE); } break; } key_count++; global_key_count++; } } GTMCRYPT_ONLY( if (NULL != hash_array) free(hash_array); )
bool open_source_file (void) { static readonly unsigned char open_params_list[] = { (unsigned char)iop_readonly, (unsigned char)iop_eol }; mstr fstr; int status, n,index; parse_blk pblk; char *p, buff[MAX_FBUFF + 1]; time_t clock; struct stat statbuf; mval val; mval pars; error_def (ERR_FILEPARSE); memset(&pblk, 0, sizeof(pblk)); pblk.buffer = buff; pblk.buff_size = MAX_FBUFF; pblk.fop = F_SYNTAXO; fstr.addr = (char *)source_file_name; fstr.len = source_name_len; status = parse_file(&fstr, &pblk); if (!(status & 1)) rts_error(VARLSTCNT(5) ERR_FILEPARSE, 2, fstr.len, fstr.addr, status); pars.mvtype = MV_STR; pars.str.len = sizeof(open_params_list); pars.str.addr = (char *)open_params_list; val.mvtype = MV_STR; val.str.len = source_name_len; val.str.addr = (char *)source_file_name; op_open(&val, &pars, 0, 0); dev_in_use = io_curr_device; /* save list file info in use if it is opened */ op_use(&val, &pars); compile_src_dev = io_curr_device; if (tt_so_do_once) { clock = time(0); memcpy(&routine_name[0], "MDEFAULT", sizeof("MDEFAULT") - 1); memcpy(&module_name[0], "MDEFAULT", sizeof("MDEFAULT") - 1); } else { STAT_FILE((char *)&source_file_name[0], &statbuf, status); assert(status == 0); clock = statbuf.st_mtime; p = pblk.l_name; n = pblk.b_name; if (n > sizeof(mident)) n = sizeof(mident); index = 0; if ('_' == *p) { routine_name[index] = '%'; module_name[index] = '_'; p++; index++; } for (; index < n; index++) routine_name[index] = module_name[index] = *p++; for (; index < (sizeof(mident)); index++ ) routine_name[index] = module_name[index] = 0; } p = (char *)GTM_CTIME(&clock); memcpy (rev_time_buf, p + 4, sizeof(rev_time_buf)); io_curr_device = dev_in_use; /* set it back to make open_list_file save the device */ return TRUE; }
/* Importance Sampling.
 * Estimate the number of blocks (per B-tree level) of the global named by gl_ptr by performing
 * M random root-to-leaf traversals, then print the estimates with a 2-sigma (~95% confidence)
 * error bound.  Returns EXIT_NRM on success (including "global no longer exists", which is not
 * an error), EXIT_ERR on operator interrupt (ctrl-C / ctrl-Y).
 *   gl_ptr - <globalname,region> to measure
 *   M      - number of random traversals to perform
 *   seed   - srand48 seed; 0 selects a time/pid-derived seed
 */
int4 mu_size_impsample(glist *gl_ptr, int4 M, int4 seed)
{
	boolean_t	tn_aborted;
	double		a[MAX_BT_DEPTH + 1];	/* a[j] is # of adjacent block pointers in level j block of cur traversal */
	double		r[MAX_BT_DEPTH + 1];	/* r[j] is #records in level j block of current traversal */
	enum cdb_sc	status;
	int		k, h;
	stat_t		rstat;
	trans_num	ret_tn;
	unsigned int	lcl_t_tries;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	inctn_opcode = inctn_invalid_op;
	/* set gv_target/gv_currkey/gv_cur_region/cs_addrs/cs_data to correspond to <globalname,reg> in gl_ptr */
	DO_OP_GVNAME(gl_ptr);
	if (0 == gv_target->root)
	{	/* Global does not exist (online rollback). Not an error. */
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr);
		return EXIT_NRM;
	}
	if (!seed)
		seed = (int4)(time(0) * process_id);
	srand48(seed);
	/* do M random traversals */
	INIT_STATS(rstat);
	for (k = 1; k <= M; k++)
	{
		if (mu_ctrlc_occurred || mu_ctrly_occurred)
			return EXIT_ERR;
		/* Each traversal runs as a (restartable) transaction so a consistent view is sampled */
		t_begin(ERR_MUSIZEFAIL, 0);
		for (;;)
		{
			CLEAR_VECTOR(r);
			CLEAR_VECTOR(a);
			if (cdb_sc_normal != (status = mu_size_rand_traverse(r, a)))	/* WARNING: assignment */
			{	/* Traversal restarted; retry the transaction */
				assert((CDB_STAGNATE > t_tries) || IS_FINAL_RETRY_CODE(status));
				t_retry(status);
				continue;
			}
			gv_target->clue.end = 0;
			gv_target->hist.h[0] = gv_target->hist.h[1];	/* No level 0 block to validate */
			DEBUG_ONLY(lcl_t_tries = t_tries);
			if ((trans_num)0 == (ret_tn = t_end(&gv_target->hist, NULL, TN_NOT_SPECIFIED)))	/* WARNING: assignment */
			{	/* Commit/validation failed; check whether the global vanished, else retry */
				ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted);
				if (tn_aborted)
				{	/* Global does not exist (online rollback). Not an error. */
					gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len,
							GNAME(gl_ptr).addr);
					return EXIT_NRM;
				}
				continue;
			}
			/* Validated sample: fold it into the running statistics */
			accum_stats_impsmpl(&rstat, r, a);
			break;
		}
	}
	finalize_stats_impsmpl(&rstat);
	/* display rstat data
	 * Showing the error as 2 standard deviations which is a 95% confidence interval for the
	 * mean number of blocks at each level
	 */
	util_out_print("Number of generated samples = !UL", FLUSH, rstat.n);
	util_out_print("Level Blocks Adjacent 2 sigma(+/-)", FLUSH);
	/* Skip empty top levels, then print one line per level (the post-loop call prints level 0) */
	for (h = MAX_BT_DEPTH; (0 <= h) && (rstat.blktot[h] < EPS); h--)
		;
	for ( ; h > 0; h--)
		util_out_print("!5UL !15UL !15UL !15UL ~ !3UL%", FLUSH, h,
			(int)ROUND(rstat.blktot[h]),
			(int)ROUND(mu_int_adj[h]),
			(int)ROUND(sqrt(rstat.blkerr[h]) * 2),
			(int)ROUND(sqrt(rstat.blkerr[h]) * 2 / rstat.blktot[h] * 100.0));
	util_out_print("!5UL !15UL !15UL !15UL ~ !3UL%", FLUSH, h,
		(int)ROUND(rstat.blktot[h]),
		(int)ROUND(mu_int_adj[h]),
		(int)ROUND(sqrt(rstat.blkerr[h]) * 2),
		(int)ROUND(sqrt(rstat.blkerr[h]) * 2 / rstat.blktot[h] * 100.0));
	util_out_print("Total !15UL !15UL !15UL ~ !3UL%", FLUSH,
		(int)ROUND(rstat.B),
		(int)ROUND(rstat.AT),
		(int)ROUND(sqrt(rstat.error) * 2),
		(int)ROUND(sqrt(rstat.error) * 2 / rstat.B * 100.0));
	return EXIT_NRM;
}
/* ZPRINT: write the source lines of routine <rtn> from <start_label>+<start_int_exp> through
 * <end_label>+<end_int_exp> to the current device, ZLINKing the routine if needed.
 *   rtn           - routine to look in, or null string
 *   start_label   - label to be located, or null string
 *   start_int_exp - label offset or line number to reference
 *   end_label / end_int_exp - end of the range.  NOTE: if only the first label is specified,
 *       the parser makes the second label a duplicate of the first (not so vice versa).
 * Raises ZPRTLABNOTFND / FILENOTFND / ZLINKFILE errors as appropriate; silently returns when
 * the requested range is empty or source is unavailable.
 */
void op_zprint(mval *rtn, mval *start_label, int start_int_exp, mval *end_label, int end_int_exp)
{
	mval	print_line, null_str;
	mstr	*src1, *src2;
	uint4	stat1, stat2;
	rhdtyp	*rtn_vector;

	error_def(ERR_FILENOTFND);
	error_def(ERR_TXTSRCMAT);
	error_def(ERR_ZPRTLABNOTFND);
	error_def(ERR_ZLINKFILE);
	error_def(ERR_ZLMODULE);

	MV_FORCE_STR(start_label);
	MV_FORCE_STR(end_label);
	MV_FORCE_STR(rtn);
	if (NULL == (rtn_vector = find_rtn_hdr(&rtn->str)))
	{	/* Routine not linked yet: ZLINK it; failure to find it afterwards is an error */
		op_zlink(rtn, NULL);
		rtn_vector = find_rtn_hdr(&rtn->str);
		if (NULL == rtn_vector)
			rts_error(VARLSTCNT(8) ERR_ZLINKFILE, 2, rtn->str.len, rtn->str.addr,
				ERR_ZLMODULE, 2, mid_len(&zlink_mname), &zlink_mname.c[0]);
	}
	/* Resolve the starting line */
	stat1 = get_src_line(rtn, start_label, start_int_exp, &src1);
	if (stat1 & LABELNOTFOUND)
		rts_error(VARLSTCNT(1) ERR_ZPRTLABNOTFND);
	if (stat1 & SRCNOTFND)
		rts_error(VARLSTCNT(4) ERR_FILENOTFND, 2, rtn_vector->src_full_name.len, rtn_vector->src_full_name.addr);
	if (stat1 & (SRCNOTAVAIL | AFTERLASTLINE))
		return;
	if (stat1 & (ZEROLINE | NEGATIVELINE))
	{	/* Start at/before line zero: begin from line 1 instead */
		null_str.mvtype = MV_STR;
		null_str.str.len = 0;
		stat1 = get_src_line(rtn, &null_str, 1, &src1);
		if (stat1 & AFTERLASTLINE)	/* the "null" file */
			return;
	}
	/* Resolve the ending line; an empty end spec means "through the last line" */
	if (end_int_exp == 0 && (end_label->str.len == 0 || *end_label->str.addr == 0))
		stat2 = AFTERLASTLINE;
	else if ((stat2 = get_src_line(rtn, end_label, end_int_exp, &src2)) & LABELNOTFOUND)
		rts_error(VARLSTCNT(1) ERR_ZPRTLABNOTFND);
	if (stat2 & (ZEROLINE | NEGATIVELINE))
		return;
	if (stat2 & AFTERLASTLINE)
	{	/* Point src2 at the last real line of the routine */
		null_str.mvtype = MV_STR;
		null_str.str.len = 0;
		stat2 = get_src_line(rtn, &null_str, 1, &src2);
		/* number of lines less one for duplicated zero'th line and one due to termination condition being <= */
		assert((INTPTR_T)src2 > 0);
		src2 += rtn_vector->lnrtab_len - 2;
	}
	if (stat1 & CHECKSUMFAIL)
	{	/* Source on disk no longer matches the linked object: warn, then print anyway */
		rts_error(VARLSTCNT(1) INFO_MSK(ERR_TXTSRCMAT));
		op_wteol(1);
	}
	/* Emit each source line in the inclusive range [src1, src2] */
	print_line.mvtype = MV_STR;
	for ( ; src1 <= src2; src1++)
	{
		if (outofband)
			outofband_action(FALSE);
		print_line.str.addr = src1->addr;
		print_line.str.len = src1->len;
		op_write(&print_line);
		op_wteol(1);
	}
	return;
}
/* Complete the handling of a signal whose response was deferred (e.g. so the process could get
 * out of crit first): emit the delayed termination messages to the user and the system console,
 * mark the process as exiting, drive any registered call-on-signal routine, and exit().
 * This routine does not return.
 */
void deferred_signal_handler(void)
{
	void	(*signal_routine)();
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	/* To avoid nested calls to this routine, we set forced_exit to FALSE at the very beginning */
	forced_exit = FALSE;
	if (exit_handler_active)
	{
		assert(FALSE);	/* at this point in time (June 2003) there is no way we know of to get here, hence the assert */
		return;	/* since anyway we are exiting currently, resume exit handling instead of reissuing another one */
	}
	/* For signals that get a delayed response so we can get out of crit, we also delay the messages.
	 * This routine will output those delayed messages from the appropriate structures to both the
	 * user and the system console.
	 */
	/* note can't use switch here because ERR_xxx are not defined as constants */
	if (ERR_KILLBYSIG == forced_exit_err)
	{
		send_msg(VARLSTCNT(6) ERR_KILLBYSIG, 4, GTMIMAGENAMETXT(image_type), process_id, signal_info.signal);
		gtm_putmsg(VARLSTCNT(6) ERR_KILLBYSIG, 4, GTMIMAGENAMETXT(image_type), process_id, signal_info.signal);
	} else if (ERR_KILLBYSIGUINFO == forced_exit_err)
	{	/* Variant carrying the sender's pid and uid */
		send_msg(VARLSTCNT(8) ERR_KILLBYSIGUINFO, 6, GTMIMAGENAMETXT(image_type), process_id,
			signal_info.signal, signal_info.send_pid, signal_info.send_uid);
		gtm_putmsg(VARLSTCNT(8) ERR_KILLBYSIGUINFO, 6, GTMIMAGENAMETXT(image_type), process_id,
			signal_info.signal, signal_info.send_pid, signal_info.send_uid);
	} else if (ERR_KILLBYSIGSINFO1 == forced_exit_err)
	{	/* Variant carrying both the faulting instruction and the bad address */
		send_msg(VARLSTCNT(8) ERR_KILLBYSIGSINFO1, 6, GTMIMAGENAMETXT(image_type), process_id,
			signal_info.signal, signal_info.int_iadr, signal_info.bad_vadr);
		gtm_putmsg(VARLSTCNT(8) ERR_KILLBYSIGSINFO1, 6, GTMIMAGENAMETXT(image_type), process_id,
			signal_info.signal, signal_info.int_iadr, signal_info.bad_vadr);
	} else if (ERR_KILLBYSIGSINFO2 == forced_exit_err)
	{	/* Variant carrying only the faulting instruction address */
		send_msg(VARLSTCNT(7) ERR_KILLBYSIGSINFO2, 5, GTMIMAGENAMETXT(image_type), process_id,
			signal_info.signal, signal_info.int_iadr);
		gtm_putmsg(VARLSTCNT(7) ERR_KILLBYSIGSINFO2, 5, GTMIMAGENAMETXT(image_type), process_id,
			signal_info.signal, signal_info.int_iadr);
	} else if (ERR_KILLBYSIGSINFO3 == forced_exit_err)
	{	/* Variant carrying only the bad virtual address */
		send_msg(VARLSTCNT(7) ERR_KILLBYSIGSINFO3, 5, GTMIMAGENAMETXT(image_type), process_id,
			signal_info.signal, signal_info.bad_vadr);
		gtm_putmsg(VARLSTCNT(7) ERR_KILLBYSIGSINFO3, 5, GTMIMAGENAMETXT(image_type), process_id,
			signal_info.signal, signal_info.bad_vadr);
	} else if (ERR_FORCEDHALT != forced_exit_err || !gtm_quiet_halt)
	{	/* No HALT messages if quiet halt is requested */
		send_msg(VARLSTCNT(1) forced_exit_err);
		gtm_putmsg(VARLSTCNT(1) forced_exit_err);
	}
	assert(OK_TO_INTERRUPT);
	/* Signal intent to exit BEFORE driving condition handlers. This avoids checks that will otherwise fail (for example
	 * if mdb_condition_handler/preemptive_ch gets called below, that could invoke the RESET_GV_TARGET macro which in turn
	 * would assert that gv_target->gd_csa is equal to cs_addrs. This could not be true in case we were in mainline code
	 * that was interrupted by the flush timer for a different region which in turn was interrupted by an external signal
	 * that would drive us to exit. Setting the "process_exiting" variable causes those csa checks to pass.
	 */
	SET_PROCESS_EXITING_TRUE;
#	ifdef DEBUG
	/* White-box hook: hold off timer interrupts and stall to widen a test window */
	if (gtm_white_box_test_case_enabled && (WBTEST_DEFERRED_TIMERS == gtm_white_box_test_case_number)
		&& (2 == gtm_white_box_test_case_count))
	{
		DEFER_INTERRUPTS(INTRPT_NO_TIMER_EVENTS);
		DBGFPF((stderr, "DEFERRED_SIGNAL_HANDLER: will sleep for 20 seconds\n"));
		LONG_SLEEP(20);
		DBGFPF((stderr, "DEFERRED_SIGNAL_HANDLER: done sleeping\n"));
		ENABLE_INTERRUPTS(INTRPT_NO_TIMER_EVENTS);
	}
#	endif
	/* If any special routines are registered to be driven on a signal, drive them now */
	if ((0 != exi_condition) && (NULL != call_on_signal))
	{
		signal_routine = call_on_signal;
		call_on_signal = NULL;	/* So we don't recursively call ourselves */
		(*signal_routine)();
	}
	/* Note, we do not drive create_fatal_error zshow_dmp() in this routine since any deferrable signals are
	 * by definition not fatal.
	 */
	exit(-exi_condition);
}
/* CLOSE processing for an RMS (sequential disk / fifo) device: flush pending writes, process the
 * CLOSE deviceparameter list (DELETE / RENAME), mark the device closed, and release the underlying
 * file descriptor and stream.  On z/OS, also close the other side of a fifo pair if distinct.
 *   iod - the device being closed (must be of type rm and in state dev_open)
 *   pp  - packed deviceparameter list terminated by iop_eol
 */
void iorm_close(io_desc *iod, mval *pp)
{
	d_rm_struct	*rm_ptr;
	unsigned char	c;
	char		*path, *path2;
	int		fclose_res;
	int		stat_res;
	int		fstat_res;
	struct stat	statbuf, fstatbuf;
	int		p_offset;

	assert(iod->type == rm);
	if (iod->state != dev_open)
		return;
	rm_ptr = (d_rm_struct *)iod->dev_sp;
	iorm_use(iod, pp);
	/* Flush any buffered output from a prior WRITE before closing */
	if (iod->dollar.x && rm_ptr->lastop == RM_WRITE && !iod->dollar.za)
		iorm_flush(iod);
	/* Walk the deviceparameter list */
	p_offset = 0;
	while (*(pp->str.addr + p_offset) != iop_eol)
	{
		switch (c = *(pp->str.addr + p_offset++))
		{
		case iop_delete:
			/* Only unlink if the name still refers to the very file we have open (same inode) */
			path = iod->trans_name->dollar_io;
			FSTAT_FILE(rm_ptr->fildes, &fstatbuf, fstat_res);
			if (-1 == fstat_res)
				rts_error(VARLSTCNT(1) errno);
			STAT_FILE(path, &statbuf, stat_res);
			if (-1 == stat_res)
				rts_error(VARLSTCNT(1) errno);
			if (fstatbuf.st_ino == statbuf.st_ino)
				if (UNLINK(path) == -1)
					rts_error(VARLSTCNT(1) errno);
			break;
		case iop_rename:
			/* Rename via link-then-unlink, again only if the inode still matches */
			path = iod->trans_name->dollar_io;
			path2 = (char *)(pp->str.addr + p_offset + 1);	/* skip the length byte of the counted string */
			FSTAT_FILE(rm_ptr->fildes, &fstatbuf, fstat_res);
			if (-1 == fstat_res)
				rts_error(VARLSTCNT(1) errno);
			STAT_FILE(path, &statbuf, stat_res);
			if (-1 == stat_res)
				rts_error(VARLSTCNT(1) errno);
			if (fstatbuf.st_ino == statbuf.st_ino)
			{
				if (LINK(path, path2) == -1)
					rts_error(VARLSTCNT(1) errno);
				if (UNLINK(path) == -1)
					rts_error(VARLSTCNT(1) errno);
			}
			break;
		default:
			break;
		}
		/* Advance past this parameter's payload (variable-size params carry a leading length byte) */
		p_offset += (io_params_size[c] == IOP_VAR_SIZE ?
			(unsigned char)(*(pp->str.addr + p_offset) + 1) : io_params_size[c]);
	}
	if (iod->pair.in != iod)
		assert(iod->pair.out == iod);
	if (iod->pair.out != iod)
		assert(iod->pair.in == iod);
	iod->state = dev_closed;
	iod->dollar.zeof = FALSE;
	iod->dollar.x = 0;
	iod->dollar.y = 0;
	rm_ptr->lastop = RM_NOOP;
	/* Do the close first.  If the fclose() is done first and we are being called from io_rundown just
	 * prior to the execv in a newly JOBbed off process, the fclose() does an implied fflush() which is
	 * known to do an lseek() which resets the file pointers of any open (flat) files in the parent due
	 * to an arcane interaction between child and parent processes prior to an execv() call.  The fclose
	 * (for stream files) will fail but it will clean up structures orphaned by the close().
	 */
	close(rm_ptr->fildes);
	if (rm_ptr->filstr != NULL)
		FCLOSE(rm_ptr->filstr, fclose_res);
#ifdef __MVS__
	/* z/OS fifo: a fifo may be represented by distinct in/out device structures sharing the same
	 * name; close the other side too if it is a different structure and still open.
	 */
	if (rm_ptr->fifo)
	{
		if (rm_ptr != (iod->pair.out)->dev_sp || rm_ptr != (iod->pair.in)->dev_sp)
		{
			if (rm_ptr != (iod->pair.out)->dev_sp)
			{
				rm_ptr = (iod->pair.out)->dev_sp;
				iod = iod->pair.out;
			} else
			{
				rm_ptr = (iod->pair.in)->dev_sp;
				iod = iod->pair.in;
			}
			assert(NULL != rm_ptr);
			if (dev_closed != iod->state)
			{
				iod->state = dev_closed;
				iod->dollar.zeof = FALSE;
				iod->dollar.x = 0;
				iod->dollar.y = 0;
				rm_ptr->lastop = RM_NOOP;
				assert(rm_ptr->fildes >= 0);
				close(rm_ptr->fildes);
				if (rm_ptr->filstr != NULL)
					fclose(rm_ptr->filstr);
			}
		}
	}
#endif
	return;
}
/* DSE RANGE command: scan database blocks FROM..TO and report those whose first key falls within
 * the given LOWER/UPPER key limits (or, with STAR, index blocks consisting of a lone star-record).
 * Qualifiers: INDEX restricts to index blocks, BUSY/NOBUSY filters on busy/free status, LOST
 * reports only blocks not reachable from the directory tree.  Runs inside crit unless -NOCRIT.
 */
void dse_range(void)
{
	char		lower[256], targ_key[256], upper[256], util_buff[MAX_UTIL_LEN];
	block_id	from, to, blk, blk_child;
	sm_uc_ptr_t	bp, b_top, key_bot, key_top, key_top1, rp, r_top;
	char		level;
	int4		dummy_int, nocrit_present;
	cache_rec_ptr_t	dummy_cr;
	short int	rsize, size, size1;
	int		cnt, dummy, lower_len, util_len, upper_len;
	boolean_t	busy_matters, free, got_lonely_star, index, low, lost, star, up, was_crit, was_hold_onto_crit;

	/* Parse FROM/TO block number limits; local-bitmap blocks are invalid endpoints */
	if (cli_present("FROM") == CLI_PRESENT)
	{
		if (!cli_get_hex("FROM", (uint4 *)&from))
			return;
		if (from < 0 || from > cs_addrs->ti->total_blks || !(from % cs_addrs->hdr->bplmap))
		{
			util_out_print("Error: invalid block number.", TRUE);
			return;
		}
	} else
		from = 1;
	if (cli_present("TO") == CLI_PRESENT)
	{
		if (!cli_get_hex("TO", (uint4 *)&to))
			return;
		if (to < 0 || to > cs_addrs->ti->total_blks || !(to % cs_addrs->hdr->bplmap))
		{
			util_out_print("Error: invalid block number.", TRUE);
			return;
		}
	} else
		to = cs_addrs->ti->total_blks - 1;
	/* Parse key limits and remaining qualifiers */
	if (low = (cli_present("LOWER") == CLI_PRESENT))
	{
		if (!dse_getki(&lower[0], &lower_len, LIT_AND_LEN("LOWER")))
			return;
	}
	if (up = (cli_present("UPPER") == CLI_PRESENT))
	{
		if (!dse_getki(&upper[0], &upper_len, LIT_AND_LEN("UPPER")))
			return;
	}
	star = (cli_present("STAR") == CLI_PRESENT);
	if (!low && !up && !star)
	{
		util_out_print("Must specify star, or a lower or upper key limit.", TRUE);
		return;
	}
	index = (cli_present("INDEX") == CLI_PRESENT);
	lost = (cli_present("LOST") == CLI_PRESENT);
	dummy = cli_present("BUSY");
	if (dummy == CLI_PRESENT)
	{	/* -BUSY: only busy blocks; -NOBUSY: only free blocks; absent: status irrelevant */
		busy_matters = TRUE;
		free = FALSE;
	} else if (dummy == CLI_NEGATED)
		busy_matters = free = TRUE;
	else
		busy_matters = free = FALSE;
	patch_path[0] = get_dir_root();
	cnt = 0;
	was_crit = cs_addrs->now_crit;
	nocrit_present = (CLI_NEGATED == cli_present("CRIT"));
	DSE_GRAB_CRIT_AS_APPROPRIATE(was_crit, was_hold_onto_crit, nocrit_present, cs_addrs, gv_cur_region);
	for (blk = from; blk <= to; blk++)
	{
		if (util_interrupt)
		{	/* Operator ctrl-C: release crit before erroring out */
			DSE_REL_CRIT_AS_APPROPRIATE(was_crit, was_hold_onto_crit, nocrit_present, cs_addrs, gv_cur_region);
			rts_error(VARLSTCNT(1) ERR_CTRLC);
			break;
		}
		if (!(blk % cs_addrs->hdr->bplmap))
			continue;	/* skip local bitmap blocks */
		if (!(bp = t_qread(blk, &dummy_int, &dummy_cr)))
			rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
		level = ((blk_hdr_ptr_t)bp)->levl;
		if (index && (level == 0))
			continue;
		if (busy_matters && (free != dse_is_blk_free(blk, &dummy_int, &dummy_cr)))
			continue;
		/* Clamp b_top to a sane block end regardless of a possibly-corrupt bsiz */
		if (((blk_hdr_ptr_t)bp)->bsiz > cs_addrs->hdr->blk_size)
			b_top = bp + cs_addrs->hdr->blk_size;
		else if (((blk_hdr_ptr_t)bp)->bsiz < SIZEOF(blk_hdr))
			b_top = bp + SIZEOF(blk_hdr);
		else
			b_top = bp + ((blk_hdr_ptr_t)bp)->bsiz;
		/* Bound the first record similarly */
		rp = bp + SIZEOF(blk_hdr);
		GET_SHORT(rsize, &((rec_hdr_ptr_t)rp)->rsiz);
		if (rsize < SIZEOF(rec_hdr))
			r_top = rp + SIZEOF(rec_hdr);
		else
			r_top = rp + rsize;
		if (r_top >= b_top)
			r_top = b_top;
		got_lonely_star = FALSE;
		if (((blk_hdr_ptr_t)bp)->levl)
		{	/* Index block: key ends before the trailing block pointer; a block whose only
			 * record fills it is a lone star-key record.
			 */
			key_top = r_top - SIZEOF(block_id);
			if (star && (r_top == b_top))
				got_lonely_star = TRUE;
		} else
		{	/* Data block: key ends at the double-NUL terminator */
			if (!up && !low)
				continue;
			for (key_top = rp + SIZEOF(rec_hdr); key_top < r_top; )
				if (!*key_top++ && !*key_top++)
					break;
		}
		if (!got_lonely_star)
		{
			key_bot = rp + SIZEOF(rec_hdr);
			size = key_top - key_bot;
			if (size <= 0)
				continue;
			if (size > SIZEOF(targ_key))
				size = SIZEOF(targ_key);
			if (lost)
			{	/* Skip blocks that ARE reachable from the directory tree */
				for (key_top1 = rp + SIZEOF(rec_hdr); key_top1 < r_top; )
					if (!*key_top1++)
						break;
				size1 = key_top1 - rp - SIZEOF(rec_hdr);
				if (size1 > SIZEOF(targ_key))
					size1 = SIZEOF(targ_key);
				patch_find_root_search = TRUE;
				patch_path_count = 1;
				patch_find_blk = blk;
				if (dse_is_blk_in(rp, r_top, size1))
					continue;
			}
			/* Apply the LOWER/UPPER key-range filter to the block's first key */
			if (low && memcmp(lower, key_bot, MIN(lower_len, size)) > 0)
				continue;
			if (up && memcmp(upper, key_bot, MIN(upper_len, size)) < 0)
				continue;
		} else
		{	/* Lone star-record: for LOST, chase the star pointer's child and test that instead */
			got_lonely_star = FALSE;
			if (lost)
			{
				blk_child = *(block_id_ptr_t)key_top;
				if (!(bp = t_qread(blk_child, &dummy_int, &dummy_cr)))
					rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
				if (((blk_hdr_ptr_t)bp)->bsiz > cs_addrs->hdr->blk_size)
					b_top = bp + cs_addrs->hdr->blk_size;
				else if (((blk_hdr_ptr_t)bp)->bsiz < SIZEOF(blk_hdr))
					b_top = bp + SIZEOF(blk_hdr);
				else
					b_top = bp + ((blk_hdr_ptr_t)bp)->bsiz;
				rp = bp + SIZEOF(blk_hdr);
				GET_SHORT(rsize, &((rec_hdr_ptr_t)rp)->rsiz);
				if (rsize < SIZEOF(rec_hdr))
					r_top = rp + SIZEOF(rec_hdr);
				else
					r_top = rp + rsize;
				if (r_top >= b_top)
					r_top = b_top;
				if (((blk_hdr_ptr_t)bp)->levl)
					key_top = r_top - SIZEOF(block_id);
				for (key_top1 = rp + SIZEOF(rec_hdr); key_top1 < r_top; )
					if (!*key_top1++)
						break;
				size1 = key_top1 - rp - SIZEOF(rec_hdr);
				if (size1 > 0)
				{
					if (size1 > SIZEOF(targ_key))
						size1 = SIZEOF(targ_key);
					patch_find_root_search = TRUE;
					patch_path_count = 1;
					patch_find_blk = blk;
					if (dse_is_blk_in(rp, r_top, size1))
						continue;
				}
			}
		}
		if (!cnt++)
			util_out_print("!/Blocks in the specified key range:", TRUE);
		util_out_print("Block: !8XL Level: !2UL", TRUE, blk, level);
	}
	DSE_REL_CRIT_AS_APPROPRIATE(was_crit, was_hold_onto_crit, nocrit_present, cs_addrs, gv_cur_region);
	if (cnt)
		util_out_print("Found !UL blocks", TRUE, cnt);
	else
		util_out_print("None found.", TRUE);
	return;
}
/* dse_m_rest - recursive helper for the DSE MAPS -RESTORE operation (appears so from the
 * surrounding DSE code -- TODO confirm against caller).  Starting at block "blk", it walks
 * every block reachable through downward pointers, marks each referenced block busy in the
 * caller's private copies of the local bit maps (via bml_busy) and decrements the running
 * free-block count (*blks_ptr) once per newly-busied block.
 *
 *	blk		- block number at which to (re)start the traversal
 *	bml_list	- start of the local list of local bit map copies
 *	bml_size	- size of each entry in *bml_list
 *	blks_ptr	- running total of free blocks; decremented for each block found in use
 *	in_dir_tree	- TRUE while traversing the directory tree (whose level-0 blocks still
 *			  contain downward pointers after the key, unlike GVT data blocks)
 *
 * No return value; invalid pointers are reported via util_out_print and skipped.
 */
void dse_m_rest (
	block_id	blk,		/* block number */
	unsigned char	*bml_list,	/* start of local list of local bit maps */
	int4		bml_size,	/* size of each entry in *bml_list */
	sm_vuint_ptr_t	blks_ptr,	/* total free blocks */
	bool		in_dir_tree)
{
	sm_uc_ptr_t	bp, b_top, rp, r_top, bml_ptr, np, ptr;
	unsigned char	util_buff[MAX_UTIL_LEN];
	block_id	next;
	int		util_len;
	int4		dummy_int;
	cache_rec_ptr_t	dummy_cr;
	int4		bml_index;
	short		level, rsize;
	int4		bplmap;

	error_def(ERR_DSEBLKRDFAIL);

	if(!(bp = t_qread (blk, &dummy_int, &dummy_cr)))
		rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
	/* Clamp b_top into [bp + sizeof(blk_hdr), bp + blk_size] so a corrupt bsiz cannot
	 * drive the record scan outside the block buffer.
	 */
	if (((blk_hdr_ptr_t) bp)->bsiz > cs_addrs->hdr->blk_size)
		b_top = bp + cs_addrs->hdr->blk_size;
	else if (((blk_hdr_ptr_t) bp)->bsiz < sizeof(blk_hdr))
		b_top = bp + sizeof(blk_hdr);
	else
		b_top = bp + ((blk_hdr_ptr_t) bp)->bsiz;
	level = ((blk_hdr_ptr_t)bp)->levl;
	bplmap = cs_addrs->hdr->bplmap;
	for (rp = bp + sizeof (blk_hdr); rp < b_top ;rp = r_top)
	{
		if (in_dir_tree || level > 1)	/* reread block because it may have been flushed from read
						 * cache due to LRU buffer scheme and reads in recursive
						 * calls to dse_m_rest.
						 */
		{
			if (!(np = t_qread(blk,&dummy_int,&dummy_cr)))
				rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
			if (np != bp)
			{	/* buffer moved: rebase all cursors into the new copy at the same offsets */
				b_top = np + (b_top - bp);
				rp = np + (rp - bp);
				r_top = np + (r_top - bp);
				bp = np;
			}
		}
		GET_SHORT(rsize,&((rec_hdr_ptr_t)rp)->rsiz);
		r_top = rp + rsize;
		if (r_top > b_top)
			r_top = b_top;
		/* Record too small to hold a header plus a downward pointer: stop scanning this block */
		if (r_top - rp < (sizeof (rec_hdr) + sizeof (block_id)))
			break;
		if (in_dir_tree && level == 0)
		{	/* Directory-tree leaf: the block pointer follows the key, which is terminated by
			 * two consecutive zero bytes.
			 * NOTE(review): this scan has no r_top bound; a record missing the double-zero
			 * terminator would walk past the record -- presumably guaranteed well-formed
			 * upstream; verify.
			 */
			for (ptr = rp + sizeof(rec_hdr); ; )
			{
				if (*ptr++ == 0 && *ptr++ == 0)
					break;
			}
			GET_LONG(next,ptr);
		} else
			GET_LONG(next,r_top - sizeof (block_id));	/* index record: pointer is the last 4 bytes */
		/* Reject pointers that are negative, beyond the file, or aimed at a bit map block
		 * (block numbers that are exact multiples of bplmap are local bit maps).
		 */
		if (next < 0 || next >= cs_addrs->ti->total_blks || (next / bplmap * bplmap == next))
		{
			memcpy(util_buff,"Invalid pointer in block ",25);
			util_len = 25;
			util_len += i2hex_nofill(blk, &util_buff[util_len], 8);
			memcpy(&util_buff[util_len], " record offset ",15);
			util_len += 15;
			util_len += i2hex_nofill((int)(rp - bp), &util_buff[util_len], 4);
			util_buff[util_len] = 0;
			util_out_print((char*)util_buff,TRUE);
			continue;
		}
		bml_index = next / bplmap;
		bml_ptr = bml_list + bml_index * bml_size;
		/* bml_busy marks block "next" busy in the private map copy; a true return appears to
		 * mean the block was not already marked (first visit), so count it and recurse.
		 */
		if (bml_busy(next - next / bplmap * bplmap, bml_ptr + sizeof(blk_hdr)))
		{
			*blks_ptr = *blks_ptr - 1;
			if (((blk_hdr_ptr_t) bp)->levl > 1)
			{
				dse_m_rest (next, bml_list, bml_size, blks_ptr, in_dir_tree);
			} else if (in_dir_tree)
			{	/* descending from a directory-tree level-1 block reaches level-0 directory
				 * blocks (still in_dir_tree); from level 0 we reach GVT roots (not in_dir_tree),
				 * hence passing levl (1 or 0) as the flag.
				 */
				assert(((blk_hdr_ptr_t) bp)->levl == 0 || ((blk_hdr_ptr_t) bp)->levl == 1);
				dse_m_rest (next, bml_list, bml_size, blks_ptr, ((blk_hdr_ptr_t)bp)->levl);
			}
		}
	}
	return;
}
uint4 gdsfilext(uint4 blocks, uint4 filesize, boolean_t trans_in_prog) { sm_uc_ptr_t old_base[2], mmap_retaddr; boolean_t was_crit, is_mm; int result, save_errno, status; DEBUG_ONLY(int first_save_errno); uint4 new_bit_maps, bplmap, map, new_blocks, new_total, max_tot_blks, old_total; uint4 jnl_status; gtm_uint64_t avail_blocks, mmap_sz; off_t new_eof, new_size; trans_num curr_tn; unix_db_info *udi; inctn_opcode_t save_inctn_opcode; int4 prev_extend_blks_to_upgrd; jnl_private_control *jpc; jnl_buffer_ptr_t jbp; cache_rec_ptr_t cr; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; assert(!IS_DSE_IMAGE); assert((cs_addrs->nl == NULL) || (process_id != cs_addrs->nl->trunc_pid)); /* mu_truncate shouldn't extend file... */ assert(!process_exiting); DEBUG_ONLY(old_base[0] = old_base[1] = NULL); assert(!gv_cur_region->read_only); udi = FILE_INFO(gv_cur_region); is_mm = (dba_mm == cs_addrs->hdr->acc_meth); # if !defined(MM_FILE_EXT_OK) if (!udi->grabbed_access_sem && is_mm) return (uint4)(NO_FREE_SPACE); /* should this be changed to show extension not allowed ? */ # endif /* Both blocks and total blocks are unsigned ints so make sure we aren't asking for huge numbers that will overflow and end up doing silly things. */ assert((blocks <= (MAXTOTALBLKS(cs_data) - cs_data->trans_hist.total_blks)) || WBTEST_ENABLED(WBTEST_FILE_EXTEND_ERROR)); # if defined(__sun) || defined(__hpux) cs_data->defer_allocate = TRUE; # endif if (!blocks && (cs_data->defer_allocate || (TRANS_IN_PROG_TRUE == trans_in_prog))) return (uint4)(NO_FREE_SPACE); /* should this be changed to show extension not enabled ? */ bplmap = cs_data->bplmap; /* New total of non-bitmap blocks will be number of current, non-bitmap blocks, plus new blocks desired * There are (bplmap - 1) non-bitmap blocks per bitmap, so add (bplmap - 2) to number of non-bitmap blocks * and divide by (bplmap - 1) to get total number of bitmaps for expanded database. 
(must round up in this * manner as every non-bitmap block must have an associated bitmap) * Current number of bitmaps is (total number of current blocks + bplmap - 1) / bplmap. * Subtract current number of bitmaps from number needed for expanded database to get number of new bitmaps needed. */ new_bit_maps = DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks - DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks, bplmap) + blocks, bplmap - 1) - DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks, bplmap); new_blocks = blocks + new_bit_maps; assert((0 < (int)new_blocks) || (!cs_data->defer_allocate && (0 == new_blocks))); if (new_blocks + cs_data->trans_hist.total_blks > MAXTOTALBLKS(cs_data)) { assert(WBTEST_ENABLED(WBTEST_FILE_EXTEND_ERROR)); send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(1) ERR_TOTALBLKMAX); return (uint4)(NO_FREE_SPACE); } if (0 != (save_errno = disk_block_available(udi->fd, &avail_blocks, FALSE))) { send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), save_errno); rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), save_errno); } else { if (!(gtmDebugLevel & GDL_IgnoreAvailSpace)) { /* Bypass this space check if debug flag above is on. Allows us to create a large sparce DB * in space it could never fit it if wasn't sparse. Needed for some tests. */ avail_blocks = avail_blocks / (cs_data->blk_size / DISK_BLOCK_SIZE); if ((blocks * EXTEND_WARNING_FACTOR) > avail_blocks) { if (blocks > (uint4)avail_blocks) { if (!INST_FREEZE_ON_NOSPC_ENABLED(cs_addrs)) return (uint4)(NO_FREE_SPACE); else send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(6) MAKE_MSG_WARNING(ERR_NOSPACEEXT), 4, DB_LEN_STR(gv_cur_region), new_blocks, (uint4)avail_blocks); } else send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DSKSPACEFLOW, 3, DB_LEN_STR(gv_cur_region), (uint4)(avail_blocks - ((new_blocks <= avail_blocks) ? 
new_blocks : 0))); } } } # ifdef DEBUG if (WBTEST_ENABLED(WBTEST_MM_CONCURRENT_FILE_EXTEND) && dollar_tlevel && !MEMCMP_LIT(gv_cur_region->rname, "DEFAULT")) { SYSTEM("$gtm_dist/mumps -run $gtm_wbox_mrtn"); assert(1 == cs_addrs->nl->wbox_test_seq_num); /* should have been set by mubfilcpy */ cs_addrs->nl->wbox_test_seq_num = 2; /* signal mupip backup to stop sleeping in mubfilcpy */ } # endif /* From here on, we need to use GDSFILEXT_CLNUP before returning to the caller */ was_crit = cs_addrs->now_crit; assert(!cs_addrs->hold_onto_crit || was_crit); /* If we are coming from mupip_extend (which gets crit itself) we better have waited for any unfreezes to occur. * If we are coming from online rollback (when that feature is available), we will come in holding crit and in * the final retry. In that case too, we expect to have waited for unfreezes to occur in the caller itself. * Therefore if we are coming in holding crit from MUPIP, we expect the db to be unfrozen so no need to wait for * freeze. * If we are coming from GT.M and final retry (in which case we come in holding crit) we expect to have waited * for any unfreezes (by invoking tp_crit_all_regions) to occur (TP or non-TP) before coming into this * function. However, there is one exception. In the final retry, if tp_crit_all_regions notices that * at least one of the participating regions did ONLY READs, it will not wait for any freeze on THAT region * to complete before grabbing crit. Later, in the final retry, if THAT region did an update which caused * op_tcommit to invoke bm_getfree->gdsfilext, then we would have come here with a frozen region on which * we hold crit. 
*/ assert(!was_crit || !FROZEN_HARD(cs_data) || (dollar_tlevel && (CDB_STAGNATE <= t_tries))); /* * If we are in the final retry and already hold crit, it is possible that csa->nl->wc_blocked is also set to TRUE * (by a concurrent process in phase2 which encountered an error in the midst of commit and secshr_db_clnup * finished the job for it). In this case we do NOT want to invoke wcs_recover as that will update the "bt" * transaction numbers without correspondingly updating the history transaction numbers (effectively causing * a cdb_sc_blkmod type of restart). Therefore do NOT call grab_crit (which unconditionally invokes wcs_recover) * if we already hold crit. */ if (!was_crit) { for ( ; ; ) { grab_crit(gv_cur_region); if (FROZEN_CHILLED(cs_data)) DO_CHILLED_AUTORELEASE(cs_addrs, cs_data); if (!FROZEN(cs_data) && !IS_REPL_INST_FROZEN) break; rel_crit(gv_cur_region); while (FROZEN(cs_data) || IS_REPL_INST_FROZEN) { hiber_start(1000); if (FROZEN_CHILLED(cs_data) && CHILLED_AUTORELEASE(cs_data)) break; } } } else if (FROZEN_HARD(cs_data) && dollar_tlevel) { /* We don't want to continue with file extension as explained above. Hence return with an error code which * op_tcommit will recognize (as a cdb_sc_needcrit/cdb_sc_instancefreeze type of restart) and restart accordingly. */ assert(CDB_STAGNATE <= t_tries); GDSFILEXT_CLNUP; return (uint4)FINAL_RETRY_FREEZE_PROG; } else WAIT_FOR_REGION_TO_UNCHILL(cs_addrs, cs_data); if (IS_REPL_INST_FROZEN && trans_in_prog) { assert(CDB_STAGNATE <= t_tries); GDSFILEXT_CLNUP; return (uint4)FINAL_RETRY_INST_FREEZE; } assert(cs_addrs->ti->total_blks == cs_data->trans_hist.total_blks); old_total = cs_data->trans_hist.total_blks; if (old_total != filesize) { /* Somebody else has already extended it, since we are in crit, this is trust-worthy. 
However, in case of MM, * we still need to remap the database */ assert((old_total > filesize) || !is_mm); /* For BG, someone else could have truncated or extended - we have no idea */ GDSFILEXT_CLNUP; return (SS_NORMAL); } if (trans_in_prog && SUSPICIOUS_EXTEND) { if (!was_crit) { GDSFILEXT_CLNUP; return (uint4)(EXTEND_SUSPECT); } /* If free_blocks counter is not ok, then correct it. Do the check again. If still fails, then it means we held * crit through bm_getfree into gdsfilext and still didn't get it right. */ assertpro(!is_free_blks_ctr_ok() && !SUSPICIOUS_EXTEND); } if (JNL_ENABLED(cs_data)) { if (!jgbl.dont_reset_gbl_jrec_time) SET_GBL_JREC_TIME; /* needed before jnl_ensure_open as that can write jnl records */ jpc = cs_addrs->jnl; jbp = jpc->jnl_buff; /* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order * of jnl records. This needs to be done BEFORE the jnl_ensure_open as that could write * journal records (if it decides to switch to a new journal file). 
*/ ADJUST_GBL_JREC_TIME(jgbl, jbp); jnl_status = jnl_ensure_open(gv_cur_region, cs_addrs); if (jnl_status) { GDSFILEXT_CLNUP; send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(cs_data), DB_LEN_STR(gv_cur_region)); return (uint4)(NO_FREE_SPACE); /* should have better return status */ } } if (is_mm) { cs_addrs->nl->mm_extender_pid = process_id; status = wcs_wtstart(gv_cur_region, 0, NULL, NULL); cs_addrs->nl->mm_extender_pid = 0; assertpro(SS_NORMAL == status); old_base[0] = cs_addrs->db_addrs[0]; old_base[1] = cs_addrs->db_addrs[1]; cs_addrs->db_addrs[0] = NULL; /* don't rely on it until the mmap below */ # ifdef _AIX status = shmdt(old_base[0] - BLK_ZERO_OFF(cs_data->start_vbn)); # else status = munmap((caddr_t)old_base[0], (size_t)(old_base[1] - old_base[0])); # endif if (0 != status) { save_errno = errno; GDSFILEXT_CLNUP; send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(12) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), ERR_SYSCALL, 5, LEN_AND_STR(MEM_UNMAP_SYSCALL), CALLFROM, save_errno); return (uint4)(NO_FREE_SPACE); } } else { /* Due to concurrency issues, it is possible some process had issued a disk read of the GDS block# corresponding * to "old_total" right after a truncate wrote a GDS-block of zeros on disk (to signal end of the db file). * If so, the global buffer containing this block needs to be invalidated now as part of the extend. If not, it is * possible the EOF block on disk is now going to be overwritten by a properly initialized bitmap block (as part * of the gdsfilext below) while the global buffer continues to have an incorrect copy of that bitmap block and * this in turn would cause XXXX failures due to a bad bitmap block in shared memory. 
(GTM-7519) */ cr = db_csh_get((block_id)old_total); if ((NULL != cr) && ((cache_rec_ptr_t)CR_NOTVALID != cr)) { assert((0 == cr->dirty) && (0 == cr->bt_index) && !cr->stopped); cr->cycle++; cr->blk = CR_BLKEMPTY; } } CHECK_TN(cs_addrs, cs_data, cs_data->trans_hist.curr_tn); /* can issue rts_error TNTOOLARGE */ new_total = old_total + new_blocks; new_eof = BLK_ZERO_OFF(cs_data->start_vbn) + ((off_t)new_total * cs_data->blk_size); # if !defined(__sun) && !defined(__hpux) if (!cs_data->defer_allocate) { new_size = new_eof + cs_data->blk_size; save_errno = posix_fallocate(udi->fd, 0, new_size); DEBUG_ONLY(first_save_errno = save_errno); if ((ENOSPC == save_errno) && IS_GTM_IMAGE) save_errno = extend_wait_for_fallocate(udi, new_size); if (0 != save_errno) { GDSFILEXT_CLNUP; assert(ENOSPC == save_errno); if (ENOSPC != save_errno) send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_PREALLOCATEFAIL, 2, DB_LEN_STR(gv_cur_region), save_errno); return (uint4)(NO_FREE_SPACE); } } # endif save_errno = db_write_eof_block(udi, udi->fd, cs_data->blk_size, new_eof, &(TREF(dio_buff))); if ((ENOSPC == save_errno) && IS_GTM_IMAGE) save_errno = extend_wait_for_write(udi, cs_data->blk_size, new_eof); if (0 != save_errno) { GDSFILEXT_CLNUP; if (ENOSPC != save_errno) send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), save_errno); return (uint4)(NO_FREE_SPACE); } if (WBTEST_ENABLED(WBTEST_FILE_EXTEND_INTERRUPT_1)) { LONG_SLEEP(600); assert(FALSE); } /* Ensure the EOF and metadata get to disk BEFORE any bitmap writes. Otherwise, the file size could no longer reflect * a proper extent and subsequent invocations of gdsfilext could corrupt the database. 
*/ if (!IS_STATSDB_CSA(cs_addrs)) { GTM_DB_FSYNC(cs_addrs, udi->fd, status); assert(0 == status); if (0 != status) { GDSFILEXT_CLNUP; send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(8) ERR_DBFILERR, 5, RTS_ERROR_LITERAL("fsync1()"), CALLFROM, status); return (uint4)(NO_FREE_SPACE); } } if (WBTEST_ENABLED(WBTEST_FILE_EXTEND_INTERRUPT_2)) { LONG_SLEEP(600); assert(FALSE); /* Should be killed before that */ } DEBUG_ONLY(prev_extend_blks_to_upgrd = cs_data->blks_to_upgrd;)
/* mu_swap_root - MUPIP REORG helper that moves a global variable tree's root block (and then
 * the blocks on its directory-tree branch) toward the front of the database file, as part of
 * defragmentation/truncate preparation.
 *
 *	gl_ptr			- global list entry naming the <global,region> to operate on
 *	root_swap_statistic_ptr	- incremented by one when the GVT root block is actually swapped
 *
 * Returns nothing.  Silently returns for read-only regions, non-existent globals (possible
 * after an online rollback) and MM access method regions (mu_truncate later issues MUTRUNCNOTBG).
 *
 * Fix (review): in the second gvcst_search call the restart code was compared against
 * cdb_sc_normal but never assigned to "status", so the subsequent t_retry(status) passed a
 * stale/uninitialized value.  Now assigns status, matching the parallel gv_altkey search above.
 */
void mu_swap_root(glist *gl_ptr, int *root_swap_statistic_ptr)
{
	sgmnt_data_ptr_t	csd;
	sgmnt_addrs		*csa;
	node_local_ptr_t	cnl;
	srch_hist		*dir_hist_ptr, *gvt_hist_ptr;
	gv_namehead		*save_targ;	/* retained: possibly referenced by project macros -- TODO confirm */
	block_id		root_blk_id, child_blk_id, free_blk_id;
	sm_uc_ptr_t		root_blk_ptr, child_blk_ptr;
	kill_set		kill_set_list;
	trans_num		curr_tn, ret_tn;
	int			level, root_blk_lvl;
	block_id		save_root;
	boolean_t		tn_aborted;
	unsigned int		lcl_t_tries;
	enum cdb_sc		status;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(mu_reorg_process);
	gv_target = gl_ptr->gvt;
	gv_target->root = 0;		/* reset root so we recompute it in DO_OP_GVNAME below */
	gv_target->clue.end = 0;	/* reset clue since reorg action on later globals might have invalidated it */
	reorg_gv_target->gvname.var_name = gv_target->gvname.var_name;	/* needed by SAVE_ROOTSRCH_ENTRY_STATE */
	dir_hist_ptr = gv_target->alt_hist;
	gvt_hist_ptr = &(gv_target->hist);
	inctn_opcode = inctn_invalid_op;
	DO_OP_GVNAME(gl_ptr);	/* sets gv_target/gv_currkey/gv_cur_region/cs_addrs/cs_data for <globalname,reg> */
	csa = cs_addrs;
	cnl = csa->nl;
	csd = cs_data;	/* Be careful to keep csd up to date. With MM, cs_data can change, and
			 * dereferencing an older copy can result in a SIG-11.
			 */
	if (gv_cur_region->read_only)
		return;	/* Cannot proceed for read-only data files */
	if (0 == gv_target->root)
	{	/* Global does not exist (online rollback). No problem. */
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr);
		return;
	}
	if (dba_mm == csd->acc_meth)
		/* return for now without doing any swapping operation because later mu_truncate
		 * is going to issue the MUTRUNCNOTBG message.
		 */
		return;
	SET_GV_ALTKEY_TO_GBLNAME_FROM_GV_CURRKEY;	/* set up gv_altkey to be just the gblname */
	/* ------------ Swap root block of global variable tree --------- */
	t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
	for (;;)
	{
		curr_tn = csa->ti->curr_tn;
		kill_set_list.used = 0;
		/* Search the directory tree using the directory root; save/restore gv_target->root
		 * around the search since gvcst_search operates on the current gv_target.
		 */
		save_root = gv_target->root;
		gv_target->root = csa->dir_tree->root;
		gv_target->clue.end = 0;
		if (cdb_sc_normal != (status = gvcst_search(gv_altkey, dir_hist_ptr)))
		{	/* Assign directory tree path to dir_hist_ptr */
			assert(t_tries < CDB_STAGNATE);
			gv_target->root = save_root;
			t_retry(status);
			continue;
		}
		gv_target->root = save_root;
		gv_target->clue.end = 0;
		/* BUGFIX: capture the restart code so t_retry below gets the real reason, not a
		 * stale value from the previous search.
		 */
		if (cdb_sc_normal != (status = gvcst_search(gv_currkey, NULL)))
		{	/* Assign global variable tree path to gvt_hist_ptr */
			assert(t_tries < CDB_STAGNATE);
			t_retry(status);
			continue;
		}
		/* We've already searched the directory tree in op_gvname/t_retry and obtained gv_target->root.
		 * Should restart with gvtrootmod2 if they don't agree. gvcst_root_search is the final arbiter.
		 * Really need that for debug info and also should assert(gv_currkey is global name).
		 */
		root_blk_lvl = gvt_hist_ptr->depth;
		assert(root_blk_lvl > 0);
		root_blk_ptr = gvt_hist_ptr->h[root_blk_lvl].buffaddr;
		root_blk_id = gvt_hist_ptr->h[root_blk_lvl].blk_num;
		assert((CDB_STAGNATE > t_tries) || (gv_target->root == gvt_hist_ptr->h[root_blk_lvl].blk_num));
		free_blk_id = swap_root_or_directory_block(0, root_blk_lvl, dir_hist_ptr, root_blk_id,
				root_blk_ptr, &kill_set_list, curr_tn);
		if (RETRY_SWAP == free_blk_id)
			continue;
		else if (ABORT_SWAP == free_blk_id)
			break;
		update_trans = UPDTRNS_DB_UPDATED_MASK;
		inctn_opcode = inctn_mu_reorg;
		assert(1 == kill_set_list.used);
		need_kip_incr = TRUE;
		if (!csa->now_crit)
			WAIT_ON_INHIBIT_KILLS(cnl, MAXWAIT2KILL);	/* wait for any concurrent kills-in-progress */
		DEBUG_ONLY(lcl_t_tries = t_tries);
		TREF(in_mu_swap_root_state) = MUSWP_INCR_ROOT_CYCLE;
		assert(!TREF(in_gvcst_redo_root_search));
		if ((trans_num)0 == (ret_tn = t_end(gvt_hist_ptr, dir_hist_ptr, TN_NOT_SPECIFIED)))
		{	/* Commit failed/restarted: undo commit-time state and retry (or bail if the
			 * global vanished due to an online rollback).
			 */
			TREF(in_mu_swap_root_state) = MUSWP_NONE;
			need_kip_incr = FALSE;
			assert(NULL == kip_csa);
			ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted);
			if (tn_aborted)
			{	/* Not an error if the global (that once existed) doesn't exist anymore (ROLLBACK) */
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len,
						GNAME(gl_ptr).addr);
				return;
			}
			continue;
		}
		TREF(in_mu_swap_root_state) = MUSWP_NONE;
		/* Note that this particular process's csa->root_search_cycle is now behind cnl->root_search_cycle.
		 * This forces a cdb_sc_gvtrootmod2 restart in gvcst_bmp_mark_free below.
		 */
		assert(cnl->root_search_cycle > csa->root_search_cycle);
		gvcst_kill_sort(&kill_set_list);
		GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg, inctn_bmp_mark_free_mu_reorg,
				inctn_opcode, csa);
		DECR_KIP(csd, csa, kip_csa);
		*root_swap_statistic_ptr += 1;
		break;
	}
	/* ------------ Swap blocks in branch of directory tree --------- */
	for (level = 0; level <= MAX_BT_DEPTH; level++)
	{
		t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
		for (;;)
		{
			curr_tn = csa->ti->curr_tn;
			kill_set_list.used = 0;
			save_root = gv_target->root;
			gv_target->root = csa->dir_tree->root;
			gv_target->clue.end = 0;
			if (cdb_sc_normal != (status = gvcst_search(gv_altkey, dir_hist_ptr)))
			{	/* assign branch path of directory tree into dir_hist_ptr */
				assert(t_tries < CDB_STAGNATE);
				gv_target->root = save_root;
				t_retry(status);
				continue;
			}
			gv_target->root = save_root;
			gv_target->clue.end = 0;
			if (level >= dir_hist_ptr->depth)
			{	/* done: no more directory-tree levels on this branch */
				t_abort(gv_cur_region, csa);
				return;
			}
			child_blk_ptr = dir_hist_ptr->h[level].buffaddr;
			child_blk_id = dir_hist_ptr->h[level].blk_num;
			assert(csa->dir_tree->root != child_blk_id);
			free_blk_id = swap_root_or_directory_block(level + 1, level, dir_hist_ptr, child_blk_id,
					child_blk_ptr, &kill_set_list, curr_tn);
			if (level == 0)
				/* set level as 1 to mark this kill set is for a level-0 block in the directory tree.
				 * The kill-set level later will be used in gvcst_bmp_markfree to assign a special
				 * value to cw_set_element, which will be eventually used by t_end to write the
				 * block to snapshot.
				 */
				kill_set_list.blk[kill_set_list.used - 1].level = 1;
			if (RETRY_SWAP == free_blk_id)
				continue;
			else if (ABORT_SWAP == free_blk_id)
				break;
			update_trans = UPDTRNS_DB_UPDATED_MASK;
			inctn_opcode = inctn_mu_reorg;
			assert(1 == kill_set_list.used);
			need_kip_incr = TRUE;
			if (!csa->now_crit)
				WAIT_ON_INHIBIT_KILLS(cnl, MAXWAIT2KILL);
			DEBUG_ONLY(lcl_t_tries = t_tries);
			TREF(in_mu_swap_root_state) = MUSWP_DIRECTORY_SWAP;
			if ((trans_num)0 == (ret_tn = t_end(dir_hist_ptr, NULL, TN_NOT_SPECIFIED)))
			{
				TREF(in_mu_swap_root_state) = MUSWP_NONE;
				need_kip_incr = FALSE;
				assert(NULL == kip_csa);
				continue;
			}
			TREF(in_mu_swap_root_state) = MUSWP_NONE;
			gvcst_kill_sort(&kill_set_list);
			TREF(in_mu_swap_root_state) = MUSWP_FREE_BLK;
			GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg, inctn_bmp_mark_free_mu_reorg,
					inctn_opcode, csa);
			TREF(in_mu_swap_root_state) = MUSWP_NONE;
			DECR_KIP(csd, csa, kip_csa);
			break;
		}
	}
	return;
}
void gv_select(char *cli_buff, int n_len, boolean_t freeze, char opname[], glist *gl_head, int *reg_max_rec, int *reg_max_key, int *reg_max_blk) { bool stashed = FALSE; int num_quote, len, gmap_size, new_gmap_size, estimated_entries, count, rslt; char *ptr, *ptr1, *c; mstr gmap[512], *gmap_ptr, *gmap_ptr_base, gmap_beg, gmap_end; mval val, curr_gbl_name; glist *gl_tail, *gl_ptr; #ifdef GTM64 hash_table_int8 ext_hash; ht_ent_int8 *tabent; #else hash_table_int4 ext_hash; ht_ent_int4 *tabent; #endif /* GTM64 */ error_def(ERR_FREEZE); error_def(ERR_DBRDONLY); error_def(ERR_SELECTSYNTAX); error_def(ERR_MUNOFINISH); error_def(ERR_MUNOACTION); error_def(ERR_FREEZECTRL); memset(gmap, 0, SIZEOF(gmap)); gmap_size = SIZEOF(gmap) / SIZEOF(gmap[0]); gmap_ptr_base = &gmap[0]; /* "estimated_entries" is a conservative estimate of the # of entries that could be used up in the gmap array */ estimated_entries = 1; /* take into account the NULL gmap entry at the end of the array */ for (ptr = cli_buff; *ptr; ptr = ptr1) { for (ptr1 = ptr; ; ptr1++) { if (',' == *ptr1) { len = (int)(ptr1 - ptr); ptr1++; break; } else if (!*ptr1) { len = (int)(ptr1 - ptr); break; } } gmap_beg.addr = ptr; c = gmap_beg.addr + len - 1; num_quote = 0; while ('"' == *c) { len--; c--; num_quote++; } if (0 >= len) { gtm_putmsg(VARLSTCNT(4) ERR_SELECTSYNTAX, 2, LEN_AND_STR(opname)); mupip_exit(ERR_MUNOACTION); } c = gmap_beg.addr; while (0 < num_quote) { if ('"' == *c) { c++; len--; } else { gtm_putmsg(VARLSTCNT(4) ERR_SELECTSYNTAX, 2, LEN_AND_STR(opname)); mupip_exit(ERR_MUNOACTION); } num_quote--; } gmap_beg.addr = c; if ('^' == *c) { gmap_beg.addr++; len--; } gmap_beg.len = len; c = mu_extr_ident(&gmap_beg); len -= INTCAST(c - gmap_beg.addr); assert(len >= 0); if (0 == len) gmap_end = gmap_beg; else if (gmap_beg.len == 1 && '*' == *c) { gmap_beg.addr = (char*)&percent_lit; gmap_beg.len = SIZEOF(percent_lit); gmap_end.addr = (char*)&tilde_lit; gmap_end.len = SIZEOF(tilde_lit); } else if (1 == len && '*' == 
*c) { gmap_end = gmap_beg; gmap_beg.len--; *c = '~'; } else if (':' != *c) { gtm_putmsg(VARLSTCNT(4) ERR_SELECTSYNTAX, 2, LEN_AND_STR(opname)); mupip_exit(ERR_MUNOACTION); } else { gmap_beg.len = INTCAST(c - gmap_beg.addr); c++; gmap_end.addr = c; gmap_end.len = len - 1; if ('^' == *c) { gmap_end.addr++; gmap_end.len--; } c = mu_extr_ident(&gmap_end); MSTR_CMP(gmap_beg, gmap_end, rslt); if (((c - gmap_end.addr) != gmap_end.len) || (0 < rslt)) { gtm_putmsg(VARLSTCNT(4) ERR_SELECTSYNTAX, 2, LEN_AND_STR(opname)); mupip_exit(ERR_MUNOACTION); } } /* "estimated_entries" is the maximum number of entries that could be used up in the gmap array including the * next global_map call. The actual number of used entries could be much lower than this. * But since determining the actual number would mean scanning the gmap array for the first NULL pointer (a * performance overhead), we do an approximate check instead. */ estimated_entries += MAX_GMAP_ENTRIES_PER_ITER; if (estimated_entries >= gmap_size) { /* Current gmap array does not have enough space. Double size before calling global_map */ new_gmap_size = gmap_size * 2; /* double size of gmap array */ gmap_ptr = (mstr *)malloc(SIZEOF(mstr) * new_gmap_size); memcpy(gmap_ptr, gmap_ptr_base, SIZEOF(mstr) * gmap_size); if (gmap_ptr_base != &gmap[0]) free(gmap_ptr_base); gmap_size = new_gmap_size; gmap_ptr_base = gmap_ptr; } global_map(gmap_ptr_base, &gmap_beg, &gmap_end); DEBUG_ONLY( count = 1; for (gmap_ptr = gmap_ptr_base; gmap_ptr->addr; gmap_ptr++) count++; assert(count < gmap_size); ) } if (freeze) { GTM64_ONLY(init_hashtab_int8(&ext_hash, 0, HASHTAB_COMPACT, HASHTAB_SPARE_TABLE);) NON_GTM64_ONLY(init_hashtab_int4(&ext_hash, 0, HASHTAB_COMPACT, HASHTAB_SPARE_TABLE);) }
trans_num gvcst_bmp_mark_free(kill_set *ks) { block_id bit_map, next_bm, *updptr; blk_ident *blk, *blk_top, *nextblk; trans_num ctn, start_db_fmt_tn; unsigned int len; # if defined(UNIX) && defined(DEBUG) unsigned int lcl_t_tries; # endif int4 blk_prev_version; srch_hist alt_hist; trans_num ret_tn = 0; boolean_t visit_blks; srch_blk_status bmphist; cache_rec_ptr_t cr; enum db_ver ondsk_blkver; enum cdb_sc status; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; TREF(in_gvcst_bmp_mark_free) = TRUE; assert(inctn_bmp_mark_free_gtm == inctn_opcode || inctn_bmp_mark_free_mu_reorg == inctn_opcode); /* Note down the desired_db_format_tn before you start relying on cs_data->fully_upgraded. * If the db is fully_upgraded, take the optimal path that does not need to read each block being freed. * But in order to detect concurrent desired_db_format changes, note down the tn (when the last format change occurred) * before the fully_upgraded check and after having noted down the database current_tn. * If they are the same, then we are guaranteed no concurrent desired_db_format change occurred. * If they are not, then fall through to the non-optimal path where each to-be-killed block has to be visited. * The reason we need to visit every block in case desired_db_format changes is to take care of the case where * MUPIP REORG DOWNGRADE concurrently changes a block that we are about to free. */ start_db_fmt_tn = cs_data->desired_db_format_tn; visit_blks = (!cs_data->fully_upgraded); /* Local evaluation */ assert(!visit_blks || (visit_blks && dba_bg == cs_addrs->hdr->acc_meth)); /* must have blks_to_upgrd == 0 for non-BG */ assert(!dollar_tlevel); /* Should NOT be in TP now */ blk = &ks->blk[0]; blk_top = &ks->blk[ks->used]; if (!visit_blks) { /* Database has been completely upgraded. Free all blocks in one bitmap as part of one transaction. 
*/ assert(cs_data->db_got_to_v5_once); /* assert all V4 fmt blocks (including RECYCLED) have space for V5 upgrade */ inctn_detail.blknum_struct.blknum = 0; /* to indicate no adjustment to "blks_to_upgrd" necessary */ /* If any of the mini transaction below restarts because of an online rollback, we don't want the application * refresh to happen (like $ZONLNRLBK++ or rts_error(DBROLLEDBACK). This is because, although we are currently in * non-tp (dollar_tleve = 0), we could actually be in a TP transaction and have actually faked dollar_tlevel. In * such a case, we should NOT * be issuing a DBROLLEDBACK error as TP transactions are supposed to just restart in * case of an online rollback. So, set the global variable that gtm_onln_rlbk_clnup can check and skip doing the * application refresh, but will reset the clues. The next update will see the cycle mismatch and will accordingly * take the right action. */ for ( ; blk < blk_top; blk = nextblk) { if (0 != blk->flag) { nextblk = blk + 1; continue; } assert(0 < blk->block); assert((int4)blk->block < cs_addrs->ti->total_blks); bit_map = ROUND_DOWN2((int)blk->block, BLKS_PER_LMAP); next_bm = bit_map + BLKS_PER_LMAP; CHECK_AND_RESET_UPDATE_ARRAY; /* reset update_array_ptr to update_array */ /* Scan for the next local bitmap */ updptr = (block_id *)update_array_ptr; for (nextblk = blk; (0 == nextblk->flag) && (nextblk < blk_top) && ((block_id)nextblk->block < next_bm); ++nextblk) { assert((block_id)nextblk->block - bit_map); *updptr++ = (block_id)nextblk->block - bit_map; } len = (unsigned int)((char *)nextblk - (char *)blk); update_array_ptr = (char *)updptr; alt_hist.h[0].blk_num = 0; /* need for calls to T_END for bitmaps */ alt_hist.h[0].blk_target = NULL; /* need to initialize for calls to T_END */ /* the following assumes SIZEOF(blk_ident) == SIZEOF(int) */ assert(SIZEOF(blk_ident) == SIZEOF(int)); *(int *)update_array_ptr = 0; t_begin(ERR_GVKILLFAIL, UPDTRNS_DB_UPDATED_MASK); for (;;) { ctn = cs_addrs->ti->curr_tn; 
/* Need a read fence before reading fields from cs_data as we are reading outside * of crit and relying on this value to detect desired db format state change. */ SHM_READ_MEMORY_BARRIER; if (start_db_fmt_tn != cs_data->desired_db_format_tn) { /* Concurrent db format change has occurred. Need to visit every block to be killed * to determine its block format. Fall through to the non-optimal path below */ ret_tn = 0; break; } bmphist.blk_num = bit_map; if (NULL == (bmphist.buffaddr = t_qread(bmphist.blk_num, (sm_int_ptr_t)&bmphist.cycle, &bmphist.cr))) { t_retry((enum cdb_sc)rdfail_detail); continue; } t_write_map(&bmphist, (uchar_ptr_t)update_array, ctn, -(int4)(nextblk - blk)); UNIX_ONLY(DEBUG_ONLY(lcl_t_tries = t_tries)); if ((trans_num)0 == (ret_tn = t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))) { # ifdef UNIX assert((CDB_STAGNATE == t_tries) || (lcl_t_tries == t_tries - 1)); status = LAST_RESTART_CODE; if ((cdb_sc_onln_rlbk1 == status) || (cdb_sc_onln_rlbk2 == status) || TREF(rlbk_during_redo_root)) { /* t_end restarted due to online rollback. Discard bitmap free-up and return control * to the application. But, before that reset only_reset_clues_if_onln_rlbk to FALSE */ TREF(in_gvcst_bmp_mark_free) = FALSE; send_msg(VARLSTCNT(6) ERR_IGNBMPMRKFREE, 4, REG_LEN_STR(gv_cur_region), DB_LEN_STR(gv_cur_region)); t_abort(gv_cur_region, cs_addrs); return ret_tn; /* actually 0 */ } # endif continue; } break; } if (0 == ret_tn) /* db format change occurred. Fall through to below for loop to visit each block */ { /* Abort any active transaction to get rid of lingering Non-TP artifacts */ t_abort(gv_cur_region, cs_addrs); break; } } } /* for all blocks in the kill_set */
/* Load a GOQ-format (DSM-11/MSM style) sequential dump into the database (VMS only: uses
 * $QIO/$SYNCH block reads on the channel already assigned in infab->fab$l_stv).
 * begin/end : record range to load; begin > 0 means skip forward to that block first.
 * infab     : RMS FAB for the already-opened input file (fab$l_stv holds the channel).
 * Detects the block size from the first read (MVX_BLK_SIZE vs M11_BLK_SIZE) and dispatches
 * to goq_mvx_load or goq_m11_load accordingly.
 * NOTE(review): rts_error presumably does not return here, so the mupip_exit calls that
 * follow several rts_error calls look like defensive dead code — confirm before removing.
 */
void goq_load(uint4 begin, uint4 end, struct FAB *infab)
{
	int		status;
	msgtype		msg;
	unsigned char	*in_buff, *b;
	unsigned int	n;
	bool		is_begin;
	uint4		rec_count;	/* number of blocks consumed so far; also the next VBN seed */
	unsigned short	goq_blk_size;
	short		iosb[4];	/* VMS I/O status block: iosb[0]=status, iosb[1]=bytes read */

	error_def(ERR_INVMVXSZ);
	error_def(ERR_MUPIPINFO);
	error_def(ERR_PREMATEOF);
	error_def(ERR_LDGOQFMT);
	error_def(ERR_BEGINST);

	rec_count = 0;
	if (begin > 0)
		is_begin = TRUE;
	else
		is_begin = FALSE;
	goq_blk_size = MVX_BLK_SIZE;	/* assume MSM-VX size until the first read says otherwise */
	infab->fab$w_mrs = goq_blk_size;
	in_buff = malloc(goq_blk_size + 8);	/* small slop beyond the block for scanning safety */
	if (is_begin)
	{	/* BEGIN= given: read and discard blocks until rec_count reaches "begin" */
		status = sys$qio(efn_bg_qio_read, infab->fab$l_stv, IO$_READVBLK, &iosb[0], 0, 0, in_buff, goq_blk_size,
			(rec_count * goq_blk_size / 512) + 1, 0, 0, 0);
		if (SS$_NORMAL != status)	/* get header block */
			rts_error(VARLSTCNT(1) status);
		sys$synch(efn_bg_qio_read, &iosb[0]);
		if (SS$_NORMAL != iosb[0])
			rts_error(VARLSTCNT(1) iosb[0]);
		if (iosb[1] != goq_blk_size)
		{	/* short read: only the DSM-11 block size is an acceptable alternative */
			if (M11_BLK_SIZE != iosb[1])
				rts_error(VARLSTCNT(1) ERR_INVMVXSZ);
			goq_blk_size = M11_BLK_SIZE;
		}
		/* Skip blocks up to "begin", stopping early on end-of-file */
		while ((SS$_ENDOFFILE != iosb[0]) && (rec_count < begin))
		{
			rec_count++;
			status = sys$qio(efn_bg_qio_read, infab->fab$l_stv, IO$_READVBLK, &iosb[0], 0, 0, in_buff,
				goq_blk_size, (rec_count * goq_blk_size / 512) + 1, 0, 0, 0);
			if (SS$_NORMAL != status)
				rts_error(VARLSTCNT(1) status);
			sys$synch(efn_bg_qio_read, &iosb[0]);
			if ((SS$_NORMAL != iosb[0]) && (SS$_ENDOFFILE != iosb[0]))
			{
				rts_error(VARLSTCNT(1) iosb[0]);
				mupip_exit(iosb[0]);
			}
		}
		/* If EOF stopped the loop before "begin" was reached, the next read reports PREMATEOF */
		for (;rec_count < begin;)
		{
			status = sys$qio(efn_bg_qio_read, infab->fab$l_stv, IO$_READVBLK, &iosb[0], 0, 0, in_buff,
				goq_blk_size, (rec_count * goq_blk_size / 512) + 1, 0, 0, 0);
			if (SS$_NORMAL != status)
				rts_error(VARLSTCNT(1) status);
			sys$synch(efn_bg_qio_read, &iosb[0]);
			if (SS$_ENDOFFILE == iosb[0])
				rts_error(VARLSTCNT(1) ERR_PREMATEOF);
			if (SS$_NORMAL != iosb[0])
				rts_error(VARLSTCNT(1) iosb[0]);
			rec_count++;
		}
		/* Announce the starting record number via $PUTMSG */
		msg.msg_number = ERR_BEGINST;
		msg.arg_cnt = 3;
		msg.new_opts = msg.def_opts = 1;
		msg.fp_cnt = 1;
		msg.fp[0].n = rec_count;
		sys$putmsg(&msg, 0, 0, 0);
	} else
	{	/* No BEGIN=: validate the "~%GOQ" header block and echo its label text */
		status = sys$qio(efn_bg_qio_read, infab->fab$l_stv, IO$_READVBLK, &iosb[0], 0, 0, in_buff, goq_blk_size,
			(rec_count * goq_blk_size / 512) + 1, 0, 0, 0);
		if (SS$_NORMAL != status)
			rts_error(VARLSTCNT(1) status);
		sys$synch(efn_bg_qio_read, &iosb[0]);
		if (SS$_NORMAL != iosb[0])
		{
			rts_error(VARLSTCNT(1) iosb[0]);
			mupip_exit(iosb[0]);
		}
		if (iosb[1] != goq_blk_size)
		{
			if (M11_BLK_SIZE != iosb[1])
				rts_error(VARLSTCNT(1) ERR_INVMVXSZ);
			goq_blk_size = M11_BLK_SIZE;
		}
		/* Scan to the first CR; the "~%GOQ" signature must immediately precede it, followed by LF */
		b = in_buff;
		while ((13 != *b++) && (b - in_buff < goq_blk_size - 28))
			;
		if (memcmp(b - SIZEOF("~%GOQ"), LIT_AND_LEN("~%GOQ")) || (10 != *b))
		{
			rts_error(VARLSTCNT(1) ERR_LDGOQFMT);
			mupip_exit(ERR_LDGOQFMT);
		}
		/* Three more CR/LF-terminated header lines must follow */
		for (n = 0; n < 3; n++)
		{
			while ((13 != *b++) && b - in_buff < goq_blk_size)
				;
			if (10 != *b++)
			{
				rts_error(VARLSTCNT(1) ERR_LDGOQFMT);
				mupip_exit(ERR_LDGOQFMT);
			}
		}
		/* Report the header text (everything up to the last terminator) via $PUTMSG */
		msg.msg_number = ERR_MUPIPINFO;
		msg.arg_cnt = 4;
		msg.new_opts = msg.def_opts = 1;
		msg.fp_cnt = 2;
		msg.fp[0].n = b - in_buff - 1;
		msg.fp[1].cp = in_buff;
		sys$putmsg(&msg, 0, 0, 0);
		/* NOTE(review): this loop reads the remainder of the file before the *_load call below;
		 * presumably the loaders reposition/reread as needed — confirm against goq_mvx_load.
		 */
		while (SS$_ENDOFFILE != iosb[0])
		{
			rec_count++;
			status = sys$qio(efn_bg_qio_read, infab->fab$l_stv, IO$_READVBLK, &iosb[0], 0, 0, in_buff,
				goq_blk_size, (rec_count * goq_blk_size / 512) + 1, 0, 0, 0);
			if (SS$_NORMAL != status)
			{
				rts_error(VARLSTCNT(1) status);
				mupip_exit(status);
			}
			sys$synch(efn_bg_qio_read, &iosb[0]);
			if ((SS$_NORMAL != iosb[0]) && (SS$_ENDOFFILE != iosb[0]))
			{
				rts_error(VARLSTCNT(1) iosb[0]);
				mupip_exit(iosb[0]);
			}
		}
	}
	/* Dispatch to the loader matching the detected block size */
	if (MVX_BLK_SIZE == goq_blk_size)
		goq_mvx_load(infab, in_buff, rec_count, end);
	else
		goq_m11_load(infab, in_buff, rec_count, end);
	/***********************************************************************************************/
	/* Shut Down */
	/***********************************************************************************************/
CLOSE:
	free(in_buff);
	gv_cur_region = NULL;
	status = sys$dassgn(infab->fab$l_stv);	/* deassign the input channel */
	if (SS$_NORMAL != status)
	{
		rts_error(VARLSTCNT(1) status);
		mupip_exit(status);
	}
	return;
}
/* One-time GT.M runtime initialization, driven by the startup_vector supplied by the image
 * bootstrap (or by gtm_init for call-ins). Sets up the M stack, stringpool, indirection
 * caches, the base stack frame, I/O, signal handling, collation, timers and the $Piece
 * cache. Order of the calls below matters: e.g. stp_init must precede anything that
 * allocates from the stringpool, and symbinit must precede the lv_getslot calls.
 * svec : startup parameters (stack/stringpool sizes, routine table bounds, etc.);
 *        note this function also clamps some svec fields in place and writes svec->frm_ptr.
 */
void gtm_startup(struct startup_vector *svec)
/* Note: various references to data copied from *svec could profitably be referenced directly */
{
	unsigned char		*mstack_ptr;
	void			gtm_ret_code();
	static readonly unsigned char	init_break[1] = {'B'};	/* initial $ZTRAP is a single "B"reak */
	int4			lct;
	int			i;
	static char		other_mode_buf[] = "OTHER";
	mstr			log_name;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(INVALID_IMAGE != image_type);
	assert(svec->argcnt == SIZEOF(*svec));	/* struct version check between caller and runtime */
	IA64_ONLY(init_xfer_table());
	get_page_size();
	/* Wire up the global function-pointer hooks used by lower layers */
	cache_table_relobjs = &cache_table_rebuild;
	ht_rhash_ch = &hashtab_rehash_ch;
	jbxm_dump_ch = &jobexam_dump_ch;
	heartbeat_timer_ptr = &heartbeat_timer;
	stpgc_ch = &stp_gcol_ch;
	rtn_fst_table = rtn_names = (rtn_tabent *)svec->rtn_start;
	rtn_names_end = rtn_names_top = (rtn_tabent *)svec->rtn_end;
	/* Clamp the requested M stack size to [4KiB, 8MiB] and carve out the stack */
	if (svec->user_stack_size < 4096)
		svec->user_stack_size = 4096;
	if (svec->user_stack_size > 8388608)
		svec->user_stack_size = 8388608;
	mstack_ptr = (unsigned char *)malloc(svec->user_stack_size);
	msp = stackbase = mstack_ptr + svec->user_stack_size - mvs_size[MVST_STORIG];
	/* mark the stack base so that if error occur during call-in gtm_init(), the unwind
	 * logic in gtmci_ch() will get rid of the stack completely
	 */
	fgncal_stack = stackbase;
	mv_chain = (mv_stent *)msp;
	mv_chain->mv_st_type = MVST_STORIG;	/* Initialize first (anchor) mv_stent so doesn't do anything */
	mv_chain->mv_st_next = 0;
	mv_chain->mv_st_cont.mvs_storig = 0;
	stacktop = mstack_ptr + 2 * mvs_size[MVST_NTAB];
	stackwarn = stacktop + (16 * 1024);	/* warn 16KiB before actual stack exhaustion */
	break_message_mask = svec->break_message_mask;
	/* Clamp the initial stringpool size and initialize it */
	if (svec->user_strpl_size < STP_INITSIZE)
		svec->user_strpl_size = STP_INITSIZE;
	else if (svec->user_strpl_size > STP_MAXINITSIZE)
		svec->user_strpl_size = STP_MAXINITSIZE;
	stp_init(svec->user_strpl_size);
	/* Allocate the indirection result/source stacks (size clamped to the legal nesting range) */
	if (svec->user_indrcache_size > MAX_INDIRECTION_NESTING || svec->user_indrcache_size < MIN_INDIRECTION_NESTING)
		svec->user_indrcache_size = MIN_INDIRECTION_NESTING;
	TREF(ind_result_array) = (mval **)malloc(SIZEOF(mval *) * svec->user_indrcache_size);
	TREF(ind_source_array) = (mval **)malloc(SIZEOF(mval *) * svec->user_indrcache_size);
	TREF(ind_result_sp) = TREF(ind_result_array);
	TREF(ind_result_top) = TREF(ind_result_sp) + svec->user_indrcache_size;
	TREF(ind_source_sp) = TREF(ind_source_array);
	TREF(ind_source_top) = TREF(ind_source_sp) + svec->user_indrcache_size;
	rts_stringpool = stringpool;
	TREF(compile_time) = FALSE;
	/* assert that is_replicator and run_time is properly set by gtm_imagetype_init invoked at process entry */
#	ifdef DEBUG
	switch (image_type)
	{
		case GTM_IMAGE:
		case GTM_SVC_DAL_IMAGE:
			assert(is_replicator && run_time);
			break;
		case MUPIP_IMAGE:
			assert(!is_replicator && !run_time);
			break;
		default:
			assert(FALSE);
	}
#	endif
	gtm_utf8_init();	/* Initialize the runtime for Unicode */
	/* Initialize alignment requirement for the runtime stringpool */
	log_name.addr = DISABLE_ALIGN_STRINGS;
	log_name.len = STR_LIT_LEN(DISABLE_ALIGN_STRINGS);
	/* mstr_native_align = logical_truth_value(&log_name, FALSE, NULL) ? FALSE : TRUE; */
	mstr_native_align = FALSE; /* TODO: remove this line and uncomment the above line */
	getjobname();
	INVOKE_INIT_SECSHR_ADDRS;
	getzprocess();
	getzmode();
	geteditor();
	zcall_init();
	cmd_qlf.qlf = glb_cmd_qlf.qlf;
	cache_init();
#	ifdef __sun
	if (NULL != GETENV(PACKAGE_ENV_TYPE))	/* chose xcall (default) or rpc zcall */
		xfer_table[xf_fnfgncal] = (xfer_entry_t)op_fnfgncal_rpc;	/* using RPC */
#	endif
	/* Build the base (anchor) stack frame that returns through gtm_ret_code */
	msp -= SIZEOF(stack_frame);
	frame_pointer = (stack_frame *)msp;
	memset(frame_pointer, 0, SIZEOF(stack_frame));
	frame_pointer->temps_ptr = (unsigned char *)frame_pointer;
	frame_pointer->ctxt = GTM_CONTEXT(gtm_ret_code);
	frame_pointer->mpc = CODE_ADDRESS(gtm_ret_code);
	frame_pointer->type = SFT_COUNT;
	frame_pointer->rvector = (rhdtyp *)malloc(SIZEOF(rhdtyp));
	memset(frame_pointer->rvector, 0, SIZEOF(rhdtyp));
	symbinit();
	/* Variables for supporting $ZSEARCH sorting and wildcard expansion */
	TREF(zsearch_var) = lv_getslot(curr_symval);
	TREF(zsearch_dir1) = lv_getslot(curr_symval);
	TREF(zsearch_dir2) = lv_getslot(curr_symval);
	LVVAL_INIT((TREF(zsearch_var)), curr_symval);
	LVVAL_INIT((TREF(zsearch_dir1)), curr_symval);
	LVVAL_INIT((TREF(zsearch_dir2)), curr_symval);
	/* Initialize global pointer to control-C handler. Also used in iott_use */
	ctrlc_handler_ptr = &ctrlc_handler;
	io_init(IS_MUPIP_IMAGE);	/* starts with nocenable for GT.M runtime, enabled for MUPIP */
	if (!IS_MUPIP_IMAGE)
	{
		sig_init(generic_signal_handler, ctrlc_handler_ptr, suspsigs_handler, continue_handler);
		atexit(gtm_exit_handler);
		cenable();	/* cenable unless the environment indicates otherwise - 2 steps because this can report errors */
	}
	jobinterrupt_init();
	getzdir();
	dpzgbini();
	zco_init();
	/* a base addr of 0 indicates a gtm_init call from an rpc server */
	if ((GTM_IMAGE == image_type) && (NULL != svec->base_addr))
		jobchild_init();
	else
	{	/* Trigger enabled utilities will enable through here; $ZMODE reports "OTHER" */
		(TREF(dollar_zmode)).mvtype = MV_STR;
		(TREF(dollar_zmode)).str.addr = other_mode_buf;
		(TREF(dollar_zmode)).str.len = SIZEOF(other_mode_buf) - 1;
	}
	svec->frm_ptr = (unsigned char *)frame_pointer;	/* hand the base frame back to the caller */
	/* Seed $ZTRAP with "B" (break) and clear $ZSTATUS */
	dollar_ztrap.mvtype = MV_STR;
	dollar_ztrap.str.len = SIZEOF(init_break);
	dollar_ztrap.str.addr = (char *)init_break;
	dollar_zstatus.mvtype = MV_STR;
	dollar_zstatus.str.len = 0;
	dollar_zstatus.str.addr = NULL;
	ecode_init();
	zyerror_init();
	ztrap_form_init();
	ztrap_new_init();
	zdate_form_init(svec);
	dollar_system_init(svec);
	init_callin_functable();
	gtm_env_xlate_init();
	SET_LATCH_GLOBAL(&defer_latch, LOCK_AVAILABLE);
	curr_pattern = pattern_list = &mumps_pattern;
	pattern_typemask = mumps_pattern.typemask;
	initialize_pattern_table();
	ce_init();	/* initialize compiler escape processing */
	/* Initialize local collating sequence */
	TREF(transform) = TRUE;
	lct = find_local_colltype();
	if (lct != 0)
	{
		TREF(local_collseq) = ready_collseq(lct);
		if (!TREF(local_collseq))
		{	/* requested collation type has no implementation: fatal */
			exi_condition = -ERR_COLLATIONUNDEF;
			gtm_putmsg(VARLSTCNT(3) ERR_COLLATIONUNDEF, 1, lct);
			exit(exi_condition);
		}
	} else
		TREF(local_collseq) = 0;
	prealloc_gt_timers();
	gt_timers_add_safe_hndlrs();
	for (i = 0; FNPC_MAX > i; i++)
	{	/* Initialize cache structure for $Piece function */
		(TREF(fnpca)).fnpcs[i].pcoffmax = &(TREF(fnpca)).fnpcs[i].pstart[FNPC_ELEM_MAX];
		(TREF(fnpca)).fnpcs[i].indx = i;
	}
	(TREF(fnpca)).fnpcsteal = (TREF(fnpca)).fnpcs;		/* Starting place to look for cache reuse */
	(TREF(fnpca)).fnpcmax = &(TREF(fnpca)).fnpcs[FNPC_MAX - 1];	/* The last element */
	/* Initialize zwrite subsystem. Better to do it now when we have storage to allocate than
	 * if we fail and storage allocation may not be possible. To that end, pretend we have
	 * seen alias acitivity so those structures are initialized as well.
	 */
	assert(FALSE == curr_symval->alias_activity);
	curr_symval->alias_activity = TRUE;
	lvzwr_init((enum zwr_init_types)0, (mval *)NULL);
	curr_symval->alias_activity = FALSE;
	if ((NULL != (TREF(mprof_env_gbl_name)).str.addr))
		turn_tracing_on(TADR(mprof_env_gbl_name), TRUE, (TREF(mprof_env_gbl_name)).str.len > 0);
	return;
}
uint4 gtmcrypt_entry() { void_ptr_t handle, fptr; char_ptr_t err_str, env_ptr, libname_ptr, libpath_ptr; char *gtmcryptlib_fname[] = { GTMCRYPT_INIT_FNAME, GTMCRYPT_CLOSE_FNAME, GTMCRYPT_HASH_GEN_FNAME, GTMCRYPT_ENCRYPT_FNAME, GTMCRYPT_DECRYPT_FNAME, GTMCRYPT_GETKEY_BY_NAME, GTMCRYPT_GETKEY_BY_HASH, GTMCRYPT_STRERROR }; void **gtmcryptlib_fptr[] = { (void **)>mcrypt_init_fnptr, (void **)>mcrypt_close_fnptr, (void **)>mcrypt_hash_gen_fnptr, (void **)>mcrypt_encrypt_fnptr, (void **)>mcrypt_decrypt_fnptr, (void **)>mcrypt_getkey_by_name_fnptr, (void **)>mcrypt_getkey_by_hash_fnptr, (void **)>mcrypt_strerror_fnptr }; int findx, num_dlsyms, plugin_dir_len, save_errno; char libpath[GTM_PATH_MAX], buf[MAX_GTMCRYPT_PLUGIN_STR_LEN], plugin_dir_path[GTM_PATH_MAX]; char resolved_libpath[GTM_PATH_MAX], resolved_gtmdist[GTM_PATH_MAX]; mstr trans, env_var = {0, SIZEOF(GTM_CRYPT_PLUGIN) - 1, GTM_CRYPT_PLUGIN}; if (NULL == (env_ptr = getenv(GTM_DIST))) rts_error(VARLSTCNT(1) ERR_GTMDISTUNDEF); if (NULL == realpath(env_ptr, &resolved_gtmdist[0])) { save_errno = errno; SNPRINTF(dl_err, MAX_ERRSTR_LEN, "Failed to find symbolic link for %s. %s", env_ptr, STRERROR(save_errno)); return ERR_CRYPTDLNOOPEN; } SNPRINTF(plugin_dir_path, GTM_PATH_MAX, "%s/%s", resolved_gtmdist, PLUGIN_DIR_NAME); plugin_dir_len = STRLEN(plugin_dir_path); if ((SS_NORMAL != TRANS_LOG_NAME(&env_var, &trans, buf, SIZEOF(buf), do_sendmsg_on_log2long)) || (0 >= trans.len)) { /* Either $gtm_crypt_plugin is not defined in the environment variable OR it is set to null-string. Fall-back to * using libgtmcrypt.so */ libname_ptr = GTMCRYPT_LIBNAME; } else libname_ptr = &buf[0]; /* value of $gtm_crypt_plugin */ SNPRINTF(libpath, GTM_PATH_MAX, "%s/%s", plugin_dir_path, libname_ptr); if (NULL == realpath(&libpath[0], &resolved_libpath[0])) { save_errno = errno; SNPRINTF(dl_err, MAX_ERRSTR_LEN, "Failed to find symbolic link for %s. %s", libpath, STRERROR(save_errno)); return ERR_CRYPTDLNOOPEN; } /* Symbolic link found. 
dlopen resolved_libpath */ if (0 != memcmp(&resolved_libpath[0], plugin_dir_path, plugin_dir_len)) { /* resolved_path doesn't contain $gtm_dist/plugin as the prefix */ SNPRINTF(dl_err, MAX_ERRSTR_LEN, "Symbolic link for %s must be relative to %s", libpath, plugin_dir_path); return ERR_CRYPTDLNOOPEN; } handle = dlopen(&resolved_libpath[0], GTMCRYPT_LIBFLAGS); if (NULL == handle) { COPY_DLLERR_MSG(err_str, dl_err); return ERR_CRYPTDLNOOPEN; } num_dlsyms = ARRAYSIZE(gtmcryptlib_fptr); /* number of functions to be dlsym'ed */ for(findx = 0; findx < num_dlsyms; ++findx) { fptr = (void *)dlsym(handle, gtmcryptlib_fname[findx]); if (NULL == fptr) { COPY_DLLERR_MSG(err_str, dl_err); return ERR_CRYPTDLNOOPEN; } *gtmcryptlib_fptr[findx] = fptr; } return 0; }
/* Bind a GT.M socket device's TCP socket to its configured local address.
 * socketptr     : the socket to bind (its local.ai addrinfo supplies family/addr/port).
 * timepar       : timeout in the device's units (NO_M_TIMEOUT = wait forever); only
 *                 consulted while retrying EADDRINUSE.
 * update_bufsiz : TRUE  -> push socketptr->bufsiz to the kernel via SO_RCVBUF;
 *                 FALSE -> read the kernel's SO_RCVBUF back into socketptr->bufsiz.
 * On success: updates socketptr->local.port (if port 0 was requested), sets state to
 * socket_bound, and formats $KEY as "BOUND|<handle>|<port>". Returns TRUE.
 * On failure: raises an rts_error_csa (which presumably does not return - the
 * "return FALSE" statements after it look defensive) after SOCKET_FREEing the socket.
 */
boolean_t iosocket_bind(socket_struct *socketptr, int4 timepar, boolean_t update_bufsiz)
{
	int			temp_1 = 1;
	char			*errptr;
	int4			errlen, msec_timeout, real_errno;
	short			len;
	in_port_t		actual_port;
	boolean_t		no_time_left = FALSE;
	d_socket_struct		*dsocketptr;
	struct addrinfo		*ai_ptr;
	char			port_buffer[NI_MAXSERV];
	int			errcode;
	ABS_TIME		cur_time, end_time;
	GTM_SOCKLEN_TYPE	addrlen;
	GTM_SOCKLEN_TYPE	sockbuflen;

	dsocketptr = socketptr->dev;
	ai_ptr = (struct addrinfo *)(&socketptr->local.ai);
	assert(NULL != dsocketptr);
	dsocketptr->iod->dollar.key[0] = '\0';	/* clear $KEY until the bind succeeds */
	if (FD_INVALID != socketptr->temp_sd)
	{	/* adopt the pre-created descriptor */
		socketptr->sd = socketptr->temp_sd;
		socketptr->temp_sd = FD_INVALID;
	}
	if (timepar != NO_M_TIMEOUT)
	{	/* compute the absolute deadline for EADDRINUSE retries */
		msec_timeout = timeout2msec(timepar);
		sys_get_curr_time(&cur_time);
		add_int_to_abs_time(&cur_time, msec_timeout, &end_time);
	}
	do
	{	/* (Re)apply socket options each pass: the descriptor is recreated on retry */
		temp_1 = 1;
		if (-1 == tcp_routines.aa_setsockopt(socketptr->sd, SOL_SOCKET, SO_REUSEADDR, &temp_1, SIZEOF(temp_1)))
		{
			real_errno = errno;
			errptr = (char *)STRERROR(real_errno);
			errlen = STRLEN(errptr);
			SOCKET_FREE(socketptr);
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_SETSOCKOPTERR, 5, RTS_ERROR_LITERAL("SO_REUSEADDR"),
				real_errno, errlen, errptr);
			return FALSE;
		}
#ifdef TCP_NODELAY
		temp_1 = socketptr->nodelay ? 1 : 0;
		if (-1 == tcp_routines.aa_setsockopt(socketptr->sd, IPPROTO_TCP, TCP_NODELAY, &temp_1, SIZEOF(temp_1)))
		{
			real_errno = errno;
			errptr = (char *)STRERROR(real_errno);
			errlen = STRLEN(errptr);
			SOCKET_FREE(socketptr);
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_SETSOCKOPTERR, 5, RTS_ERROR_LITERAL("TCP_NODELAY"),
				real_errno, errlen, errptr);
			return FALSE;
		}
#endif
		if (update_bufsiz)
		{	/* caller-specified receive buffer size: push it to the kernel */
			if (-1 == tcp_routines.aa_setsockopt(socketptr->sd, SOL_SOCKET, SO_RCVBUF,
					&socketptr->bufsiz, SIZEOF(socketptr->bufsiz)))
			{
				real_errno = errno;
				errptr = (char *)STRERROR(real_errno);
				errlen = STRLEN(errptr);
				SOCKET_FREE(socketptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_SETSOCKOPTERR, 5,
					RTS_ERROR_LITERAL("SO_RCVBUF"), real_errno, errlen, errptr);
				return FALSE;
			}
		} else
		{	/* otherwise record the kernel's current receive buffer size */
			sockbuflen = SIZEOF(socketptr->bufsiz);
			if (-1 == tcp_routines.aa_getsockopt(socketptr->sd, SOL_SOCKET, SO_RCVBUF,
					&socketptr->bufsiz, &sockbuflen))
			{
				real_errno = errno;
				errptr = (char *)STRERROR(real_errno);
				errlen = STRLEN(errptr);
				SOCKET_FREE(socketptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_GETSOCKOPTERR, 5,
					RTS_ERROR_LITERAL("SO_RCVBUF"), real_errno, errlen, errptr);
				return FALSE;
			}
		}
		temp_1 = tcp_routines.aa_bind(socketptr->sd, SOCKET_LOCAL_ADDR(socketptr), ai_ptr->ai_addrlen);
		if (temp_1 < 0)
		{
			real_errno = errno;
			no_time_left = TRUE;
			switch (real_errno)
			{
				case EADDRINUSE:
					/* retryable while the timeout has not expired */
					if (NO_M_TIMEOUT != timepar)
					{
						sys_get_curr_time(&cur_time);
						cur_time = sub_abs_time(&end_time, &cur_time);
						if (cur_time.at_sec > 0)
							no_time_left = FALSE;
					}
					break;
				case EINTR:
					break;	/* interrupted: give up quietly (no_time_left stays TRUE) */
				default:
					SOCKET_FREE(socketptr);
					rts_error_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_SOCKBIND, 0, real_errno, 0);
					break;
			}
			if (no_time_left)
				return FALSE;
			/* sleep briefly, then recreate the socket and retry the bind */
			hiber_start(100);
			tcp_routines.aa_close(socketptr->sd);
			if (-1 == (socketptr->sd = tcp_routines.aa_socket(ai_ptr->ai_family, ai_ptr->ai_socktype,
					ai_ptr->ai_protocol)))
			{
				real_errno = errno;
				errptr = (char *)STRERROR(real_errno);
				errlen = STRLEN(errptr);
				SOCKET_FREE(socketptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_SOCKINIT, 3, real_errno, errlen, errptr);
				return FALSE;
			}
		}
	} while (temp_1 < 0);
	/* obtain actual port from the bound address if port 0 was specified */
	addrlen = SOCKET_ADDRLEN(socketptr, ai_ptr, local);
	if (-1 == tcp_routines.aa_getsockname(socketptr->sd, SOCKET_LOCAL_ADDR(socketptr), &addrlen))
	{
		real_errno = errno;
		errptr = (char *)STRERROR(real_errno);
		errlen = STRLEN(errptr);
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_GETSOCKNAMERR, 3, real_errno, errlen, errptr);
		return FALSE;
	}
	assert(ai_ptr->ai_addrlen == addrlen);
	GETNAMEINFO(SOCKET_LOCAL_ADDR(socketptr), addrlen, NULL, 0, port_buffer, NI_MAXSERV, NI_NUMERICSERV, errcode);
	if (0 != errcode)
	{
		SOCKET_FREE(socketptr);
		RTS_ERROR_ADDRINFO(NULL, ERR_GETNAMEINFO, errcode);
		return FALSE;
	}
	actual_port = ATOI(port_buffer);
	if (0 == socketptr->local.port)
		socketptr->local.port = actual_port;	/* ephemeral port was assigned by the kernel */
	assert(socketptr->local.port == actual_port);
	socketptr->state = socket_bound;
	/* Build $KEY: "BOUND|<handle>|<port>" */
	len = SIZEOF(BOUND) - 1;
	memcpy(&dsocketptr->iod->dollar.key[0], BOUND, len);
	dsocketptr->iod->dollar.key[len++] = '|';
	memcpy(&dsocketptr->iod->dollar.key[len], socketptr->handle, socketptr->handle_len);
	len += socketptr->handle_len;
	dsocketptr->iod->dollar.key[len++] = '|';
	SPRINTF(&dsocketptr->iod->dollar.key[len], "%d", socketptr->local.port);
	return TRUE;
}
boolean_t iosocket_wait(io_desc *iod, int4 timepar) { struct timeval utimeout; ABS_TIME cur_time, end_time; struct sockaddr_in peer; /* socket address + port */ fd_set tcp_fd; d_socket_struct *dsocketptr; socket_struct *socketptr, *newsocketptr; socket_interrupt *sockintr; char *errptr; int4 errlen, ii, msec_timeout; int rv, max_fd, len; GTM_SOCKLEN_TYPE size; boolean_t zint_restart; mv_stent *mv_zintdev; short retry_num; error_def(ERR_SOCKACPT); error_def(ERR_SOCKWAIT); error_def(ERR_TEXT); error_def(ERR_SOCKMAX); error_def(ERR_ZINTRECURSEIO); error_def(ERR_STACKCRIT); error_def(ERR_STACKOFLOW); /* check for validity */ assert(iod->type == gtmsocket); dsocketptr = (d_socket_struct *)iod->dev_sp; sockintr = &dsocketptr->sock_save_state; /* Check for restart */ if (!dsocketptr->mupintr) /* Simple path, no worries*/ zint_restart = FALSE; else { /* We have a pending wait restart of some sort - check we aren't recursing on this device */ if (sockwhich_invalid == sockintr->who_saved) GTMASSERT; /* Interrupt should never have an invalid save state */ if (dollar_zininterrupt) rts_error(VARLSTCNT(1) ERR_ZINTRECURSEIO); if (sockwhich_wait != sockintr->who_saved) GTMASSERT; /* ZINTRECURSEIO should have caught */ SOCKET_DEBUG(PRINTF("socwait: *#*#*#*#*#*#*# Restarted interrupted wait\n"); DEBUGSOCKFLUSH); mv_zintdev = io_find_mvstent(iod, FALSE); if (mv_zintdev) { if (sockintr->end_time_valid) /* Restore end_time for timeout */ end_time = sockintr->end_time; /* Done with this mv_stent. Pop it off if we can, else mark it inactive. 
*/ if (mv_chain == mv_zintdev) POP_MV_STENT(); /* pop if top of stack */ else { /* else mark it unused */ mv_zintdev->mv_st_cont.mvs_zintdev.buffer_valid = FALSE; mv_zintdev->mv_st_cont.mvs_zintdev.io_ptr = NULL; } zint_restart = TRUE; SOCKET_DEBUG(PRINTF("socwait: mv_stent found - endtime: %d/%d\n", end_time.at_sec, end_time.at_usec); DEBUGSOCKFLUSH); } else SOCKET_DEBUG(PRINTF("socwait: no mv_stent found !!\n"); DEBUGSOCKFLUSH); dsocketptr->mupintr = FALSE; sockintr->who_saved = sockwhich_invalid; }
/* This function is called primarily to append a new histinfo record to the replication instance file by one of the following
 * 1) MUPIP REPLIC -SOURCE -START -ROOTPRIMARY command (after forking the child source server) if it created the journal pool.
 * 2) MUPIP REPLIC -SOURCE -ACTIVATE -ROOTPRIMARY command if this is a propagating primary to root primary transition.
 * In addition, this function also initializes the "lms_group_info" field in the instance file (from the "inst_info" field)
 * if the current value is NULL.
 * start_seqno : the journal sequence number at which this instance becomes root primary; recorded
 *               both in the instance file header and in the new history record.
 */
void gtmsource_rootprimary_init(seq_num start_seqno)
{
	unix_db_info		*udi;
	repl_histinfo		histinfo;	/* history record built locally, then appended to the instance file */
	boolean_t		was_crit, switch_jnl;
	gd_region		*reg, *region_top;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;
	uint4			jnl_status;

	udi = FILE_INFO(jnlpool.jnlpool_dummy_reg);
	assert(NULL != jnlpool.repl_inst_filehdr);
	/* Update journal pool fields to reflect this is a root primary startup and updates are enabled */
	assert(!udi->s_addrs.hold_onto_crit || jgbl.onlnrlbk);
	was_crit = udi->s_addrs.now_crit;
	if (!was_crit)
		grab_lock(jnlpool.jnlpool_dummy_reg, TRUE, ASSERT_NO_ONLINE_ROLLBACK);
	jnlpool.repl_inst_filehdr->root_primary_cycle++;
	/* If this instance is transitioning from a non-rootprimary to rootprimary, switch journal files.
	 * This helps with maintaining accurate value of csd->zqgblmod_tn when the former primary connects
	 * to the current primary through a fetchresync-rollback or receiver-server-autorollback.
	 */
	switch_jnl = (!jnlpool.repl_inst_filehdr->was_rootprimary && (0 < jnlpool.repl_inst_filehdr->num_histinfo));
	jnlpool.repl_inst_filehdr->was_rootprimary = TRUE;
	assert(start_seqno >= jnlpool.jnlpool_ctl->start_jnl_seqno);
	assert(start_seqno == jnlpool.jnlpool_ctl->jnl_seqno);
	jnlpool.repl_inst_filehdr->jnl_seqno = start_seqno;
	assert(jgbl.onlnrlbk || jnlpool.jnlpool_ctl->upd_disabled);
	if (!jgbl.onlnrlbk)
		jnlpool.jnlpool_ctl->upd_disabled = FALSE;	/* root primary accepts updates */
	if (IS_REPL_INST_UUID_NULL(jnlpool.repl_inst_filehdr->lms_group_info))
	{	/* This is the first time this instance is being brought up either as a root primary or as a propagating
		 * primary. Initialize the "lms_group_info" fields in the instance file header in journal pool shared memory.
		 * They will be flushed to the instance file as part of the "repl_inst_histinfo_add -> repl_inst_flush_filehdr"
		 * function invocation below.
		 */
		assert('\0' == jnlpool.repl_inst_filehdr->lms_group_info.created_nodename[0]);
		assert('\0' == jnlpool.repl_inst_filehdr->lms_group_info.this_instname[0]);
		assert(!jnlpool.repl_inst_filehdr->lms_group_info.creator_pid);
		jnlpool.repl_inst_filehdr->lms_group_info = jnlpool.repl_inst_filehdr->inst_info;
		assert('\0' != jnlpool.repl_inst_filehdr->lms_group_info.created_nodename[0]);
		DBG_CHECK_CREATED_NODENAME(jnlpool.repl_inst_filehdr->lms_group_info.created_nodename);
		assert('\0' != jnlpool.repl_inst_filehdr->lms_group_info.this_instname[0]);
		assert(jnlpool.repl_inst_filehdr->lms_group_info.created_time);
		assert(jnlpool.repl_inst_filehdr->lms_group_info.creator_pid);
	}
	/* Initialize histinfo fields */
	memcpy(histinfo.root_primary_instname, jnlpool.repl_inst_filehdr->inst_info.this_instname, MAX_INSTNAME_LEN - 1);
	histinfo.root_primary_instname[MAX_INSTNAME_LEN - 1] = '\0';
	assert('\0' != histinfo.root_primary_instname[0]);
	histinfo.start_seqno = start_seqno;
	assert(jnlpool.jnlpool_ctl->strm_seqno[0] == jnlpool.repl_inst_filehdr->strm_seqno[0]);
	assert(jnlpool.repl_inst_filehdr->is_supplementary || (0 == jnlpool.jnlpool_ctl->strm_seqno[0]));
	histinfo.strm_seqno = (!jnlpool.repl_inst_filehdr->is_supplementary) ? 0 : jnlpool.jnlpool_ctl->strm_seqno[0];
	histinfo.root_primary_cycle = jnlpool.repl_inst_filehdr->root_primary_cycle;
	assert(process_id == getpid());
	histinfo.creator_pid = process_id;
	JNL_SHORT_TIME(histinfo.created_time);
	histinfo.strm_index = 0;
	histinfo.history_type = HISTINFO_TYPE_NORMAL;
	NULL_INITIALIZE_REPL_INST_UUID(histinfo.lms_group);
	/* The following fields will be initialized in the "repl_inst_histinfo_add" function call below.
	 *	histinfo.histinfo_num
	 *	histinfo.prev_histinfo_num
	 *	histinfo.last_histinfo_num[]
	 */
	/* Add the histinfo record to the instance file and flush the changes in the journal pool to the file header */
	repl_inst_histinfo_add(&histinfo);
	if (!was_crit)
		rel_lock(jnlpool.jnlpool_dummy_reg);
	if (switch_jnl)
	{	/* Force a journal file switch in every journaled region (see comment above switch_jnl) */
		SET_GBL_JREC_TIME;	/* jnl_ensure_open/jnl_file_extend and its callees assume jgbl.gbl_jrec_time is set */
		for (reg = gd_header->regions, region_top = gd_header->regions + gd_header->n_regions; reg < region_top; reg++)
		{
			gv_cur_region = reg;
			change_reg();	/* sets cs_addrs/cs_data (needed by jnl_ensure_open) */
			if (!JNL_ENABLED(cs_addrs))
				continue;
			grab_crit(gv_cur_region);
			jpc = cs_addrs->jnl;
			/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order of jnl
			 * records. This needs to be done BEFORE the jnl_ensure_open as that could write journal records
			 * (if it decides to switch to a new journal file)
			 */
			jbp = jpc->jnl_buff;
			ADJUST_GBL_JREC_TIME(jgbl, jbp);
			jnl_status = jnl_ensure_open();
			if (0 == jnl_status)
			{
				if (EXIT_ERR == SWITCH_JNL_FILE(jpc))
					rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(4) ERR_JNLEXTEND, 2, JNL_LEN_STR(cs_data));
			} else
			{	/* jnl_ensure_open failed; include the OS status code when one is available */
				if (SS_NORMAL != jpc->status)
					rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(7) jnl_status, 4, JNL_LEN_STR(cs_data),
						DB_LEN_STR(gv_cur_region), jpc->status);
				else
					rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(cs_data),
						DB_LEN_STR(gv_cur_region));
			}
			rel_crit(gv_cur_region);
		}
	}
}
int gtmrecv_start_helpers(int n_readers, int n_writers) { /* Set flag in recvpool telling the receiver server to start n_readers and n_writers helper processes. * Wait for receiver server to complete the process - completed successfully, or terminated with error */ upd_helper_ctl_ptr_t upd_helper_ctl; upd_helper_entry_ptr_t helper, helper_top; char err_str[BUFSIZ]; int avail_slots, started_readers, started_writers; error_def(ERR_REPLERR); error_def(ERR_REPLINFO); error_def(ERR_REPLWARN); assert(0 != n_readers || 0 != n_writers); upd_helper_ctl = recvpool.upd_helper_ctl; /* let's clean up dead helpers first so we get an accurate count of available slots */ upd_helper_ctl->reap_helpers = HELPER_REAP_NOWAIT; while (HELPER_REAP_NONE != upd_helper_ctl->reap_helpers && SRV_ALIVE == is_recv_srv_alive()) SHORT_SLEEP(GTMRECV_WAIT_FOR_UPD_SHUTDOWN); upd_helper_ctl->reap_helpers = HELPER_REAP_NONE; /* just in case recvr died */ /* count available slots so receiver doesn't have to */ for (avail_slots = 0, helper = upd_helper_ctl->helper_list, helper_top = helper + MAX_UPD_HELPERS; helper < helper_top; helper++) { if (0 == helper->helper_pid) { avail_slots++; helper->helper_pid_prev = 0; /* force out abnormally terminated helpers as well */ helper->helper_shutdown = NO_SHUTDOWN; /* clean state */ } } if (avail_slots < n_readers + n_writers) { SNPRINTF(err_str, SIZEOF(err_str), "%d helpers will exceed the maximum allowed (%d), limit the helpers to %d\n", n_readers + n_writers, MAX_UPD_HELPERS, avail_slots); gtm_putmsg(VARLSTCNT(4) ERR_REPLERR, 2, LEN_AND_STR(err_str)); return ABNORMAL_SHUTDOWN; } upd_helper_ctl->start_n_readers = n_readers; upd_helper_ctl->start_n_writers = n_writers; SHM_WRITE_MEMORY_BARRIER; upd_helper_ctl->start_helpers = TRUE; /* hey receiver, let's go, start 'em up */ while (upd_helper_ctl->start_helpers && SRV_ALIVE == is_recv_srv_alive()) SHORT_SLEEP(GTMRECV_WAIT_FOR_SRV_START); if (!upd_helper_ctl->start_helpers) { started_readers = 
upd_helper_ctl->start_n_readers; started_writers = upd_helper_ctl->start_n_writers; SNPRINTF(err_str, SIZEOF(err_str), "%s %d out of %d readers and %d out of %d writers started", ((started_readers + started_writers) == (n_readers + n_writers)) ? "All" : "Only", started_readers, n_readers, started_writers, n_writers); if ((started_readers + started_writers) == (n_readers + n_writers)) { gtm_putmsg(VARLSTCNT(4) ERR_REPLINFO, 2, LEN_AND_STR(err_str)); return NORMAL_SHUTDOWN; } gtm_putmsg(VARLSTCNT(4) ERR_REPLWARN, 2, LEN_AND_STR(err_str)); return ABNORMAL_SHUTDOWN; } gtm_putmsg(VARLSTCNT(4) ERR_REPLERR, 2, LEN_AND_LIT("Receiver server is not alive to start helpers. Start receiver server first")); return ABNORMAL_SHUTDOWN; }
/* Expand a (possibly wildcarded) file specification into the $ZSEARCH result tree.
 *
 * pfil - parsed file spec (from parse_file); l_dir/b_dir, l_name/b_name, l_ext/b_ext
 *        delimit the directory, name and extension pieces, fnb carries the wildcard flags.
 *
 * Wildcarded directory segments are expanded breadth-first by ping-ponging candidate
 * directories between the two scratch lv trees zsrch_dir1/zsrch_dir2; matching files are
 * then inserted into the global result tree ind_var, with a plength record (b_esl/b_dir/
 * b_name/b_ext) stashed in each node's value for later retrieval by op_fnzsearch.
 */
void dir_srch (parse_blk *pfil)
{
	struct stat	statbuf;
	int		stat_res;
	lv_val		*dir1, *dir2, *tmp;
	mstr		tn;
	short		p2_len;
	char		filb[MAX_FBUFF + 1], patb[sizeof(ptstr)], *c, *lastd, *top, *p2, *c1, ch;
	mval		pat_mval, sub, compare;
	bool		wildname, seen_wd;
	struct dirent	*dent;
	DIR		*dp;
	plength		*plen;
	int		closedir_res;

	op_kill(zsrch_dir1);
	op_kill(zsrch_dir2);
	if (!pfil->b_name)
		return;	/* nothing to search for */
	ESTABLISH(dir_ch);
	pat_mval.mvtype = MV_STR;
	pat_mval.str.addr = patb;
	/* patb should be sizeof(ptstr.buff) but instead is sizeof(ptstr) since the C compiler
	 * complains about the former and the latter is just 4 bytes more */
	pat_mval.str.len = 0;
	sub.mvtype = MV_STR;
	sub.str.len = 0;
	compare.mvtype = MV_STR;
	compare.str.len = 0;
	wildname = (pfil->fnb & F_WILD_NAME) != 0;
	dir1 = zsrch_dir1;
	dir2 = zsrch_dir2;
	if (pfil->fnb & F_WILD_DIR)
	{	/* Directory part contains wildcards: expand one wildcarded segment at a time */
		seen_wd = FALSE;
		/* Scan forward to the end of the first wildcarded directory segment; lastd tracks
		 * the start of the segment containing the first wildcard */
		for (c = pfil->l_dir, lastd = c, top = c + pfil->b_dir; c < top;)
		{
			ch = *c++;
			if (ch == '/')	/* note the start of each directory segment */
			{
				if (seen_wd)
					break;
				lastd = c;
			}
			if (ch == '?' || ch == '*')
				seen_wd = TRUE;
		}
		assert(c <= top);
		/* Seed dir1 with the non-wild directory prefix */
		sub.str.addr = pfil->l_dir;
		sub.str.len = lastd - sub.str.addr;
		tmp = op_putindx(VARLSTCNT(2) dir1, &sub);
		tmp->v.mvtype = MV_STR;
		tmp->v.str.len = 0;
		for (;;)
		{	/* One iteration per wildcarded directory segment */
			tn.addr = lastd;	/* wildcard segment */
			tn.len = c - lastd - 1;
			lastd = c;
			genpat(&tn, &pat_mval);
			seen_wd = FALSE;
			p2 = c - 1;
			/* Advance past the following non-wild segment(s) up to the next wildcard */
			for (; c < top;)
			{
				ch = *c++;
				if (ch == '/')	/* note the start of each directory segment */
				{
					if (seen_wd)
						break;
					lastd = c;
				}
				if (ch == '?' || ch == '*')
					seen_wd = TRUE;
			}
			p2_len = lastd - p2;	/* length of non-wild segment after wild section */
			for (;;)
			{	/* Drain dir1: each entry is a directory prefix to expand */
				pop_top(dir1, &sub);	/* get next item off the top */
				if (!sub.str.len)
					break;
				memcpy(filb, sub.str.addr, sub.str.len);
				filb[sub.str.len] = 0;	/* NUL-terminate for opendir */
				sub.str.addr = filb;
				dp = OPENDIR(filb);
				if (!dp)
					continue;	/* prefix vanished or unreadable - skip it */
				while (READDIR(dp, dent))
				{
					compare.str.addr = &dent->d_name[0];
					compare.str.len = strlen(&dent->d_name[0]);
					assert(compare.str.len);
					if (dent->d_name[0] == '.' && (compare.str.len == 1
							|| (compare.str.len == 2 && dent->d_name[1] == '.')))
					{
						continue;	/* don't want to read . and .. */
					}
					if (compare.str.len + sub.str.len + p2_len > MAX_FBUFF)
						continue;	/* combined path would overflow the name buffer */
					if (do_pattern(&compare, &pat_mval))
					{	/* got a hit */
						if (stringpool.free + compare.str.len + sub.str.len + p2_len + 1 > stringpool.top)
							stp_gcol(compare.str.len + sub.str.len + p2_len + 1);
						/* concatenate directory and name */
						c1 = (char *)stringpool.free;
						tn = sub.str;
						s2pool(&tn);
						tn = compare.str;
						s2pool(&tn);
						tn.addr = p2;
						tn.len = p2_len;
						s2pool(&tn);
						*stringpool.free++ = 0;
						compare.str.addr = c1;
						compare.str.len += sub.str.len + p2_len;
						STAT_FILE(compare.str.addr, &statbuf, stat_res);
						if (-1 == stat_res)
							continue;
						if (!(statbuf.st_mode & S_IFDIR))
							continue;	/* only directories survive to the next round */
						/* put in results tree */
						tmp = op_putindx(VARLSTCNT(2) dir2, &compare);
						tmp->v.mvtype = MV_STR;
						tmp->v.str.len = 0;
					}
				}
				CLOSEDIR(dp, closedir_res);
			}
			/* Swap the trees: this round's results become next round's inputs */
			tmp = dir1;
			dir1 = dir2;
			dir2 = tmp;
			if (c >= top)
				break;	/* directory spec fully consumed */
		}
	} else
	{	/* No wildcard in the directory part: the single directory is the only prefix */
		sub.str.addr = pfil->l_dir;
		sub.str.len = pfil->b_dir;
		tmp = op_putindx(VARLSTCNT(2) dir1, &sub);
		tmp->v.mvtype = MV_STR;
		tmp->v.str.len = 0;
	}
	if (wildname)
	{	/* Build the match pattern for the (wildcarded) name.ext portion */
		tn.addr = pfil->l_name;
		tn.len = pfil->b_name + pfil->b_ext;
		genpat(&tn, &pat_mval);
	}
	for (;;)
	{	/* For each expanded directory, collect matching files into ind_var */
		pop_top(dir1, &sub);	/* get next item off the top */
		if (!sub.str.len)
			break;
		if (wildname)
		{
			memcpy(filb, sub.str.addr, sub.str.len);
			filb[sub.str.len] = 0;
			sub.str.addr = filb;
			dp = OPENDIR(filb);
			if (!dp)
				continue;
			while (READDIR(dp, dent))
			{
				compare.str.addr = &dent->d_name[0];
				compare.str.len = strlen(&dent->d_name[0]);
				if (dent->d_name[0] == '.' && (compare.str.len == 1
						|| (compare.str.len == 2 && dent->d_name[1] == '.')))
				{
					continue;	/* don't want to read . and .. */
				}
				if (compare.str.len + sub.str.len > MAX_FBUFF)
					continue;
				if (do_pattern(&compare, &pat_mval))
				{	/* got a hit */
					if (stringpool.free + compare.str.len + sub.str.len > stringpool.top)
						stp_gcol(compare.str.len + sub.str.len);
					/* concatenate directory and name */
					c = (char *)stringpool.free;
					tn = sub.str;
					s2pool(&tn);
					tn = compare.str;
					s2pool(&tn);
					compare.str.addr = c;
					compare.str.len += sub.str.len;
					/* put in results tree */
					tmp = op_putindx(VARLSTCNT(2) ind_var, &compare);
					tmp->v.mvtype = MV_STR;
					tmp->v.str.len = 0;
					/* Record the piece lengths of this result in the node's value */
					plen = (plength *)&tmp->v.m[1];
					plen->p.pblk.b_esl = compare.str.len;
					plen->p.pblk.b_dir = sub.str.len;
					/* Skip leading dots of the file name - they do not start an extension */
					for (c = &compare.str.addr[sub.str.len], c1 = top = &compare.str.addr[compare.str.len];
						c < top;)
					{
						if (*c++ != '.')
							break;
					}
					/* c1 ends up at the last '.' (extension start), or at top if none */
					for (; c < top;)
					{
						if (*c++ == '.')
							c1 = c - 1;
					}
					plen->p.pblk.b_ext = top - c1;
					plen->p.pblk.b_name = plen->p.pblk.b_esl - plen->p.pblk.b_dir - plen->p.pblk.b_ext;
				}
			}
			CLOSEDIR(dp, closedir_res);
		} else
		{	/* Name is fixed; only the directory was wildcarded */
			assert(pfil->fnb & F_WILD_DIR);
			compare.str.addr = pfil->l_name;
			compare.str.len = pfil->b_name + pfil->b_ext;
			if (compare.str.len + sub.str.len > MAX_FBUFF)
				continue;
			memcpy(filb, sub.str.addr, sub.str.len);
			filb[sub.str.len] = 0;
			sub.str.addr = filb;
			if (stringpool.free + compare.str.len + sub.str.len > stringpool.top)
				stp_gcol(compare.str.len + sub.str.len);
			/* concatenate directory and name */
			c1 = (char *)stringpool.free;
			tn = sub.str;
			s2pool(&tn);
			tn = compare.str;
			s2pool(&tn);
			compare.str.addr = c1;
			compare.str.len += sub.str.len;
			/* put in results tree */
			tmp = op_putindx(VARLSTCNT(2) ind_var, &compare);
			tmp->v.mvtype = MV_STR;
			tmp->v.str.len = 0;
			plen = (plength *)&tmp->v.m[1];
			plen->p.pblk.b_esl = compare.str.len;
			plen->p.pblk.b_dir = sub.str.len;
			plen->p.pblk.b_name = pfil->b_name;
			plen->p.pblk.b_ext = pfil->b_ext;
		}
	}
	op_kill(zsrch_dir1);
	op_kill(zsrch_dir2);
	REVERT;
}
/* Initialize communication stuff.
 *
 * Creates the receiver server's listening TCP socket (IPv6 with IPv4 fallback), binds it to
 * "port" on the wildcard address and starts listening. Idempotent: returns immediately with 0
 * if gtmrecv_listen_sock_fd is already valid.
 *
 * Returns 0 on success, -1 on failure (socket/bind/listen/getaddrinfo errors are reported via
 * rts_error_csa / RTS_ERROR_ADDRINFO_CTX before returning).
 *
 * Fix: the addrinfo list returned by getaddrinfo() was never released, leaking it on every
 * call; freeaddrinfo() is now invoked on all paths after a successful getaddrinfo().
 */
int gtmrecv_comm_init(in_port_t port)
{
	struct addrinfo		*ai_ptr = NULL, hints;
	const int		enable_reuseaddr = 1;
	struct linger		disable_linger = {0, 0};
	int			rc;
	int			errcode;
	char			port_buffer[NI_MAXSERV];
	int			temp_sock_fd;
	int			af;

	if (FD_INVALID != gtmrecv_listen_sock_fd)	/* Initialization done already */
		return (0);
	/* Create the socket used for communicating with primary */
	af = ((GTM_IPV6_SUPPORTED && !ipv4_only) ? AF_INET6 : AF_INET);
	if (FD_INVALID == (temp_sock_fd = socket(af, SOCK_STREAM, IPPROTO_TCP)))
	{	/* IPv6 socket creation failed - retry with plain IPv4 before giving up */
		af = AF_INET;
		if (FD_INVALID == (temp_sock_fd = socket(af, SOCK_STREAM, IPPROTO_TCP)))
		{
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
					RTS_ERROR_LITERAL("Error with receiver server socket create"), ERRNO);
			return (-1);
		}
	}
	/* Make it known to the world that you are ready for a Source Server */
	SERVER_HINTS(hints, af);
	SPRINTF(port_buffer, "%hu", port);
	if (0 != (errcode = getaddrinfo(NULL, port_buffer, &hints, &ai_ptr)))
	{
		CLOSEFILE(temp_sock_fd, rc);
		RTS_ERROR_ADDRINFO_CTX(NULL, ERR_GETADDRINFO, errcode, "FAILED in obtaining IP address on receiver server.");
		return -1;
	}
	gtmrecv_listen_sock_fd = temp_sock_fd;
	if (0 > setsockopt(gtmrecv_listen_sock_fd, SOL_SOCKET, SO_LINGER, (const void *)&disable_linger,
			SIZEOF(disable_linger)))
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Error with receiver server listen socket disable linger"), ERRNO);
	if (0 > setsockopt(gtmrecv_listen_sock_fd, SOL_SOCKET, SO_REUSEADDR, (const void *)&enable_reuseaddr,
			SIZEOF(enable_reuseaddr)))
	{
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Error with receiver server listen socket enable reuseaddr"), ERRNO);
	}
	if (0 > BIND(gtmrecv_listen_sock_fd, ai_ptr->ai_addr, ai_ptr->ai_addrlen))
	{
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Could not bind local address"), ERRNO);
		freeaddrinfo(ai_ptr);	/* release addrinfo list before bailing out */
		CLOSEFILE_RESET(gtmrecv_listen_sock_fd, rc);	/* resets "gtmrecv_listen_sock_fd" to FD_INVALID */
		return (-1);
	}
	freeaddrinfo(ai_ptr);	/* address list no longer needed once the socket is bound */
	if (0 > listen(gtmrecv_listen_sock_fd, 5))
	{
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Could not listen"), ERRNO);
		CLOSEFILE_RESET(gtmrecv_listen_sock_fd, rc);	/* resets "gtmrecv_listen_sock_fd" to FD_INVALID */
		return (-1);
	}
	return (0);
}
/* Implements the M $ZSEARCH() function: return the next file matching "file" in the search
 * stream "indx", using a per-stream cached result tree so successive calls iterate the matches.
 *
 * file - (possibly wildcarded) file specification; indx - search stream number;
 * ret  - receives the next matching file name (empty string when the stream is exhausted).
 *
 * Returns the plength-encoded piece lengths (pret.p.pint) of the returned name.
 *
 * Fix: on the parse_file() failure path, pret was never assigned, so the trailing assert and
 * the return statement read an uninitialized object (undefined behavior); pret is now zeroed
 * on that path, matching the empty result handed back in *ret.
 */
int op_fnzsearch (mval *file, mint indx, mval *ret)
{
	struct stat	statbuf;
	int		stat_res;
	parse_blk	pblk;
	plength		*plen, pret;
	char		buf1[MAX_FBUFF + 1];	/* buffer to hold translated name */
	mval		sub;
	mstr		tn;
	lv_val		*ind_tmp;

	error_def(ERR_INVSTRLEN);

	MV_FORCE_STR(file);
	if (file->str.len > MAX_FBUFF)
		rts_error(VARLSTCNT(4) ERR_INVSTRLEN, 2, file->str.len, MAX_FBUFF);
	MV_FORCE_MVAL(&ind_val, indx);
	ind_var = op_srchindx(VARLSTCNT(2) zsrch_var, &ind_val);
	if (ind_var)
	{	/* Stream exists; if the spec changed, the cached context is stale - discard it */
		assert(ind_var->v.mvtype & MV_STR);
		if (file->str.len != ind_var->v.str.len
			|| memcmp(file->str.addr, ind_var->v.str.addr, file->str.len))
		{
			op_kill(ind_var);
			ind_var = (lv_val *)0;
		}
	}
	if (ind_var)
	{	/* Continue an existing search: pop cached names until one still exists on disk */
		for (;;)
		{
			pret.p.pint = pop_top(ind_var, ret);	/* get next element off the top */
			if (!ret->str.len)
				break;
			memcpy(buf1, ret->str.addr, ret->str.len);
			buf1[ret->str.len] = 0;
			STAT_FILE(buf1, &statbuf, stat_res);
			if (-1 == stat_res)
			{
				if (errno == ENOENT)
					continue;	/* file deleted since it was cached - skip */
				rts_error(VARLSTCNT(1) errno);
			}
			break;
		}
	} else
	{	/* New search: parse the spec and build a fresh result tree */
		memset(&pblk, 0, sizeof(pblk));
		pblk.buffer = buf1;
		pblk.buff_size = MAX_FBUFF;
		if (!(parse_file(&file->str, &pblk) & 1))
		{	/* Unparseable spec: hand back an empty result and zeroed piece lengths */
			memset(&pret, 0, sizeof(pret));	/* fix: pret was previously left uninitialized here */
			ret->mvtype = MV_STR;
			ret->str.len = 0;
		} else
		{
			assert(!ind_var);
			buf1[pblk.b_esl] = 0;
			/* establish new search context */
			ind_var = op_putindx(VARLSTCNT(2) zsrch_var, &ind_val);
			ind_var->v = *file;	/* zsrch_var(indx)=original spec */
			if (!(pblk.fnb & F_WILD))
			{	/* No wildcards: the tree holds exactly the one parsed name */
				sub.mvtype = MV_STR;
				sub.str.len = pblk.b_esl;
				sub.str.addr = buf1;
				s2pool(&sub.str);
				ind_tmp = op_putindx(VARLSTCNT(2) ind_var, &sub);
				ind_tmp->v.mvtype = MV_STR;
				ind_tmp->v.str.len = 0;
				plen = (plength *)&ind_tmp->v.m[1];
				plen->p.pblk.b_esl = pblk.b_esl;
				plen->p.pblk.b_dir = pblk.b_dir;
				plen->p.pblk.b_name = pblk.b_name;
				plen->p.pblk.b_ext = pblk.b_ext;
			} else
				dir_srch(&pblk);	/* expand wildcards into the result tree */
			for (;;)
			{
				pret.p.pint = pop_top(ind_var, ret);	/* get next element off the top */
				if (!ret->str.len)
					break;
				memcpy(buf1, ret->str.addr, ret->str.len);
				buf1[ret->str.len] = 0;
				STAT_FILE(buf1, &statbuf, stat_res);
				if (-1 == stat_res)
				{
					if (errno == ENOENT)
						continue;
					rts_error(VARLSTCNT(1) errno);
				}
				break;
			}
		}
	}
	assert(pret.p.pblk.b_esl == ret->str.len);
	return pret.p.pint;
}
void op_zrupdate(int argcnt, ...) { mval *objfilespec; va_list var; mval objpath; char tranbuf[MAX_FBUFF + 1], *chptr, chr; open_relinkctl_sgm *linkctl; relinkrec_t *rec; plength plen; int status, fextlen, fnamlen, fcnt; parse_blk pblk; struct stat outbuf; int stat_res; boolean_t seenfext, fileexists; mstr objdir, rtnname; uint4 hash, prev_hash_index; /* Currently only expecting one value per invocation right now. That will change in phase 2 hence the stdarg setup */ va_start(var, argcnt); assert(1 == argcnt); objfilespec = va_arg(var, mval *); va_end(var); MV_FORCE_STR(objfilespec); /* First some pre-processing to determine if an explicit file name or type was specified. If so, it must be ".o" for * this phase of implementation. Later phases may allow ".m" to be specified but not initially. Use parse_file() to * parse everything out and isolate any extention. */ memset(&pblk, 0, SIZEOF(pblk)); pblk.buffer = tranbuf; pblk.buff_size = (unsigned char)(MAX_FBUFF); /* Pass size of buffer - 1 (standard protocol for parse_file) */ pblk.def1_buf = DOTOBJEXT; /* Default .o file type if not specified */ pblk.def1_size = SIZEOF(DOTOBJEXT) - 1; pblk.fop = F_SYNTAXO; /* Syntax check only - bypass directory existence check */ status = parse_file(&objfilespec->str, &pblk); if (ERR_PARNORMAL != status) rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_FILEPARSE, 2, objfilespec->str.len, objfilespec->str.addr, status); tranbuf[pblk.b_esl] = '\0'; /* Needed for subsequent STAT_FILE */ seenfext = FALSE; if (0 != pblk.b_ext) { /* If a file extension was specified - get the extension sans any potential wildcard character */ for (chptr = pblk.l_ext + 1, fextlen = pblk.b_ext - 1; 0 < fextlen; chptr++, fextlen--) { /* Check each character in the extension except first which is the dot if ext exists at all */ if (WILDCARD != *chptr) { /* We see a char that isn't a wildcard character. 
If we've already seen our "o" file extension, * this char makes our requirement filetype impossible so raise an error. */ if (seenfext || (OBJEXT != *chptr)) /* No return from this error */ rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_FILEPARSE, 2, objfilespec->str.len, objfilespec->str.addr, ERR_TEXT, 2, RTS_ERROR_TEXT("Unsupported filetype specified")); seenfext = TRUE; } } } /* Do a simlar check for the file type */ if (0 != pblk.b_name) { /* A file name was specified (if not, tiz probably hard to find the file but that can be dealt with later). * Like in the above, the name must be comprised of valid chars for routine names. */ for (chptr = pblk.l_name, fnamlen = pblk.b_name; 0 < fnamlen; chptr++, fnamlen--) { if (WILDCARD != *chptr) { /* Substitute '%' for '_'. While this substitution is really only valid on the first char, only the * first char check allows "%" so a 2nd or later char check would fail the '%' substitution anyway. */ chr = ('_' == *chptr) ? '%' : *chptr; /* We see a char that isn't a wildcard character. If this is the first character, it can be * alpha or percent. If the second or later character, it can be alphanumeric. */ if (((fnamlen != pblk.b_name) && !VALID_MNAME_NFCHAR(chr)) /* 2nd char or later check */ || ((fnamlen == pblk.b_name) && !VALID_MNAME_FCHAR(chr))) /* 1st char check */ { rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_FILEPARSE, 2, objfilespec->str.len, objfilespec->str.addr, ERR_TEXT, 2, RTS_ERROR_TEXT("Filename is not a valid routine name")); } } } } /* When specifying a non-wildcarded object file, it is possible for the file to have been removed, in which case we still * need to update its relinkctl entry (if exists) to notify other processes about the object's deletion. 
*/ if (!(pblk.fnb & F_WILD) & seenfext) { /* If no wildcards in path and saw the extension we want - no need to wash through zsearch() */ objdir.addr = pblk.l_dir; objdir.len = pblk.b_dir; linkctl = relinkctl_attach(&objdir); /* Create/attach/open relinkctl file */ if (NULL == linkctl) /* Non-existant path and no associated relinkctl file */ /* Note this reference to errno depends on nothing in relinkctl_attach() doing anything to modify * errno after the realpath() call. No return from this error. */ rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_FILEPARSE, 2, objfilespec->str.len, objfilespec->str.addr, errno); /* What we do at this point depends on the following conditions: * * 1) If the specified file exists, we can add it to relinkctl file and/or update its cycle. * 2) If the file doesn't exist on disk but the routine is found in the relinkctl file, update cycle. * 3) If no file and no entry, just ignore it and do nothing (info error removed by request). */ STAT_FILE(tranbuf, &outbuf, stat_res); if (-1 == stat_res) { if (ENOENT != errno) rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_FILEPARSE, 2, objfilespec->str.len, objfilespec->str.addr, errno); fileexists = FALSE; } else fileexists = TRUE; rtnname.len = pblk.b_name; rtnname.addr = pblk.l_name; CONVERT_FILENAME_TO_RTNNAME(rtnname); /* Set rtnname right before searching in relinkctl file */ assert(valid_mname(&rtnname)); COMPUTE_RELINKCTL_HASH(&rtnname, hash); rec = relinkctl_find_record(linkctl, &rtnname, hash, &prev_hash_index); if ((NULL == rec) && !fileexists) return; /* No file and no entry - ignore */ /* Either the file exists or the entry exists so add or update it */ rec = relinkctl_insert_record(linkctl, &rtnname); RELINKCTL_CYCLE_INCR(rec, linkctl); /* Increment cycle indicating change to world */ return; } /* If we have a wildcarded request or one without the object filetype, reprocess the string with $ZSEARCH using our * defined stream to resolve wildcards. 
Then loop through processing each file returned. In this loop, we just ignore * any file that doesn't have a ".o" extension. */ op_fnzsearch((mval *)&literal_null, STRM_ZRUPDATE, 0, &objpath); /* Clear any existing cache */ for (fcnt = 0; ; fcnt++) { plen.p.pint = op_fnzsearch(objfilespec, STRM_ZRUPDATE, 0, &objpath); if (0 == objpath.str.len) { /* End of file list */ if (0 == fcnt) /* Still looking to process our first file - give no objects found message as a "soft" message * (INFO level message - supressed in other than direct mode) */ rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) MAKE_MSG_INFO(ERR_FILEPARSE), 2, objfilespec->str.len, objfilespec->str.addr, ERR_TEXT, 2, RTS_ERROR_TEXT("No object files found")); break; } /* The extension contains the extension-start character ('.') so we are looking for the extension '.o' hence * the length must be 2 and the 2nd char must be OBJEXT. */ if ((2 == plen.p.pblk.b_ext) && (OBJEXT == *(objpath.str.addr + plen.p.pblk.b_dir + plen.p.pblk.b_name + 1))) { /* This is (probably) an object file. Double check file is a file and not a directory */ memcpy(tranbuf, objpath.str.addr, objpath.str.len); /* Need null terminated version for STAT */ tranbuf[objpath.str.len] = '\0'; /* Not guaranteed null termination from op_fnzsearch */ STAT_FILE(tranbuf, &outbuf, stat_res); /* If either something happened to the file since op_fnzsearch() saw it or the file is not a file, then * ignore it. 
*/ if ((-1 == stat_res) || !S_ISREG(outbuf.st_mode)) { fcnt--; /* Don't count files not found for whatever reason */ continue; } /* Before opening the relinkctl file, verify we actually do have a valid routine name */ rtnname.len = plen.p.pblk.b_name; rtnname.addr = objpath.str.addr + plen.p.pblk.b_dir; CONVERT_FILENAME_TO_RTNNAME(rtnname); /* Set rtnname right before searching in relinkctl file */ if (!valid_mname(&rtnname)) { fcnt--; continue; /* Ignore files that are invalid wildcard matches */ } objdir.addr = objpath.str.addr; objdir.len = plen.p.pblk.b_dir; linkctl = relinkctl_attach(&objdir); /* Create/attach/open relinkctl file */ if (NULL == linkctl) { fcnt--; /* Path disappeared - don't count it */ continue; } rec = relinkctl_insert_record(linkctl, &rtnname); RELINKCTL_CYCLE_INCR(rec, linkctl); /* Increment cycle indicating change to world */ } else fcnt--; /* Don't count ignored files */ } }
int iomb_dataread (int timeout) { GBLREF io_pair io_curr_device; bool ret, timed; int status, i; int4 msec_timeout; /* timeout in milliseconds */ io_desc *io_ptr; d_mb_struct *mb_ptr; TID timer_id; error_def(ERR_IOEOF); io_ptr = io_curr_device.in; io_ptr->dollar.zeof = FALSE; mb_ptr = (d_mb_struct *) io_ptr->dev_sp; mb_ptr->in_top = mb_ptr->inbuf; assert (io_ptr->state == dev_open); mb_ptr->timer_on = TRUE; timer_id = (TID) iomb_dataread; i = 0; ret = TRUE; out_of_time = FALSE; if (timeout == NO_M_TIMEOUT) { timed = FALSE; msec_timeout = NO_M_TIMEOUT; } else { timed = TRUE; msec_timeout = timeout2msec(timeout); if (msec_timeout > 0) { start_timer(timer_id, msec_timeout, ring_alarm2, 0, NULL); } } while (!out_of_time) { DOREADRL(mb_ptr->channel, mb_ptr->inbuf, mb_ptr->maxmsg, status); if (1 > status) { if (status == 0) { if (!timed || msec_timeout == 0) { io_ptr->dollar.za = 0; io_ptr->dollar.zeof = TRUE; if (io_ptr->error_handler.len > 0) rts_error(VARLSTCNT(1) ERR_IOEOF); return FALSE; } } else if (status == -1) { io_ptr->dollar.za = 9; if ((timed) && (!out_of_time)) cancel_timer(timer_id); rts_error(VARLSTCNT(1) errno); } } else { mb_ptr->in_top += status; mb_ptr->in_pos = mb_ptr->inbuf; return TRUE; } } }
/* Unwind TP (transaction processing) frames from the current $TLEVEL down to "newlevel".
 *
 * newlevel        - target transaction level to unwind to.
 * invocation_type - why we are unwinding: RESTART_INVOCATION restores saved local variable
 *                   values; COMMIT_INVOCATION keeps current lock state (no lock rollback).
 * tprestart_rc    - passed through to tp_unwind_restlv() for restart bookkeeping.
 *
 * For each unwound frame this releases the saved local-variable copies (restoring them on
 * restart), pops the TPHOLD mv_stent, adjusts the TP stack pointers, and finally rolls the
 * M lock chain back to the state it had at "newlevel" (unless committing).
 */
void tp_unwind(uint4 newlevel, enum tp_unwind_invocation invocation_type, int *tprestart_rc)
{
	mlk_pvtblk	**prior, *mlkp;
	mlk_tp		*oldlock, *nextlock;
	int		tl;
	lv_val		*save_lv, *curr_lv, *lv;
	tp_var		*restore_ent;
	mv_stent	*mvc;
	boolean_t	restore_lv, rollback_locks;
	lvscan_blk	*lvscan, *lvscan_next, first_lvscan;
	int		elemindx, rc;
	lvTree		*lvt_child;

	/* We are about to clean up structures. Defer MUPIP STOP/signal handling until function end. */
	DEFER_INTERRUPTS(INTRPT_IN_TP_UNWIND);
	/* Unwind the requested TP levels */
#	if defined(DEBUG_REFCNT) || defined(DEBUG_ERRHND)
	DBGFPF((stderr, "\ntp_unwind: Beginning TP unwind process\n"));
#	endif
	restore_lv = (RESTART_INVOCATION == invocation_type);
	/* lvscan starts as a stack-allocated block; overflow blocks are malloc'd and chained via ->next */
	lvscan = &first_lvscan;
	lvscan->next = NULL;
	lvscan->elemcnt = 0;
	assert((tp_sp <= tpstackbase) && (tp_sp > tpstacktop));
	assert((tp_pointer <= (tp_frame *)tpstackbase) && (tp_pointer > (tp_frame *)tpstacktop));
	for (tl = dollar_tlevel; tl > newlevel; --tl)
	{	/* Unwind one TP frame per iteration */
		DBGRFCT((stderr, "\ntp_unwind: Unwinding level %d -- tp_pointer: 0x"lvaddr"\n", tl, tp_pointer));
		assertpro(NULL != tp_pointer);
		for (restore_ent = tp_pointer->vars; NULL != restore_ent; restore_ent = tp_pointer->vars)
		{
			/*********************************************************************************/
			/* TP_VAR_CLONE sets the var_cloned flag, showing that the tree has been cloned  */
			/* If var_cloned is not set, it shows that curr_lv and save_lv are still sharing */
			/* the tree, so it should not be killed.                                         */
			/*********************************************************************************/
			curr_lv = restore_ent->current_value;
			save_lv = restore_ent->save_value;
			assert(curr_lv);
			assert(save_lv);
			assert(LV_IS_BASE_VAR(curr_lv));
			assert(LV_IS_BASE_VAR(save_lv));
			assert(0 < curr_lv->stats.trefcnt);
			assert(curr_lv->tp_var);
			assert(curr_lv->tp_var == restore_ent);
			/* In order to restart sub-transactions, this would have to maintain
			 * the chain that currently is not built by op_tstart()
			 */
			if (restore_lv)
			{	/* Restart: put the saved value back into the current lv */
				rc = tp_unwind_restlv(curr_lv, save_lv, restore_ent, NULL, tprestart_rc);
#				ifdef GTM_TRIGGER
				if (0 != rc)
				{
					dollar_tlevel = tl;	/* Record fact if we unwound some tp_frames */
					ENABLE_INTERRUPTS(INTRPT_IN_TP_UNWIND);	/* drive any MUPIP STOP/signals deferred
										 * while in this function */
					TPUNWND_WBOX_TEST;	/* Debug-only wbox-test to simulate SIGTERM */
					INVOKE_RESTART;
				}
#				endif
			} else if (restore_ent->var_cloned)
			{	/* curr_lv has been cloned.
				 * Note: LV_CHILD(save_lv) can be non-NULL only if restore_ent->var_cloned is TRUE
				 */
				DBGRFCT((stderr, "\ntp_unwind: Not restoring curr_lv and is cloned\n"));
				lvt_child = LV_GET_CHILD(save_lv);
				if (NULL != lvt_child)
				{	/* If subtree exists, we have to blow away the cloned tree */
					DBGRFCT((stderr, "\ntp_unwind: save_lv has children\n"));
					assert(save_lv->tp_var);
					DBGRFCT((stderr,"\ntp_unwind: For lv_val 0x"lvaddr": Deleting saved lv_val 0x"lvaddr"\n",
						 curr_lv, save_lv));
					assert(LVT_PARENT(lvt_child) == (lvTreeNode *)save_lv);
					lv_kill(save_lv, DOTPSAVE_FALSE, DO_SUBTREE_TRUE);
				}
				restore_ent->var_cloned = FALSE;
			} else
			{	/* If not cloned, we still have to reduce the reference counts of any
				 * container vars in the untouched tree that were added to keep anything
				 * they referenced from disappearing.
				 */
				DBGRFCT((stderr, "\ntp_unwind: Not restoring curr_lv and is NOT cloned\n"));
				lvt_child = LV_GET_CHILD(curr_lv);
				if (NULL != lvt_child)
				{
					DBGRFCT((stderr, "\ntp_unwind: curr_lv has children and so reducing ref counts\n"));
					TPUNWND_CNTNRS_IN_TREE(curr_lv);
				}
			}
			LV_FREESLOT(save_lv);
			/* Not easy to predict what the trefcnt will be except that it should be greater than zero. In
			 * most cases, it will have its own hash table ref plus the extras we added but it is also
			 * possible that the entry has been kill *'d in which case the ONLY ref that will be left is
			 * our own increment but there is no [quick] way to distinguish this case so we just
			 * test for > 0.
			 */
			assert(0 < curr_lv->stats.trefcnt);
			assert(0 < curr_lv->stats.crefcnt);
			DECR_CREFCNT(curr_lv);	/* Remove the copy refcnt we added in in op_tstart() or lv_newname() */
			DECR_BASE_REF_NOSYM(curr_lv, FALSE);
			curr_lv->tp_var = NULL;
			tp_pointer->vars = restore_ent->next;	/* unlink entry before freeing it */
			free(restore_ent);
		}
		/* Pop the TPHOLD mv_stent if it is the one on top of the M stack for this frame */
		if ((tp_pointer->fp == frame_pointer) && (MVST_TPHOLD == mv_chain->mv_st_type)
			&& (msp == (unsigned char *)mv_chain))
			POP_MV_STENT();
		if (NULL == tp_pointer->old_tp_frame)
			tp_sp = tpstackbase;
		else
			tp_sp = (unsigned char *)tp_pointer->old_tp_frame;
		if (tp_sp > tpstackbase)
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_STACKUNDERFLO);
		if (tp_pointer->tp_save_all_flg)
			--tp_pointer->sym->tp_save_all;
		/* Sanity-check the previous frame pointer before continuing the unwind */
		if ((NULL != (tp_pointer = tp_pointer->old_tp_frame))	/* Note assignment */
			&& ((tp_pointer < (tp_frame *)tp_sp) || (tp_pointer > (tp_frame *)tpstackbase)
				|| (tp_pointer < (tp_frame *)tpstacktop)))
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_STACKUNDERFLO);
	}
	if ((0 != newlevel) && restore_lv)
	{	/* Restore current context (without releasing) */
		assertpro(NULL != tp_pointer);
		DBGRFCT((stderr, "\n\n** tp_unwind: Newlevel (%d) != 0 loop processing\n", newlevel));
		for (restore_ent = tp_pointer->vars; NULL != restore_ent; restore_ent = restore_ent->next)
		{
			curr_lv = restore_ent->current_value;
			save_lv = restore_ent->save_value;
			assert(curr_lv);
			assert(save_lv);
			assert(LV_IS_BASE_VAR(curr_lv));
			assert(LV_IS_BASE_VAR(save_lv));
			assert(curr_lv->tp_var);
			assert(curr_lv->tp_var == restore_ent);
			assert(0 < curr_lv->stats.trefcnt);
			/* Here we pass &lvscan so container-var rescans can be queued for deferred processing */
			rc = tp_unwind_restlv(curr_lv, save_lv, restore_ent, &lvscan, tprestart_rc);
#			ifdef GTM_TRIGGER
			if (0 != rc)
			{
				dollar_tlevel = tl;	/* Record fact if we unwound some levels */
				ENABLE_INTERRUPTS(INTRPT_IN_TP_UNWIND);	/* drive any MUPIP STOP/signals deferred while
									 * in this function */
				TPUNWND_WBOX_TEST;	/* Debug-only wbox-test to simulate SIGTERM */
				INVOKE_RESTART;
			}
#			endif
			assert(0 < curr_lv->stats.trefcnt);	/* Should have its own hash table ref plus the extras we added */
			assert(0 < curr_lv->stats.crefcnt);
		}
		/* If we have any lv_vals queued up to be scanned for container vars, do that now */
		DBGRFCT((stderr, "\ntp_unwind: Starting deferred rescan of lv trees needing refcnt processing\n"));
		while (0 < lvscan->elemcnt)
		{
			assert(ARY_SCNCNTNR_DIM >= lvscan->elemcnt);
			for (elemindx = 0; lvscan->elemcnt > elemindx; ++elemindx)
			{
				lv = lvscan->ary_scncntnr[elemindx];
				DBGRFCT((stderr, "\n**tp_unwind_process_lvscan_array: Deferred processing lv 0x"lvaddr"\n", lv));
				assert(LV_IS_BASE_VAR(lv));
				/* This is the final level being restored so redo the counters on these vars */
				TPREST_CNTNRS_IN_TREE(lv);
			}
			/* If we allocated any secondary blocks, we are done with them now so release them. Only the
			 * very last block on the chain is the original block that was automatically allocated which
			 * should not be freed in this fashion.
			 */
			lvscan_next = lvscan->next;
			if (NULL != lvscan_next)
			{	/* There is another block on the chain so this one can be freed */
				free(lvscan);
				DBGRFCT((stderr, "\ntp_unwind_process_lvscan_array: Freeing lvscan array\n"));
				lvscan = lvscan_next;
			} else
			{	/* Since this is the original block allocated on the C stack which we may reuse,
				 * zero the element count.
				 */
				lvscan->elemcnt = 0;
				DBGRFCT((stderr, "\ntp_unwind_process_lvscan_array: Setting elemcnt to 0 in original "
					 "lvscan block\n"));
				assert(lvscan == &first_lvscan);
			}
		}
	}
	assert(0 == lvscan->elemcnt);	/* verify no elements queued that were not scanned */
	/* Roll the M lock private block chain back to "newlevel" (skipped when committing) */
	rollback_locks = (COMMIT_INVOCATION != invocation_type);
	for (prior = &mlk_pvt_root, mlkp = *prior; NULL != mlkp; mlkp = *prior)
	{
		if (mlkp->granted)
		{	/* This was a pre-existing lock */
			for (oldlock = mlkp->tp; (NULL != oldlock) && ((int)oldlock->tplevel > newlevel); oldlock = nextlock)
			{	/* Remove references to the lock from levels being unwound */
				nextlock = oldlock->next;
				free(oldlock);
			}
			if (rollback_locks)
			{
				if (NULL == oldlock)
				{	/* Lock did not exist at the tp level being unwound to */
					mlk_unlock(mlkp);
					mlk_pvtblk_delete(prior);
					continue;
				} else
				{	/* Lock still exists but restore lock state as it was when the transaction started. */
					mlkp->level = oldlock->level;
					mlkp->zalloc = oldlock->zalloc;
				}
			}
			if ((NULL != oldlock) && (oldlock->tplevel == newlevel))
			{	/* Remove lock reference from level being unwound to,
				 * now that any {level,zalloc} state information has been restored.
				 */
				assert((NULL == oldlock->next) || (oldlock->next->tplevel < newlevel));
				mlkp->tp = oldlock->next;	/* update root reference pointer */
				free(oldlock);
			} else
				mlkp->tp = oldlock;	/* update root reference pointer */
			prior = &mlkp->next;
		} else
			mlk_pvtblk_delete(prior);
	}
	DBGRFCT((stderr, "tp_unwind: Processing complete\n"));
	dollar_tlevel = newlevel;
	ENABLE_INTERRUPTS(INTRPT_IN_TP_UNWIND);	/* check if any MUPIP STOP/signals were deferred while in this function */
}
/* Implements indirect $TEXT(): builds the entryref string "lab+offset^rtn" in the stringpool,
 * compiles it (or reuses a cached compilation) as an OC_FNTEXT indirect, and arranges for the
 * result to be delivered into *dst.
 *
 * lab    - label portion; offset - integer offset from the label; rtn - routine name;
 * dst    - destination mval that will receive the $TEXT() result.
 *
 * The in-construction string is protected from garbage collection by pushing an MVST_MVAL
 * mv_stent that points at it; the mv_stent is popped again on every exit path.
 */
void op_indtext(mval *lab, mint offset, mval *rtn, mval *dst)
{
	bool		rval;
	mstr		*obj, object;
	mval		mv_off;
	oprtype		opt;
	triple		*ref;
	icode_str	indir_src;

	error_def(ERR_INDMAXNEST);
	error_def(ERR_STACKOFLOW);
	error_def(ERR_STACKCRIT);

	MV_FORCE_STR(lab);
	/* Compute the worst-case length of "lab+offset^rtn" and reserve stringpool space up front */
	indir_src.str.len = lab->str.len;
	indir_src.str.len += SIZEOF("+^") - 1;
	indir_src.str.len += MAX_NUM_SIZE;
	indir_src.str.len += rtn->str.len;
	ENSURE_STP_FREE_SPACE(indir_src.str.len);
	DBG_MARK_STRINGPOOL_UNEXPANDABLE;	/* Now that we have ensured enough space in the stringpool, we dont expect any more
						 * garbage collections or expansions until we are done with the below initialization.
						 */
	/* Push an mval pointing to the complete entry ref on to the stack so the string is valid even
	 * if garbage collection occurs before cache_put() */
	PUSH_MV_STENT(MVST_MVAL);
	mv_chain->mv_st_cont.mvs_mval.mvtype = 0;	/* so stp_gcol (if invoked below) does not get confused by this otherwise
							 * incompletely initialized mval in the M-stack */
	mv_chain->mv_st_cont.mvs_mval.str.addr = (char *)stringpool.free;
	memcpy(stringpool.free, lab->str.addr, lab->str.len);
	stringpool.free += lab->str.len;
	*stringpool.free++ = '+';
	MV_FORCE_MVAL(&mv_off, offset);
	MV_FORCE_STRD(&mv_off);	/* goes at stringpool.free. we already made enough space in the stp_gcol() call */
	*stringpool.free++ = '^';
	memcpy(stringpool.free, rtn->str.addr, rtn->str.len);
	stringpool.free += rtn->str.len;
	mv_chain->mv_st_cont.mvs_mval.str.len =
		INTCAST(stringpool.free - (unsigned char *)mv_chain->mv_st_cont.mvs_mval.str.addr);
	mv_chain->mv_st_cont.mvs_mval.mvtype = MV_STR;	/* initialize mvtype now that mval has been otherwise completely set up */
	DBG_MARK_STRINGPOOL_EXPANDABLE;	/* Now that we are done with stringpool.free initializations, mark as free for expansion */
	indir_src.str = mv_chain->mv_st_cont.mvs_mval.str;
	indir_src.code = indir_text;
	if (NULL == (obj = cache_get(&indir_src)))
	{	/* No cached compilation for this entryref: compile it now */
		comp_init(&indir_src.str);
		rval = f_text(&opt, OC_FNTEXT);
		if (!comp_fini(rval, &object, OC_IRETMVAL, &opt, indir_src.str.len))
		{	/* Compilation failed: unwind our protective mv_stent and bail */
			assert(mv_chain->mv_st_type == MVST_MVAL);
			POP_MV_STENT();
			return;
		}
		indir_src.str.addr = mv_chain->mv_st_cont.mvs_mval.str.addr;
		cache_put(&indir_src, &object);
		*ind_result_sp++ = dst;		/* stack the destination for the indirect result */
		if (ind_result_sp >= ind_result_top)
			rts_error(VARLSTCNT(1) ERR_INDMAXNEST);
		assert(mv_chain->mv_st_type == MVST_MVAL);
		POP_MV_STENT();	/* unwind the mval entry before the new frame gets added by comp_indir below */
		comp_indr(&object);
		return;
	}
	/* Cache hit: run the previously compiled object */
	*ind_result_sp++ = dst;
	if (ind_result_sp >= ind_result_top)
		rts_error(VARLSTCNT(1) ERR_INDMAXNEST);
	assert(mv_chain->mv_st_type == MVST_MVAL);
	POP_MV_STENT();	/* unwind the mval entry before the new frame gets added by comp_indir below */
	comp_indr(obj);
	return;
}