void io_dev_close (io_log_name *d) { static readonly unsigned char p[] = {iop_rundown, iop_eol}; mval pp; if (d->iod->pair.in == io_std_device.in && d->iod->pair.out == io_std_device.out) { if (prin_in_dev_failure || prin_out_dev_failure) return; } VMS_ONLY(ESTABLISH(lastchance3);)
void gvcmz_netopen_attempt(struct CLB *c) { unsigned char *ptr, *proto_str; int prc_vec_size; int status; #ifdef BIGENDIAN jnl_process_vector temp_vect; #endif VMS_ONLY( ESTABLISH(gvcmz_netopen_ch); /* our old servers run only on VMS; no need for retry on other OSs */ clb = c; /* need this assignment since we can't pass c to gvcmz_netopen_ch */ )
/* Perform a KILL of a global that lives on a remote GT.CM server node.
 * Any error raised during the networked kill is trapped by replication_ch.
 * If the current region is read-only, no kill is attempted; the global
 * flag gv_replication_error is set instead so the caller can report it.
 */
void kill_var(void)
{
	ESTABLISH(replication_ch);
	if (!gv_cur_region->read_only)
	{	/* updatable region: must be a GT.CM (client/server) access method */
		assert(dba_cm == gv_cur_region->dyn.addr->acc_meth);
		gvcmx_kill(TRUE);
	} else
		gv_replication_error = TRUE;	/* cannot update a read-only region */
	REVERT;
}
void dse_dmp(void) { boolean_t dmp_res, glo_present, zwr_present; patch_fdmp_recs = 0; glo_present = (CLI_PRESENT == cli_present("GLO")); zwr_present = (CLI_PRESENT == cli_present("ZWR")); if (glo_present || zwr_present) { if (CLOSED_FMT == dse_dmp_format) { util_out_print("Error: must open an output file before dump.", TRUE); return; } if (gtm_utf8_mode && (GLO_FMT == glo_present)) { util_out_print("Error: GLO format is not supported in UTF-8 mode. Use ZWR format.", TRUE); return; } if (OPEN_FMT == dse_dmp_format) { dse_dmp_format = (glo_present ? GLO_FMT : ZWR_FMT); if (!gtm_utf8_mode) dse_fdmp_output(LIT_AND_LEN("; DSE EXTRACT")); else dse_fdmp_output(LIT_AND_LEN("; DSE EXTRACT UTF-8")); dse_fdmp_output(STR_AND_LEN(format_label[dse_dmp_format])); } else if ((glo_present ? GLO_FMT : ZWR_FMT) != dse_dmp_format) { util_out_print("Error: current output file already contains !AD records.", TRUE, LEN_AND_STR(&format_label[dse_dmp_format][MESS_OFF])); return; } patch_is_fdmp = TRUE; ESTABLISH(dse_dmp_handler); } else patch_is_fdmp = FALSE; if (CLI_PRESENT == cli_present("RECORD") || CLI_PRESENT == cli_present("OFFSET")) dmp_res = dse_r_dmp(); else dmp_res = dse_b_dmp(); if (patch_is_fdmp) { REVERT; if (dmp_res) util_out_print("!UL !AD records written.!/", TRUE, patch_fdmp_recs, LEN_AND_STR(&format_label[dse_dmp_format][MESS_OFF])); } return; }
/* Helper for MUPIP RUNDOWN -ALL: examine one shared-memory segment (described by
 * parm_buff/fname) and run it down if it belongs to a GT.M database file or a
 * replication pool (journal pool / receive pool).
 * On failure to run down, *exit_status is set to ERR_MUNOTALLSEC; *tmp_exit_status
 * carries per-entry validation status and is reset to SS_NORMAL when the entry
 * was already cleaned up by the validation routine.
 * Errors during the attempt are trapped by mu_rndwn_all_helper_ch.
 */
STATICFNDEF void mu_rndwn_all_helper(shm_parms *parm_buff, char *fname, int *exit_status, int *tmp_exit_status)
{
	replpool_identifier	replpool_id;
	boolean_t		ret_status, jnlpool_sem_created;
	unsigned char		ipcs_buff[MAX_IPCS_ID_BUF], *ipcs_ptr;

	ESTABLISH(mu_rndwn_all_helper_ch);
	if (validate_db_shm_entry(parm_buff, fname, tmp_exit_status))
	{	/* entry is a database shared-memory segment */
		if (SS_NORMAL == *tmp_exit_status)
		{	/* shm still exists */
			mu_gv_cur_reg_init();
			gv_cur_region->dyn.addr->fname_len = strlen(fname);
			STRNCPY_STR(gv_cur_region->dyn.addr->fname, fname, gv_cur_region->dyn.addr->fname_len);
			if (mu_rndwn_file(gv_cur_region, FALSE))
				gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_MUFILRNDWNSUC, 2, DB_LEN_STR(gv_cur_region));
			else
			{	/* Save semid so that it will not be removed by mu_rndwn_sem_all() */
				add_to_semids_list(FILE_INFO(gv_cur_region)->semid);
				*exit_status = ERR_MUNOTALLSEC;
			}
			mu_gv_cur_reg_free();
		} else
		{	/* shm has been cleaned up by "validate_db_shm_entry" so no need of any more cleanup here */
			assert(ERR_SHMREMOVED == *tmp_exit_status);
			*tmp_exit_status = SS_NORMAL;	/* reset tmp_exit_status for below logic to treat this as normal */
		}
	} else if ((SS_NORMAL == *tmp_exit_status)
			&& validate_replpool_shm_entry(parm_buff, (replpool_id_ptr_t)&replpool_id, tmp_exit_status))
	{	/* entry is a replication pool (journal or receive pool) segment */
		if (SS_NORMAL == *tmp_exit_status)
		{
			assert(JNLPOOL_SEGMENT == replpool_id.pool_type || RECVPOOL_SEGMENT == replpool_id.pool_type);
			ret_status = mu_rndwn_repl_instance(&replpool_id, TRUE, FALSE, &jnlpool_sem_created);
			ipcs_ptr = i2asc((uchar_ptr_t)ipcs_buff, parm_buff->shmid);	/* shmid as decimal text for the message */
			*ipcs_ptr = '\0';
			gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(6) (JNLPOOL_SEGMENT == replpool_id.pool_type)
				? (ret_status ? ERR_MUJPOOLRNDWNSUC : ERR_MUJPOOLRNDWNFL)
				: (ret_status ? ERR_MURPOOLRNDWNSUC : ERR_MURPOOLRNDWNFL),
				4, LEN_AND_STR(ipcs_buff), LEN_AND_STR(replpool_id.instfilename));
			if (!ret_status)
				*exit_status = ERR_MUNOTALLSEC;
		} else
		{	/* shm has been cleaned up by "validate_replpool_shm_entry" so no need of any more cleanup here */
			assert(ERR_SHMREMOVED == *tmp_exit_status);
			*tmp_exit_status = SS_NORMAL;	/* reset tmp_exit_status for below logic to treat this as normal */
		}
	}
	REVERT;
}
/* One iteration of the interactive LKE command loop (VMS): read and parse a
 * DCL command line, then dispatch it.  EOF on input terminates the utility
 * via lke_exit(); a parse failure simply falls through so the caller's loop
 * prompts again.  Errors during dispatch are trapped by util_ch.
 */
static void lke_process(void)
{
	uint4		status;
	$DESCRIPTOR	(prompt, "LKE> ");

	ESTABLISH(util_ch);
	status = CLI$DCL_PARSE(0, &lke_cmd, &lib$get_input, &lib$get_input, &prompt);
	if (RMS$_EOF == status)
		lke_exit();	/* end of input: leave the utility (does not return) */
	else if (CLI$_NORMAL == status)
	{	/* valid command: route output per qualifier, then run it */
		util_out_open(&output_qualifier);
		CLI$DISPATCH();
		util_out_close();
	}
	/* NOTE(review): no REVERT before returning — presumably intentional since the
	 * caller invokes this in an endless loop; confirm against util_ch semantics. */
}
/* Dispatch a single database operation on behalf of the GO-format loader.
 * The VMS behavior being duplicated is that every error is trapped in
 * mupip_load_ch and go_load then continues; the limitations of condition
 * handlers and unwinding on UNIX force these calls to live in this small
 * subroutine rather than inline in go_load.
 */
void go_call_db(int routine, char *parm1, int parm2)
{
	ESTABLISH(mupip_load_ch);
	if (GO_PUT_SUB == routine)
		str2gvkey_gvfunc(parm1, parm2);		/* set the current global key from subscript text */
	else if (GO_PUT_DATA == routine)
		mupip_put_gvdata(parm1, parm2);		/* store the data record under the current key */
	REVERT;
}
/* Transfer control to the active M error handler ($ETRAP, entryref/code $ZTRAP,
 * or device exception handler held in err_act), or to the job-interrupt routine
 * when the pending action is a $ZINTERRUPT.  Builds an indirect-execution frame
 * for the handler text and then completes setup in trans_code_finish().
 * Errors while compiling/dispatching the handler are trapped by trans_code_ch.
 */
void trans_code(void)
{
	mval	dummy;
	int	level2go;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	if (SFT_ZINTR & proc_act_type)
	{	/* Need different translator EP */
		jobinterrupt_process();
		return;
	}
	assert(err_act);
	if (stringpool.base != rts_stringpool.base)
		stringpool = rts_stringpool;	/* make sure we operate on the run-time stringpool */
	assert(SFT_ZTRAP == proc_act_type || SFT_DEV_ACT == proc_act_type);
	/* The frame_pointer->mpc of error-causing M routine should always be set
	 * to 'error_return' irrespective of the validity of $etrap code to make sure
	 * the error-occuring frame is always unwound and the new error is rethrown
	 * at one level below
	 */
	if (IS_ETRAP)
		SET_ERROR_FRAME(frame_pointer);	/* reset error_frame to point to frame_pointer */
	if (!(ztrap_form & ZTRAP_CODE) && !IS_ETRAP && POP_SPECIFIED)
	{	/* entryref-form $ZTRAP with a pop level: unwind stack levels before dispatch.
		 * NOTE(review): level2go appears to be assigned inside the POP_SPECIFIED macro — confirm. */
		GOLEVEL(level2go, TRUE);
	}
	dummy.mvtype = MV_STR;
	dummy.str = *err_act;	/* handler source text to execute */
	TREF(trans_code_pop) = push_mval(&dummy);
	ESTABLISH(trans_code_ch);
	/* code-form handlers run as an indirect line tail; entryref-form as an indirect GOTO */
	op_commarg(TREF(trans_code_pop), ((ztrap_form & ZTRAP_CODE) || IS_ETRAP) ? indir_linetail : indir_goto);
	REVERT;
#	ifdef UNIX
	if (NULL != gtm_err_dev)
	{	/* release the device that raised the error now that the handler frame is set up */
		remove_rms(gtm_err_dev);
		gtm_err_dev = NULL;
	}
#	endif
	trans_code_finish();
	return;
}
/* LKE (lock examiner) image entry point on VMS: initialize the image, check the
 * license, set up runtime structures, then either execute a single "foreign"
 * command supplied on the image invocation line, or enter the interactive
 * command loop (lke_process).  Errors are trapped by util_base_ch.
 */
void lke(void)
{
	char		buff[MAX_LINE];
	$DESCRIPTOR	(command, buff);
	uint4		status;
	short		len;
	bool		dcl;
	DCL_THREADGBL_ACCESS;

	GTM_THREADGBL_INIT;
	gtm_imagetype_init(LKE_IMAGE);
	gtm_env_init();	/* read in all environment variables */
	util_out_open(0);
	SET_EXIT_HANDLER(exi_blk, generic_exit_handler, exi_condition);	/* Establish exit handler */
	ESTABLISH(util_base_ch);
	status = lp_id(&lkid);	/* license check */
	if (SS$_NORMAL != status)
		rts_error(VARLSTCNT(1) status);
	get_page_size();
	stp_init(STP_INITSIZE);
	rts_stringpool = stringpool;
	getjobname();
	INVOKE_INIT_SECSHR_ADDRS;
	ast_init();
	initialize_pattern_table();
	gvinit();
	region_init(TRUE);
	getjobnum();
	/* was a one-shot command supplied on the image invocation line? */
	status = lib$get_foreign(&command, 0, &len, 0);
	if ((status & 1) && len > 0)
	{	/* yes: parse/dispatch that single command, then exit */
		command.dsc$w_length = len;
		status = CLI$DCL_PARSE(&command, &lke_cmd, &lib$get_input, 0, 0);
		if (CLI$_NORMAL == status)
		{
			util_out_open(&output_qualifier);
			CLI$DISPATCH();
			util_out_close();
		}
		lke_exit();
	}
	for (;;)
		lke_process();	/* interactive loop; lke_process calls lke_exit on EOF */
}
/* Close both sides (input and output) of the device pair named by d.
 * The principal device is skipped entirely if either of its sides has already
 * failed, to avoid acting on a dead device.  Errors raised by the close
 * dispatch routines are trapped by lastchance3.
 */
void io_dev_close (io_log_name *d)
{
	static readonly unsigned char p[] = { iop_eol};	/* empty deviceparm list: just the terminator */
	mval pp;

	if (d->iod->pair.in == io_std_device.in && d->iod->pair.out == io_std_device.out)
	{	/* this is the principal device */
		if (prin_in_dev_failure || prin_out_dev_failure)
			return;
	}
	ESTABLISH(lastchance3);
	pp.mvtype = MV_STR;
	pp.str.addr = (char *) p;
	pp.str.len = sizeof(p);
	/* close each side only if it exists and is currently open */
	if (d->iod->pair.in && d->iod->pair.in->state == dev_open)
		(d->iod->pair.in->disp_ptr->close)(d->iod->pair.in, &pp);
	if (d->iod->pair.out && d->iod->pair.out->state == dev_open)
		(d->iod->pair.out->disp_ptr->close)(d->iod->pair.out, &pp);
	REVERT;
}
/* Dispatch a single database operation on behalf of the binary-format loader.
 * Mirrors the VMS behavior of trapping every error in mupip_load_ch and letting
 * bin_load continue afterwards; UNIX condition-handler/unwinding limitations
 * require the trapped calls to be made from this dedicated subroutine.
 */
void bin_call_db(int routine, INTPTR_T parm1, INTPTR_T parm2)
{
	error_def(ERR_CORRUPT);

	ESTABLISH(mupip_load_ch);
	if (BIN_PUT == routine)
		op_gvput((mval *)parm1);			/* store a value under the current global key */
	else if (BIN_BIND == routine)
		gv_bind_name((gd_addr *)parm1, (mstr *)parm2);	/* bind a global name to its region */
	else if (ERR_COR == routine)
		rts_error(VARLSTCNT(4) ERR_CORRUPT, 2, parm1, parm2);	/* report corrupt input record */
	REVERT;
}
/* Second phase of error-handler dispatch (follows trans_code): tag the new frame
 * with the pending action type, preserve the restart point across the handler,
 * and if $ZYERROR is set, compile and dispatch it as an indirect DO.
 * Errors inside the $ZYERROR code itself are trapped by zyerr_ch.
 */
void trans_code_finish(void)
{
	mval	dummy;

	frame_pointer->type = proc_act_type;
	proc_act_type = 0;	/* action is now consumed */
	/* Save/restore restart_pc over dispatch of this error handler */
	PUSH_MV_STENT(MVST_RSTRTPC);
	mv_chain->mv_st_cont.mvs_rstrtpc.restart_pc_save = restart_pc;
	mv_chain->mv_st_cont.mvs_rstrtpc.restart_ctxt_save = restart_ctxt;
	if (0 != dollar_zyerror.str.len)
	{	/* $ZYERROR is set: run it */
		dummy.mvtype = MV_STR;
		dummy.str = dollar_zyerror.str;
		ESTABLISH(zyerr_ch);
		op_commarg(&dummy, indir_do);
		REVERT;
		op_newintrinsic(SV_ZYERROR);	/* for user's convenience */
		assert(NULL == zyerr_frame);
		zyerr_frame = frame_pointer;	/* remember which frame is running $ZYERROR */
	}
	return;
}
void mu_extract(void) { int stat_res, truncate_res; int reg_max_rec, reg_max_key, reg_max_blk, reg_std_null_coll; int iter, format, local_errno, int_nlen; boolean_t freeze = FALSE, logqualifier, success; char format_buffer[FORMAT_STR_MAX_SIZE], ch_set_name[MAX_CHSET_NAME], cli_buff[MAX_LINE], label_buff[LABEL_STR_MAX_SIZE], gbl_name_buff[MAX_MIDENT_LEN + 2]; /* 2 for null and '^' */ glist gl_head, *gl_ptr; gd_region *reg, *region_top; mu_extr_stats global_total, grand_total; uint4 item_code, devbufsiz, maxfield; unsigned short label_len, n_len, ch_set_len, buflen; unsigned char *outbuf, *outptr, *chptr, *leadptr; struct stat statbuf; mval val, curr_gbl_name, op_val, op_pars; mstr chset_mstr; gtm_chset_t saved_out_set; static unsigned char ochset_set = FALSE; static readonly unsigned char open_params_list[] = { (unsigned char)iop_noreadonly, (unsigned char)iop_nowrap, (unsigned char)iop_stream, (unsigned char)iop_eol }; static readonly unsigned char no_param = (unsigned char)iop_eol; coll_hdr extr_collhdr; error_def(ERR_NOSELECT); error_def(ERR_GTMASSERT); error_def(ERR_EXTRACTCTRLY); error_def(ERR_EXTRACTFILERR); error_def(ERR_MUPCLIERR); error_def(ERR_MUNOACTION); error_def(ERR_MUNOFINISH); error_def(ERR_RECORDSTAT); error_def(ERR_NULLCOLLDIFF); /* Initialize all local character arrays to zero before using */ memset(cli_buff, 0, sizeof(cli_buff)); memset(outfilename, 0, sizeof(outfilename)); memset(label_buff, 0, sizeof(label_buff)); memset(format_buffer, 0, sizeof(format_buffer)); active_device = io_curr_device.out; mu_outofband_setup(); if (CLI_PRESENT == cli_present("OCHSET")) { ch_set_len = sizeof(ch_set_name); if (cli_get_str("OCHSET", ch_set_name, &ch_set_len)) { if (0 == ch_set_len) mupip_exit(ERR_MUNOACTION); /* need to change to OPCHSET error when added */ ch_set_name[ch_set_len] = '\0'; #ifdef KEEP_zOS_EBCDIC if ( (iconv_t)0 != active_device->output_conv_cd) ICONV_CLOSE_CD(active_device->output_conv_cd); if (DEFAULT_CODE_SET != 
active_device->out_code_set) ICONV_OPEN_CD(active_device->output_conv_cd, INSIDE_CH_SET, ch_set_name); #else chset_mstr.addr = ch_set_name; chset_mstr.len = ch_set_len; SET_ENCODING(active_device->ochset, &chset_mstr); get_chset_desc(&chset_names[active_device->ochset]); #endif ochset_set = TRUE; } } logqualifier = (CLI_NEGATED != cli_present("LOG")); if (CLI_PRESENT == cli_present("FREEZE")) freeze = TRUE; n_len = sizeof(format_buffer); if (FALSE == cli_get_str("FORMAT", format_buffer, &n_len)) { n_len = sizeof("ZWR") - 1; memcpy(format_buffer, "ZWR", n_len); } int_nlen = n_len; lower_to_upper((uchar_ptr_t)format_buffer, (uchar_ptr_t)format_buffer, int_nlen); if (0 == memcmp(format_buffer, "ZWR", n_len)) format = MU_FMT_ZWR; else if (0 == memcmp(format_buffer, "GO", n_len)) { if (gtm_utf8_mode) { util_out_print("Extract error: GO format is not supported in UTF-8 mode. Use ZWR format.", TRUE); mupip_exit(ERR_MUPCLIERR); } format = MU_FMT_GO; } else if (0 == memcmp(format_buffer, "BINARY", n_len)) format = MU_FMT_BINARY; else { util_out_print("Extract error: bad format type", TRUE); mupip_exit(ERR_MUPCLIERR); } n_len = sizeof(cli_buff); if (FALSE == cli_get_str((char *)select_text, cli_buff, &n_len)) { n_len = 1; cli_buff[0] = '*'; } /* gv_select will select globals */ gv_select(cli_buff, n_len, freeze, (char *)select_text, &gl_head, ®_max_rec, ®_max_key, ®_max_blk); if (!gl_head.next) { rts_error(VARLSTCNT(1) ERR_NOSELECT); mupip_exit(ERR_NOSELECT); } /* For binary format, check whether all regions have same null collation order */ if (MU_FMT_BINARY == format) { for (reg = gd_header->regions, region_top = gd_header->regions + gd_header->n_regions, reg_std_null_coll = -1; reg < region_top ; reg++) { if (reg->open) { if (reg_std_null_coll != reg->std_null_coll) { if (reg_std_null_coll == -1) reg_std_null_coll = reg->std_null_coll; else { rts_error(VARLSTCNT(1) ERR_NULLCOLLDIFF); mupip_exit(ERR_NULLCOLLDIFF); } } } } assert(-1 != reg_std_null_coll); } 
grand_total.recknt = grand_total.reclen = grand_total.keylen = grand_total.datalen = 0; global_total.recknt = global_total.reclen = global_total.keylen = global_total.datalen = 0; n_len = sizeof(outfilename); if (FALSE == cli_get_str("FILE", outfilename, &n_len)) { rts_error(VARLSTCNT(1) ERR_MUPCLIERR); mupip_exit(ERR_MUPCLIERR); } if (-1 == Stat((char *)outfilename, &statbuf)) { if (ENOENT != errno) { local_errno = errno; perror("Error opening output file"); mupip_exit(local_errno); } } else { util_out_print("Error opening output file: !AD -- File exists", TRUE, n_len, outfilename); mupip_exit(ERR_MUNOACTION); } op_pars.mvtype = MV_STR; op_pars.str.len = sizeof(open_params_list); op_pars.str.addr = (char *)open_params_list; op_val.mvtype = MV_STR; op_val.str.len = filename_len = n_len; op_val.str.addr = (char *)outfilename; (*op_open_ptr)(&op_val, &op_pars, 0, 0); ESTABLISH(mu_extract_handler); op_use(&op_val, &op_pars); if (MU_FMT_BINARY == format) { /* binary header label format: * fixed length text, fixed length date & time, * fixed length max blk size, fixed length max rec size, fixed length max key size, fixed length std_null_coll * 32-byte padded user-supplied string */ outbuf = (unsigned char *)malloc(sizeof(BIN_HEADER_LABEL) + sizeof(BIN_HEADER_DATEFMT) - 1 + 4 * BIN_HEADER_NUMSZ + BIN_HEADER_LABELSZ); outptr = outbuf; MEMCPY_LIT(outptr, BIN_HEADER_LABEL); outptr += STR_LIT_LEN(BIN_HEADER_LABEL); stringpool.free = stringpool.base; op_horolog(&val); stringpool.free = stringpool.base; op_fnzdate(&val, (mval *)&mu_bin_datefmt, &null_str, &null_str, &val); memcpy(outptr, val.str.addr, val.str.len); outptr += val.str.len; WRITE_NUMERIC(reg_max_blk); WRITE_NUMERIC(reg_max_rec); WRITE_NUMERIC(reg_max_key); WRITE_NUMERIC(reg_std_null_coll); if (gtm_utf8_mode) { MEMCPY_LIT(outptr, UTF8_NAME); label_len = STR_LIT_LEN(UTF8_NAME); outptr[label_len++] = ' '; } else label_len = 0; buflen = sizeof(label_buff); if (FALSE == cli_get_str("LABEL", label_buff, &buflen)) { 
MEMCPY_LIT(&outptr[label_len], EXTR_DEFAULT_LABEL); buflen = STR_LIT_LEN(EXTR_DEFAULT_LABEL); } else memcpy(&outptr[label_len], label_buff, buflen); label_len += buflen; if (label_len > BIN_HEADER_LABELSZ) { /* Label size exceeds the space, so truncate the label and back off to the valid beginning (i.e. to the leading byte) of the last character that can entirely fit in the space */ label_len = BIN_HEADER_LABELSZ; chptr = &outptr[BIN_HEADER_LABELSZ]; UTF8_LEADING_BYTE(chptr, outptr, leadptr); assert(chptr - leadptr < 4); if (leadptr < chptr) label_len -= (chptr - leadptr); } outptr += label_len; for (iter = label_len; iter < BIN_HEADER_LABELSZ; iter++) *outptr++ = ' '; label_len = outptr - outbuf; if (!ochset_set) { #ifdef KEEP_zOS_EBCDIC /* extract ascii header for binary by default */ /* Do we need to restore it somewhere? */ saved_out_set = (io_curr_device.out)->out_code_set; (io_curr_device.out)->out_code_set = DEFAULT_CODE_SET; #else saved_out_set = (io_curr_device.out)->ochset; (io_curr_device.out)->ochset = CHSET_M; #endif } op_val.str.addr = (char *)(&label_len); op_val.str.len = sizeof(label_len); op_write(&op_val); op_val.str.addr = (char *)outbuf; op_val.str.len = label_len; op_write(&op_val); } else { assert((MU_FMT_GO == format) || (MU_FMT_ZWR == format)); label_len = sizeof(label_buff); if (FALSE == cli_get_str("LABEL", label_buff, &label_len)) { MEMCPY_LIT(label_buff, EXTR_DEFAULT_LABEL); label_len = STR_LIT_LEN(EXTR_DEFAULT_LABEL); } if (gtm_utf8_mode) { label_buff[label_len++] = ' '; MEMCPY_LIT(&label_buff[label_len], UTF8_NAME); label_len += STR_LIT_LEN(UTF8_NAME); } label_buff[label_len++] = '\n'; op_val.mvtype = MV_STR; op_val.str.len = label_len; op_val.str.addr = label_buff; op_write(&op_val); stringpool.free = stringpool.base; op_horolog(&val); stringpool.free = stringpool.base; op_fnzdate(&val, &datefmt, &null_str, &null_str, &val); op_val = val; op_val.mvtype = MV_STR; op_write(&op_val); if (MU_FMT_ZWR == format) { op_val.str.addr = " ZWR"; 
op_val.str.len = sizeof(" ZWR") - 1; op_write(&op_val); } op_wteol(1); } REVERT; ESTABLISH(mu_extract_handler1); success = TRUE; for (gl_ptr = gl_head.next; gl_ptr; gl_ptr = gl_ptr->next) { if (mu_ctrly_occurred) break; if (mu_ctrlc_occurred) { gbl_name_buff[0]='^'; memcpy(&gbl_name_buff[1], gl_ptr->name.str.addr, gl_ptr->name.str.len); gtm_putmsg(VARLSTCNT(8) ERR_RECORDSTAT, 6, gl_ptr->name.str.len + 1, gbl_name_buff, global_total.recknt, global_total.keylen, global_total.datalen, global_total.reclen); mu_ctrlc_occurred = FALSE; } gv_bind_name(gd_header, &gl_ptr->name.str); if (MU_FMT_BINARY == format) { label_len = sizeof(extr_collhdr); op_val.mvtype = MV_STR; op_val.str.addr = (char *)(&label_len); op_val.str.len = sizeof(label_len); op_write(&op_val); extr_collhdr.act = gv_target->act; extr_collhdr.nct = gv_target->nct; extr_collhdr.ver = gv_target->ver; op_val.str.addr = (char *)(&extr_collhdr); op_val.str.len = sizeof(extr_collhdr); op_write(&op_val); } /* Note: Do not change the order of the expression below. * Otherwise if success is FALSE, mu_extr_gblout() will not be called at all. 
* We want mu_extr_gblout() to be called irrespective of the value of success */ success = mu_extr_gblout(&gl_ptr->name, &global_total, format) && success; if (logqualifier) { gbl_name_buff[0]='^'; memcpy(&gbl_name_buff[1], gl_ptr->name.str.addr, gl_ptr->name.str.len); gtm_putmsg(VARLSTCNT(8) ERR_RECORDSTAT, 6, gl_ptr->name.str.len + 1, gbl_name_buff, global_total.recknt, global_total.keylen, global_total.datalen, global_total.reclen); mu_ctrlc_occurred = FALSE; } grand_total.recknt += global_total.recknt; if (grand_total.reclen < global_total.reclen) grand_total.reclen = global_total.reclen; if (grand_total.keylen < global_total.keylen) grand_total.keylen = global_total.keylen; if (grand_total.datalen < global_total.datalen) grand_total.datalen = global_total.datalen; } op_val.mvtype = op_pars.mvtype = MV_STR; op_val.str.addr = (char *)outfilename;; op_val.str.len = filename_len; op_pars.str.len = sizeof(no_param); op_pars.str.addr = (char *)&no_param; op_close(&op_val, &op_pars); REVERT; if (mu_ctrly_occurred) { gtm_putmsg(VARLSTCNT(1) ERR_EXTRACTCTRLY); mupip_exit(ERR_MUNOFINISH); } gtm_putmsg(VARLSTCNT(8) ERR_RECORDSTAT, 6, LEN_AND_LIT(gt_lit), grand_total.recknt, grand_total.keylen, grand_total.datalen, grand_total.reclen); if (MU_FMT_BINARY == format) { /* truncate the last newline charactor flushed by op_close */ STAT_FILE((char *)outfilename, &statbuf, stat_res); if (-1 == stat_res) rts_error(VARLSTCNT(1) errno); TRUNCATE_FILE((const char *)outfilename, statbuf.st_size - 1, truncate_res); if (-1 == truncate_res) rts_error(VARLSTCNT(1) errno); } mupip_exit(success ? SS_NORMAL : ERR_MUNOFINISH); }
/* Prepare one region for MUPIP INTEG and verify its file header.
 * Opens the region, then either freezes it (regular integ / read-only) or starts
 * an online snapshot (online integ), takes a consistent copy of the file header,
 * initializes encryption handles if needed, and finally runs mu_int_fhead().
 * *return_value is set TRUE only on full success.  If return_after_open is TRUE,
 * the function returns (successfully) right after the region is opened.
 * Regions that cannot be processed bump mu_int_skipreg_cnt and return.
 * Errors are trapped by mu_int_reg_ch.
 */
void mu_int_reg(gd_region *reg, boolean_t *return_value, boolean_t return_after_open)
{
	boolean_t		read_only, was_crit;
	freeze_status		status;
	node_local_ptr_t	cnl;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	sgmnt_data		*csd_copy_ptr;
	gd_segment		*seg;
	int			gtmcrypt_errno;
#	ifdef DEBUG
	boolean_t		need_to_wait = FALSE;
	int			trynum;
	uint4			curr_wbox_seq_num;
#	endif

	*return_value = FALSE;
	jnlpool_init_needed = TRUE;
	ESTABLISH(mu_int_reg_ch);
	if (dba_usr == reg->dyn.addr->acc_meth)
	{	/* user-defined access method: not a GDS file, cannot integ */
		util_out_print("!/Can't integ region !AD; not GDS format", TRUE, REG_LEN_STR(reg));
		mu_int_skipreg_cnt++;
		return;
	}
	gv_cur_region = reg;
	if (reg_cmcheck(reg))
	{	/* remote (GT.CM) region */
		util_out_print("!/Can't integ region across network", TRUE);
		mu_int_skipreg_cnt++;
		return;
	}
	gvcst_init(gv_cur_region);
	if (gv_cur_region->was_open)
	{	/* already open under another name */
		gv_cur_region->open = FALSE;
		return;
	}
	if (return_after_open)
	{
		*return_value = TRUE;
		return;
	}
	change_reg();
	csa = &FILE_INFO(gv_cur_region)->s_addrs;
	cnl = csa->nl;
	csd = csa->hdr;
	read_only = gv_cur_region->read_only;
	assert(NULL != mu_int_master);
	/* Ensure that we don't see an increase in the file header and master map size compared to it's maximum values */
	assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd)));
	/* ONLINE INTEG if asked for explicitly by specifying -ONLINE is an error if the db has partial V4 blocks.
	 * However, if -ONLINE is not explicitly specified but rather assumed implicitly (as default for -REG)
	 * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified
	 */
	if (!csd->fully_upgraded)
	{
		ointeg_this_reg = FALSE;	/* Turn off ONLINE INTEG for this region */
		if (online_specified)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region));
			util_out_print(NO_ONLINE_ERR_MSG, TRUE);
			mu_int_skipreg_cnt++;
			return;
		}
	}
	if (!ointeg_this_reg || read_only)
	{	/* non-online integ: freeze the region so the on-disk state is stable */
		status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE, FALSE, !read_only);
		switch (status)
		{
			case REG_ALREADY_FROZEN:
				if (csa->read_only_fs)
					break;
				util_out_print("!/Database for region !AD is already frozen, not integing",
					TRUE, REG_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_FLUSH_ERROR:
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4,
					LEN_AND_LIT(MUPIP_INTEG), DB_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_HAS_KIP:
				/* We have already waited for KIP to reset. This time do not wait for KIP */
				status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE, FALSE, !read_only);
				if (REG_ALREADY_FROZEN == status)
				{
					if (csa->read_only_fs)
						break;
					util_out_print("!/Database for region !AD is already frozen, not integing",
						TRUE, REG_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				} else if (REG_FLUSH_ERROR == status)
				{
					gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4,
						LEN_AND_LIT(MUPIP_INTEG), DB_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				}
				assert(REG_FREEZE_SUCCESS == status);
				/* no break */
			case REG_FREEZE_SUCCESS:
				break;
			default:
				assert(FALSE);
				/* no break */
		}
		if (read_only && (dba_bg == csa->hdr->acc_meth) && !mu_int_wait_rdonly(csa, MUPIP_INTEG))
		{
			mu_int_skipreg_cnt++;
			return;
		}
	}
	if (!ointeg_this_reg)
	{	/* Take a copy of the file-header. To ensure it is consistent, do it while holding crit. */
		was_crit = csa->now_crit;
		if (!was_crit)
			grab_crit(gv_cur_region);
		memcpy((uchar_ptr_t)&mu_int_data, (uchar_ptr_t)csd, SIZEOF(sgmnt_data));
		if (!was_crit)
			rel_crit(gv_cur_region);
		memcpy(mu_int_master, MM_ADDR(csd), MASTER_MAP_SIZE(csd));
		csd_copy_ptr = &mu_int_data;
	} else
	{	/* online integ: work off a snapshot's shadow file header instead of freezing */
		if (!ss_initiate(gv_cur_region, util_ss_ptr, &csa->ss_ctx, preserve_snapshot, MUPIP_INTEG))
		{
			mu_int_skipreg_cnt++;
			assert(NULL != csa->ss_ctx);
			ss_release(&csa->ss_ctx);
			ointeg_this_reg = FALSE;	/* Turn off ONLINE INTEG for this region */
			assert(process_id != cnl->in_crit);	/* Ensure ss_initiate released the crit before returning */
			assert(!FROZEN_HARD(csd));	/* Ensure region is unfrozen before returning from ss_initiate */
			assert(INTRPT_IN_SS_INITIATE != intrpt_ok_state);	/* Ensure ss_initiate released intrpt_ok_state */
			return;
		}
		assert(process_id != cnl->in_crit);	/* Ensure ss_initiate released the crit before returning */
		assert(INTRPT_IN_SS_INITIATE != intrpt_ok_state);	/* Ensure ss_initiate released intrpt_ok_state */
		csd_copy_ptr = &csa->ss_ctx->ss_shm_ptr->shadow_file_header;
#		if defined(DEBUG)
		curr_wbox_seq_num = 1;
		cnl->wbox_test_seq_num = curr_wbox_seq_num;	/* indicate we took the next step */
		GTM_WHITE_BOX_TEST(WBTEST_OINTEG_WAIT_ON_START, need_to_wait, TRUE);
		if (need_to_wait)	/* wait for them to take next step */
		{
			trynum = 30;	/* given 30 cycles to tell you to go */
			while ((curr_wbox_seq_num == cnl->wbox_test_seq_num) && trynum--)
				LONG_SLEEP(1);
			cnl->wbox_test_seq_num++;	/* let them know we took the next step */
			assert(trynum);
		}
#		endif
	}
	if (USES_ANY_KEY(csd_copy_ptr))
	{	/* Initialize mu_int_encrypt_key_handle to be used in mu_int_read */
		seg = gv_cur_region->dyn.addr;
		INIT_DB_OR_JNL_ENCRYPTION(&mu_int_encr_handles, csd_copy_ptr, seg->fname_len, (char *)seg->fname,
			gtmcrypt_errno);
		if (0 != gtmcrypt_errno)
		{
			GTMCRYPT_REPORT_ERROR(gtmcrypt_errno, gtm_putmsg, seg->fname_len, seg->fname);
			mu_int_skipreg_cnt++;
			return;
		}
	}
	*return_value = mu_int_fhead();
	REVERT;
	return;
}
void dir_srch(parse_blk *pfil) { struct stat statbuf; int stat_res; lv_val *dir1, *dir2, *tmp; mstr tn; short p2_len; char filb[MAX_FBUFF + 1], patb[SIZEOF(ptstr)], *c, *lastd, *top, *p2, *c1, ch; mval pat_mval, sub, compare; boolean_t wildname, seen_wd; struct dirent *dent; DIR *dp; plength *plen; int closedir_res; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; op_kill(TREF(zsearch_dir1)); op_kill(TREF(zsearch_dir2)); if (!pfil->b_name) return; /* nothing to search for */ ESTABLISH(dir_ch); pat_mval.mvtype = MV_STR; pat_mval.str.addr = patb; /* patb should be SIZEOF(ptstr.buff) but instead is SIZEOF(ptstr) since the C compiler * complains about the former and the latter is just 4 bytes more */ pat_mval.str.len = 0; sub.mvtype = MV_STR; sub.str.len = 0; compare.mvtype = MV_STR; compare.str.len = 0; wildname = (pfil->fnb & F_WILD_NAME) != 0; dir1 = TREF(zsearch_dir1); dir2 = TREF(zsearch_dir2); if (pfil->fnb & F_WILD_DIR) { seen_wd = FALSE; for (c = pfil->l_dir, lastd = c, top = c + pfil->b_dir; c < top;) { ch = *c++; if (ch == '/') /* note the start of each directory segment */ { if (seen_wd) break; lastd = c; } if (ch == '?' || ch == '*') seen_wd = TRUE; } assert(c <= top); sub.str.addr = pfil->l_dir; sub.str.len = INTCAST(lastd - sub.str.addr); tmp = op_putindx(VARLSTCNT(2) dir1, &sub); tmp->v.mvtype = MV_STR; tmp->v.str.len = 0; for (;;) { tn.addr = lastd; /* wildcard segment */ tn.len = INTCAST(c - lastd - 1); lastd = c; genpat(&tn, &pat_mval); seen_wd = FALSE; p2 = c - 1; for (; c < top;) { ch = *c++; if (ch == '/') /* note the start of each directory segment */ { if (seen_wd) break; lastd = c; } if (ch == '?' 
|| ch == '*') seen_wd = TRUE; } p2_len = lastd - p2; /* length of non-wild segment after wild section */ for (;;) { pop_top(dir1, &sub); /* get next item off the top */ if (!sub.str.len) break; memcpy(filb, sub.str.addr, sub.str.len); filb[sub.str.len] = 0; sub.str.addr = filb; dp = OPENDIR(filb); if (!dp) continue; while (READDIR(dp, dent)) { compare.str.addr = &dent->d_name[0]; compare.str.len = STRLEN(&dent->d_name[0]); UNICODE_ONLY( if (gtm_utf8_mode) compare.mvtype &= ~MV_UTF_LEN; /* to force "char_len" to be recomputed * in do_pattern */ ) assert(compare.str.len); if (('.' == dent->d_name[0]) && ((1 == compare.str.len) || ((2 == compare.str.len) && ('.' == dent->d_name[1])))) continue; /* don't want to read . and .. */ if (compare.str.len + sub.str.len + p2_len > MAX_FBUFF) continue; if (do_pattern(&compare, &pat_mval)) { /* got a hit */ ENSURE_STP_FREE_SPACE(compare.str.len + sub.str.len + p2_len + 1); /* concatenate directory and name */ c1 = (char *)stringpool.free; tn = sub.str; s2pool(&tn); tn = compare.str; s2pool(&tn); tn.addr = p2; tn.len = p2_len; s2pool(&tn); *stringpool.free++ = 0; compare.str.addr = c1; compare.str.len += sub.str.len + p2_len; STAT_FILE(compare.str.addr, &statbuf, stat_res); if (-1 == stat_res) continue; if (!(statbuf.st_mode & S_IFDIR)) continue; /* put in results tree */ tmp = op_putindx(VARLSTCNT(2) dir2, &compare); tmp->v.mvtype = MV_STR; tmp->v.str.len = 0; } } CLOSEDIR(dp, closedir_res); } tmp = dir1; dir1 = dir2; dir2 = tmp; if (c >= top) break; }
/* MUPIP image entry point (VMS): initialize the image and environment, perform
 * licensing checks (bypassed when built with NOLICENSE), set up runtime
 * structures, then dispatch the requested MUPIP command via mupip_getcmd().
 * Errors are trapped by util_base_ch.
 */
mupip()
{
	unsigned int	status;
	int4		inid = 0;
	int4		nid = 0;	/* system ID, node number */
	int4		days = 128;	/* days to expiration */
	int4		lic_x = 0;	/* license value */
	char		*h = NULL;	/* license data base */
	char		*pak = NULL;	/* pak record */
	int4		mdl = 0;	/* hardw. model type */
	$DESCRIPTOR(dprd, gtm_product);
	$DESCRIPTOR(dver, gtm_version);
	DCL_THREADGBL_ACCESS;

	GTM_THREADGBL_INIT;
	gtm_imagetype_init(MUPIP_IMAGE);
	gtm_env_init();	/* read in all environment variables */
	licensed = TRUE;
	TREF(transform) = TRUE;
	in_backup = FALSE;
	util_out_open(0);
	SET_EXIT_HANDLER(exi_blk, generic_exit_handler, exi_condition);	/* Establish exit handler */
	ESTABLISH(util_base_ch);
	get_page_size();
	getjobnum();
	INVOKE_INIT_SECSHR_ADDRS;
#	ifdef NOLICENSE
	status = SS$_NORMAL;
	lid = 1;
	lic_x = 32767;
#	else
	if (NULL == (h = la_getdb(LMDB)))	/* license db in mem */
		status = LP_NOCNFDB;
	else
		status = SS$_NORMAL;
	if (1 == (status & 1))	/* licensing: node+ system */
		status = lm_mdl_nid(&mdl, &nid, &inid);
	if (1 == (status & 1))	/* licensing: license */
	{
		dprd.dsc$w_length = gtm_product_len;
		dver.dsc$w_length = gtm_version_len;
		status = lp_licensed(h, &dprd, &dver, mdl, nid, &lid, &lic_x, &days, pak);
	}
#	endif
	if (1 == (status & 1))	/* licensing: license units */
		status = LP_ACQUIRE(pak, lic_x, lid, &lkid);	/* def in cryptdef */
#	ifdef NOLICENSE
	status = SS$_NORMAL;
#	else
	if (LP_NOCNFDB != status)
		la_freedb(h);	/* release the in-memory license db unless it was never obtained */
	if (1 == (status & 1))	/* licensing */
	{
		if (days < 14)
			lm_putmsgu(ERR_WILLEXPIRE, 0, 0);	/* warn when license expires within two weeks */
	} else
	{
		licensed = FALSE;
		if (LP_INVCSM != status)
			rts_error(VARLSTCNT(1) status);
	}
#	endif
	ast_init();
	initialize_pattern_table();
	INIT_GBL_ROOT();
	stp_init(STP_INITSIZE);
	rts_stringpool = stringpool;
	mupip_getcmd();	/* parse and dispatch the MUPIP command */
	mupip_exit(SS$_NORMAL);
}
/* Run down (close) the current database region (gv_cur_region): flush dirty
 * buffers and journal records as appropriate, sync/close the database file,
 * detach from shared memory and, if this process is the very last user,
 * remove the shared memory segment and semaphores.
 * Statement ORDER in this routine is load-bearing: semaphore protocol, crit
 * acquisition/release, flush-before-close, and shmdt-last must not be reordered.
 */
void gds_rundown(void)
{
	boolean_t		cancelled_dbsync_timer, cancelled_timer, have_standalone_access, ipc_deleted, skip_database_rundown;
	boolean_t		is_cur_process_ss_initiator, remove_shm, vermismatch, we_are_last_user, we_are_last_writer, is_mm;
	now_t			now;					/* for GET_CUR_TIME macro */
	char			*time_ptr, time_str[CTIME_BEFORE_NL + 2];	/* for GET_CUR_TIME macro */
	gd_region		*reg;
	int			save_errno, status, rc;
	int4			semval, ftok_semval, sopcnt, ftok_sopcnt;
	short			crash_count;
	sm_long_t		munmap_len;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	struct shmid_ds		shm_buf;
	struct sembuf		sop[2], ftok_sop[2];
	uint4			jnl_status;
	unix_db_info		*udi;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;
	shm_snapshot_t		*ss_shm_ptr;
	uint4			ss_pid, onln_rlbk_pid, holder_pid;
	boolean_t		was_crit;

	jnl_status = 0;
	reg = gv_cur_region;			/* Local copy */
	/* early out for cluster regions
	 * to avoid tripping the assert below.
	 * Note:
	 *	This early out is consistent with VMS.  It has been
	 *	noted that all of the gtcm assignments
	 *	to gv_cur_region should use the TP_CHANGE_REG
	 *	macro.  This would also avoid the assert problem
	 *	and should be done eventually.
	 */
	if (dba_cm == reg->dyn.addr->acc_meth)
		return;
	udi = FILE_INFO(reg);
	csa = &udi->s_addrs;
	csd = csa->hdr;
	assert(csa == cs_addrs && csd == cs_data);
	if ((reg->open) && (dba_usr == csd->acc_meth))
	{	/* user-supplied access method: delegate the rundown entirely */
		change_reg();
		gvusr_rundown();
		return;
	}
	ESTABLISH(gds_rundown_ch);
	assert(reg->open);	/* if we failed to open, dbinit_ch should have taken care of proper clean up */
	assert(!reg->opening);	/* see comment above */
	switch(csd->acc_meth)
	{	/* Pass mm and bg through */
		case dba_bg:
			is_mm = FALSE;
			break;
		case dba_mm:
			is_mm = TRUE;
			break;
		case dba_usr:
			assert(FALSE);
		default:
			REVERT;
			return;
	}
	assert(!csa->hold_onto_crit || (csa->now_crit && jgbl.onlnrlbk));
	/* If we are online rollback, we should already be holding crit and should release it only at the end of this module. This
	 * is usually done by noting down csa->now_crit in a local variable (was_crit) and using it whenever we are about to
	 * grab_crit. But, there are instances (like mupip_set_journal.c) where we grab_crit but invoke gds_rundown without any
	 * preceding rel_crit. Such code relies on the fact that gds_rundown does rel_crit unconditionally (to get locks to a known
	 * state). So, augment csa->now_crit with jgbl.onlnrlbk to track if we can rel_crit unconditionally or not in gds_rundown.
	 */
	was_crit = (csa->now_crit && jgbl.onlnrlbk);
	/* Cancel any pending flush timer for this region by this task */
	cancelled_timer = FALSE;
	cancelled_dbsync_timer = FALSE;
	CANCEL_DB_TIMERS(reg, csa, cancelled_timer, cancelled_dbsync_timer);
	we_are_last_user = FALSE;
	if (!csa->persistent_freeze)
		region_freeze(reg, FALSE, FALSE, FALSE);
	if (!was_crit)
	{
		rel_crit(reg);		/* get locks to known state */
		mutex_cleanup(reg);
	}
	DEFER_INTERRUPTS(INTRPT_IN_GDS_RUNDOWN);
	/* If the process has standalone access, it has udi->grabbed_access_sem set to TRUE at this point. Note that down
	 * in a local variable as the udi->grabbed_access_sem is set to TRUE even for non-standalone access below and hence
	 * we can't rely on that later to determine if the process had standalone access or not when it entered this function.
	 * We need to guarantee that no one else accesses the database file header when semid/shmid fields are reset.
	 * We already have created ftok semaphore in db_init or, mu_rndwn_file and did not remove it.
	 * So just lock it. We do it in blocking mode.
	 */
	have_standalone_access = udi->grabbed_access_sem; /* process holds standalone access */
	/* The only process that can invoke gds_rundown while holding access control semaphore is RECOVER/ROLLBACK. All the others
	 * (like MUPIP SET -FILE/MUPIP EXTEND would have invoked db_ipcs_reset() before invoking gds_rundown (from
	 * mupip_exit_handler). The only exception is when these processes encounter a terminate signal and they reach
	 * mupip_exit_handler while holding access control semaphore. Assert accordingly.
	 */
	assert(!have_standalone_access || mupip_jnl_recover || process_exiting);
	/* If we have standalone access, then ensure that a concurrent online rollback cannot be running at the same time as it
	 * needs the access control lock as well. The only exception is we are online rollback and currently running down.
	 */
	onln_rlbk_pid = csa->nl->onln_rlbk_pid;
	assert(!have_standalone_access || mupip_jnl_recover || !onln_rlbk_pid || !is_proc_alive(onln_rlbk_pid, 0));
	skip_database_rundown = FALSE;
	if (!have_standalone_access)
	{	/* We need to guarantee that no one else accesses the database file header when semid/shmid fields are reset.
		 * We already have created ftok semaphore in db_init or mu_rndwn_file and did not remove it. So just
		 * lock it. We do it in blocking mode.
		 */
		if (!ftok_sem_lock(reg, FALSE, FALSE))
			rts_error(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
		FTOK_TRACE(csa, csa->ti->curr_tn, ftok_ops_lock, process_id);
		/* Acquire the access-control (rundown) semaphore: wait-for-zero then increment, atomically */
		sop[0].sem_num = 0; sop[0].sem_op = 0;	/* Wait for 0 */
		sop[1].sem_num = 0; sop[1].sem_op = 1;	/* Lock */
		sopcnt = 2;
		sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO | IPC_NOWAIT; /* Don't wait the first time thru */
		SEMOP(udi->semid, sop, sopcnt, status, NO_WAIT);
		if (-1 == status)			/* We couldn't get it in one shot -- see if we already have it */
		{
			save_errno = errno;
			holder_pid = semctl(udi->semid, 0, GETPID);
			if (holder_pid == process_id)
			{
				send_msg(VARLSTCNT(5) MAKE_MSG_INFO(ERR_CRITSEMFAIL), 2, DB_LEN_STR(reg),
						ERR_RNDWNSEMFAIL);
				REVERT;
				ENABLE_INTERRUPTS(INTRPT_IN_GDS_RUNDOWN);
				return;			/* Already in rundown for this region */
			}
			if (EAGAIN != save_errno)
			{
				assert(FALSE);
				rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("gds_rundown first semop/semctl"), save_errno);
			}
			/* Before attempting again in the blocking mode, see if the holding process is an online rollback.
			 * If so, it is likely we won't get the access control semaphore anytime soon. In that case, we
			 * are better off skipping rundown and continuing with sanity cleanup and exit.
			 */
			skip_database_rundown = (onln_rlbk_pid || csd->file_corrupt);
			if (!skip_database_rundown)
			{
				sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO;	/* Try again - blocking this time */
				SEMOP(udi->semid, sop, 2, status, FORCED_WAIT);
				if (-1 == status)			/* We couldn't get it at all.. */
					rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
			}
		}
		udi->grabbed_access_sem = !skip_database_rundown;
	} /* else we hold the access control semaphore and therefore have standalone access. We do not release it now - we
	   * release it later in mupip_exit_handler.c. Since we already hold the access control semaphore, we don't need the
	   * ftok semaphore and trying it could cause deadlock
	   */
	/* At this point we are guaranteed no one else is doing a db_init/rundown as we hold the access control semaphore */
	assert(csa->ref_cnt);	/* decrement private ref_cnt before shared ref_cnt decrement. */
	csa->ref_cnt--;		/* Currently journaling logic in gds_rundown() in VMS relies on this order to detect last writer */
	assert(!csa->ref_cnt);
	--csa->nl->ref_cnt;
	if (memcmp(csa->nl->now_running, gtm_release_name, gtm_release_name_len + 1))
	{	/* VERMISMATCH condition. Possible only if DSE */
		assert(dse_running);
		vermismatch = TRUE;
	} else
		vermismatch = FALSE;
	if (-1 == shmctl(udi->shmid, IPC_STAT, &shm_buf))
	{
		save_errno = errno;
		rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
				RTS_ERROR_TEXT("gds_rundown shmctl"), save_errno);
	} else
		we_are_last_user = (1 == shm_buf.shm_nattch) && !vermismatch;	/* our attach is the only one left */
	assert(!have_standalone_access || we_are_last_user || jgbl.onlnrlbk);	/* recover => one user except ONLINE ROLLBACK */
	if (-1 == (semval = semctl(udi->semid, 1, GETVAL)))
		rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
	we_are_last_writer = (1 == semval) && (FALSE == reg->read_only) && !vermismatch;/* There's one writer left and I am it */
	assert(!we_are_last_writer || !skip_database_rundown);
	assert(!we_are_last_user || !skip_database_rundown);
	assert(!(have_standalone_access && !reg->read_only) || we_are_last_writer || jgbl.onlnrlbk);
			/* recover + R/W region => one writer except ONLINE ROLLBACK */
	if (!have_standalone_access && (-1 == (ftok_semval = semctl(udi->ftok_semid, 1, GETVAL))))
		rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
	if (NULL != csa->ss_ctx)
		ss_destroy_context(csa->ss_ctx);
	/* SS_MULTI: If multiple snapshots are supported, then we have to run through each of the snapshots */
	assert(1 == MAX_SNAPSHOTS);
	ss_shm_ptr = (shm_snapshot_ptr_t)SS_GETSTARTPTR(csa);
	ss_pid = ss_shm_ptr->ss_info.ss_pid;
	is_cur_process_ss_initiator = (process_id == ss_pid);
	if (ss_pid && (is_cur_process_ss_initiator || we_are_last_user))
	{
		/* Try getting snapshot crit latch. If we don't get latch, we won't hang for eternity and will skip
		 * doing the orphaned snapshot cleanup. It will be cleaned up eventually either by subsequent MUPIP
		 * INTEG or by a MUPIP RUNDOWN.
		 */
		if (ss_get_lock_nowait(reg) && (ss_pid == ss_shm_ptr->ss_info.ss_pid)
			&& (is_cur_process_ss_initiator || !is_proc_alive(ss_pid, 0)))
		{
			ss_release(NULL);
			ss_release_lock(reg);
		}
	}
	/* If csa->nl->donotflush_dbjnl is set, it means mupip recover/rollback was interrupted and therefore we need not flush
	 * shared memory contents to disk as they might be in an inconsistent state. Moreover, any more flushing will only cause
	 * future rollback to undo more journal records (PBLKs). In this case, we will go ahead and remove shared memory (without
	 * flushing the contents) in this routine. A reissue of the recover/rollback command will restore the database to a
	 * consistent state.
	 */
	if (!csa->nl->donotflush_dbjnl && !reg->read_only && !vermismatch)
	{	/* If we had an orphaned block and were interrupted, set wc_blocked so we can invoke wcs_recover. Do it ONLY
		 * if there is NO concurrent online rollback running (as we need crit to set wc_blocked)
		 */
		if (csa->wbuf_dqd)
		{	/* If we had an orphaned block and were interrupted, mupip_exit_handler will invoke secshr_db_clnup which
			 * will clear this field and so we should never come to gds_rundown with a non-zero wbuf_dqd. The only
			 * exception is if we are recover/rollback in which case gds_rundown (from mur_close_files) is invoked
			 * BEFORE secshr_db_clnup in mur_close_files.
			 * Note: It is NOT possible for online rollback to reach here with wbuf_dqd being non-zero. This is because
			 * the moment we apply the first PBLK, we stop all interrupts and hence can never be interrupted in
			 * wcs_wtstart or wcs_get_space. Assert accordingly.
			 */
			assert(mupip_jnl_recover && !jgbl.onlnrlbk && !skip_database_rundown);
			if (!was_crit)
				grab_crit(reg);
			SET_TRACEABLE_VAR(csd->wc_blocked, TRUE);
			BG_TRACE_PRO_ANY(csa, wcb_gds_rundown);
			send_msg(VARLSTCNT(8) ERR_WCBLOCKED, 6, LEN_AND_LIT("wcb_gds_rundown"),
				process_id, &csa->ti->curr_tn, DB_LEN_STR(reg));
			csa->wbuf_dqd = 0;
			wcs_recover(reg);
			if (is_mm)
			{
				assert(FALSE);
				csd = csa->hdr;
			}
			BG_TRACE_PRO_ANY(csa, lost_block_recovery);
			if (!was_crit)
				rel_crit(reg);
		}
		if (JNL_ENABLED(csd) && IS_GTCM_GNP_SERVER_IMAGE)
			originator_prc_vec = NULL;
		/* If we are the last writing user, then everything must be flushed */
		if (we_are_last_writer)
		{	/* Time to flush out all of our buffers */
			if (is_mm)
			{
				if (csa->total_blks != csa->ti->total_blks)	/* do remap if file had been extended */
				{
					if (!was_crit)
						grab_crit(reg);
					wcs_mm_recover(reg);
					csd = csa->hdr;
					if (!was_crit)
						rel_crit(reg);
				}
				csa->nl->remove_shm = TRUE;
			}
			if (csd->wc_blocked && jgbl.onlnrlbk)
			{	/* if the last update done by online rollback was not committed in the normal code-path but was
				 * completed by secshr_db_clnup, wc_blocked will be set to TRUE. But, since online rollback never
				 * invokes grab_crit (since csa->hold_onto_crit is set to TRUE), wcs_recover is never invoked. This
				 * could result in the last update never getting flushed to the disk and if online rollback happened
				 * to be the last writer then the shared memory will be flushed and removed and the last update will
				 * be lost. So, force wcs_recover if we find ourselves in such a situation. But, wc_blocked is
				 * possible only if phase1 or phase2 errors are induced using white box test cases
				 */
				assert(WB_COMMIT_ERR_ENABLED);
				wcs_recover(reg);
			}
			/* Note WCSFLU_SYNC_EPOCH ensures the epoch is synced to the journal and indirectly
			 * also ensures that the db is fsynced. We don't want to use it in the calls to
			 * wcs_flu() from t_end() and tp_tend() since we can defer it to out-of-crit there.
			 * In this case, since we are running down, we don't have any such option.
			 */
			csa->nl->remove_shm = wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_SYNC_EPOCH);
			/* Since we_are_last_writer, we should be guaranteed that wcs_flu() did not change csd, (in
			 * case of MM for potential file extension), even if it did a grab_crit().  Therefore, make
			 * sure that's true.
			 */
			assert(csd == csa->hdr);
			assert(0 == memcmp(csd->label, GDS_LABEL, GDS_LABEL_SZ - 1));
		} else if (((cancelled_timer && (0 > csa->nl->wcs_timers)) || cancelled_dbsync_timer)
				&& !skip_database_rundown)
		{	/* cancelled pending db or jnl flush timers - flush database and journal buffers to disk */
			if (!was_crit)
				grab_crit(reg);
			/* we need to sync the epoch as the fact that there is no active pending flush timer implies
			 * there will be noone else who will flush the dirty buffers and EPOCH to disk in a timely fashion
			 */
			wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_SYNC_EPOCH);
			if (!was_crit)
				rel_crit(reg);
			assert((dba_mm == cs_data->acc_meth) || (csd == cs_data));
			csd = cs_data;	/* In case this is MM and wcs_flu() remapped an extended database, reset csd */
		}
		/* Do rundown journal processing after buffer flushes since they require jnl to be open */
		if (JNL_ENABLED(csd))
		{	/* the following tp_change_reg() is not needed due to the assert csa == cs_addrs at the beginning
			 * of gds_rundown(), but just to be safe. To be removed by 2002!! --- nars -- 2001/04/25.
			 */
			tp_change_reg();	/* call this because jnl_ensure_open checks cs_addrs rather than gv_cur_region */
			jpc = csa->jnl;
			jbp = jpc->jnl_buff;
			if (jbp->fsync_in_prog_latch.u.parts.latch_pid == process_id)
			{	/* a latch we hold on exit means something was left in a bad state; release it anyway */
				assert(FALSE);
				COMPSWAP_UNLOCK(&jbp->fsync_in_prog_latch, process_id, 0, LOCK_AVAILABLE, 0);
			}
			if (jbp->io_in_prog_latch.u.parts.latch_pid == process_id)
			{
				assert(FALSE);
				COMPSWAP_UNLOCK(&jbp->io_in_prog_latch, process_id, 0, LOCK_AVAILABLE, 0);
			}
			if ((((NOJNL != jpc->channel) && !JNL_FILE_SWITCHED(jpc))
				|| we_are_last_writer && (0 != csa->nl->jnl_file.u.inode)) && !skip_database_rundown)
			{	/* We need to close the journal file cleanly if we have the latest generation journal file open
				 * or if we are the last writer and the journal file is open in shared memory (not necessarily
				 * by ourselves e.g. the only process that opened the journal got shot abnormally)
				 * Note: we should not infer anything from the shared memory value of csa->nl->jnl_file.u.inode
				 * if we are not the last writer as it can be concurrently updated.
				 */
				if (!was_crit)
					grab_crit(reg);
				if (JNL_ENABLED(csd))
				{
					SET_GBL_JREC_TIME; /* jnl_ensure_open/jnl_put_jrt_pini/pfin/jnl_file_close all need it */
					/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order
					 * of jnl records. This needs to be done BEFORE the jnl_ensure_open as that could write
					 * journal records (if it decides to switch to a new journal file).
					 */
					ADJUST_GBL_JREC_TIME(jgbl, jbp);
					jnl_status = jnl_ensure_open();
					if (0 == jnl_status)
					{	/* If we_are_last_writer, we would have already done a wcs_flu() which would
						 * have written an epoch record and we are guaranteed no further updates
						 * since we are the last writer. So, just close the journal.
						 * Although we assert pini_addr should be non-zero for last_writer, we
						 * play it safe in PRO and write a PINI record if not written already.
						 */
						assert(!jbp->before_images || is_mm
							|| !we_are_last_writer || 0 != jpc->pini_addr);
						if (we_are_last_writer && 0 == jpc->pini_addr)
							jnl_put_jrt_pini(csa);
						if (0 != jpc->pini_addr)
							jnl_put_jrt_pfin(csa);
						/* If not the last writer and no pending flush timer left, do jnl flush now */
						if (!we_are_last_writer && (0 > csa->nl->wcs_timers))
						{
							if (SS_NORMAL == (jnl_status = jnl_flush(reg)))
							{
								assert(jbp->freeaddr == jbp->dskaddr);
								jnl_fsync(reg, jbp->dskaddr);
								assert(jbp->fsync_dskaddr == jbp->dskaddr);
							} else
							{
								send_msg(VARLSTCNT(9) ERR_JNLFLUSH, 2, JNL_LEN_STR(csd),
									ERR_TEXT, 2,
									RTS_ERROR_TEXT("Error with journal flush in gds_rundown"),
									jnl_status);
								assert(NOJNL == jpc->channel);/* jnl file lost has been triggered */
								/* In this routine, all code that follows from here on does not
								 * assume anything about the journaling characteristics of this
								 * database so it is safe to continue execution even though
								 * journaling got closed in the middle.
								 */
							}
						}
						jnl_file_close(reg, we_are_last_writer, FALSE);
					} else
						send_msg(VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(reg));
				}
				if (!was_crit)
					rel_crit(reg);
			}
		}
		if (we_are_last_writer)		/* Flush the fileheader last and harden the file to disk */
		{
			if (!was_crit)
				grab_crit(reg);			/* To satisfy crit requirement in fileheader_sync() */
			memset(csd->machine_name, 0, MAX_MCNAMELEN); /* clear the machine_name field */
			if (!have_standalone_access && we_are_last_user)
			{	/* mupip_exit_handler will do this after mur_close_file */
				csd->semid = INVALID_SEMID;
				csd->shmid = INVALID_SHMID;
				csd->gt_sem_ctime.ctime = 0;
				csd->gt_shm_ctime.ctime = 0;
			}
			fileheader_sync(reg);
			if (!was_crit)
				rel_crit(reg);
			if (FALSE == is_mm)
			{
				if (-1 == fsync(udi->fd))		/* Sync it all */
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("Error during file sync at close"), errno);
				}
			} else
			{	/* Now do final MM file sync before exit */
#				if !defined(TARGETED_MSYNC) && !defined(NO_MSYNC)
				if (-1 == fsync(udi->fd))		/* Sync it all */
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("Error during file sync at close"), errno);
				}
#				else
				if (-1 == msync((caddr_t)csa->db_addrs[0],
						(size_t)(csa->db_addrs[1] - csa->db_addrs[0]), MS_SYNC))
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("Error during file msync at close"), errno);
				}
#				endif
			}
		}
	} /* end if (!reg->read_only && !csa->nl->donotflush_dbjnl) */
	/* We had cancelled all db timers at start of rundown. In case as part of rundown (wcs_flu above), we had started
	 * any timers, cancel them BEFORE setting reg->open to FALSE (assert in wcs_clean_dbsync relies on this).
	 */
	CANCEL_DB_TIMERS(reg, csa, cancelled_timer, cancelled_dbsync_timer);
	if (reg->read_only && we_are_last_user && !have_standalone_access)
	{	/* mupip_exit_handler will do this after mur_close_file */
		db_ipcs.semid = INVALID_SEMID;
		db_ipcs.shmid = INVALID_SHMID;
		db_ipcs.gt_sem_ctime = 0;
		db_ipcs.gt_shm_ctime = 0;
		db_ipcs.fn_len = reg->dyn.addr->fname_len;
		memcpy(db_ipcs.fn, reg->dyn.addr->fname, reg->dyn.addr->fname_len);
		db_ipcs.fn[reg->dyn.addr->fname_len] = 0;
		/* request gtmsecshr to flush. read_only cannot flush itself */
		if (0 != send_mesg2gtmsecshr(FLUSH_DB_IPCS_INFO, 0, (char *)NULL, 0))
			rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
				  RTS_ERROR_TEXT("gtmsecshr failed to update database file header"));
	}
	/* Done with file now, close it */
	CLOSEFILE_RESET(udi->fd, rc);	/* resets "udi->fd" to FD_INVALID */
	if (-1 == rc)
	{
		rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
			  LEN_AND_LIT("Error during file close"), errno);
	}
	/* Unmap storage if mm mode but only the part that is not the fileheader (so shows up in dumps) */
	if (is_mm)
	{
		munmap_len = (sm_long_t)((csa->db_addrs[1] - csa->db_addrs[0])
					 - ROUND_UP(SIZEOF_FILE_HDR(csa->hdr), MSYNC_ADDR_INCS));
		if (munmap_len > 0)
		{
			munmap((caddr_t)(csa->db_addrs[0] + ROUND_UP(SIZEOF_FILE_HDR(csa->hdr), MSYNC_ADDR_INCS)),
			       (size_t)(munmap_len));
#			ifdef DEBUG_DB64
			rel_mmseg((caddr_t)csa->db_addrs[0]);
#			endif
		}
	}
	/* If we had skipped flushing journal and database buffers due to a concurrent online rollback, increment the counter
	 * indicating that in the shared memory so that online rollback can report the # of such processes when it shuts down.
	 */
	if (skip_database_rundown) /* indicates flushing was skipped */
		csa->nl->dbrndwn_skip_cnt++;
	/* If we are online rollback, report the # of processes that skipped rundown because we were holding the access control
	 * semaphore
	 */
	if (jgbl.onlnrlbk && csa->nl->dbrndwn_skip_cnt)
	{
		send_msg(VARLSTCNT(3) ERR_RNDWNSKIPCNT, 1, csa->nl->dbrndwn_skip_cnt);
		csa->nl->dbrndwn_skip_cnt = 0;
	}
	/* Detach our shared memory while still under lock so reference counts will be correct for the next process to run down
	 * this region. In the process also get the remove_shm status from node_local before detaching.
	 * If csa->nl->donotflush_dbjnl is TRUE, it means we can safely remove shared memory without compromising data
	 * integrity as a reissue of recover will restore the database to a consistent state.
	 */
	remove_shm = !vermismatch && (csa->nl->remove_shm || csa->nl->donotflush_dbjnl);
	rel_crit(reg); /* Since we are about to detach from the shared memory, release crit and reset onln_rlbk_pid */
	if (jgbl.onlnrlbk)
	{	/* We are done with online rollback on this region. Indicate to other processes by setting the onln_rlbk_pid to 0 */
		csa->hold_onto_crit = FALSE;
		csa->nl->onln_rlbk_pid = 0;
	}
	status = shmdt((caddr_t)csa->nl);
	csa->nl = NULL; /* dereferencing nl after detach is not right, so we set it to NULL so that we can test before dereference*/
	if (-1 == status)
		send_msg(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2, LEN_AND_LIT("Error during shmdt"), errno);
	REMOVE_CSA_FROM_CSADDRSLIST(csa);	/* remove "csa" from list of open regions (cs_addrs_list) */
	reg->open = FALSE;
	/* If file is still not in good shape, die here and now before we get rid of our storage */
	assertpro(0 == csa->wbuf_dqd);
	ipc_deleted = FALSE;
	/* If we are the very last user, remove shared storage id and the semaphores */
	if (we_are_last_user)
	{	/* remove shared storage, only if last writer to rundown did a successful wcs_flu() */
		assert(!vermismatch);
		if (remove_shm)
		{
			ipc_deleted = TRUE;
			if (0 != shm_rmid(udi->shmid))
				rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Unable to remove shared memory"));
		} else if (is_src_server || is_updproc)
		{
			gtm_putmsg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
			send_msg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
		} else
			send_msg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
		/* mupip recover/rollback don't release the semaphore here, but do it later in db_ipcs_reset (invoked from
		 * mur_close_files())
		 */
		if (!have_standalone_access)
		{
			if (0 != sem_rmid(udi->semid))
				rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Unable to remove semaphore"));
			udi->grabbed_access_sem = FALSE;
		}
	} else
	{
		assert(!have_standalone_access || jgbl.onlnrlbk);
		if (!jgbl.onlnrlbk)
		{	/* If we were writing, get rid of our writer access count semaphore */
			if (!reg->read_only)
				if (0 != (save_errno = do_semop(udi->semid, 1, -1, SEM_UNDO)))
					rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("gds_rundown write semaphore release"), save_errno);
			/* Now remove the rundown lock */
			if (!skip_database_rundown)
			{	/* Do it only if we did NOT skip getting the access control semaphore above */
				if (0 != (save_errno = do_semop(udi->semid, 0, -1, SEM_UNDO)))
					rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("gds_rundown rundown semaphore release"), save_errno);
				udi->grabbed_access_sem = FALSE;
			}
		} /* else access control semaphore will be released in db_ipcs_reset */
	}
	if (!have_standalone_access)
	{
		if (!ftok_sem_release(reg, !have_standalone_access, FALSE))
			rts_error(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
		/* NOTE(review): csa->ti points into the shared memory segment detached by shmdt above and csa->nl is
		 * now NULL -- confirm FTOK_TRACE does not dereference stale shared memory at this point.
		 */
		FTOK_TRACE(csa, csa->ti->curr_tn, ftok_ops_release, process_id);
	}
	ENABLE_INTERRUPTS(INTRPT_IN_GDS_RUNDOWN);
	if (!ipc_deleted)
	{
		GET_CUR_TIME;
		if (is_src_server)
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Source server"), REG_LEN_STR(reg));
		if (is_updproc)
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Update process"), REG_LEN_STR(reg));
		if (mupip_jnl_recover && (!jgbl.onlnrlbk || !we_are_last_user))
		{
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Mupip journal process"), REG_LEN_STR(reg));
			send_msg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Mupip journal process"), REG_LEN_STR(reg));
		}
	}
	REVERT;
}
/* Driver for global-variable ZWRITE output.
 * out - destination for the ZWRITE text (passed through to zwr_output).
 * pat - nonzero => gvzwrite_block->pat holds a pattern to match against ALL
 *       global names (walked with op_gvorder starting at "%"); zero =>
 *       gvzwrite_block->pat holds a single (possibly naked) global reference.
 * Saves the current gv_target/gv_currkey state into gvzwrite_block so that
 * gvzwrite_clnup() (also invoked from the gvzwrite_ch condition handler) can
 * restore it on either normal or error exit.
 * Fix: the first gvzwr_var() call now casts MV_FORCE_INTD() to int4, matching
 * every other gvzwr_var() call site in this routine.
 */
void gvzwr_fini(zshow_out *out, int pat)
{
	char		m[SIZEOF(mident_fixed)];
	mval		local, data;
	gv_key		*old;
	gvnh_reg_t	*gvnh_reg;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	if (!gv_currkey)
		gvinit();
	ESTABLISH(gvzwrite_ch);
	zwr_output = out;
	assert(INVALID_GV_TARGET == reset_gv_target);
	reset_gv_target = gv_target;
	DBG_CHECK_GVTARGET_GVCURRKEY_IN_SYNC(CHECK_CSA_TRUE);
	/* Snapshot current global-reference state; gvzwrite_clnup() restores it */
	gvzwrite_block->gd_reg = gv_cur_region;
	gvzwrite_block->old_targ = (unsigned char *)gv_target;
	old = (gv_key *)malloc(SIZEOF(gv_key) + gv_currkey->end);
	gvzwrite_block->old_key = (unsigned char *)old;
	memcpy(gvzwrite_block->old_key, gv_currkey, SIZEOF(gv_key) + gv_currkey->end);
	gvzwrite_block->gv_last_subsc_null = TREF(gv_last_subsc_null);
	gvzwrite_block->gv_some_subsc_null = TREF(gv_some_subsc_null);
	if (!pat)
	{
		local = *gvzwrite_block->pat;
		if (local.str.len)	/* New reference. Will get new gv_target.. */
		{
			gv_target = NULL;
			gv_currkey->base[0] = '\0';
			op_gvname(VARLSTCNT(1) &local);
			op_gvdata(&data);
			if (!(MV_FORCE_INTD(&data)))
			{	/* $DATA is 0: nothing to write; signal UNDEF unless VIEW "NOUNDEF" */
				if (!undef_inhibit)
					sgnl_gvundef();
			} else
			{
				gvzwrite_block->fixed = (gvzwrite_block->fixed ? TRUE : FALSE);
				/* (int4) cast added for consistency with the other gvzwr_var call sites */
				gvzwr_var((int4)MV_FORCE_INTD(&data), 0);
			}
		} else	/* Old (naked) reference. Keep previous gv_target reference */
		{
			if (gv_currkey->prev == 0)
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_GVNAKED);
			/* Strip the last subscript to recreate the naked-reference key */
			gv_currkey->end = gv_currkey->prev;
			gv_currkey->base[gv_currkey->end] = 0;
			gv_currkey->prev = 0;
			/* If gvnh_reg corresponds to a spanning global, then determine
			 * gv_cur_region/gv_target/gd_targ_* variables based on updated gv_currkey.
			 */
			gvnh_reg = TREF(gd_targ_gvnh_reg);	/* set by op_gvname in previous call */
			GV_BIND_SUBSNAME_FROM_GVNH_REG_IF_GVSPAN(gvnh_reg, gd_header, gv_currkey);
			op_gvdata(&data);
			if (!(MV_FORCE_INTD(&data)))
			{
				if (!undef_inhibit)
					sgnl_gvundef();
			} else
			{
				gvzwrite_block->fixed = (gvzwrite_block->fixed ? TRUE : FALSE);
				gvzwr_var((int4)MV_FORCE_INTD(&data), 0);
			}
		}
	} else
	{	/* Pattern mode: walk every global name (starting at "%") with op_gvorder,
		 * writing those whose name matches gvzwrite_block->pat.
		 */
		gv_target = NULL;
		gv_currkey->base[0] = '\0';
		local.mvtype = MV_STR;
		local.str.addr = &m[0];
		local.str.len = 1;
		m[0] = '%';	/* "%" collates first among global names */
		gvzwrite_block->fixed = FALSE;
		for (; ;)
		{
			op_gvname(VARLSTCNT(1) &local);
			if (do_pattern(&local, gvzwrite_block->pat))
			{
				op_gvdata(&data);
				if ((MV_FORCE_INTD(&data)))
				{
					gvzwr_var((int4)MV_FORCE_INTD(&data), 0);
				}
			}
			op_gvorder(&local);
			if (local.str.len)
			{	/* op_gvorder returns "^name"; skip the leading '^' and stage it in m[] */
				assert(local.str.len <= MAX_MIDENT_LEN + 1);
				local.str.addr++;
				local.str.len--;
				memcpy(&m[0], local.str.addr, local.str.len);
				local.str.addr = &m[0];
			} else
				break;	/* no more global names */
		}
	}
	gvzwrite_clnup();	/* this routine is called by gvzwrite_ch() too */
	REVERT;
	return;
}
/* (Older variant) Driver for global-variable ZWRITE output.
 * out - destination for the ZWRITE text (passed through to zwr_output).
 * pat - nonzero => gvzwrite_block.pat holds a pattern matched against ALL
 *       global names (walked with op_gvorder starting at "%"); zero =>
 *       gvzwrite_block.pat holds a single (possibly naked) global reference.
 * Saves the current gv_target/gv_currkey/gd_map state into gvzwrite_block so
 * that gvzwrite_clnup() (also invoked from the gvzwrite_ch condition handler)
 * can restore it on either normal or error exit.
 */
void gvzwr_fini(zshow_out *out, int pat)
{
	char	m[sizeof(mident_fixed)];
	mval	local, data;
	gv_key	*old;

	error_def(ERR_GVNAKED);

	if (!gv_currkey)
		gvinit();
	ESTABLISH(gvzwrite_ch);
	zwr_output = out;
	assert(INVALID_GV_TARGET == reset_gv_target);
	reset_gv_target = gv_target;
	DBG_CHECK_GVTARGET_CSADDRS_IN_SYNC;
	/* Snapshot current global-reference state; gvzwrite_clnup() restores it */
	gvzwrite_block.gd_reg = gv_cur_region;
	gvzwrite_block.old_targ = (unsigned char *)gv_target;
	old = (gv_key *)malloc(sizeof(gv_key) + gv_currkey->end);
	gvzwrite_block.old_key = (unsigned char *)old;
	memcpy(gvzwrite_block.old_key, gv_currkey, sizeof(gv_key) + gv_currkey->end);
	gvzwrite_block.old_map = gd_map;
	gvzwrite_block.old_map_top = gd_map_top;
	if (!pat)
	{
		local = *gvzwrite_block.pat;
		if (local.str.len)	/* New reference. Will get new gv_target.. */
		{
			gv_target = NULL;
			gv_currkey->base[0] = '\0';
			op_gvname(VARLSTCNT(1) &local);
			op_gvdata(&data);
			if (!(MV_FORCE_INTD(&data)))
				sgnl_gvundef();	/* $DATA is 0: nothing to write */
			else
			{
				gvzwrite_block.fixed = (gvzwrite_block.fixed ? TRUE : FALSE);
				gvzwr_var(MV_FORCE_INTD(&data), 0);
			}
		} else	/* Old (naked) reference. Keep previous gv_target reference */
		{
			if (gv_currkey->prev == 0)
				rts_error(VARLSTCNT(1) ERR_GVNAKED);
			/* Strip the last subscript to recreate the naked-reference key */
			gv_currkey->end = gv_currkey->prev;
			gv_currkey->base[gv_currkey->end] = 0;
			gv_currkey->prev = 0;
			op_gvdata(&data);
			if (!(MV_FORCE_INTD(&data)))
				sgnl_gvundef();
			else
			{
				gvzwrite_block.fixed = (gvzwrite_block.fixed ? TRUE : FALSE);
				gvzwr_var((int4)MV_FORCE_INTD(&data), 0);
			}
		}
	} else
	{	/* Pattern mode: walk every global name (starting at "%") with op_gvorder,
		 * writing those whose name matches gvzwrite_block.pat.
		 */
		gv_target = NULL;
		gv_currkey->base[0] = '\0';
		local.mvtype = MV_STR;
		local.str.addr = &m[0];
		local.str.len = 1;
		m[0] = '%';	/* "%" collates first among global names */
		gvzwrite_block.fixed = FALSE;
		for (; ;)
		{
			op_gvname(VARLSTCNT(1) &local);
			if (do_pattern(&local, gvzwrite_block.pat))
			{
				op_gvdata(&data);
				if ((MV_FORCE_INTD(&data)))
				{
					gvzwr_var((int4)MV_FORCE_INTD(&data), 0);
				}
			}
			op_gvorder(&local);
			if (local.str.len)
			{	/* op_gvorder returns "^name"; skip the leading '^' and stage it in m[] */
				assert(local.str.len <= MAX_MIDENT_LEN + 1);
				local.str.addr++;
				local.str.len--;
				memcpy(&m[0], local.str.addr, local.str.len);
				local.str.addr = &m[0];
			} else
				break;	/* no more global names */
		}
	}
	gvzwrite_clnup();	/* this routine is called by gvzwrite_ch() too */
	REVERT;
	return;
}
/* MUPIP RESTORE: apply one or more incremental-backup streams to an existing
 * database file.  Each input may be a disk file, a pipe from a command (name
 * ends in '|'), or a TCP stream ("TCP://host[:port]").  For each input, the
 * incremental header is validated against the output database (label, start
 * TN, block size, total blocks — extending the file when the backup is from a
 * larger database and /EXTEND was not negated), then the block records and
 * finally the file-header records are written in place.  All failure paths
 * report the problem, release the IPC resources via db_ipcs_reset() and
 * mu_gv_cur_reg_free(), and leave through mupip_exit() (no return).
 */
void mupip_restore(void)
{
	static readonly char	label[] = GDS_LABEL;
	char			db_name[MAX_FN_LEN + 1], *inbuf, *p;
	inc_list_struct		*ptr;		/* walks the user-supplied input-file list (in_files) */
	inc_header		*inhead;	/* 8-byte-aligned incremental backup header */
	sgmnt_data		*old_data;	/* file header of the output database */
	short			iosb[4];
	unsigned short		n_len;
	int4			status, vbn, rsize, temp, save_errno;
	uint4			rest_blks, totblks;
	trans_num		curr_tn;	/* must match each input's start_tn; advanced to its end_tn */
	uint4			ii;
	block_id		blk_num;
	bool			extend;		/* FALSE when /NOEXTEND was specified */
	uint4			cli_status;
	BFILE			*in;
	int			i, db_fd;
	uint4			old_blk_size, old_tot_blks, bplmap;
	short			old_start_vbn;
	off_t			new_eof;
	char			buff[DISK_BLOCK_SIZE];
	char			msg_buffer[1024], *newmap, *newmap_bptr;
	mstr			msg_string;
	char			addr[SA_MAXLEN+1];	/* TCP host name parsed from the input spec */
	unsigned char		tcp[5];
	backup_type		type;		/* backup_to_file / backup_to_exec / backup_to_tcp */
	unsigned short		port;
	int4			timeout, cut, match;
	char			debug_info[256];
	void			(*common_read)();	/* reader for the chosen source; invoked via COMMON_READ */
	char			*errptr;
	pid_t			waitpid_res;

	error_def(ERR_MUPRESTERR);
	error_def(ERR_MUPCLIERR);
	error_def(ERR_IOEOF);

	extend = TRUE;
	if (CLI_NEGATED == (cli_status = cli_present("EXTEND")))
		extend = FALSE;
	mu_outofband_setup();
	mu_gv_cur_reg_init();
	/* Get the output database name and obtain standalone access to it */
	n_len = sizeof(db_name);
	if (cli_get_str("DATABASE", db_name, &n_len) == FALSE)
		mupip_exit(ERR_MUPCLIERR);
	strcpy((char *)gv_cur_region->dyn.addr->fname, db_name);
	gv_cur_region->dyn.addr->fname_len = n_len;
	if (!mu_rndwn_file(gv_cur_region, TRUE))
	{
		util_out_print("Error securing stand alone access to output file !AD. Aborting restore.", TRUE, n_len, db_name);
		mupip_exit(ERR_MUPRESTERR);
	}
	OPENFILE(db_name, O_RDWR, db_fd);
	if (-1 == db_fd)
	{
		save_errno = errno;
		util_out_print("Error accessing output file !AD. Aborting restore.", TRUE, n_len, db_name);
		errptr = (char *)STRERROR(save_errno);
		util_out_print("open : !AZ", TRUE, errptr);
		mupip_exit(save_errno);
	}
	murgetlst();	/* build in_files from the command line */
	inbuf = (char*)malloc(INC_BACKUP_CHUNK_SIZE);
	old_data = (sgmnt_data*)malloc(sizeof(sgmnt_data));
	/* Read and sanity-check the output database's file header */
	LSEEKREAD(db_fd, 0, old_data, sizeof(sgmnt_data), save_errno);
	if (0 != save_errno)
	{
		util_out_print("Error accessing output file !AD. Aborting restore.", TRUE, n_len, db_name);
		if (-1 != save_errno)
		{
			errptr = (char *)STRERROR(save_errno);
			util_out_print("read : !AZ", TRUE, errptr);
			db_ipcs_reset(gv_cur_region, TRUE);
			mu_gv_cur_reg_free();
			mupip_exit(save_errno);
		} else
		{	/* LSEEKREAD reports premature end-of-file as -1 */
			db_ipcs_reset(gv_cur_region, TRUE);
			mu_gv_cur_reg_free();
			mupip_exit(ERR_IOEOF);
		}
	}
	if (memcmp(&old_data->label[0], &label[0], GDS_LABEL_SZ))
	{
		util_out_print("Output file !AD has an unrecognizable format", TRUE, n_len, db_name);
		free(old_data);
		free(inbuf);
		db_ipcs_reset(gv_cur_region, TRUE);
		mu_gv_cur_reg_free();
		mupip_exit(ERR_MUPRESTERR);
	}
	/* Capture the header fields needed later, then release the header buffer */
	curr_tn = old_data->trans_hist.curr_tn;
	old_blk_size = old_data->blk_size;
	old_tot_blks = old_data->trans_hist.total_blks;
	old_start_vbn = old_data->start_vbn;
	bplmap = old_data->bplmap;	/* blocks per local bitmap */
	free(old_data);
	msg_string.addr = msg_buffer;
	msg_string.len = sizeof(msg_buffer);
	/* Over-allocate by 8 and round up to an 8-byte boundary (original pointer is
	 * intentionally abandoned; freed only at process exit).
	 * NOTE(review): the (int4) cast truncates the pointer on 64-bit platforms —
	 * should be an integer type wide enough for a pointer; confirm target platforms. */
	inhead = (inc_header *)malloc(sizeof(inc_header) + 8);
	inhead = (inc_header *)((((int4)inhead) + 7) & -8);
	rest_blks = 0;
	for (ptr = in_files.next; ptr; ptr = ptr->next)
	{	/* --- determine source type --- */
		type = backup_to_file;
		if (0 == ptr->input_file.len)
			continue;
		else if ('|' == *(ptr->input_file.addr + ptr->input_file.len - 1))
		{	/* trailing '|' means "run this command and read its stdout" */
			type = backup_to_exec;
			ptr->input_file.len--;
			*(ptr->input_file.addr + ptr->input_file.len) = '\0';
		} else if (ptr->input_file.len > 5)
		{
			lower_to_upper(tcp, (uchar_ptr_t)ptr->input_file.addr, 5);
			if (0 == memcmp(tcp, "TCP:/", 5))
			{	/* "TCP://host[:port]" - strip the prefix and any extra slashes in place */
				type = backup_to_tcp;
				cut = 5;
				while ('/' == *(ptr->input_file.addr + cut))
					cut++;
				ptr->input_file.len -= cut;
				p = ptr->input_file.addr;
				while (p < ptr->input_file.addr + ptr->input_file.len)
				{
					*p = *(p + cut);
					p++;
				}
				*p = '\0';
			}
		}
		/* --- open the input stream --- */
		restore_read_errno = 0;
		switch(type)
		{
			case backup_to_file:
				common_read = iob_read;
				if ((in = iob_open_rd(ptr->input_file.addr, DISK_BLOCK_SIZE, BLOCKING_FACTOR)) == NULL)
				{
					save_errno = errno;
					util_out_print("Error accessing input file !AD. Aborting restore.", TRUE,
						ptr->input_file.len, ptr->input_file.addr);
					errptr = (char *)STRERROR(save_errno);
					util_out_print("open : !AZ", TRUE, errptr);
					db_ipcs_reset(gv_cur_region, TRUE);
					mu_gv_cur_reg_free();
					mupip_exit(save_errno);
				}
				ESTABLISH(iob_io_error);	/* matched by REVERT at end of this iteration */
				break;
			case backup_to_exec:
				pipe_child = 0;
				common_read = exec_read;
				in = (BFILE *)malloc(sizeof(BFILE));
				if (0 > (in->fd = gtm_pipe(ptr->input_file.addr, input_from_comm)))
				{
					util_out_print("Error creating input pipe from !AD.", TRUE,
						ptr->input_file.len, ptr->input_file.addr);
					db_ipcs_reset(gv_cur_region, TRUE);
					mu_gv_cur_reg_free();
					mupip_exit(ERR_MUPRESTERR);
				}
#ifdef DEBUG_ONLINE
				PRINTF("file descriptor for the openned pipe is %d.\n", in->fd);
				PRINTF("the command passed to gtm_pipe is %s.\n", ptr->input_file.addr);
#endif
				break;
			case backup_to_tcp:
				common_read = tcp_read;
				/* parse the input */
				switch (match = SSCANF(ptr->input_file.addr, "%[^:]:%hu", addr, &port))
				{
					case 1 :	/* no port given: use the default, then fall through */
						port = DEFAULT_BKRS_PORT;
					case 2 :
						break;
					default :
						util_out_print("Error : A hostname has to be specified.", TRUE);
						db_ipcs_reset(gv_cur_region, TRUE);
						mu_gv_cur_reg_free();
						mupip_exit(ERR_MUPRESTERR);
				}
				if ((0 == cli_get_int("NETTIMEOUT", &timeout)) || (0 > timeout))
					timeout = DEFAULT_BKRS_TIMEOUT;
				in = (BFILE *)malloc(sizeof(BFILE));
				iotcp_fillroutine();
				if (0 > (in->fd = tcp_open(addr, port, timeout, TRUE)))
				{
					util_out_print("Error establishing TCP connection to !AD.", TRUE,
						ptr->input_file.len, ptr->input_file.addr);
					db_ipcs_reset(gv_cur_region, TRUE);
					mu_gv_cur_reg_free();
					mupip_exit(ERR_MUPRESTERR);
				}
				break;
			default:
				util_out_print("Aborting restore!/", TRUE);
				util_out_print("Unrecognized input format !AD", TRUE, ptr->input_file.len, ptr->input_file.addr);
				db_ipcs_reset(gv_cur_region, TRUE);
				mu_gv_cur_reg_free();
				mupip_exit(ERR_MUPRESTERR);
		}
		/* --- validate the incremental header against the output database --- */
		COMMON_READ(in, inhead, sizeof(inc_header));
		if (memcmp(&inhead->label[0], INC_HEADER_LABEL, INC_HDR_LABEL_SZ))
		{
			util_out_print("Input file !AD has an unrecognizable format", TRUE,
				ptr->input_file.len, ptr->input_file.addr);
			free(inbuf);
			db_ipcs_reset(gv_cur_region, TRUE);
			mu_gv_cur_reg_free();
			mupip_exit(ERR_MUPRESTERR);
		}
		if (curr_tn != inhead->start_tn)
		{	/* inputs must be applied in TN order, each starting where the database left off */
			util_out_print("Transaction in input file !AD does not align with database TN.!/DB: !XL!_Input file: !XL",
				TRUE, ptr->input_file.len, ptr->input_file.addr, curr_tn, inhead->start_tn);
			free(inbuf);
			db_ipcs_reset(gv_cur_region, TRUE);
			mu_gv_cur_reg_free();
			mupip_exit(ERR_MUPRESTERR);
		}
		if (old_blk_size != inhead->blk_size)
		{
			util_out_print("Incompatable block size. Output file !AD has block size !XL,", TRUE, n_len, db_name);
			util_out_print("while input file !AD is from a database with block size !XL,", TRUE,
				ptr->input_file.len, ptr->input_file.addr, inhead->blk_size);
			free(inbuf);
			db_ipcs_reset(gv_cur_region, TRUE);
			mu_gv_cur_reg_free();
			mupip_exit(ERR_MUPRESTERR);
		}
		if (old_tot_blks != inhead->db_total_blks)
		{
			if (old_tot_blks > inhead->db_total_blks || !extend)
			{	/* shrinkage is never allowed; growth requires /EXTEND (the default) */
				totblks = old_tot_blks - DIVIDE_ROUND_UP(old_tot_blks, DISK_BLOCK_SIZE);
				util_out_print("Incompatable database sizes. Output file !AD has!/ !UL (!XL hex) total blocks,",
					TRUE, n_len, db_name, totblks, totblks);
				totblks = inhead->db_total_blks - DIVIDE_ROUND_UP(inhead->db_total_blks, DISK_BLOCK_SIZE);
				util_out_print("while input file !AD is from a database with!/ !UL (!XL hex) total blocks",
					TRUE, ptr->input_file.len, ptr->input_file.addr, totblks, totblks);
				free(inbuf);
				db_ipcs_reset(gv_cur_region, TRUE);
				mu_gv_cur_reg_free();
				mupip_exit(ERR_MUPRESTERR);
			} else
			{	/* this part of the code is similar to gdsfilext except that you don't need to do
				 * most of the work that gdsfilext does. However, for situations where the database
				 * extended since the last backup (the beginning of this incremental backup), and
				 * there are new bitmaps that are never touched later on by GT.M, these bitmaps
				 * will have tn == 0, which prevents the backup process to pick up these blocks,
				 * so, we need to initialize these bitmaps here */
				/* Extend the file by writing one zeroed disk block at the new end-of-file */
				new_eof = ((off_t)(old_start_vbn - 1) * DISK_BLOCK_SIZE)
					+ ((off_t)inhead->db_total_blks * old_blk_size);
				memset(buff, 0, DISK_BLOCK_SIZE);
				LSEEKWRITE(db_fd, new_eof, buff, DISK_BLOCK_SIZE, status);
				if (0 != status)
				{
					util_out_print("Aborting restore!/", TRUE);
					util_out_print("lseek or write error : Unable to extend output file !AD!/",
						TRUE, n_len, db_name);
					util_out_print(" from !UL (!XL hex) total blocks to !UL (!XL hex) total blocks.!/",
						TRUE, old_tot_blks, old_tot_blks, inhead->db_total_blks, inhead->db_total_blks);
					util_out_print(" Current input file is !AD with !UL (!XL hex) total blocks!/",
						TRUE, ptr->input_file.len, ptr->input_file.addr,
						inhead->db_total_blks, inhead->db_total_blks);
					gtm_putmsg(VARLSTCNT(1) status);
					free(inbuf);
					db_ipcs_reset(gv_cur_region, TRUE);
					mu_gv_cur_reg_free();
					mupip_exit(ERR_MUPRESTERR);
				}
				/* --- initialize all new bitmaps, just in case they are not touched later --- */
				if (DIVIDE_ROUND_DOWN(inhead->db_total_blks, bplmap) > DIVIDE_ROUND_DOWN(old_tot_blks, bplmap))
				{	/* -- similar logic exist in bml_newmap.c, which need to pick up any new updates here -- */
					newmap = (char *)malloc(old_blk_size);
					((blk_hdr *)newmap)->bsiz = BM_SIZE(bplmap);
					((blk_hdr *)newmap)->levl = LCL_MAP_LEVL;
					((blk_hdr *)newmap)->tn = curr_tn;
					newmap_bptr = newmap + sizeof(blk_hdr);
					*newmap_bptr++ = THREE_BLKS_FREE;	/* first entry: bitmap block itself is busy */
					memset(newmap_bptr, FOUR_BLKS_FREE, BM_SIZE(bplmap) - sizeof(blk_hdr) - 1);
					for (ii = ROUND_UP(old_tot_blks, bplmap); ii <= inhead->db_total_blks; ii += bplmap)
					{
						new_eof = (off_t)(old_start_vbn - 1) * DISK_BLOCK_SIZE + (off_t)ii * old_blk_size;
						LSEEKWRITE(db_fd, new_eof, newmap, old_blk_size, status);
						if (0 != status)
						{
							util_out_print("Aborting restore!/", TRUE);
							util_out_print("Bitmap 0x!XL initialization error!", TRUE, ii);
							gtm_putmsg(VARLSTCNT(1) status);
							free(inbuf);
							free(newmap);
							db_ipcs_reset(gv_cur_region, TRUE);
							mu_gv_cur_reg_free();
							mupip_exit(ERR_MUPRESTERR);
						}
					}
					free(newmap);
				}
				old_tot_blks = inhead->db_total_blks;
			}
		}
		/* --- apply the data-block records --- */
		COMMON_READ(in, &rsize, sizeof(int4));
		for ( ; ;)
		{	/* rsize is the size of the record, including the size, but, since the size has already
			 * been read in, this will read in the current record and the size for the next record */
			/* ensure we have a reasonable record size, at least */
			if (rsize - sizeof(int4) - sizeof(block_id) > old_blk_size)
			{
				util_out_print("Invalid information in restore file !AD. Aborting restore.", TRUE,
					ptr->input_file.len, ptr->input_file.addr);
				iob_close(in);
				db_ipcs_reset(gv_cur_region, TRUE);
				mu_gv_cur_reg_free();
				mupip_exit(ERR_MUPRESTERR);
			}
			COMMON_READ(in, inbuf, rsize);
			if (!memcmp(inbuf, &end_msg[0], sizeof end_msg - 1))
				break;	/* sentinel record: end of data blocks */
			rest_blks++;
			blk_num = *(block_id*)inbuf;	/* record layout: block number, block image, next rsize */
			vbn = old_start_vbn - 1 + (old_blk_size / DISK_BLOCK_SIZE * blk_num);
			LSEEKWRITE(db_fd, (off_t)vbn * DISK_BLOCK_SIZE, inbuf + sizeof(block_id),
				rsize - sizeof(block_id) - sizeof(int4), save_errno);
			if (0 != save_errno)
			{
				util_out_print("Error accessing output file !AD. Aborting restore.", TRUE, n_len, db_name);
				errptr = (char *)STRERROR(save_errno);
				util_out_print("write : !AZ", TRUE, errptr);
				db_ipcs_reset(gv_cur_region, TRUE);
				mu_gv_cur_reg_free();
				mupip_exit(save_errno);
			}
			GET_LONG(temp, (inbuf + rsize - sizeof(int4)));	/* trailing int4 is the next record's size */
			rsize = temp;
		}
		GET_LONG(temp, (inbuf + rsize - sizeof(int4)));
		rsize = temp;
		vbn = 0;
		for (i = 0; ; i++)	/* Restore file header */
		{
			COMMON_READ(in, inbuf, rsize);
			if (!memcmp(inbuf, &hdr_msg[0], sizeof hdr_msg - 1))
				break;	/* sentinel record: end of file-header records */
			LSEEKWRITE(db_fd, vbn, inbuf, rsize - sizeof(int4), save_errno);
			if (0 != save_errno)
			{
				util_out_print("Error accessing output file !AD. Aborting restore.", TRUE, n_len, db_name);
				errptr = (char *)STRERROR(save_errno);
				util_out_print("write : !AZ", TRUE, errptr);
				db_ipcs_reset(gv_cur_region, TRUE);
				mu_gv_cur_reg_free();
				mupip_exit(save_errno);
			}
			vbn += rsize - sizeof(int4);	/* header records are written back-to-back from offset 0 */
			GET_LONG(temp, (inbuf + rsize - sizeof(int4)));
			rsize = temp;
		}
		curr_tn = inhead->end_tn;	/* the next input (if any) must start here */
		/* --- close this input source --- */
		switch (type)
		{
			case backup_to_file:
				REVERT;	/* undo the ESTABLISH(iob_io_error) done at open */
				iob_close(in);
				break;
			case backup_to_exec:
				close(in->fd);
				if ((pipe_child > 0) && (FALSE != is_proc_alive(pipe_child, 0)))
					WAITPID(pipe_child, (int *)&status, 0, waitpid_res);	/* reap the pipe child */
				break;
			case backup_to_tcp:
				break;	/* NOTE(review): TCP socket fd is left open here — confirm intentional */
		}
	}
	util_out_print("!/RESTORE COMPLETED", TRUE);
	util_out_print("!UL blocks restored", TRUE, rest_blks);
	free(inbuf);
	db_ipcs_reset(gv_cur_region, FALSE);
	mu_gv_cur_reg_free();
	mupip_exit(SS_NORMAL);
}
/* Initialize this process either as the child of a M JOB command or as a
 * normally-invoked GT.M process.  A JOB child (detected via the CHILD_FLAG_ENV
 * environment variable) reads its job parameters, redirects its standard I/O,
 * resolves the jobbed entryref and sets $ZMODE to "OTHER".  A non-jobbed
 * process resolves its entry point according to invocation_mode (run file,
 * call-in, or direct mode) and sets $ZMODE to "INTERACTIVE".  In both cases
 * the runtime environment is then set up via gtm_init_env(), and any job
 * actuallist parameters are pushed onto the stack.  Errors exit via
 * rts_error() under the job_init_ch condition handler.
 */
void jobchild_init(void)
{
	unsigned int		status;
	job_params_type		jparms;	/* parameters passed by the jobbing (parent) process */

	/* Transfer data */
	unsigned char		*transfer_addr;	/* execution start address within the routine */
	rhdtyp			*base_addr;	/* routine header of the routine to run */
	unsigned short		i, arg_len;
	char			run_file_name[FILE_NAME_SIZE + 2], *c;
	gcall_args		job_arglist;	/* actuallist passed via the JOB command, if any */
	mval			job_args[MAX_ACTUALS];
	error_def (ERR_RUNPARAMERR);
	static char		interactive_mode_buf[] = "INTERACTIVE";
	static char		other_mode_buf[] = "OTHER";
	error_def(ERR_TEXT);

	ESTABLISH(job_init_ch);
	/*
	 * Check if environment variable ppid - job parent pid
	 * exists. If it does not, we are a regular gtm process,
	 * else, we are a child process of a job command.
	 */
	if ((c = GETENV(CHILD_FLAG_ENV)) && strlen(c))
	{
		/*
		 * We are a Jobbed process.
		 * Get Job parameters and set up environment
		 * to run the Job command
		 */
		/* Clear the environment variable so that subsequent child
		 * mumps processes can start normal initialization.
		 */
		if (PUTENV(CLEAR_CHILD_FLAG_ENV))
		{
			util_out_print("Unable to clear gtmj0 process !UL exiting.", TRUE, process_id);
			rts_error(VARLSTCNT(1) errno);
		}
		/* read parameters into parameter structure */
		ojchildparms(&jparms, &job_arglist, job_args);
		/* Execute the command to be run before executing the actual M routine */
		if (jparms.startup.len)
			SYSTEM(jparms.startup.addr);
		/* Set up job's input, output and error files. Redirect them, if necessary. */
		/* It is needed since the middle process would not have always done this(under jobpid == TRUE cases) */
		if (!(status = ojchildioset(&jparms)))
			rts_error(VARLSTCNT(4) ERR_TEXT, 2, LEN_AND_LIT("Failed to set STDIN/OUT/ERR for the job"));
		/* Resolve the jobbed entryref (routine^label+offset) to an executable address */
		job_addr(&jparms.routine, &jparms.label, jparms.offset, (char **)&base_addr, (char **)&transfer_addr);
		/* Set process priority */
		if (jparms.baspri)
			nice((int) jparms.baspri);	/* NOTE(review): nice() return value unchecked — failure is silent */
		/* Set up $ZMODE to "OTHER" */
		dollar_zmode.mvtype = MV_STR;
		dollar_zmode.str.addr = &other_mode_buf[0];
		dollar_zmode.str.len = sizeof(other_mode_buf) -1;
	} else
	{	/* If we are not a child, setup a dummy mumps routine */
		if (MUMPS_RUN == invocation_mode)
		{	/* mumps -run: entryref comes from the INFILE CLI parameter */
			mstr	routine, label;
			int	offset;

			arg_len = FILE_NAME_SIZE;
			if (!cli_get_str("INFILE", run_file_name, &arg_len))
				rts_error(VARLSTCNT(1) ERR_RUNPARAMERR);
			lref_parse((uchar_ptr_t)run_file_name, &routine, &label, &offset);
			job_addr(&routine, &label, offset, (char **)&base_addr, (char **)&transfer_addr);
		} else if (MUMPS_CALLIN & invocation_mode) /* call-in mode */
		{
			base_addr = make_cimode();
			transfer_addr = PTEXT_ADR(base_addr);
		} else	/* direct mode */
		{
			base_addr = make_dmode();
			transfer_addr = PTEXT_ADR(base_addr);
		}
		job_arglist.callargs = 0;	/* no actuallist outside of a JOB'd child */
		/* Set up $ZMODE to "INTERACTIVE" */
		dollar_zmode.mvtype = MV_STR;
		dollar_zmode.str.addr = &interactive_mode_buf[0];
		dollar_zmode.str.len = sizeof(interactive_mode_buf) -1;
	}
	gtm_init_env(base_addr, transfer_addr);
	if (MUMPS_CALLIN & invocation_mode)
	{
		SET_CI_ENV(ci_ret_code_exit);
	}
	if (job_arglist.callargs)
	{	/* push the JOB actuallist as formal parameters for the jobbed entryref */
		callg((int(*)())push_parm, &job_arglist);
		frame_pointer->type |= SFT_EXTFUN;
	}
	REVERT;
}
/* Entry point of the OpenVMS DFU (Disk and File Utilities) program.
 * Written in VAX C: '$' appears in identifiers, $DESCRIPTOR builds VMS string
 * descriptors, and the SMG$/CLI$/SYS$ system services drive the screen, the
 * command parser and the terminal channel.  NOTE(review): 'status', outbuf,
 * the SMG display/pasteboard ids and the i0/i1/i2/i500 constants are assumed
 * to be file-scope globals defined elsewhere — confirm against the full file.
 */
main(int argc, char *argv[])
/* MAIN
Purpose : 1 Get privilege mask
2 Setup SMG environment unless no VT or DFU$NOSMG is set
3 Get and Parse command (syntax only)
4 Dispatch to correct subroutine
Inputs : Command line (if specified through foreign command)
Outputs : returns last status code to DCL in case of single command processing.
In interactive mode always returns SS$_NORMAL. */
{
	const rms_eof=98938,smg$_eof=1213442;	/* implicit int (VAX C); RMS/SMG end-of-file codes */
	struct { short status, count; int extra ; } iosb;	/* I/O status block for SYS$QIOW */
	static char command_line[255], *e;
	unsigned int out_len,ret_len,prvmask;
	void reset_ctrl(), clean_ctrlc(), prev_screen(), next_screen(), dump_screen(), toggle_width() ;
	int smg_flag, x, y, i, ttype;
	int cursor_on = SMG$M_CURSOR_ON;
	$DESCRIPTOR(input_line , command_line);
	$DESCRIPTOR(prompt,"DFU> ");
	$DESCRIPTOR(terminal,"SYS$COMMAND");
	$DESCRIPTOR(top_txt,"< DFU V2.2 (Freeware) >");
	$DESCRIPTOR(status_txt,"Statistics");
	$DESCRIPTOR(do_key,"DO");
	$DESCRIPTOR(pf2,"PF2");
	$DESCRIPTOR(pf4,"PF4");
	$DESCRIPTOR(prev,"PREV_SCREEN");
	$DESCRIPTOR(next,"NEXT_SCREEN");
	$DESCRIPTOR(select,"SELECT");
	$DESCRIPTOR(help,"HELP");

	/* First find out how we got called ( by RUN, or a foreign command */
	ret_len = 0;
#if 0
	status = lib$get_foreign(&input_line,0,&ret_len,0);	/* disabled: foreign-command fetch */
#else
	status = 1;
#if 0
	strcpy(command_line,argv[1]);
#endif
#endif
	out_len = ret_len;
	smg$enable = TRUE;
	key_tab = 0;
	disp2_id = 0;
	cip = 0;	/* "command in progress" flag used by the broadcast AST */
	setvbuf(stdout, NULL, _IONBF, 0); // need this to see i/o at all
#if 0
	smg$enable = FALSE;
	vms_mm = check_vms_mm();
#else
	/* Now create the SMG environment */
	colls=80; rows=24;
	SMG$CREATE_PASTEBOARD(&paste_id, 0, &rows, &colls,&SMG$M_KEEP_CONTENTS,&ttype,0);
	/* DFU$NOSMG in the environment, or a non-VT terminal, disables screen mode */
	if ((e = (char *) getenv("DFU$NOSMG")) && *e)
		smg$enable = FALSE;
	else
	{
		if (ttype != SMG$K_VTTERMTABLE) smg$enable = FALSE;
		if (ttype != SMG$K_VTTERMTABLE) SMG$DELETE_PASTEBOARD(&paste_id,&i0);
	}
	SMG$CREATE_VIRTUAL_KEYBOARD(&keyb_id,0,0,0,0);
	if (smg$enable) /* Setup key table */
	{
		SMG$ERASE_PASTEBOARD(&paste_id, 0, 0, 0, 0, 0, 0);
		SMG$CREATE_KEY_TABLE(&key_tab);
		colls -=2; orig_colls = colls;
		smg_flag = SMG$M_KEY_NOECHO + SMG$M_KEY_TERMINATE;
		/* Map function keys to command strings (PF2 and HELP both map to HELP) */
		SMG$ADD_KEY_DEF(&key_tab,&do_key,0, &smg_flag, &do_key,0);
		SMG$ADD_KEY_DEF(&key_tab,&pf4,0, &smg_flag,&pf4,0);
		SMG$ADD_KEY_DEF(&key_tab,&prev,0, &smg_flag, &prev,0);
		SMG$ADD_KEY_DEF(&key_tab,&next,0, &smg_flag, &next,0);
		SMG$ADD_KEY_DEF(&key_tab,&pf2,0, &smg_flag, &help,0);
		SMG$ADD_KEY_DEF(&key_tab,&help,0, &smg_flag, &help,0);
		SMG$ADD_KEY_DEF(&key_tab,&select,0, &smg_flag, &select,0);
		/* Main scrolling display (500 lines), statistics display, and command line display */
		SMG$CREATE_VIRTUAL_DISPLAY(&i500, &colls , &disp1_id, &SMG$M_BORDER, 0, 0);
		x = 508 - rows; y = rows - 7;
		SMG$CREATE_VIEWPORT(&disp1_id,&x,&i1,&y,&colls);
		SMG$CREATE_VIRTUAL_DISPLAY(&i2, &colls, &status_id, 0 , 0, 0);
		SMG$CREATE_VIRTUAL_DISPLAY(&i2, &colls, &disp2_id, 0 , 0, 0);
		SMG$SET_BROADCAST_TRAPPING(&paste_id,brdcst_ast,0);
		SMG$LABEL_BORDER(&disp1_id, &top_txt, 0, 0,&SMG$M_BOLD, 0, 0);
		SMG$LABEL_BORDER(&status_id, &status_txt, 0, 0,&SMG$M_BOLD, 0, 0);
		SMG$PASTE_VIRTUAL_DISPLAY(&disp1_id, &paste_id, &i2,&i2,0);
		x = rows - 4;
		SMG$PASTE_VIRTUAL_DISPLAY(&status_id, &paste_id, &x,&i2,0);
		x = rows - 1;
		SMG$PASTE_VIRTUAL_DISPLAY(&disp2_id, &paste_id, &x,&i2,0);
		x = 508 - rows;
		SMG$SET_CURSOR_ABS(&disp1_id,&x,&i1);
		SMG$SET_CURSOR_ABS(&disp2_id,&i1,&i1);
		SMG$BEGIN_PASTEBOARD_UPDATE(&paste_id);	/* batch the banner output below */
	}
#endif
	/* Print the startup banner */
	sprintf(outbuf,"\n Disk and File Utilities for OpenVMS DFU V2.2"); put_disp();
	sprintf(outbuf," Freeware version"); put_disp();
	sprintf(outbuf," Copyright © 1995 Digital Equipment Corporation\n"); put_disp();
	if (smg$enable)
	{	/* Enter additional info */
		sprintf(outbuf," DFU functions are : \n"); put_disp();
		sprintf(outbuf," DEFRAGMENT : Defragment files"); put_disp();
		sprintf(outbuf," DELETE : Delete files by File-ID; delete directory (trees)"); put_disp();
		sprintf(outbuf," DIRECTORY : Manipulate directories"); put_disp();
		sprintf(outbuf," REPORT : Generate a complete disk report"); put_disp();
		sprintf(outbuf," SEARCH : Fast file search"); put_disp();
		sprintf(outbuf," SET : Modify file attributes"); put_disp();
		sprintf(outbuf," UNDELETE : Recover deleted files"); put_disp();
		sprintf(outbuf," VERIFY : Check and repair disk structure"); put_disp();
		SMG$END_PASTEBOARD_UPDATE(&paste_id);
	}
	prvmask = 0;
	status = dfu_check_access(&prvmask); /*Get the privilege mask */
	/* Setup terminal channel for control purposes; get the terminal chars */
	status = SYS$ASSIGN(&terminal, &tchan, 0,0);
	status = SYS$QIOW(0,tchan, IO$_SENSEMODE,0,0,0,&orgttchar,12,0,0,0,0);
	for (i = 0; i < 3; i++) ttchar[i] = orgttchar[i];
	ttchar[2] &= ~TT2$M_EDITING; /* Clear line edit bit */
	clean_ctrlc(); /* Enable CTRL/W if needed */
	if (ret_len==0)
	{	/* Interactive: read the first command */
		if (smg$enable)
			status = SMG$READ_COMPOSED_LINE(&keyb_id,&key_tab,&input_line,&prompt, &out_len,&disp2_id,0,0,0,0,0);
		else
			status = SMG$READ_COMPOSED_LINE(&keyb_id,0,&input_line,&prompt, &out_len,0,0,0,0,0,0);
	}
	memcpy (command_line, input_line.dsc$a_pointer, input_line.dsc$w_length);
	cip = 1;
	/* Main loop starts here. Get a command and pasre it*/
	for (;;)
	{	/* loop forever until EXIT is entered */
		if(status==smg$_eof) status = exit_command(prvmask);	/* Ctrl-Z behaves as EXIT */
		if ((status&1) != 1) goto endfor;	/* VMS failure status: skip dispatch */
		if (out_len == 0) goto endfor;		/* empty line */
		/* First catch special screen commands */
		if (smg$enable)
		{
			status = strncmp(command_line, "PREV_SCREEN", 11);
			if (status == 0) { prev_screen(); goto endfor; }
			status = strncmp(command_line, "DO",2);
			if (status == 0) { status = spawn_command(prvmask); goto endfor; }
			status = strncmp(command_line, "PF4",3);
			if (status == 0) { dump_screen(); goto endfor; }
			status = strncmp(command_line, "NEXT_SCREEN", 11);
			if (status == 0) { next_screen(); goto endfor; }
			status = strncmp(command_line, "SELECT", 6);
			if (status == 0) { toggle_width(); goto endfor; }
			/* Ordinary command: clear the displays before dispatching */
			SMG$ERASE_DISPLAY(&disp1_id, 0, 0, 0, 0);
			SMG$ERASE_DISPLAY(&status_id, 0, 0, 0, 0);
			SMG$CHANGE_VIEWPORT(&disp1_id,&x,&i1,&y,&colls);
			SMG$SET_CURSOR_ABS(&disp1_id,&x,&i1);
		}
		/* Catch the CLI errors do avoid disrupting the SMG screen... */
#if 0
		VAXC$ESTABLISH(prim_hand);
#endif
		status = CLI$DCL_PARSE(&input_line,&dfu_tables,0 /* not yet lib$get_input*/,0,&prompt); // check added & before dfu_tables
#if 0
		VAXC$ESTABLISH(NULL);
#endif
		if (status == CLI$_NOCOMD) singlemsg(0,status);
		if ((status & 1 ) != 1) goto endfor;
		else /* Now dispatch if no errors */
		{
			reset_ctrl();
			CLI$DISPATCH(prvmask);	/* run the subroutine bound to the parsed verb */
			clean_ctrlc();
			cip = 0;
			status = brdcst_ast();	/* flush any trapped broadcast messages */
			if (smg$enable) SMG$SET_CURSOR_MODE(&paste_id, &cursor_on);
		}
endfor:
		if (ret_len !=0)
		{	/* Single command processing , so exit here */
			status += 0x10000000; /* Do not echo the error on DCL level */
			if (smg$enable)
			{
				if (colls != orig_colls) toggle_width();
				SMG$SET_CURSOR_ABS(&disp2_id,&i2,&i1);
			}
			exit(status);
		}
		/* Get next line */
		cip = 0;
#if 1
		if (smg$enable)
		{
			SMG$ERASE_LINE(&disp2_id, &i1, &i1);
			SMG$SET_CURSOR_ABS(&disp2_id,&i1,&i1);
			status = SMG$READ_COMPOSED_LINE(&keyb_id,&key_tab,&input_line, &prompt,&out_len,&disp2_id,0,0,0,0,0); /*Get next command */
			cip = 1;
		}
		else status = SMG$READ_COMPOSED_LINE(&keyb_id,0,&input_line, &prompt,&out_len,0,0,0,0,0,0); /*Get next command */
#else
		/* plain-stdio fallback (disabled) */
		printf("%s",prompt.dsc$a_pointer);
		out_len = read(0,command_line,254);
		out_len--; command_line[out_len]=0;
		if (strncmp(command_line,"exit",4)==0) return 0;
#endif
	}
} /* END of MAIN */
/* Implements the M $TEXT() function: return the source line addressed by
 * label/int_exp within routine rtn, or the routine name itself for line 0
 * (zero-linking the routine first if it is not yet linked).  Any error while
 * locating the source is swallowed by the fntext_ch condition handler and the
 * result is the empty string.  On success the line is copied into the
 * stringpool with its leading whitespace normalized (first tab/space becomes
 * one space; further leading tabs become spaces up to the first code char).
 */
void op_fntext(mval *label, int int_exp, mval *rtn, mval *ret)
/* label contains label to be located or null string */
/* int_exp contains label offset or line number to reference */
/* ret is used to return the correct string to caller */
{
	char	*cp, *ctop;
	int	i, lbl, letter;	/* lbl: still inside the leading label/whitespace region */
	mval	*temp_rtn, temp_mval;
	mstr	*sld;		/* source-line descriptor filled in by get_src_line() */
	uint4	stat;		/* bit flags from get_src_line(): ZEROLINE/CHECKSUMFAIL/NEGATIVELINE/... */
	rhdtyp	*rtn_vector;

	error_def(ERR_TXTNEGLIN);
	error_def(ERR_TXTSRCMAT);
	error_def(ERR_ZLINKFILE);
	error_def(ERR_ZLMODULE);

	MV_FORCE_STR(label);
	MV_FORCE_STR(rtn);
	temp_rtn = &temp_mval;
	*temp_rtn = *rtn;	/* make a copy of the routine in case the caller used the same mval for rtn and ret */
	ret->str.len = 0;	/* make ret an emptystring in case the return is by way of the condition handler */
	ret->mvtype = MV_STR;
	sld = (mstr *)NULL;
	ESTABLISH(fntext_ch);	/* to swallow errors and permit an emptystring result */
	if ((int_exp == 0) && ((label->str.len == 0) || (*label->str.addr == 0)))
		stat = ZEROLINE;	/* +0^rtn with no label: caller wants the routine name */
	else
		stat = get_src_line(temp_rtn, label, int_exp, &sld);
	if ((FALSE == (stat & CHECKSUMFAIL)) && (FALSE == (stat & NEGATIVELINE)))
	{
		if (stat & ZEROLINE)
		{
			if (NULL == (rtn_vector = find_rtn_hdr(&temp_rtn->str)))
			{	/* not here, so try to bring it in */
				op_zlink(temp_rtn, 0);
				rtn_vector = find_rtn_hdr(&temp_rtn->str);
			}
			if (NULL != rtn_vector)
			{	/* result is the routine name; scan for its NUL/mident-size end */
				ret->str.addr = cp = (char *)&rtn_vector->routine_name;
				for (ctop = cp + sizeof(mident); *cp && cp < ctop; cp++)
					;
				ret->str.len = cp - ret->str.addr;
			}
		} else if (NULL != sld)
			ret->str = *sld;	/* the located source line */
	}
	REVERT;
	/* If non-empty, copy result to stringpool and
	 * convert any tabs in linestart to spaces */
	if (ret->str.len)
	{
		if (stringpool.free + ret->str.len > stringpool.top)
			stp_gcol(ret->str.len);	/* garbage-collect to make room */
		cp = stringpool.free;
		for (i = 0, lbl = 1; i < ret->str.len; i++)
		{
			letter = ret->str.addr[i];
			if (lbl)
			{	/* copying the label; first space/tab ends it and becomes a single space */
				if ((' ' == letter) || ('\t' == letter))
				{
					letter = ' ';
					lbl = 0;
				}
				*cp++ = letter;
			} else
			{	/* past the label: map leading tabs/spaces to spaces, then bulk-copy the rest */
				if ((' ' != letter) && ('\t' != letter))
				{
					memcpy(cp, &ret->str.addr[i], ret->str.len - i);
					break;
				} else
					*cp++ = ' ';
			}
		}
		ret->str.addr=stringpool.free;
		stringpool.free += ret->str.len;
	}
	return;
}
/* Compile every M source file matching the (possibly wildcarded) file
 * specification faddr/flen.  Each match found by op_fnzsearch() is compiled
 * via compiler_startup(); any parse, not-found or compilation problem sets
 * $ZCSTATUS (TREF(dollar_zcstatus)) to ERR_ERRORSUMMARY rather than stopping
 * the loop.  A source coming from a terminal device ("/dev/...") is compiled
 * only once.  When MFtIsReqd is TRUE, matches whose extension is not ".m"/".M"
 * are reported and skipped.  Errors during compilation unwind through the
 * source_ch condition handler.
 *
 * flen      - length of the file specification
 * faddr     - address of the file specification (not NUL-terminated)
 * MFtIsReqd - require the ".m" file type on every matched file
 */
void compile_source_file(unsigned short flen, char *faddr, boolean_t MFtIsReqd)
{
	plength		plen;	/* path-component lengths (dir/name/ext) from op_fnzsearch */
	mval		fstr, ret;
	int		i, rc;
	unsigned char	*p;	/* start of the file-name portion within source_file_name */
	boolean_t	wildcarded, dm_action;	/* NOTE(review): both appear unused in this routine */
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	if (MAX_FBUFF < flen)
	{	/* specification too long to be a valid file name */
		dec_err(VARLSTCNT(4) ERR_FILEPARSE, 2, flen, faddr);
		TREF(dollar_zcstatus) = ERR_ERRORSUMMARY;
	} else
	{
		object_file_des = FD_INVALID;
		fstr.mvtype = MV_STR;
		fstr.str.addr = faddr;
		fstr.str.len = flen;
		ESTABLISH(source_ch);
		tt_so_do_once = FALSE;
		zsrch_clr(STRM_COMP_SRC);	/* Clear any existing search cache */
		for (i = 0; ; i++)
		{
			plen.p.pint = op_fnzsearch(&fstr, STRM_COMP_SRC, 0, &ret);
			if (!ret.str.len)
			{	/* no (more) matches; report FILENOTFND only if there was never one */
				if (!i)
				{
					dec_err(VARLSTCNT(4) ERR_FILENOTFND, 2, fstr.str.len, fstr.str.addr);
					TREF(dollar_zcstatus) = ERR_ERRORSUMMARY;
				}
				break;
			}
			assert(ret.mvtype == MV_STR);
			assert(ret.str.len <= MAX_FBUFF);
			/* publish the match in the file-scope source_file_name/source_name_len globals
			 * that the compiler proper reads */
			source_name_len = ret.str.len;
			memcpy(source_file_name, ret.str.addr, source_name_len);
			source_file_name[source_name_len] = 0;
			p = &source_file_name[plen.p.pblk.b_dir];
			if ((plen.p.pblk.b_dir >= SIZEOF("/dev/") - 1) && !MEMCMP_LIT(source_file_name, "/dev/"))
				tt_so_do_once = TRUE;	/* terminal source: compile exactly once */
			else if (MFtIsReqd && (plen.p.pblk.b_ext != 2 || ('M' != p[plen.p.pblk.b_name + 1]
				&& 'm' != p[plen.p.pblk.b_name + 1])))
			{	/* M filetype is required but not present */
				dec_err(VARLSTCNT(4) ERR_FILEPARSE, 2, source_name_len, source_file_name);
				TREF(dollar_zcstatus) = ERR_ERRORSUMMARY;
				continue;
			}
			if (compiler_startup())
				TREF(dollar_zcstatus) = ERR_ERRORSUMMARY;	/* compilation had errors */
			if (FD_INVALID != object_file_des)
			{	/* close the object file the compiler left open */
				CLOSEFILE_RESET(object_file_des, rc);	/* resets "object_file_des" to FD_INVALID */
				if (-1 == rc)
					rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_OBJFILERR, 2,
						object_name_len, object_file_name, errno);
			}
			if (tt_so_do_once)
				break;
		}
		REVERT;
	}
}
/* gds_rundown - close out this process's access to the current database region
 * (gv_cur_region): flush caches and the file header, write final journal
 * records, detach shared memory, and -- if this process is the very last user
 * -- remove the shared memory segment and semaphores.
 *
 * Operates entirely on globals (gv_cur_region / cs_addrs / cs_data).  Returns
 * immediately for GT.CM (dba_cm) regions and routes dba_usr regions to
 * gvusr_rundown().  Errors are funneled through gds_rundown_ch.
 *
 * Ordering is critical throughout: ftok lock -> access-control semaphore ->
 * ref-count decrement -> flush/journal close -> fileheader sync -> shmdt ->
 * IPC removal -> semaphore/ftok release.  Do not reorder.
 */
void gds_rundown(void)
{
	bool			is_mm, we_are_last_user, we_are_last_writer;
	boolean_t		ipc_deleted, remove_shm, cancelled_timer, cancelled_dbsync_timer, vermismatch;
	now_t			now;	/* for GET_CUR_TIME macro */
	char			*time_ptr, time_str[CTIME_BEFORE_NL + 2];	/* for GET_CUR_TIME macro */
	gd_region		*reg;
	int			save_errno, status;
	int4			semval, ftok_semval, sopcnt, ftok_sopcnt;
	short			crash_count;
	sm_long_t		munmap_len;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	struct shmid_ds		shm_buf;
	struct sembuf		sop[2], ftok_sop[2];
	uint4			jnl_status;
	unix_db_info		*udi;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;

	error_def(ERR_CRITSEMFAIL);
	error_def(ERR_DBCCERR);
	error_def(ERR_DBFILERR);
	error_def(ERR_DBRNDWNWRN);
	error_def(ERR_ERRCALL);
	error_def(ERR_GBLOFLOW);
	error_def(ERR_GTMASSERT);
	error_def(ERR_IPCNOTDEL);
	error_def(ERR_JNLFLUSH);
	error_def(ERR_RNDWNSEMFAIL);
	error_def(ERR_TEXT);
	error_def(ERR_WCBLOCKED);

	forced_exit = FALSE;	/* Okay, we're dying already -- let rel_crit live in peace now.
				 * If coming through a DAL, not necessarily dying. what to do then? -- nars -- 8/15/2001
				 */
	grabbed_access_sem = FALSE;
	jnl_status = 0;
	reg = gv_cur_region;	/* Local copy */
	/* Early out for cluster regions to avoid tripping the assert below.
	 * Note:
	 *	This early out is consistent with VMS.  It has been noted that all of the gtcm
	 *	assignments to gv_cur_region should use the TP_CHANGE_REG macro.  This would
	 *	also avoid the assert problem and should be done eventually.
	 */
	if (dba_cm == reg->dyn.addr->acc_meth)
		return;
	udi = FILE_INFO(reg);
	csa = &udi->s_addrs;
	csd = csa->hdr;
	assert(csa == cs_addrs && csd == cs_data);
	if ((reg->open) && (dba_usr == csd->acc_meth))
	{	/* user-supplied access method has its own rundown */
		change_reg();
		gvusr_rundown();
		return;
	}
	ESTABLISH(gds_rundown_ch);
	if (!reg->open)		/* Not open, no point to rundown */
	{
		if (reg->opening)	/* Died partway open, kill rest of way */
		{
			rel_crit(reg);
			mutex_cleanup(reg);
			/* revist this to handle MM properly  SMW 98/12/16
			if (NULL != csa->nl)
			{
				status = shmdt((caddr_t)csa->nl);
				if (-1 == status)
					send_msg(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
						ERR_TEXT, 2, LEN_AND_LIT("Error during shmdt"), errno);
			}
			*/
			shmdt((caddr_t)csa->nl);
			csa->nl = NULL;
		}
		REVERT;
		return;
	}
	switch(csd->acc_meth)
	{	/* Pass mm and bg through */
		case dba_bg:
			is_mm = FALSE;
			break;
		case dba_mm:
			is_mm = TRUE;
			break;
		case dba_usr:
			assert(FALSE);
		default:
			REVERT;
			return;
	}
	/* Cancel any pending flush timer for this region by this task */
	CANCEL_DB_TIMERS(reg, cancelled_timer, cancelled_dbsync_timer);
	we_are_last_user = FALSE;
	if (!csa->persistent_freeze)
		region_freeze(reg, FALSE, FALSE, FALSE);
	assert(!csa->read_lock);
	rel_crit(reg);		/* get locks to known state */
	mutex_cleanup(reg);
	/* We need to guarantee that no one else accesses the database file header while the semid/shmid
	 * fields are reset.  We already created the ftok semaphore in db_init or mu_rndwn_file and did
	 * not remove it.  So just lock it, in blocking mode.
	 */
	if (!ftok_sem_lock(reg, FALSE, FALSE))
		rts_error(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
	/* For mupip_jnl_recover we already have the database access control semaphore.
	 * We do not release it here; it is released from mur_close_files.
	 */
	if (!mupip_jnl_recover)
	{
		sop[0].sem_num = 0;
		sop[0].sem_op = 0;	/* Wait for 0 */
		sop[1].sem_num = 0;
		sop[1].sem_op = 1;	/* Lock */
		sopcnt = 2;
		sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO | IPC_NOWAIT;	/* Don't wait the first time thru */
		SEMOP(udi->semid, sop, sopcnt, status);
		if (-1 == status)	/* We couldn't get it in one shot -- see if we already have it */
		{
			save_errno = errno;
			/* see comment about Linux specific difference in behaviour of semctl() with GETPID
			 * in gds_rundown_ch()
			 */
			if (semctl(udi->semid, 0, GETPID) == process_id)
			{
				send_msg(VARLSTCNT(5) MAKE_MSG_INFO(ERR_CRITSEMFAIL), 2, DB_LEN_STR(reg),
					ERR_RNDWNSEMFAIL);
				REVERT;
				return;	/* Already in rundown for this region */
			}
			if (EAGAIN != save_errno)
			{
				assert(FALSE);
				rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
					RTS_ERROR_TEXT("gds_rundown first semop/semctl"), save_errno);
			}
			sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO;	/* Try again - blocking this time */
			SEMOP(udi->semid, sop, 2, status);
			if (-1 == status)	/* We couldn't get it at all.. */
				rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
		}
	}
	grabbed_access_sem = TRUE;
	/* We now have the dbinit/rundown lock, so we are alone in this code for this region
	 * and nobody else can attach.
	 * See if we are all alone in accessing this database shared memory.
	 */
	assert(csa->ref_cnt);	/* decrement private ref_cnt before shared ref_cnt decrement. */
	csa->ref_cnt--;		/* Currently journaling logic in gds_rundown() in VMS relies on this order
				 * to detect last writer */
	assert(!csa->ref_cnt);
	--csa->nl->ref_cnt;
	if (memcmp(csa->nl->now_running, gtm_release_name, gtm_release_name_len + 1))
	{	/* VERMISMATCH condition. Possible only if DSE */
		assert(dse_running);
		vermismatch = TRUE;
	} else
		vermismatch = FALSE;
	if (-1 == shmctl(udi->shmid, IPC_STAT, &shm_buf))
	{
		save_errno = errno;
		rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
			RTS_ERROR_TEXT("gds_rundown shmctl"), save_errno);
	} else
		we_are_last_user = (1 == shm_buf.shm_nattch) && !vermismatch;
	assert(!mupip_jnl_recover || we_are_last_user); /* recover => one user */
	if (-1 == (semval = semctl(udi->semid, 1, GETVAL)))
		rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
	we_are_last_writer = (1 == semval) && (FALSE == reg->read_only) && !vermismatch;/* There's one writer
										 * left and I am it */
	assert(!(mupip_jnl_recover && !reg->read_only) || we_are_last_writer); /* recover + R/W region => one writer */
	if (-1 == (ftok_semval = semctl(udi->ftok_semid, 1, GETVAL)))
		rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
	/* If csa->nl->donotflush_dbjnl is set, it means mupip recover/rollback was interrupted and therefore we should
	 * not flush shared memory contents to disk as they might be in an inconsistent state.
	 * In this case, we will go ahead and remove shared memory (without flushing the contents) in this routine.
	 * A reissue of the recover/rollback command will restore the database to a consistent state.
	 * Otherwise, if we have write access to this region, let us perform a few writing tasks.
	 */
	if (csa->nl->donotflush_dbjnl)
		csa->wbuf_dqd = 0;	/* ignore csa->wbuf_dqd status as we do not care about the cache contents */
	else if (!reg->read_only && !vermismatch)
	{	/* If we had an orphaned block and were interrupted, set wc_blocked so we can invoke wcs_recover */
		if (csa->wbuf_dqd)
		{
			grab_crit(reg);
			SET_TRACEABLE_VAR(csd->wc_blocked, TRUE);
			BG_TRACE_PRO_ANY(csa, wcb_gds_rundown);
			send_msg(VARLSTCNT(8) ERR_WCBLOCKED, 6, LEN_AND_LIT("wcb_gds_rundown"),
				process_id, &csa->ti->curr_tn, DB_LEN_STR(reg));
			csa->wbuf_dqd = 0;
			wcs_recover(reg);
			if (is_mm)
			{	/* MM is not expected here; resync csd defensively */
				assert(FALSE);
				csd = csa->hdr;
			}
			BG_TRACE_PRO_ANY(csa, lost_block_recovery);
			rel_crit(reg);
		}
		if (JNL_ENABLED(csd) && (GTCM_GNP_SERVER_IMAGE == image_type))
			originator_prc_vec = NULL;
		/* If we are the last writing user, then everything must be flushed */
		if (we_are_last_writer)
		{	/* Time to flush out all of our buffers */
			if (is_mm)
			{
				if (csa->total_blks != csa->ti->total_blks)	/* do remap if file had been extended */
				{
					grab_crit(reg);
					wcs_mm_recover(reg);
					csd = csa->hdr;
					rel_crit(reg);
				}
				csa->nl->remove_shm = TRUE;
			}
			/* Note WCSFLU_SYNC_EPOCH ensures the epoch is synced to the journal and indirectly
			 * also ensures that the db is fsynced.  We don't want to use it in the calls to
			 * wcs_flu() from t_end() and tp_tend() since we can defer it to out-of-crit there.
			 * In this case, since we are running down, we don't have any such option.
			 */
			csa->nl->remove_shm = wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_SYNC_EPOCH);
			/* Since we_are_last_writer, we should be guaranteed that wcs_flu() did not change csd, (in
			 * case of MM for potential file extension), even if it did a grab_crit().  Therefore, make
			 * sure that's true.
			 */
			assert(csd == csa->hdr);
			assert(0 == memcmp(csd->label, GDS_LABEL, GDS_LABEL_SZ - 1));
			csd->trans_hist.header_open_tn = csd->trans_hist.curr_tn;
		} else if ((cancelled_timer && (0 > csa->nl->wcs_timers)) || cancelled_dbsync_timer)
		{	/* cancelled pending db or jnl flush timers - flush database and journal buffers to disk */
			grab_crit(reg);
			/* we need to sync the epoch as the fact that there is no active pending flush timer implies
			 * there will be noone else who will flush the dirty buffers and EPOCH to disk in a timely fashion
			 */
			wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_SYNC_EPOCH);
			rel_crit(reg);
			assert((dba_mm == cs_data->acc_meth) || (csd == cs_data));
			csd = cs_data;	/* In case this is MM and wcs_flu() remapped an extended database, reset csd */
		}
		/* Do rundown journal processing after buffer flushes since they require jnl to be open */
		if (JNL_ENABLED(csd))
		{	/* the following tp_change_reg() is not needed due to the assert csa == cs_addrs at the beginning
			 * of gds_rundown(), but just to be safe.  To be removed by 2002!! --- nars -- 2001/04/25.
			 */
			tp_change_reg();	/* call this because jnl_ensure_open checks cs_addrs rather than gv_cur_region */
			jpc = csa->jnl;
			jbp = jpc->jnl_buff;
			if (jbp->fsync_in_prog_latch.u.parts.latch_pid == process_id)
			{	/* we should never still own the fsync latch at rundown */
				assert(FALSE);
				COMPSWAP_UNLOCK(&jbp->fsync_in_prog_latch, process_id, 0, LOCK_AVAILABLE, 0);
			}
			if (jbp->io_in_prog_latch.u.parts.latch_pid == process_id)
			{	/* nor the io-in-progress latch */
				assert(FALSE);
				COMPSWAP_UNLOCK(&jbp->io_in_prog_latch, process_id, 0, LOCK_AVAILABLE, 0);
			}
			/* NOTE(review): && binds tighter than ||, so the second disjunct below is
			 * (we_are_last_writer && (0 != csa->nl->jnl_file.u.inode)) -- presumably intentional; confirm.
			 */
			if (((NOJNL != jpc->channel) && !JNL_FILE_SWITCHED(jpc))
				|| we_are_last_writer && (0 != csa->nl->jnl_file.u.inode))
			{	/* We need to close the journal file cleanly if we have the latest generation journal file open
				 * or if we are the last writer and the journal file is open in shared memory (not necessarily
				 * by ourselves e.g. the only process that opened the journal got shot abnormally)
				 * Note: we should not infer anything from the shared memory value of csa->nl->jnl_file.u.inode
				 * if we are not the last writer as it can be concurrently updated.
				 */
				grab_crit(reg);
				if (JNL_ENABLED(csd))
				{
					SET_GBL_JREC_TIME;	/* jnl_ensure_open/jnl_put_jrt_pini/pfin/jnl_file_close all need it */
					/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order
					 * of jnl records.  This needs to be done BEFORE the jnl_ensure_open as that could write
					 * journal records (if it decides to switch to a new journal file).
					 */
					ADJUST_GBL_JREC_TIME(jgbl, jbp);
					jnl_status = jnl_ensure_open();
					if (0 == jnl_status)
					{	/* If we_are_last_writer, we would have already done a wcs_flu() which would
						 * have written an epoch record and we are guaranteed no further updates
						 * since we are the last writer.  So, just close the journal.
						 * Although we assert pini_addr should be non-zero for last_writer, we
						 * play it safe in PRO and write a PINI record if not written already.
						 */
						assert(!jbp->before_images || is_mm
							|| !we_are_last_writer || 0 != jpc->pini_addr);
						if (we_are_last_writer && 0 == jpc->pini_addr)
							jnl_put_jrt_pini(csa);
						if (0 != jpc->pini_addr)
							jnl_put_jrt_pfin(csa);
						/* If not the last writer and no pending flush timer left, do jnl flush now */
						if (!we_are_last_writer && (0 > csa->nl->wcs_timers))
						{
							if (SS_NORMAL == (jnl_status = jnl_flush(reg)))
							{
								assert(jbp->freeaddr == jbp->dskaddr);
								jnl_fsync(reg, jbp->dskaddr);
								assert(jbp->fsync_dskaddr == jbp->dskaddr);
							} else
							{
								send_msg(VARLSTCNT(9) ERR_JNLFLUSH, 2, JNL_LEN_STR(csd),
									ERR_TEXT, 2,
									RTS_ERROR_TEXT("Error with journal flush in gds_rundown"),
									jnl_status);
								assert(NOJNL == jpc->channel);/* jnl file lost has been triggered */
								/* In this routine, all code that follows from here on does not
								 * assume anything about the journaling characteristics of this
								 * database so it is safe to continue execution even though
								 * journaling got closed in the middle.
								 */
							}
						}
						jnl_file_close(reg, we_are_last_writer, FALSE);
					} else
						send_msg(VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(reg));
				}
				rel_crit(reg);
			}
		}
		if (we_are_last_writer)	/* Flush the fileheader last and harden the file to disk */
		{
			grab_crit(reg);	/* To satisfy crit requirement in fileheader_sync() */
			memset(csd->machine_name, 0, MAX_MCNAMELEN);	/* clear the machine_name field */
			if (!mupip_jnl_recover && we_are_last_user)
			{	/* mupip_jnl_recover will do this after mur_close_file */
				csd->semid = INVALID_SEMID;
				csd->shmid = INVALID_SHMID;
				csd->gt_sem_ctime.ctime = 0;
				csd->gt_shm_ctime.ctime = 0;
			}
			fileheader_sync(reg);
			rel_crit(reg);
			if (FALSE == is_mm)
			{
				if (-1 == fsync(udi->fd))	/* Sync it all */
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("Error during file sync at close"), errno);
				}
			} else
			{	/* Now do final MM file sync before exit */
#if !defined(TARGETED_MSYNC) && !defined(NO_MSYNC)
				if (-1 == fsync(udi->fd))	/* Sync it all */
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("Error during file sync at close"), errno);
				}
#else
				if (-1 == msync((caddr_t)csa->db_addrs[0],
						(size_t)(csa->db_addrs[1] - csa->db_addrs[0]), MS_SYNC))
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
						RTS_ERROR_TEXT("Error during file msync at close"), errno);
				}
#endif
			}
		}
	} /* end if (!reg->read_only && !csa->nl->donotflush_dbjnl) */
	if (reg->read_only && we_are_last_user && !mupip_jnl_recover)
	{	/* mupip_jnl_recover will do this after mur_close_file */
		db_ipcs.semid = INVALID_SEMID;
		db_ipcs.shmid = INVALID_SHMID;
		db_ipcs.gt_sem_ctime = 0;
		db_ipcs.gt_shm_ctime = 0;
		db_ipcs.fn_len = reg->dyn.addr->fname_len;
		memcpy(db_ipcs.fn, reg->dyn.addr->fname, reg->dyn.addr->fname_len);
		db_ipcs.fn[reg->dyn.addr->fname_len] = 0;
		/* request gtmsecshr to flush.  read_only cannot flush itself */
		if (0 != send_mesg2gtmsecshr(FLUSH_DB_IPCS_INFO, 0, (char *)NULL, 0))
			rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
				RTS_ERROR_TEXT("gtmsecshr failed to update database file header"));
	}
	/* Done with file now, close it */
	if (-1 == close(udi->fd))
	{
		rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
			LEN_AND_LIT("Error during file close"), errno);
	}
	/* Unmap storage if mm mode but only the part that is not the fileheader (so shows up in dumps) */
	if (is_mm)
	{
		munmap_len = (sm_long_t)((csa->db_addrs[1] - csa->db_addrs[0])
			- ROUND_UP(SIZEOF_FILE_HDR(csa->hdr), MSYNC_ADDR_INCS));
		if (munmap_len > 0)
		{
			munmap((caddr_t)(csa->db_addrs[0] + ROUND_UP(SIZEOF_FILE_HDR(csa->hdr), MSYNC_ADDR_INCS)),
				(size_t)(munmap_len));
#ifdef DEBUG_DB64
			rel_mmseg((caddr_t)csa->db_addrs[0]);
#endif
		}
	}
	/* Detach our shared memory while still under lock so reference counts will be
	 * correct for the next process to run down this region.
	 * In the process also get the remove_shm status from node_local before detaching.
	 * If csa->nl->donotflush_dbjnl is TRUE, it means we can safely remove shared memory without compromising data
	 * integrity as a reissue of recover will restore the database to a consistent state.
	 */
	remove_shm = !vermismatch && (csa->nl->remove_shm || csa->nl->donotflush_dbjnl);
	status = shmdt((caddr_t)csa->nl);
	csa->nl = NULL; /* dereferencing nl after detach is not right, so we set it to NULL so that we can test
			 * before dereference */
	if (-1 == status)
		send_msg(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
			LEN_AND_LIT("Error during shmdt"), errno);
	reg->open = FALSE;
	/* If file is still not in good shape, die here and now before we get rid of our storage */
	if (csa->wbuf_dqd)
		GTMASSERT;
	ipc_deleted = FALSE;
	/* If we are the very last user, remove shared storage id and the semaphores */
	if (we_are_last_user)
	{	/* remove shared storage, only if last writer to rundown did a successful wcs_flu() */
		assert(!vermismatch);
		if (remove_shm)
		{
			ipc_deleted = TRUE;
			if (0 != shm_rmid(udi->shmid))
				rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Unable to remove shared memory"));
		} else if (is_src_server || is_updproc)
		{	/* shared memory deliberately left behind; warn both operator log and user */
			gtm_putmsg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
			send_msg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
		} else
			send_msg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
		/* Don't release semaphore in case of mupip recover/rollback; since it has standalone access.
		 * It will release the semaphore in mur_close_files.
		 */
		if (!mupip_jnl_recover)
		{
			if (0 != sem_rmid(udi->semid))
				rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Unable to remove semaphore"));
			grabbed_access_sem = FALSE;
		}
	} else
	{
		assert(!mupip_jnl_recover);
		/* If we were writing, get rid of our writer access count semaphore */
		if (!reg->read_only)
			if (0 != (save_errno = do_semop(udi->semid, 1, -1, SEM_UNDO)))
				rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
					RTS_ERROR_TEXT("gds_rundown write semaphore release"), save_errno);
		/* Now remove the rundown lock */
		if (0 != (save_errno = do_semop(udi->semid, 0, -1, SEM_UNDO)))
			rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
				RTS_ERROR_TEXT("gds_rundown rundown semaphore release"), save_errno);
		grabbed_access_sem = FALSE;
	}
	if (!ftok_sem_release(reg, !mupip_jnl_recover, FALSE))
		rts_error(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
	if (!ipc_deleted)
	{	/* report (per interested process type) that the IPC resources were left in place */
		GET_CUR_TIME;
		if (is_src_server)
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Source server"), REG_LEN_STR(reg));
		if (is_updproc)
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Update process"), REG_LEN_STR(reg));
		if (mupip_jnl_recover)
		{
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Mupip journal process"), REG_LEN_STR(reg));
			send_msg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Mupip journal process"), REG_LEN_STR(reg));
		}
	}
	REVERT;
}
/* mupip_upgrade - upgrade a GT.M V3.x database file to the V4.x format, in place.
 *
 * Obtains standalone access (mupip_upgrade_standalone), validates the file header,
 * then rewrites the file with the larger V4.x header.  For files already carrying
 * the V4.x label, only the header fields new to this version are initialized.
 * The file label is temporarily replaced by a dummy ("UPGRADE0304") during the
 * rewrite so no other GT.M process can attach to a half-upgraded file.
 * Exits via mupip_exit(SS_NORMAL) on success; errors raise ERR_MUNOUPGRD (handled
 * by mupip_upgrade_ch).  THE REWRITE IS DESTRUCTIVE AND MUST NOT BE INTERRUPTED.
 */
void mupip_upgrade(void)
{
	bool		rbno;
	unsigned char	*upgrd_buff[2], upgrd_label[GDS_LABEL_SZ]="UPGRADE0304";
	char		fn[256];
	char		answer[4];
	unsigned short	fn_len;
	int4		fd, save_errno, old_hdr_size, new_hdr_size, status, bufsize, dsize, datasize[2];
	int4		old_hdr_size_vbn, new_hdr_size_vbn;
	int		fstat_res;
	off_t		last_full_grp_startoff, old_file_len, old_file_len2, read_off, write_off, old_start_vbn_off;
	block_id	last_full_grp_startblk;
	v3_sgmnt_data	old_head_data, *old_head;
	sgmnt_data	new_head_data, *new_head;
	struct stat	stat_buf;

	error_def(ERR_MUNODBNAME);
	error_def(ERR_MUNOUPGRD);
	error_def(ERR_DBOPNERR);
	error_def(ERR_DBRDONLY);
	error_def(ERR_DBFILOPERR);
	error_def(ERR_DBPREMATEOF);

	ESTABLISH(mupip_upgrade_ch);
	fn_len = sizeof(fn);
	if (!cli_get_str("FILE", fn, &fn_len))
		rts_error(VARLSTCNT(1) ERR_MUNODBNAME);
	if (!(mupip_upgrade_standalone(fn, &upgrade_standalone_sems)))
		rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
	if (-1 == (fd = OPEN(fn, O_RDWR)))
	{
		save_errno = errno;
		if (-1 != (fd = OPEN(fn, O_RDONLY)))
		{	/* opens read-only but not read-write: genuinely a read-only database */
			util_out_print("Cannot update read-only database.", FLUSH);
			rts_error(VARLSTCNT(5) ERR_DBRDONLY, 2, fn_len, fn, errno);
		}
		/* NOTE(review): this path is a plain open failure (both opens failed), yet it
		 * reports DBRDONLY; ERR_DBOPNERR looks like the intended message -- confirm. */
		rts_error(VARLSTCNT(5) ERR_DBRDONLY, 2, fn_len, fn, save_errno);
	}
	/* Confirm before proceed */
	if (!mu_upgrd_confirmed(TRUE))
	{
		util_out_print("Upgrade canceled by user", FLUSH);
		rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
	}
	util_out_print("Do not interrupt to avoid damage in database!!", FLUSH);
	util_out_print("Mupip upgrade started ...!/", FLUSH);
	mu_upgrd_sig_init();
	/* get file status */
	FSTAT_FILE(fd, &stat_buf, fstat_res);
	if (-1 == fstat_res)
		rts_error(VARLSTCNT(5) ERR_DBOPNERR, 2, fn_len, fn, errno);
	old_file_len = stat_buf.st_size;
	/* Prepare v3.x file header buffer */
	old_hdr_size = sizeof(*old_head);
	old_head = &old_head_data;
	/* Prepare v4.x file header buffer */
	new_hdr_size = sizeof(*new_head);
	new_head = &new_head_data;
	memset(new_head, 0, new_hdr_size);
	old_hdr_size_vbn = DIVIDE_ROUND_UP(old_hdr_size, DISK_BLOCK_SIZE);
	new_hdr_size_vbn = DIVIDE_ROUND_UP(new_hdr_size, DISK_BLOCK_SIZE);
	/* READ header from V3.x file */
	LSEEKREAD(fd, 0, old_head, old_hdr_size, status);
	if (0 != status)
		if (-1 == status)
			rts_error(VARLSTCNT(4) ERR_DBPREMATEOF, 2, fn_len, fn);
		else
			rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
	/* Check version */
	if (memcmp(&old_head->label[0], GDS_LABEL, GDS_LABEL_SZ - 1))
	{	/* full label does not match the current V4.x label */
		if (memcmp(&old_head->label[0], GDS_LABEL, GDS_LABEL_SZ - 3))
		{	/* it is not a GTM database */
			close(fd);
			util_out_print("File !AD is not a GT.M database.!/", FLUSH, fn_len, fn);
			rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
		}else
		{	/* it is GTM database */
			/* is it not v3.x database? */
			if (memcmp(&old_head->label[GDS_LABEL_SZ - 3],GDS_V30,2) !=0
				&& memcmp(&old_head->label[GDS_LABEL_SZ - 3],GDS_ALT_V30,2) != 0)
			{
				close(fd);
				util_out_print("File !AD has an unrecognized database version!/", FLUSH, fn_len, fn);
				rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
			}
		}
	} else
	{	/* Already carries the V4.x label: only initialize the newer header fields.
		 * Note: We assume that if the V4.x header and current GT.M file header
		 * has same field names, they are at same offset */
		/* READ the header from file again as V4.x header */
		LSEEKREAD(fd, 0, new_head, new_hdr_size, status);
		if (0 != status)
			if (-1 != status)
				rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
			else
				rts_error(VARLSTCNT(4) ERR_DBPREMATEOF, 2, fn_len, fn);
		if (QWNE(new_head->reg_seqno, seq_num_zero) || QWNE(new_head->resync_seqno, seq_num_zero)
			|| (new_head->resync_tn != 0) || new_head->repl_state != repl_closed)
		{	/* non-virgin replication fields suggest the file was already upgraded */
			util_out_print("!AD might already have been upgraded", FLUSH, fn_len, fn);
			util_out_print("Do you wish to continue with the upgrade? [y/n] ", FLUSH);
			/* NOTE(review): unbounded %s read into answer[4] can overflow on long input -- confirm
			 * input is operator-controlled or bound the read. */
			SCANF("%s", answer);
			if (answer[0] != 'y' && answer[0] != 'Y')
			{
				close(fd);
				util_out_print("Upgrade canceled by user", FLUSH);
				rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
			}
		}
		init_replication(new_head);
		new_head->max_update_array_size = new_head->max_non_bm_update_array_size =
			ROUND_UP2(MAX_NON_BITMAP_UPDATE_ARRAY_SIZE(new_head), UPDATE_ARRAY_ALIGN_SIZE);
		new_head->max_update_array_size += ROUND_UP2(MAX_BITMAP_UPDATE_ARRAY_SIZE, UPDATE_ARRAY_ALIGN_SIZE);
		new_head->mutex_spin_parms.mutex_hard_spin_count = MUTEX_HARD_SPIN_COUNT;
		new_head->mutex_spin_parms.mutex_sleep_spin_count = MUTEX_SLEEP_SPIN_COUNT;
		new_head->mutex_spin_parms.mutex_spin_sleep_mask = MUTEX_SPIN_SLEEP_MASK;
		new_head->semid = INVALID_SEMID;
		new_head->shmid = INVALID_SHMID;
		if (JNL_ALLOWED(new_head))
		{	/* Following 3 are new fields starting from V43001.
			 * Initialize them appropriately.
			 */
			new_head->epoch_interval = DEFAULT_EPOCH_INTERVAL;
			new_head->alignsize = DISK_BLOCK_SIZE * JNL_DEF_ALIGNSIZE;
			if (!new_head->jnl_alq)
				new_head->jnl_alq = JNL_ALLOC_DEF;
			/* note new_head->jnl_deq is carried over without any change even if it is zero since a zero
			 * jnl file extension size is supported starting V43001 */
			new_head->autoswitchlimit = ALIGNED_ROUND_DOWN(JNL_ALLOC_MAX, new_head->jnl_alq, new_head->jnl_deq);
			/* following field is assumed as non-zero by set_jnl_info starting V43001A */
			if (JNL_ALLOWED(new_head) && !new_head->jnl_buffer_size)
				new_head->jnl_buffer_size = JNL_BUFFER_DEF;
		} else
		{
			new_head->epoch_interval = 0;
			new_head->alignsize = 0;
			new_head->autoswitchlimit = 0;
		}
		new_head->yield_lmt = DEFAULT_YIELD_LIMIT;
		/* writing header */
		LSEEKWRITE(fd, 0, new_head, new_hdr_size, status);
		if (0 != status)
			rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
		close(fd);
		util_out_print("File !AD successfully upgraded.!/", FLUSH, fn_len, fn);
		if (0 != sem_rmid(upgrade_standalone_sems))
		{	/* NOTE(review): util_out_print uses !UL/!XL-style directives, not printf %d/%x --
			 * this format string likely prints literally; confirm. */
			util_out_print("Error with sem_rmid : %d [0x%x]", TRUE, upgrade_standalone_sems,
				upgrade_standalone_sems);
			rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
		}
		mupip_exit(SS_NORMAL);
	}
	util_out_print("Old header size: !SL", FLUSH, old_hdr_size);
	util_out_print("New header size: !SL", FLUSH, new_hdr_size);
	if (old_head->createinprogress)
	{
		close(fd);
		util_out_print("Database creation in progress on file !AD.!/", FLUSH, fn_len, fn);
		rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
	}
	if (old_head->file_corrupt)
	{
		close(fd);
		util_out_print("Database !AD is corrupted.!/", FLUSH, fn_len, fn);
		rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
	}
	/* sanity-check the file length against the header's geometry; some 3.2x files end with a
	 * full GDS block instead of a single DISK_BLOCK_SIZE trailer, hence the two alternatives */
	if ((((off_t)old_head->start_vbn - 1) * DISK_BLOCK_SIZE
			+ (off_t)old_head->trans_hist.total_blks * old_head->blk_size
			+ (off_t)DISK_BLOCK_SIZE != old_file_len)
		&& (((off_t)old_head->start_vbn - 1) * DISK_BLOCK_SIZE
			+ (off_t)old_head->trans_hist.total_blks * old_head->blk_size
			+ (off_t)old_head->blk_size != old_file_len))
	{
		util_out_print("Incorrect start_vbn !SL or, block size !SL or, total blocks !SL", FLUSH,
			old_head->start_vbn, old_head->blk_size, old_head->trans_hist.total_blks);
		rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
	}
	if (ROUND_DOWN(old_head->blk_size, DISK_BLOCK_SIZE) != old_head->blk_size)
	{
		util_out_print("Database block size !SL is not divisible by DISK_BLOCK_SIZE", FLUSH,
			old_head->blk_size);
		rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
	}
	mu_upgrd_header(old_head, new_head);	/* Update header from v3.x to v4.x */
	new_head->start_vbn = new_hdr_size_vbn + 1;
	new_head->free_space = 0;
	new_head->wc_blocked_t_end_hist.evnt_cnt = old_head->wc_blocked_t_end_hist2.evnt_cnt;
	new_head->wc_blocked_t_end_hist.evnt_tn = old_head->wc_blocked_t_end_hist2.evnt_tn;
	init_replication(new_head);
	/* A simple way of doing mupip upgrade is to move all the data after the file header towards
	 * the eof to make space and write down the header.  This does not need any computation or
	 * change in data/index blocks.  This is a slow process because it is mainly I/O, though no
	 * manipulation of database structures or index blocks.  This is okay for a small database.
	 *
	 * A time efficient way is to physically move the second group of BLKS_PER_LMAP blocks towards
	 * the eof and move the first group of BLKS_PER_LMAP blocks in place of the 2nd group.  Finally
	 * adjust all indices to point to the blocks correctly and adjust the master bit map.
	 * (note: we cannot move the first group from the beginning).  Detailed algorithm:
	 * ---------------------------
	 * // Allocate two buffers each to hold one group of data.
	 * Read v3.x header and upgrade to v4.x
	 * if file is big enough
	 *	read group 1 in buff[0]
	 *	read_off = offset of starting block of 2nd group.
	 *	read group 2 in buff[1]
	 *	write buff[0] at offset read_off
	 *	last_full_grp_startblk = points to the block where 2nd group of 512 blocks of old file
	 *		will be written back.
	 *	// Instead of searching for a free group we will write at the last full group
	 *	// Say, we have 3000 blocks. last_full_grp_startblk = 2048 // (not 2560, because it is not full)
	 *	// All data from that point upto eof will be read and saved in buffer
	 *	read all remaining data from the point last_full_grp_startblk upto eof in buff[0]
	 *	write buff[1] at the point of last_full_grp_startblk
	 *	Now write buff[0] at the end of last write
	 *	// Graphical Example: Each letter corresponds to a group of 512 blocks where first block
	 *	// is a local bit map.  Last group U may be a group of less than 512 blocks.
	 *	// Extend towards right ------------------------------------------------------->
	 *	// old permutation: [v3 head] A B C D E F G H I J K L M N O P Q R S T U
	 *	// new permutation: [v4 head ] A C D E F G H I J K L M N O P Q R S T B U
	 *	Finally traverse the tree and adjust block pointers
	 *	Adjust master map
	 *	write new v4.x header at bof
	 * else
	 *	bufsize = size of data for a group
	 *	rbno = 0	// read buffer no. This switches between 0 and 1
	 *	read_off = 0
	 *	write_off = 0
	 *	upgrd_buff[rbno] = new header
	 *	data_size[rbno] = new header size
	 *	rbno = INVERT(rbno);
	 *	do while not eof
	 *		data_size[rbno] = MIN(bufsize, remaining_data_size)
	 *		Read data of size data_size[rbno] in upgrd_buff[rbno] and adjust read_off
	 *		rbno = INVERT(rbno);
	 *		Write upgrd_buff[rbno] of datasize[rbno] at write_off and increase write_off
	 *	Enddo
	 *	rbno = INVERT(rbno)
	 *	Write upgrd_buff[rbno] of datasize[rbno] at write_off
	 * endif
	 */
	bufsize = old_head->blk_size * BLKS_PER_LMAP;
	upgrd_buff[0] = (unsigned char*) malloc(bufsize);
	upgrd_buff[1] = (unsigned char*) malloc(bufsize);
	read_off = old_start_vbn_off = (off_t)(old_head->start_vbn - 1) * DISK_BLOCK_SIZE; /* start vbn offset in bytes */
	last_full_grp_startblk = ROUND_DOWN(new_head->trans_hist.total_blks, BLKS_PER_LMAP);	/* in block_id */
	last_full_grp_startoff = old_start_vbn_off + (off_t)last_full_grp_startblk * new_head->blk_size; /* offset in bytes */
	/* this calculation is used because some 3.2x database has GDS blk_size bytes at the end
	 * instead of DISK_BLOCK_SIZE bytes. */
	old_file_len2 = old_head->start_vbn * DISK_BLOCK_SIZE + (off_t)old_head->blk_size * old_head->trans_hist.total_blks;
	/* Change Label to a temporary dummy value, so that another GTM process does not come in while
	 * we are doing the upgrade and corrupt the database */
	LSEEKWRITE(fd, 0, upgrd_label, GDS_LABEL_SZ - 1, status);
	if (0 != status)
		rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
	if (old_head->trans_hist.total_blks > BLKS_PER_LMAP * 2)
	{	/* big enough for the group-swap strategy */
		/* recalculate start_vbn and free space, because there will be a gap after header */
		new_head->start_vbn = old_head->start_vbn + bufsize / DISK_BLOCK_SIZE;
		new_head->free_space = bufsize - (new_hdr_size_vbn - old_hdr_size_vbn) * DISK_BLOCK_SIZE;
		util_out_print("New starting VBN is: !SL !/", FLUSH, new_head->start_vbn);
		/* read 1st group of blocks */
		LSEEKREAD(fd, read_off, upgrd_buff[0], bufsize, status);
		if (0 != status)
			if (-1 == status)
				rts_error(VARLSTCNT(4) ERR_DBPREMATEOF, 2, fn_len, fn);
			else
				rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
		read_off = read_off + bufsize;
		/* read 2nd group of blocks */
		LSEEKREAD(fd, read_off, upgrd_buff[1], bufsize, status);
		if (0 != status)
			if (-1 == status)
				rts_error(VARLSTCNT(4) ERR_DBPREMATEOF, 2, fn_len, fn);
			else
				rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
		/* write 1st group of blocks in place of 2nd group */
		write_off = old_start_vbn_off + bufsize;
		LSEEKWRITE(fd, write_off, upgrd_buff[0], bufsize, status);
		if (0 != status)
			rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
		/* read last group (# of blks <= BLKS_PER_LMAP) */
		dsize = old_file_len2 - last_full_grp_startoff;
		assert (dsize <= bufsize);
		LSEEKREAD(fd, last_full_grp_startoff, upgrd_buff[0], dsize, status);
		if (0 != status)
			if (-1 == status)
				rts_error(VARLSTCNT(4) ERR_DBPREMATEOF, 2, fn_len, fn);
			else
				rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
		/* write 2nd group of blocks */
		LSEEKWRITE(fd, last_full_grp_startoff, upgrd_buff[1], bufsize, status);
		if (0 != status)
			rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
		/* write last group read from old file */
		LSEEKWRITE(fd, last_full_grp_startoff + bufsize, upgrd_buff[0], dsize, status);
		if (0 != status)
			rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
		util_out_print("Please wait while index is being adjusted...!/", FLUSH);
		mu_upgrd_adjust_blkptr(1L, TRUE, new_head, fd, fn, fn_len);
		mu_upgrd_adjust_mm(new_head->master_map, DIVIDE_ROUND_UP(new_head->trans_hist.total_blks+1,BLKS_PER_LMAP));
		/* writing header */
		LSEEKWRITE(fd, 0, new_head, new_hdr_size, status);
		if (0 != status)
			rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
	} else	/* very small database */
	{	/* simple double-buffered shift: write the new header, then copy all data behind it */
		rbno = 0;
		write_off = 0;
		datasize[rbno] = new_hdr_size;
		memcpy(upgrd_buff[0], new_head, new_hdr_size);
		rbno = INVERT(rbno);
		while(read_off < old_file_len2)
		{
			datasize[rbno] = MIN (old_file_len2 - read_off, bufsize);
			LSEEKREAD(fd, read_off, upgrd_buff[rbno], datasize[rbno], status);
			if (0 != status)
				if (-1 == status)
					rts_error(VARLSTCNT(4) ERR_DBPREMATEOF, 2, fn_len, fn);
				else
					rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
			read_off += datasize[rbno];
			rbno = INVERT(rbno);
			LSEEKWRITE(fd, write_off, upgrd_buff[rbno], datasize[rbno], status);
			if (0 != status)
				rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
			write_off+= datasize[rbno];
		}
		rbno = INVERT(rbno);
		LSEEKWRITE(fd, write_off, upgrd_buff[rbno], datasize[rbno], status);
		if (0 != status)
			rts_error(VARLSTCNT(5) ERR_DBFILOPERR, 2, fn_len, fn, status);
	} /* end if small database */
	free(upgrd_buff[0]);
	free(upgrd_buff[1]);
	close(fd);
	util_out_print("File !AD successfully upgraded.!/", FLUSH, fn_len, fn);
	REVERT;
	if (0 != sem_rmid(upgrade_standalone_sems))
	{	/* NOTE(review): printf-style %d/%x in a util_out_print format -- see note above; confirm. */
		util_out_print("Error with sem_rmid : %d [0x%x]", TRUE, upgrade_standalone_sems,
			upgrade_standalone_sems);
		rts_error(VARLSTCNT(1) ERR_MUNOUPGRD);
	}
	mupip_exit(SS_NORMAL);
}
void mu_int_reg(gd_region *reg, boolean_t *return_value) { boolean_t read_only, was_crit; freeze_status status; node_local_ptr_t cnl; sgmnt_addrs *csa; sgmnt_data_ptr_t csd; # ifdef DEBUG boolean_t need_to_wait = FALSE; int trynum; uint4 curr_wbox_seq_num; # endif sgmnt_data *csd_copy_ptr; gd_segment *seg; int gtmcrypt_errno; *return_value = FALSE; UNIX_ONLY(jnlpool_init_needed = TRUE); ESTABLISH(mu_int_reg_ch); if (dba_usr == reg->dyn.addr->acc_meth) { util_out_print("!/Can't integ region !AD; not GDS format", TRUE, REG_LEN_STR(reg)); mu_int_skipreg_cnt++; return; } gv_cur_region = reg; if (reg_cmcheck(reg)) { util_out_print("!/Can't integ region across network", TRUE); mu_int_skipreg_cnt++; return; } gvcst_init(gv_cur_region); if (gv_cur_region->was_open) { /* already open under another name */ gv_cur_region->open = FALSE; return; } change_reg(); csa = &FILE_INFO(gv_cur_region)->s_addrs; cnl = csa->nl; csd = csa->hdr; read_only = gv_cur_region->read_only; assert(NULL != mu_int_master); /* Ensure that we don't see an increase in the file header and master map size compared to it's maximum values */ assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd))); /* ONLINE INTEG if asked for explicitly by specifying -ONLINE is an error if the db has partial V4 blocks. 
* However, if -ONLINE is not explicitly specified but rather assumed implicitly (as default for -REG) * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified */ # ifdef GTM_SNAPSHOT if (!csd->fully_upgraded) { ointeg_this_reg = FALSE; /* Turn off ONLINE INTEG for this region */ if (online_specified) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region)); util_out_print(NO_ONLINE_ERR_MSG, TRUE); mu_int_skipreg_cnt++; return; } } # endif if (!ointeg_this_reg || read_only) { status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE); switch (status) { case REG_ALREADY_FROZEN: UNIX_ONLY(if (csa->read_only_fs) break); util_out_print("!/Database for region !AD is already frozen, not integing", TRUE, REG_LEN_STR(gv_cur_region)); mu_int_skipreg_cnt++; return; case REG_HAS_KIP: /* We have already waited for KIP to reset. This time do not wait for KIP */ status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE); if (REG_ALREADY_FROZEN == status) { UNIX_ONLY(if (csa->read_only_fs) break); util_out_print("!/Database for region !AD is already frozen, not integing", TRUE, REG_LEN_STR(gv_cur_region)); mu_int_skipreg_cnt++; return; } break; case REG_FREEZE_SUCCESS: break; default: assert(FALSE); }
/* Expand the directory portion of a parsed file specification (presumably for $ZSEARCH —
 * TODO confirm against callers).  Uses the two work trees zsrch_dir1/zsrch_dir2 as
 * alternating queues of candidate directories while resolving wildcarded directory
 * segments, then matches the (possibly wildcarded) name.ext against each surviving
 * directory and records every hit in ind_var, storing the piece lengths (dir/name/ext)
 * in the plength slot of each tree node.
 *
 * Fix (review): the "is this a directory" test used (statbuf.st_mode & S_IFDIR), which is a
 * raw bitmask test; S_IFDIR's bit pattern is shared with other file types (e.g. S_IFBLK),
 * so non-directories could be accepted.  Replaced with the POSIX S_ISDIR() macro, which
 * masks st_mode with S_IFMT before comparing.
 */
void dir_srch (parse_blk *pfil)
{
	struct stat	statbuf;
	int		stat_res;
	lv_val		*dir1, *dir2, *tmp;
	mstr		tn;
	short		p2_len;
	char		filb[MAX_FBUFF + 1], patb[sizeof(ptstr)], *c, *lastd, *top, *p2, *c1, ch;
	mval		pat_mval, sub, compare;
	bool		wildname, seen_wd;
	struct dirent	*dent;
	DIR		*dp;
	plength		*plen;
	int		closedir_res;

	op_kill(zsrch_dir1);	/* start with empty work trees */
	op_kill(zsrch_dir2);
	if (!pfil->b_name)
		return;		/* nothing to search for */
	ESTABLISH(dir_ch);
	pat_mval.mvtype = MV_STR;
	pat_mval.str.addr = patb;
	/* patb should be sizeof(ptstr.buff) but instead is sizeof(ptstr) since the C compiler
	 * complains about the former and the latter is just 4 bytes more */
	pat_mval.str.len = 0;
	sub.mvtype = MV_STR;
	sub.str.len = 0;
	compare.mvtype = MV_STR;
	compare.str.len = 0;
	wildname = (pfil->fnb & F_WILD_NAME) != 0;
	dir1 = zsrch_dir1;
	dir2 = zsrch_dir2;
	if (pfil->fnb & F_WILD_DIR)
	{	/* the directory part itself contains wildcards: expand it segment by segment,
		 * ping-ponging candidate directories between dir1 and dir2 */
		seen_wd = FALSE;
		for (c = pfil->l_dir, lastd = c, top = c + pfil->b_dir; c < top;)
		{	/* find the first directory segment containing a wildcard; lastd tracks
			 * the start of the segment currently being scanned */
			ch = *c++;
			if (ch == '/')	/* note the start of each directory segment */
			{
				if (seen_wd)
					break;
				lastd = c;
			}
			if (ch == '?' || ch == '*')
				seen_wd = TRUE;
		}
		assert(c <= top);
		/* seed dir1 with the leading non-wild prefix of the directory path */
		sub.str.addr = pfil->l_dir;
		sub.str.len = lastd - sub.str.addr;
		tmp = op_putindx(VARLSTCNT(2) dir1, &sub);
		tmp->v.mvtype = MV_STR;
		tmp->v.str.len = 0;
		for (;;)
		{	/* one iteration per wildcarded directory segment */
			tn.addr = lastd;	/* wildcard segment */
			tn.len = c - lastd - 1;
			lastd = c;
			genpat(&tn, &pat_mval);
			seen_wd = FALSE;
			p2 = c - 1;
			for (; c < top;)
			{	/* advance to the next wildcarded segment (if any) */
				ch = *c++;
				if (ch == '/')	/* note the start of each directory segment */
				{
					if (seen_wd)
						break;
					lastd = c;
				}
				if (ch == '?' || ch == '*')
					seen_wd = TRUE;
			}
			p2_len = lastd - p2;	/* length of non-wild segment after wild section */
			for (;;)
			{	/* drain dir1: match each candidate directory's entries against the pattern */
				pop_top(dir1, &sub);	/* get next item off the top */
				if (!sub.str.len)
					break;
				memcpy(filb, sub.str.addr, sub.str.len);
				filb[sub.str.len] = 0;	/* NUL-terminate for opendir */
				sub.str.addr = filb;
				dp = OPENDIR(filb);
				if (!dp)
					continue;	/* unreadable/vanished directory: best-effort skip */
				while (READDIR(dp, dent))
				{
					compare.str.addr = &dent->d_name[0];
					compare.str.len = strlen(&dent->d_name[0]);
					assert(compare.str.len);
					if (dent->d_name[0] == '.' && (compare.str.len == 1
							|| (compare.str.len == 2 && dent->d_name[1] == '.')))
						continue;	/* don't want to read . and .. */
					if (compare.str.len + sub.str.len + p2_len > MAX_FBUFF)
						continue;	/* combined path would exceed the file buffer */
					if (do_pattern(&compare, &pat_mval))
					{	/* got a hit */
						if (stringpool.free + compare.str.len + sub.str.len + p2_len + 1
								> stringpool.top)
							stp_gcol(compare.str.len + sub.str.len + p2_len + 1);
						/* concatenate directory and name */
						c1 = (char *)stringpool.free;
						tn = sub.str;
						s2pool(&tn);
						tn = compare.str;
						s2pool(&tn);
						tn.addr = p2;
						tn.len = p2_len;
						s2pool(&tn);
						*stringpool.free++ = 0;
						compare.str.addr = c1;
						compare.str.len += sub.str.len + p2_len;
						STAT_FILE(compare.str.addr, &statbuf, stat_res);
						if (-1 == stat_res)
							continue;
						if (!S_ISDIR(statbuf.st_mode))
							continue;	/* only directories survive this pass */
						/* put in results tree */
						tmp = op_putindx(VARLSTCNT(2) dir2, &compare);
						tmp->v.mvtype = MV_STR;
						tmp->v.str.len = 0;
					}
				}
				CLOSEDIR(dp, closedir_res);
			}
			/* swap work trees: survivors in dir2 become the candidates for the next segment */
			tmp = dir1;
			dir1 = dir2;
			dir2 = tmp;
			if (c >= top)
				break;
		}
	} else
	{	/* no wildcard in the directory part: single candidate directory */
		sub.str.addr = pfil->l_dir;
		sub.str.len = pfil->b_dir;
		tmp = op_putindx(VARLSTCNT(2) dir1, &sub);
		tmp->v.mvtype = MV_STR;
		tmp->v.str.len = 0;
	}
	if (wildname)
	{	/* compile the name.ext wildcard pattern once, up front */
		tn.addr = pfil->l_name;
		tn.len = pfil->b_name + pfil->b_ext;
		genpat(&tn, &pat_mval);
	}
	for (;;)
	{	/* final pass: for every candidate directory, record matching files in ind_var */
		pop_top(dir1, &sub);	/* get next item off the top */
		if (!sub.str.len)
			break;
		if (wildname)
		{
			memcpy(filb, sub.str.addr, sub.str.len);
			filb[sub.str.len] = 0;
			sub.str.addr = filb;
			dp = OPENDIR(filb);
			if (!dp)
				continue;
			while (READDIR(dp, dent))
			{
				compare.str.addr = &dent->d_name[0];
				compare.str.len = strlen(&dent->d_name[0]);
				if (dent->d_name[0] == '.' && (compare.str.len == 1
						|| (compare.str.len == 2 && dent->d_name[1] == '.')))
					continue;	/* don't want to read . and .. */
				if (compare.str.len + sub.str.len > MAX_FBUFF)
					continue;
				if (do_pattern(&compare, &pat_mval))
				{	/* got a hit */
					if (stringpool.free + compare.str.len + sub.str.len > stringpool.top)
						stp_gcol(compare.str.len + sub.str.len);
					/* concatenate directory and name */
					c = (char *)stringpool.free;
					tn = sub.str;
					s2pool(&tn);
					tn = compare.str;
					s2pool(&tn);
					compare.str.addr = c;
					compare.str.len += sub.str.len;
					/* put in results tree */
					tmp = op_putindx(VARLSTCNT(2) ind_var, &compare);
					tmp->v.mvtype = MV_STR;
					tmp->v.str.len = 0;
					plen = (plength *)&tmp->v.m[1];
					plen->p.pblk.b_esl = compare.str.len;
					plen->p.pblk.b_dir = sub.str.len;
					/* locate the extension: skip any leading dots of the filename,
					 * then the last remaining '.' starts the extension */
					for (c = &compare.str.addr[sub.str.len],
							c1 = top = &compare.str.addr[compare.str.len]; c < top;)
					{
						if (*c++ != '.')
							break;
					}
					for (; c < top;)
					{
						if (*c++ == '.')
							c1 = c - 1;
					}
					plen->p.pblk.b_ext = top - c1;
					plen->p.pblk.b_name = plen->p.pblk.b_esl - plen->p.pblk.b_dir
						- plen->p.pblk.b_ext;
				}
			}
			CLOSEDIR(dp, closedir_res);
		} else
		{	/* name is literal but the directory part was wild: append the fixed
			 * name.ext to each surviving directory */
			assert(pfil->fnb & F_WILD_DIR);
			compare.str.addr = pfil->l_name;
			compare.str.len = pfil->b_name + pfil->b_ext;
			if (compare.str.len + sub.str.len > MAX_FBUFF)
				continue;
			memcpy(filb, sub.str.addr, sub.str.len);
			filb[sub.str.len] = 0;
			sub.str.addr = filb;
			if (stringpool.free + compare.str.len + sub.str.len > stringpool.top)
				stp_gcol(compare.str.len + sub.str.len);
			/* concatenate directory and name */
			c1 = (char *)stringpool.free;
			tn = sub.str;
			s2pool(&tn);
			tn = compare.str;
			s2pool(&tn);
			compare.str.addr = c1;
			compare.str.len += sub.str.len;
			/* put in results tree */
			tmp = op_putindx(VARLSTCNT(2) ind_var, &compare);
			tmp->v.mvtype = MV_STR;
			tmp->v.str.len = 0;
			plen = (plength *)&tmp->v.m[1];
			plen->p.pblk.b_esl = compare.str.len;
			plen->p.pblk.b_dir = sub.str.len;
			plen->p.pblk.b_name = pfil->b_name;
			plen->p.pblk.b_ext = pfil->b_ext;
		}
	}
	op_kill(zsrch_dir1);	/* clear the work trees before leaving */
	op_kill(zsrch_dir2);
	REVERT;
}
/* One-time process I/O initialization: create the logical-name table root, resolve
 * $PRINCIPAL/SYS$INPUT/SYS$OUTPUT, open the standard input/output devices (joining them
 * into one device pair when they translate to the same terminal/mailbox/socket), and
 * make the input device the current device via op_use.
 * term_ctrl is passed through to term_setup(); presumably it controls terminal
 * characteristics setup — TODO confirm against term_setup's definition.
 */
void io_init(bool term_ctrl)
{
	/* deviceparm lists handed to op_open: each is a byte sequence terminated by iop_eol */
	static readonly unsigned char open_params_list[2] =
		{ (unsigned char)iop_newversion, (unsigned char)iop_eol };
	static readonly unsigned char null_params_list[2] =
		{ (unsigned char)iop_nl, (unsigned char)iop_eol };
	static readonly unsigned char no_params = (unsigned char)iop_eol;
	static readonly unsigned char shr_params[3] =
		{ (unsigned char)iop_shared, (unsigned char)iop_readonly, (unsigned char)iop_eol };
	int4		status;
	mval		val;
	mstr		tn;
	MSTR_CONST	(gtm_netout, "GTM_NETOUT");
	MSTR_CONST	(sys_net, "SYS$NET");
	char		buf1[MAX_TRANS_NAME_LEN];	/* buffer to hold translated name */
	mval		pars;
	io_log_name	*inp, *outp;
	io_log_name	*ln;
	error_def(ERR_LOGTOOLONG);

	io_init_name();
	/* default logical names */
	io_root_log_name = (io_log_name *)malloc(SIZEOF(*io_root_log_name));
	memset(io_root_log_name, 0, SIZEOF(*io_root_log_name));
	val.mvtype = MV_STR;
	val.str.addr = "0";
	val.str.len = 1;
	ln = get_log_name(&val.str, INSERT);	/* device "0" always exists in the table */
	assert(ln != 0);
	/* resolve $PRINCIPAL; absent translation leaves dollar_principal null */
	val.str = gtm_principal;
	status = TRANS_LOG_NAME(&val.str, &tn, buf1, SIZEOF(buf1), dont_sendmsg_on_log2long);
	if (SS_NOLOGNAM == status)
		dollar_principal = 0;
	else if (SS_NORMAL == status)
		dollar_principal = get_log_name(&tn, INSERT);
#	ifdef UNIX
	else if (SS_LOG2LONG == status)
		rts_error(VARLSTCNT(5) ERR_LOGTOOLONG, 3, val.str.len, val.str.addr, SIZEOF(buf1) - 1);
#	endif
	else
		rts_error(VARLSTCNT(1) status);
	/* open devices */
	val.str = sys_input;
	inp = get_log_name(&val.str, INSERT);
	pars.mvtype = MV_STR;
	status = TRANS_LOG_NAME(&val.str, &tn, buf1, SIZEOF(buf1), dont_sendmsg_on_log2long);
	if (SS_NOLOGNAM == status)
	{	/* no translation for SYS$INPUT: open with the null (nl) parameter list */
		pars.str.len = SIZEOF(null_params_list);
		pars.str.addr = (char *)null_params_list;
	} else if (SS_NORMAL == status)
	{	/* choose the parameter list from the translated device's class; io_is_rm/io_is_sn
		 * presumably distinguish RMS-file vs network devices — TODO confirm semantics */
		if (!io_is_rm(&val.str))
		{
			pars.str.len = SIZEOF(no_params);
			pars.str.addr = (char *)&no_params;
		} else if (io_is_sn(&val.str))
		{
			pars.str.len = SIZEOF(open_params_list);
			pars.str.addr = (char *)open_params_list;
		} else
		{
			pars.str.len = SIZEOF(shr_params);
			pars.str.addr = (char *)shr_params;
		}
	}
#	ifdef UNIX
	else if (SS_LOG2LONG == status)
		rts_error(VARLSTCNT(5) ERR_LOGTOOLONG, 3, val.str.len, val.str.addr, SIZEOF(buf1) - 1);
#	endif
	else
		rts_error(VARLSTCNT(1) status);
	ESTABLISH(io_init_ch);	/* condition handler covers the opens below through REVERT */
	(*op_open_ptr)(&val, &pars, 0, 0);
	io_curr_device.in = io_std_device.in = inp->iod;
	val.str = sys_output;
	/* if GTM_NETOUT is defined and SYS$NET translates to a network device, direct output there */
	if ((SS_NORMAL == TRANS_LOG_NAME(&gtm_netout, &tn, buf1, SIZEOF(buf1), do_sendmsg_on_log2long))
			&& (SS_NORMAL == TRANS_LOG_NAME(&sys_net, &tn, buf1, SIZEOF(buf1),
				do_sendmsg_on_log2long))
			&& io_is_sn(&sys_net))
		val.str = sys_net;
	outp = get_log_name(&val.str, INSERT);
	status = TRANS_LOG_NAME(&val.str, &tn, buf1, SIZEOF(buf1), dont_sendmsg_on_log2long);
	if ((SS_NORMAL != status) && (SS_NOLOGNAM != status))
	{
#		ifdef UNIX
		if (SS_LOG2LONG == status)
			rts_error(VARLSTCNT(5) ERR_LOGTOOLONG, 3, val.str.len, val.str.addr,
				SIZEOF(buf1) - 1);
		else
#		endif
			rts_error(VARLSTCNT(1) status);
	}
	if ((val.str.addr == sys_net.addr) && (pars.str.addr == (char *)open_params_list))
		/* sys$net is the only input thing that uses open_params_list */
		outp->iod = io_curr_device.in;
	/* For terminals and mailboxes and sockets, SYS$INPUT and SYS$OUTPUT may point to the same device.
	   If input is one of those, then check translated name for output against translated name for input;
	   in that case they should be joined by their logical names */
	if (((tt == io_curr_device.in->type) || (mb == io_curr_device.in->type)
			|| (gtmsocket == io_curr_device.in->type)) && same_device_check(tn, buf1))
		outp->iod = io_curr_device.in;
	if (!outp->iod)
	{	/* output was not joined to input: open it as its own device */
		if (status == SS_NOLOGNAM)
		{
			pars.str.len = SIZEOF(null_params_list);
			pars.str.addr = (char *)null_params_list;
		} else if (status == SS_NORMAL)
		{
			pars.str.len = SIZEOF(open_params_list);
			pars.str.addr = (char *)open_params_list;
		}
		(*op_open_ptr)(&val, &pars, 0, 0);
	}
	io_curr_device.out = io_std_device.out = outp->iod;
	term_setup(term_ctrl);
	/* the std devices each record the full in/out pair and are marked permanent
	 * (never closed by user-level CLOSE) */
	io_std_device.out->pair = io_std_device;
	io_std_device.in->pair = io_std_device;
	io_std_device.out->perm = io_std_device.in->perm = TRUE;
	/* point every logical name created so far (and $PRINCIPAL) at the std input device */
	for (ln = io_root_log_name; ln; ln = ln->next)
		ln->iod = io_std_device.in;
	if (dollar_principal)
		dollar_principal->iod = io_std_device.in;
	/* finally, USE the input device to make it current */
	pars.str.len = SIZEOF(no_params);
	pars.str.addr = (char *)&no_params;
	val.str.len = io_curr_device.in->trans_name->len;
	val.str.addr = io_std_device.in->trans_name->dollar_io;
	op_use(&val, &pars);
	REVERT;
	return;
}