void mu_int_reg(gd_region *reg, boolean_t *return_value, boolean_t return_after_open)
{
	boolean_t		read_only, was_crit;
	freeze_status		status;
	node_local_ptr_t	cnl;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	sgmnt_data		*csd_copy_ptr;
	gd_segment		*seg;
	int			gtmcrypt_errno;
#	ifdef DEBUG
	boolean_t		need_to_wait = FALSE;
	int			trynum;
	uint4			curr_wbox_seq_num;
#	endif

	*return_value = FALSE;
	jnlpool_init_needed = TRUE;
	ESTABLISH(mu_int_reg_ch);
	if (dba_usr == reg->dyn.addr->acc_meth)
	{
		util_out_print("!/Can't integ region !AD; not GDS format", TRUE, REG_LEN_STR(reg));
		mu_int_skipreg_cnt++;
		return;
	}
	gv_cur_region = reg;
	if (reg_cmcheck(reg))
	{
		util_out_print("!/Can't integ region across network", TRUE);
		mu_int_skipreg_cnt++;
		return;
	}
	gvcst_init(gv_cur_region);
	if (gv_cur_region->was_open)
	{	/* already open under another name */
		gv_cur_region->open = FALSE;
		return;
	}
	if (return_after_open)
	{
		*return_value = TRUE;
		return;
	}
	change_reg();
	csa = &FILE_INFO(gv_cur_region)->s_addrs;
	cnl = csa->nl;
	csd = csa->hdr;
	read_only = gv_cur_region->read_only;
	assert(NULL != mu_int_master);
	/* Ensure that we don't see an increase in the file header and master map size compared to their maximum values */
	assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd)));
	/* ONLINE INTEG, if asked for explicitly by specifying -ONLINE, is an error if the db has partial V4 blocks.
	 * However, if -ONLINE is not explicitly specified but rather assumed implicitly (as the default for -REG),
	 * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified.
	 */
	if (!csd->fully_upgraded)
	{
		ointeg_this_reg = FALSE;	/* Turn off ONLINE INTEG for this region */
		if (online_specified)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region));
			util_out_print(NO_ONLINE_ERR_MSG, TRUE);
			mu_int_skipreg_cnt++;
			return;
		}
	}
	if (!ointeg_this_reg || read_only)
	{
		status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE, FALSE, !read_only);
		switch (status)
		{
			case REG_ALREADY_FROZEN:
				if (csa->read_only_fs)
					break;
				util_out_print("!/Database for region !AD is already frozen, not integing",
					TRUE, REG_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_FLUSH_ERROR:
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT(MUPIP_INTEG),
					DB_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_HAS_KIP:
				/* We have already waited for KIP to reset. This time do not wait for KIP */
				status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE, FALSE, !read_only);
				if (REG_ALREADY_FROZEN == status)
				{
					if (csa->read_only_fs)
						break;
					util_out_print("!/Database for region !AD is already frozen, not integing",
						TRUE, REG_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				} else if (REG_FLUSH_ERROR == status)
				{
					gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4,
						LEN_AND_LIT(MUPIP_INTEG), DB_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				}
				assert(REG_FREEZE_SUCCESS == status);
				/* no break */
			case REG_FREEZE_SUCCESS:
				break;
			default:
				assert(FALSE);
				/* no break */
		}
		if (read_only && (dba_bg == csa->hdr->acc_meth) && !mu_int_wait_rdonly(csa, MUPIP_INTEG))
		{
			mu_int_skipreg_cnt++;
			return;
		}
	}
	if (!ointeg_this_reg)
	{	/* Take a copy of the file-header. To ensure it is consistent, do it while holding crit. */
		was_crit = csa->now_crit;
		if (!was_crit)
			grab_crit(gv_cur_region);
		memcpy((uchar_ptr_t)&mu_int_data, (uchar_ptr_t)csd, SIZEOF(sgmnt_data));
		if (!was_crit)
			rel_crit(gv_cur_region);
		memcpy(mu_int_master, MM_ADDR(csd), MASTER_MAP_SIZE(csd));
		csd_copy_ptr = &mu_int_data;
	} else
	{
		if (!ss_initiate(gv_cur_region, util_ss_ptr, &csa->ss_ctx, preserve_snapshot, MUPIP_INTEG))
		{
			mu_int_skipreg_cnt++;
			assert(NULL != csa->ss_ctx);
			ss_release(&csa->ss_ctx);
			ointeg_this_reg = FALSE;	/* Turn off ONLINE INTEG for this region */
			assert(process_id != cnl->in_crit);	/* Ensure ss_initiate released the crit before returning */
			assert(!FROZEN_HARD(csd));	/* Ensure region is unfrozen before returning from ss_initiate */
			assert(INTRPT_IN_SS_INITIATE != intrpt_ok_state);	/* Ensure ss_initiate released intrpt_ok_state */
			return;
		}
		assert(process_id != cnl->in_crit);	/* Ensure ss_initiate released the crit before returning */
		assert(INTRPT_IN_SS_INITIATE != intrpt_ok_state);	/* Ensure ss_initiate released intrpt_ok_state */
		csd_copy_ptr = &csa->ss_ctx->ss_shm_ptr->shadow_file_header;
#		if defined(DEBUG)
		curr_wbox_seq_num = 1;
		cnl->wbox_test_seq_num = curr_wbox_seq_num;	/* indicate we took the next step */
		GTM_WHITE_BOX_TEST(WBTEST_OINTEG_WAIT_ON_START, need_to_wait, TRUE);
		if (need_to_wait)	/* wait for them to take next step */
		{
			trynum = 30;	/* given 30 cycles to tell you to go */
			while ((curr_wbox_seq_num == cnl->wbox_test_seq_num) && trynum--)
				LONG_SLEEP(1);
			cnl->wbox_test_seq_num++;	/* let them know we took the next step */
			assert(trynum);
		}
#		endif
	}
	if (USES_ANY_KEY(csd_copy_ptr))
	{	/* Initialize mu_int_encrypt_key_handle to be used in mu_int_read */
		seg = gv_cur_region->dyn.addr;
		INIT_DB_OR_JNL_ENCRYPTION(&mu_int_encr_handles, csd_copy_ptr, seg->fname_len, (char *)seg->fname, gtmcrypt_errno);
		if (0 != gtmcrypt_errno)
		{
			GTMCRYPT_REPORT_ERROR(gtmcrypt_errno, gtm_putmsg, seg->fname_len, seg->fname);
			mu_int_skipreg_cnt++;
			return;
		}
	}
	*return_value = mu_int_fhead();
	REVERT;
	return;
}
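/* Illustrative usage sketch (not part of the original source): a hypothetical driver that runs mu_int_reg()
 * over every region of the global directory and counts the regions whose headers were processed successfully.
 * The gd_header->regions/n_regions iteration idiom and the ok_cnt variable are assumptions for the example.
 *
 *	gd_region	*reg, *reg_top;
 *	boolean_t	reg_ok;
 *	int		ok_cnt = 0;
 *
 *	for (reg = gd_header->regions, reg_top = reg + gd_header->n_regions; reg < reg_top; reg++)
 *	{
 *		mu_int_reg(reg, &reg_ok, FALSE);	// FALSE: do the full check rather than return after open
 *		if (reg_ok)
 *			ok_cnt++;
 *	}
 */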
bool mubinccpy(backup_reg_list *list)
{
	static readonly mval	null_str = {MV_STR, 0, 0, 0, 0, 0};
	int			backup_socket;
	int4			size, size1, bsize, bm_num, hint, lmsize, save_blks, rsize, match, timeout, outsize;
	uint4			status, total_blks, bplmap, gds_ratio, blks_per_buff, counter, i, lcnt, read_size;
	uchar_ptr_t		bm_blk_buff, ptr1, ptr1_top, ptr, ptr_top;
	char_ptr_t		outptr, data_ptr;
	unsigned short		rd_iosb[4], port;
	enum db_acc_method	access;
	blk_hdr			*bp, *bptr;
	struct FAB		*fcb, temp_fab, mubincfab;
	struct RAB		temp_rab, mubincrab;
	inc_header		*outbuf;
	mval			val;
	mstr			*file;
	sgmnt_data_ptr_t	header;
	char			*common, addr[SA_MAXLEN + 1];
	void			(*common_write)();
	void			(*common_close)();
	muinc_blk_hdr_ptr_t	sblkh_p;
	trans_num		blk_tn;
	block_id		blk_num_base, blk_num;
	boolean_t		is_bitmap_blk, backup_this_blk;
	enum db_ver		dummy_odbv;
	int4			blk_bsiz;

	error_def(ERR_BCKUPBUFLUSH);
	error_def(ERR_COMMITWAITSTUCK);
	error_def(ERR_DBCCERR);
	error_def(ERR_ERRCALL);

	assert(list->reg == gv_cur_region);
	assert(incremental);
	/* Make sure inc_header can be same size on all platforms.  Some platforms pad 8 byte aligned structures
	 * that end on a 4 byte boundary and some do not.  It is critical that this structure is the same size on
	 * all platforms as it is sent across TCP connections when doing TCP backup.
	 */
	assert(0 == (SIZEOF(inc_header) % 8));

	/* ================= Initialization and some checks ======================== */
	header = list->backup_hdr;
	file = &(list->backup_file);
	if (!mubtomag)
		mubmaxblk = BACKUP_TEMPFILE_BUFF_SIZE;
	fcb = ((vms_gds_info *)(gv_cur_region->dyn.addr->file_cntl->file_info))->fab;
	if (list->tn >= header->trans_hist.curr_tn)
	{
		util_out_print("!/TRANSACTION number is greater than or equal to current transaction,", TRUE);
		util_out_print("No blocks backed up from database !AD", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
		return TRUE;
	}
	/* =========== open backup destination and define common_write ================= */
	backup_write_errno = 0;
	backup_close_errno = 0;
	switch (list->backup_to)
	{
		case backup_to_file:
			/* open the file and define the common_write function */
			mubincfab = cc$rms_fab;
			mubincfab.fab$b_fac = FAB$M_PUT;
			mubincfab.fab$l_fop = FAB$M_CBT | FAB$M_MXV | FAB$M_TEF | FAB$M_POS & (~FAB$M_RWC) & (~FAB$M_RWO);
			mubincfab.fab$l_fna = file->addr;
			mubincfab.fab$b_fns = file->len;
			mubincfab.fab$l_alq = cs_addrs->hdr->start_vbn + STARTING_BLOCKS * cs_addrs->hdr->blk_size / DISK_BLOCK_SIZE;
			mubincfab.fab$w_mrs = mubmaxblk;
			mubincfab.fab$w_deq = EXTEND_SIZE;
			switch (status = sys$create(&mubincfab))
			{
				case RMS$_NORMAL:
				case RMS$_CREATED:
				case RMS$_SUPERSEDE:
				case RMS$_FILEPURGED:
					break;
				default:
					gtm_putmsg(status, 0, mubincfab.fab$l_stv);
					util_out_print("Error: Cannot create backup file !AD.", TRUE,
						mubincfab.fab$b_fns, mubincfab.fab$l_fna);
					return FALSE;
			}
			mubincrab = cc$rms_rab;
			mubincrab.rab$l_fab = &mubincfab;
			mubincrab.rab$l_rop = RAB$M_WBH;
			if (RMS$_NORMAL != (status = sys$connect(&mubincrab)))
			{
				gtm_putmsg(status, 0, mubincrab.rab$l_stv);
				util_out_print("Error: Cannot connect to backup file !AD.", TRUE,
					mubincfab.fab$b_fns, mubincfab.fab$l_fna);
				mubincfab.fab$l_fop |= FAB$M_DLT;
				sys$close(&mubincfab);
				return FALSE;
			}
			common = (char *)(&mubincrab);
			common_write = file_write;
			common_close = file_close;
			break;
		case backup_to_exec:
			util_out_print("Error: Backup to pipe is yet to be implemented.", TRUE);
			util_out_print("Error: Your request to backup database !AD to !AD is currently not valid.", TRUE,
				fcb->fab$b_fns, fcb->fab$l_fna, file->len, file->addr);
			return FALSE;
		case backup_to_tcp:
			iotcp_fillroutine();
			/* parse it first */
			switch (match = SSCANF(file->addr, "%[^:]:%hu", addr, &port))
			{
				case 1:
					port = DEFAULT_BKRS_PORT;
				case 2:
					break;
				default:
					util_out_print("ERROR: A hostname has to be specified to backup through a TCP connection.",
						TRUE);
					return FALSE;
			}
			if ((0 == cli_get_int("NETTIMEOUT", &timeout)) || (0 > timeout))
				timeout = DEFAULT_BKRS_TIMEOUT;
			if (0 > (backup_socket = tcp_open(addr, port, timeout, FALSE)))
			{
				util_out_print("ERROR: Cannot open tcp connection due to the above error.", TRUE);
				return FALSE;
			}
			common_write = tcp_write;
			common_close = tcp_close;
			common = (char *)(&backup_socket);
			break;
		default:
			util_out_print("ERROR: Backup format !UL not supported.", TRUE, list->backup_to);
			util_out_print("Error: Your request to backup database !AD to !AD is not valid.", TRUE,
				fcb->fab$b_fns, fcb->fab$l_fna, file->len, file->addr);
			return FALSE;
	}
	/* ============================= write inc_header =========================================== */
	outptr = malloc(SIZEOF(inc_header));
	outbuf = (inc_header *)outptr;
	MEMCPY_LIT(&outbuf->label[0], INC_HEADER_LABEL);
	stringpool.free = stringpool.base;
	op_horolog(&val);
	stringpool.free = stringpool.base;
	op_fnzdate(&val, &mu_bin_datefmt, &null_str, &null_str, &val);
	memcpy(&outbuf->date[0], val.str.addr, val.str.len);
	memcpy(&outbuf->reg[0], gv_cur_region->rname, MAX_RN_LEN);
	outbuf->start_tn = list->tn;
	outbuf->end_tn = header->trans_hist.curr_tn;
	outbuf->db_total_blks = header->trans_hist.total_blks;
	outbuf->blk_size = header->blk_size;
	outbuf->blks_to_upgrd = header->blks_to_upgrd;
	COMMON_WRITE(common, outptr, SIZEOF(inc_header));
	free(outptr);
	if (mu_ctrly_occurred || mu_ctrlc_occurred)
	{
		error_mupip = TRUE;
		COMMON_CLOSE(common);
		util_out_print("WARNING: DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
		return FALSE;
	}
	/* ============================ read/write appropriate blocks =============================== */
	bsize = header->blk_size;
	gds_ratio = bsize / DISK_BLOCK_SIZE;
	blks_per_buff = BACKUP_READ_SIZE / bsize;
	read_size = blks_per_buff * bsize;
	outsize = SIZEOF(muinc_blk_hdr) + bsize;
	outptr = (char_ptr_t)malloc(MAX(outsize, mubmaxblk));
	sblkh_p = (muinc_blk_hdr_ptr_t)outptr;
	data_ptr = (char_ptr_t)(sblkh_p + 1);
	bp = (blk_hdr_ptr_t)mubbuf;
	bm_blk_buff = (uchar_ptr_t)malloc(SIZEOF(blk_hdr) + (BLKS_PER_LMAP * BML_BITS_PER_BLK / BITS_PER_UCHAR));
	mubincrab.rab$l_rbf = outptr;
	save_blks = 0;
	access = header->acc_meth;
	memset(sblkh_p, 0, SIZEOF(*sblkh_p));
	if (access == dba_bg)
		bp = mubbuf;
	else
	{
		ptr = cs_addrs->db_addrs[0] + (cs_addrs->hdr->start_vbn - 1) * DISK_BLOCK_SIZE;
		ptr_top = cs_addrs->db_addrs[1] + 1;
	}
	sblkh_p->use.bkup.ondsk_blkver = GDSNOVER;
	for (blk_num_base = 0; blk_num_base < header->trans_hist.total_blks; blk_num_base += blks_per_buff)
	{
		if (online && (0 != cs_addrs->shmpool_buffer->failed))
			break;
		if (header->trans_hist.total_blks - blk_num_base < blks_per_buff)
		{
			blks_per_buff = header->trans_hist.total_blks - blk_num_base;
			read_size = blks_per_buff * bsize;
		}
		if (access == dba_bg)
		{
			if ((SS$_NORMAL != (status = sys$qiow(EFN$C_ENF, fcb->fab$l_stv, IO$_READVBLK, &rd_iosb, 0, 0,
					bp, read_size, cs_addrs->hdr->start_vbn + (gds_ratio * blk_num_base), 0, 0, 0)))
				|| (SS$_NORMAL != (status = rd_iosb[0])))
			{
				gtm_putmsg(VARLSTCNT(1) status);
				util_out_print("Error reading data from database !AD.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				return FALSE;
			}
		} else
		{
			assert(dba_mm == access);
			bp = ptr + blk_num_base *
				bsize;
		}
		bptr = (blk_hdr *)bp;
		/* The blocks we back up will be whatever version they are.  There is no implicit conversion in this
		 * part of the backup/restore.  Since we aren't even looking at the blocks (and indeed some of these
		 * blocks could potentially contain uninitialized garbage data), we set the block version to GDSNOVER
		 * to signal that the block version is unknown.  The above applies to "regular" blocks but not to
		 * bitmap blocks which we know are initialized.  Because we have to read the bitmap blocks, they will
		 * be converted as necessary.
		 */
		for (i = 0; i < blks_per_buff && ((blk_num_base + i) < header->trans_hist.total_blks);
			i++, bptr = (blk_hdr *)((char *)bptr + bsize))
		{
			blk_num = blk_num_base + i;
			if (mu_ctrly_occurred || mu_ctrlc_occurred)
			{
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				util_out_print("WARNING: DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
				return FALSE;
			}
			/* Before we check if this block needs backing up, check if this is a new bitmap block or not.
			 * If it is, we can fall through and back it up as normal.  But if this is NOT a bitmap block,
			 * use the existing bitmap to determine if this block has ever been allocated or not.  If not,
			 * we don't want to even look at this block.  It could be uninitialized which will just make
			 * things run slower if we go to read it and back it up.
			 */
			if (0 != ((BLKS_PER_LMAP - 1) & blk_num))
			{	/* Not a local bitmap block */
				if (!gvcst_blk_ever_allocated(bm_blk_buff + SIZEOF(blk_hdr),
						((blk_num * BML_BITS_PER_BLK) % (BLKS_PER_LMAP * BML_BITS_PER_BLK))))
					continue;	/* Bypass never-set blocks to avoid conversion problems */
				is_bitmap_blk = FALSE;
				if (SIZEOF(v15_blk_hdr) <= (blk_bsiz = ((v15_blk_hdr_ptr_t)bptr)->bsiz))
				{	/* We have either a V4 block or uninitialized garbage */
					if (blk_bsiz > bsize)
						continue;	/* This is not a valid V4 block so ignore it */
					blk_tn = ((v15_blk_hdr_ptr_t)bptr)->tn;
				} else
				{	/* Assume V5 block */
					if ((blk_bsiz = bptr->bsiz) > bsize)
						continue;	/* Not a valid V5 block either */
					blk_tn = bptr->tn;
				}
			} else
			{	/* This is a bitmap block so save it into our bitmap block buffer.  It is used as the basis
				 * of whether or not we have to process a given block.  We process allocated and recycled
				 * blocks, leaving free (never used) blocks alone as they have no data worth saving.  But
				 * after saving it, upgrade it to the current format if necessary.
				 */
				is_bitmap_blk = TRUE;
				memcpy(bm_blk_buff, bptr, BM_SIZE(header->bplmap));
				if (SIZEOF(v15_blk_hdr) <= ((v15_blk_hdr_ptr_t)bm_blk_buff)->bsiz)
				{	/* This is a V4 format block -- needs upgrading */
					status = gds_blk_upgrade(bm_blk_buff, bm_blk_buff, bsize, &dummy_odbv);
					if (SS_NORMAL != status)
					{
						free(outptr);
						free(bm_blk_buff);
						error_mupip = TRUE;
						COMMON_CLOSE(common);
						util_out_print("Error: Block 0x!XL is too large for automatic upgrade", TRUE,
							sblkh_p->blkid);
						return FALSE;
					}
				}
				assert(BM_SIZE(header->bplmap) == ((blk_hdr_ptr_t)bm_blk_buff)->bsiz);
				assert(LCL_MAP_LEVL == ((blk_hdr_ptr_t)bm_blk_buff)->levl);
				assert(gvcst_blk_is_allocated(bm_blk_buff + SIZEOF(blk_hdr),
					((blk_num * BML_BITS_PER_BLK) % (BLKS_PER_LMAP * BML_BITS_PER_BLK))));
				blk_bsiz = BM_SIZE(header->bplmap);
				blk_tn = ((blk_hdr_ptr_t)bm_blk_buff)->tn;
			}
			/* The conditions for backing up a block or ignoring it (in order of evaluation):
			 * 1) If blk is larger than size of db at time backup was initiated, we ignore the block.
			 * 2) Always backup blocks 0, 1, and 2 as these are the only blocks that can contain data and
			 *    still have a transaction number of 0.
			 * 3) For bitmap blocks, if blks_to_upgrd != 0 and the TN is 0 and the block number is
			 *    >= last_blk_at_last_bkup, then backup the block.  This way we get the correct version of
			 *    the bitmap block in the restore (otherwise we have no clue what version to create them in,
			 *    as bitmaps are created with a TN of 0 when before image journaling is enabled).
			 * 4) If the block TN is below our TN threshold, ignore the block.
			 * 5) Else, if none of the above conditions, backup the block.
			 */
			if (online && (header->trans_hist.curr_tn <= blk_tn))
				backup_this_blk = FALSE;
			else if (3 > blk_num || (is_bitmap_blk && 0 != header->blks_to_upgrd && (trans_num)0 == blk_tn
					&& blk_num >= list->last_blk_at_last_bkup))
				backup_this_blk = TRUE;
			else if ((blk_tn < list->tn))
				backup_this_blk = FALSE;
			else
				backup_this_blk = TRUE;
			if (!backup_this_blk)
			{
				if (online)
					cs_addrs->nl->nbb = blk_num;
				continue;	/* not applicable */
			}
			sblkh_p->blkid = blk_num;
			memcpy(data_ptr, bptr, blk_bsiz);
			sblkh_p->valid_data = TRUE;	/* Validation marker */
			COMMON_WRITE(common, outptr, outsize);
			if (online)
			{
				if (0 != cs_addrs->shmpool_buffer->failed)
					break;
				cs_addrs->nl->nbb = blk_num;
			}
			save_blks++;
		}
	}
	/* ============================= write saved information for online backup ========================== */
	if (online && (0 == cs_addrs->shmpool_buffer->failed))
	{
		/* -------- make sure everyone involved finishes -------- */
		cs_addrs->nl->nbb = BACKUP_NOT_IN_PROGRESS;
		/* By getting crit here, we ensure that there is no process still in transaction logic that sees
		 * (nbb != BACKUP_NOT_IN_PROGRESS).  After rel_crit(), any process that enters transaction logic will
		 * see (nbb == BACKUP_NOT_IN_PROGRESS) because we just set it to that value.  At this point, the backup
		 * buffer is complete and there will not be any more new entries in the backup buffer until the next
		 * backup.
		 */
		grab_crit(gv_cur_region);
		assert(cs_data == cs_addrs->hdr);
		if (dba_bg == cs_data->acc_meth)
		{	/* Now that we have crit, wait for any pending phase2 updates to finish.  Since phase2 updates
			 * happen outside of crit, we don't want them to keep writing to the backup temporary file even
			 * after the backup is complete and the temporary file has been deleted.
			 */
			if (cs_addrs->nl->wcs_phase2_commit_pidcnt && !wcs_phase2_commit_wait(cs_addrs, NULL))
			{
				gtm_putmsg(VARLSTCNT(7) ERR_COMMITWAITSTUCK, 5, process_id, 1,
					cs_addrs->nl->wcs_phase2_commit_pidcnt, DB_LEN_STR(gv_cur_region));
				rel_crit(gv_cur_region);
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				return FALSE;
			}
		}
		if (debug_mupip)
		{
			util_out_print("MUPIP INFO: Current Transaction # at end of backup is 0x!16@XQ", TRUE,
				&cs_data->trans_hist.curr_tn);
		}
		rel_crit(gv_cur_region);
		counter = 0;
		while (0 != cs_addrs->shmpool_buffer->backup_cnt)
		{
			if (0 != cs_addrs->shmpool_buffer->failed)
			{
				util_out_print("Process !UL encountered the following error.", TRUE,
					cs_addrs->shmpool_buffer->failed);
				if (0 != cs_addrs->shmpool_buffer->backup_errno)
					gtm_putmsg(VARLSTCNT(1) cs_addrs->shmpool_buffer->backup_errno);
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				return FALSE;
			}
			backup_buffer_flush(gv_cur_region);
			if (++counter > MAX_BACKUP_FLUSH_TRY)
			{
				gtm_putmsg(VARLSTCNT(1) ERR_BCKUPBUFLUSH);
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				return FALSE;
			}
			if (counter & 0xF)
				wcs_sleep(counter);
			else
			{	/* Force shmpool recovery to see if it can find the lost blocks */
				if (!shmpool_lock_hdr(gv_cur_region))
				{
					gtm_putmsg(VARLSTCNT(9) ERR_DBCCERR, 2, REG_LEN_STR(gv_cur_region),
						ERR_ERRCALL, 3, CALLFROM);
					free(outptr);
					free(bm_blk_buff);
					error_mupip = TRUE;
					COMMON_CLOSE(common);
					assert(FALSE);
					return FALSE;
				}
				shmpool_abandoned_blk_chk(gv_cur_region, TRUE);
				shmpool_unlock_hdr(gv_cur_region);
			}
		}
		/* -------- Open the temporary file -------- */
		temp_fab = cc$rms_fab;
		temp_fab.fab$b_fac = FAB$M_GET;
		temp_fab.fab$l_fna = list->backup_tempfile;
		temp_fab.fab$b_fns = strlen(list->backup_tempfile);
		temp_rab = cc$rms_rab;
		temp_rab.rab$l_fab = &temp_fab;
		for (lcnt = 1; MAX_OPEN_RETRY >= lcnt; lcnt++)
		{
			if (RMS$_FLK != (status = sys$open(&temp_fab, NULL, NULL)))
				break;
			wcs_sleep(lcnt);
		}
		if (RMS$_NORMAL != status)
		{
			gtm_putmsg(status, 0, temp_fab.fab$l_stv);
			util_out_print("WARNING: DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
			free(outptr);
			free(bm_blk_buff);
			error_mupip = TRUE;
			COMMON_CLOSE(common);
			return FALSE;
		}
		if (RMS$_NORMAL != (status = sys$connect(&temp_rab)))
		{
			gtm_putmsg(status, 0, temp_rab.rab$l_stv);
			util_out_print("WARNING: DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
			free(outptr);
			free(bm_blk_buff);
			error_mupip = TRUE;
			COMMON_CLOSE(common);
			return FALSE;
		}
		/* -------- read and write every record in the temporary file -------- */
		while (1)
		{
			temp_rab.rab$w_usz = outsize;
			temp_rab.rab$l_ubf = outptr;
			status = sys$get(&temp_rab);
			if (RMS$_NORMAL != status)
			{
				if (RMS$_EOF == status)
					status = RMS$_NORMAL;
				break;
			}
			assert(outsize == temp_rab.rab$w_rsz);	/* Still validly sized blk? */
			assert((outsize - SIZEOF(shmpool_blk_hdr)) >= ((blk_hdr_ptr_t)(outptr + SIZEOF(shmpool_blk_hdr)))->bsiz);
			COMMON_WRITE(common, outptr, temp_rab.rab$w_rsz);
		}
		if (RMS$_NORMAL != status)
		{
			gtm_putmsg(status, 0, temp_rab.rab$l_stv);
			util_out_print("WARNING: DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
			free(outptr);
			free(bm_blk_buff);
			error_mupip = TRUE;
			COMMON_CLOSE(common);
			return FALSE;
		}
		/* ---------------- Close the temporary file ----------------------- */
		if (RMS$_NORMAL != (status = sys$close(&temp_fab)))
		{
			gtm_putmsg(status, 0, temp_fab.fab$l_stv);
			util_out_print("WARNING: DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
			free(outptr);
			free(bm_blk_buff);
			error_mupip = TRUE;
			COMMON_CLOSE(common);
			return FALSE;
		}
	}
	/* ============================= write end_msg and fileheader ======================================= */
	if ((!online) || (0 == cs_addrs->shmpool_buffer->failed))
	{
		MEMCPY_LIT(outptr, END_MSG);
		/* Although the write only needs to be of length SIZEOF(END_MSG) - 1 for file IO, if the write is going
		 * to TCP we have to write all these records with a common length, so just write the "regular" sized
		 * buffer.  The extra garbage left over from the last write will be ignored as we key only on this end
		 * text.
		 */
		COMMON_WRITE(common, outptr, outsize);
		ptr1 = (uchar_ptr_t)header;
		size1 = ROUND_UP(SIZEOF(sgmnt_data), DISK_BLOCK_SIZE);
		ptr1_top = ptr1 + size1;
		for (; ptr1 < ptr1_top; ptr1 += size1)
		{
			if ((size1 = ptr1_top - ptr1) > mubmaxblk)
				size1 = (mubmaxblk / DISK_BLOCK_SIZE) * DISK_BLOCK_SIZE;
			COMMON_WRITE(common, ptr1, size1);
		}
		MEMCPY_LIT(outptr, HDR_MSG);
		COMMON_WRITE(common, outptr, SIZEOF(HDR_MSG));
		ptr1 = MM_ADDR(header);
		size1 = ROUND_UP(MASTER_MAP_SIZE(header), DISK_BLOCK_SIZE);
		ptr1_top = ptr1 + size1;
		for (; ptr1 < ptr1_top; ptr1 += size1)
		{
			if ((size1 = ptr1_top - ptr1) > mubmaxblk)
				size1 = (mubmaxblk / DISK_BLOCK_SIZE) * DISK_BLOCK_SIZE;
			COMMON_WRITE(common, ptr1, size1);
		}
		MEMCPY_LIT(outptr, MAP_MSG);
		COMMON_WRITE(common, outptr, SIZEOF(MAP_MSG));
	}
	/* ================== close backup destination, output and return ================================== */
	if (online && (0 != cs_addrs->shmpool_buffer->failed))
	{
		util_out_print("Process !UL encountered the following error.", TRUE, cs_addrs->shmpool_buffer->failed);
		if (0 != cs_addrs->shmpool_buffer->backup_errno)
			gtm_putmsg(VARLSTCNT(1) cs_addrs->shmpool_buffer->backup_errno);
		free(outptr);
		free(bm_blk_buff);
		error_mupip = TRUE;
		COMMON_CLOSE(common);
		return FALSE;
	}
	COMMON_CLOSE(common);
	free(outptr);
	free(bm_blk_buff);
	util_out_print("DB file !AD incrementally backed up in !AD", TRUE, fcb->fab$b_fns, fcb->fab$l_fna,
		file->len, file->addr);
	util_out_print("!UL blocks saved.", TRUE, save_blks);
	util_out_print("Transactions from 0x!16@XQ to 0x!16@XQ are backed up.", TRUE,
		&cs_addrs->shmpool_buffer->inc_backup_tn, &header->trans_hist.curr_tn);
	cs_addrs->hdr->last_inc_backup = header->trans_hist.curr_tn;
	if (record)
		cs_addrs->hdr->last_rec_backup = header->trans_hist.curr_tn;
	file_backed_up = TRUE;
	return TRUE;
}
/* This is a plain way to read a database file header.
 * The caller is responsible for dealing with concurrency issues, etc.
 * Parameters:
 *	fn     : full name of a database file.
 *	header : pointer to a database file header structure (may not be in shared memory).
 *	len    : size of the header (may be just SGMNT_HDR_LEN or SIZEOF_FILE_HDR_MAX).
 */
boolean_t file_head_read(char *fn, sgmnt_data_ptr_t header, int4 len)
{
	int		save_errno, fd, header_size;
	struct stat	stat_buf;

	error_def(ERR_DBFILOPERR);
	error_def(ERR_DBNOTGDS);

	header_size = sizeof(sgmnt_data);
	OPENFILE(fn, O_RDONLY, fd);
	if (-1 == fd)
	{
		save_errno = errno;
		gtm_putmsg(VARLSTCNT(5) ERR_DBFILOPERR, 2, LEN_AND_STR(fn), save_errno);
		return FALSE;
	}
	FSTAT_FILE(fd, &stat_buf, save_errno);
	if (-1 == save_errno)
	{
		save_errno = errno;
		gtm_putmsg(VARLSTCNT(5) ERR_DBFILOPERR, 2, LEN_AND_STR(fn), save_errno);
		CLOSEFILE(fd, save_errno);
		return FALSE;
	}
	if (!S_ISREG(stat_buf.st_mode) || stat_buf.st_size < header_size)
	{
		gtm_putmsg(VARLSTCNT(4) ERR_DBNOTGDS, 2, LEN_AND_STR(fn));
		CLOSEFILE(fd, save_errno);
		return FALSE;
	}
	LSEEKREAD(fd, 0, header, header_size, save_errno);
	if (0 != save_errno)
	{
		gtm_putmsg(VARLSTCNT(5) ERR_DBFILOPERR, 2, LEN_AND_STR(fn), save_errno);
		CLOSEFILE(fd, save_errno);
		return FALSE;
	}
	if (memcmp(header->label, GDS_LABEL, GDS_LABEL_SZ - 1))
	{
		gtm_putmsg(VARLSTCNT(4) ERR_DBNOTGDS, 2, LEN_AND_STR(fn));
		CLOSEFILE(fd, save_errno);
		return FALSE;
	}
	CHECK_DB_ENDIAN(header, strlen(fn), fn);
	assert(MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(header));
	assert(SGMNT_HDR_LEN == len || SIZEOF_FILE_HDR(header) <= len);
	if (SIZEOF_FILE_HDR(header) <= len)
	{
		LSEEKREAD(fd, ROUND_UP(SGMNT_HDR_LEN + 1, DISK_BLOCK_SIZE), MM_ADDR(header), MASTER_MAP_SIZE(header),
			save_errno);
		if (0 != save_errno)
		{
			gtm_putmsg(VARLSTCNT(5) ERR_DBFILOPERR, 2, LEN_AND_STR(fn), save_errno);
			CLOSEFILE(fd, save_errno);
			return FALSE;
		}
	}
	CLOSEFILE(fd, save_errno);
	if (0 != save_errno)
	{
		gtm_putmsg(VARLSTCNT(5) ERR_DBFILOPERR, 2, LEN_AND_STR(fn), save_errno);
		return FALSE;
	}
	return TRUE;
}
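/* Illustrative usage sketch (not part of the original source): a hypothetical caller that reads only the
 * fixed-length portion of the file header, as described by the parameter comments above.  The file name and
 * variable names are assumptions for the example; errors are already reported by file_head_read itself.
 *
 *	sgmnt_data	header;
 *
 *	if (!file_head_read("mumps.dat", &header, SGMNT_HDR_LEN))
 *		return FALSE;
 *	// header.trans_hist.curr_tn, header.blk_size, etc. can now be examined by the caller
 */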
void mu_int_reg(gd_region *reg, boolean_t *return_value)
{
	boolean_t		read_only, was_crit;
	freeze_status		status;
	node_local_ptr_t	cnl;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
#	ifdef DEBUG
	boolean_t		need_to_wait = FALSE;
	int			trynum;
	uint4			curr_wbox_seq_num;
#	endif
	sgmnt_data		*csd_copy_ptr;
	gd_segment		*seg;
	int			gtmcrypt_errno;

	*return_value = FALSE;
	UNIX_ONLY(jnlpool_init_needed = TRUE);
	ESTABLISH(mu_int_reg_ch);
	if (dba_usr == reg->dyn.addr->acc_meth)
	{
		util_out_print("!/Can't integ region !AD; not GDS format", TRUE, REG_LEN_STR(reg));
		mu_int_skipreg_cnt++;
		return;
	}
	gv_cur_region = reg;
	if (reg_cmcheck(reg))
	{
		util_out_print("!/Can't integ region across network", TRUE);
		mu_int_skipreg_cnt++;
		return;
	}
	gvcst_init(gv_cur_region);
	if (gv_cur_region->was_open)
	{	/* already open under another name */
		gv_cur_region->open = FALSE;
		return;
	}
	change_reg();
	csa = &FILE_INFO(gv_cur_region)->s_addrs;
	cnl = csa->nl;
	csd = csa->hdr;
	read_only = gv_cur_region->read_only;
	assert(NULL != mu_int_master);
	/* Ensure that we don't see an increase in the file header and master map size compared to their maximum values */
	assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd)));
	/* ONLINE INTEG, if asked for explicitly by specifying -ONLINE, is an error if the db has partial V4 blocks.
	 * However, if -ONLINE is not explicitly specified but rather assumed implicitly (as the default for -REG),
	 * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified.
	 */
#	ifdef GTM_SNAPSHOT
	if (!csd->fully_upgraded)
	{
		ointeg_this_reg = FALSE;	/* Turn off ONLINE INTEG for this region */
		if (online_specified)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region));
			util_out_print(NO_ONLINE_ERR_MSG, TRUE);
			mu_int_skipreg_cnt++;
			return;
		}
	}
#	endif
	if (!ointeg_this_reg || read_only)
	{
		status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE);
		switch (status)
		{
			case REG_ALREADY_FROZEN:
				UNIX_ONLY(if (csa->read_only_fs) break);
				util_out_print("!/Database for region !AD is already frozen, not integing",
					TRUE, REG_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_HAS_KIP:
				/* We have already waited for KIP to reset. This time do not wait for KIP */
				status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE);
				if (REG_ALREADY_FROZEN == status)
				{
					UNIX_ONLY(if (csa->read_only_fs) break);
					util_out_print("!/Database for region !AD is already frozen, not integing",
						TRUE, REG_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				}
				break;
			case REG_FREEZE_SUCCESS:
				break;
			default:
				assert(FALSE);
		}