/* mu_int_maps: MUPIP INTEG pass over every local bit map block in the database.
 *
 * For each bitmap block this routine:
 *   - marks the bitmap block itself busy in the integ's in-memory map (mu_int_locals);
 *   - reads the on-disk bitmap (mu_int_read) and validates its level and block size;
 *   - optionally resets the block's transaction number (tn_reset_this_reg) and rewrites it;
 *   - flags blocks whose tn is not less than the file's current tn;
 *   - compares the on-disk bitmap, int4 at a time, against the in-memory map built while
 *     traversing the trees, reporting per-block discrepancies (DBBFSTAT / DBMRKFREE / DBMRKBUSY);
 *   - cross-checks the full/non-full status of the local map against the master map bit
 *     (mu_int_master) and reports any of the full/free inconsistency errors.
 *
 * Communicates results through the file-scope integ state (mu_int_path / mu_int_plen /
 * mu_int_err / mu_int_errknt and the trans_errors / mu_map_errs counters); takes no
 * arguments and returns nothing.  NOTE(review): mu_int_data appears to be the integ's copy
 * of the file header (trans_hist, bplmap, desired_db_format are read from it) — confirm
 * against the declaring header.
 */
void mu_int_maps(void)
{
	unsigned char	*local;
	uchar_ptr_t	disk;
	boolean_t	agree, disk_full, local_full, master_full;
	int		maps, mapsize, mcnt, lcnt, bcnt;
	unsigned int	level;
	uint_ptr_t	dskmap_p;
	uint4		dskmap, lfree, *lmap, map_blk_size;
	block_id	blkno, last_bmp;
	enum db_ver	ondsk_blkver;

	mu_int_offset[0] = 0;
	/* number of local bit maps = ceiling(total_blks / bplmap) */
	maps = (mu_int_data.trans_hist.total_blks + mu_int_data.bplmap - 1) / mu_int_data.bplmap;
	local = mu_int_locals;
	map_blk_size = BM_SIZE(mu_int_data.bplmap);
	/* block number of the last (possibly partial) bitmap in the file */
	last_bmp = ((mu_int_data.trans_hist.total_blks / mu_int_data.bplmap) * mu_int_data.bplmap);
	mapsize = mu_int_data.bplmap;	/* blocks covered by each map; shrunk below for the last map */
	for (mcnt = 0; mcnt < maps; mcnt++, local += BM_MINUS_BLKHDR_SIZE(mapsize))
	{
		assert(mapsize == mu_int_data.bplmap);
		blkno = mcnt * mu_int_data.bplmap;
		/* the bitmap block itself is always busy in the in-memory map */
		bml_busy(blkno, mu_int_locals);
		disk = mu_int_read(blkno, &ondsk_blkver);	/* ondsk_blkver set to GDSV4 or GDSV6 (GDSVCURR) */
		if (!disk)
		{	/* could not read the bitmap block: report and move on to the next map */
			mu_int_path[0] = blkno;
			mu_int_plen = 1;
			mu_int_err(ERR_DBREADBM, 0, 0, 0, 0, 0, 0, LCL_MAP_LEVL);
			continue;
		}
		/* a local bitmap must carry the reserved local-map level; a wrong level is reported
		 * but processing continues (size checks below decide whether to skip the map)
		 */
		if (LCL_MAP_LEVL != (level = (unsigned int)((blk_hdr_ptr_t)disk)->levl))
		{
			mu_int_path[0] = blkno;
			mu_int_plen = 1;
			mu_int_err(ERR_DBLVLINC, 0, 0, 0, 0, 0, 0, level);
		}
		if (((blk_hdr_ptr_t)disk)->bsiz < map_blk_size)
		{	/* bitmap block smaller than a full map: contents unusable, skip it */
			mu_int_path[0] = blkno;
			mu_int_plen = 1;
			mu_int_err(ERR_DBMBSIZMN, 0, 0, 0, 0, 0, 0, level);
			continue;
		}
		if (((blk_hdr_ptr_t)disk)->bsiz > map_blk_size)
		{	/* bitmap block larger than a full map: likewise skip */
			mu_int_path[0] = blkno;
			mu_int_plen = 1;
			mu_int_err(ERR_DBMBSIZMX, 0, 0, 0, 0, 0, 0, level);
			continue;
		}
		if (tn_reset_this_reg)
		{	/* INTEG -TN_RESET: zero the tn and rewrite the bitmap block */
			((blk_hdr_ptr_t)disk)->tn = 0;
			mu_int_write(blkno, disk);
			if (GDSVCURR != mu_int_data.desired_db_format)
				mu_int_blks_to_upgrd++;
		} else if (GDSVCURR != ondsk_blkver)
			mu_int_blks_to_upgrd++;	/* count down-level bitmap blocks pending upgrade */
		if (((blk_hdr_ptr_t)disk)->tn >= mu_int_data.trans_hist.curr_tn)
		{	/* block tn must be strictly less than the file's current tn;
			 * display at most disp_trans_errors of these, count the rest silently
			 */
			if (trans_errors < disp_trans_errors)
			{
				mu_int_path[0] = blkno;
				mu_int_plen = 1;
				mu_int_err(ERR_DBMBTNSIZMX, 0, 0, 0, 0, 0, 0, level);
				trans_errors++;
			} else
			{
				mu_int_errknt++;
				trans_errors++;
			}
		}
		/* master map: a clear bit means this local map is full (no free blocks) */
		master_full = !bit_set(mcnt, mu_int_master);
		if (last_bmp == blkno)
			mapsize = (mu_int_data.trans_hist.total_blks - blkno);	/* last map covers the remainder only */
		disk_full = (NO_FREE_SPACE == bml_find_free(0, disk + SIZEOF(blk_hdr), mapsize));
		agree = TRUE;
		for (lcnt = 0, dskmap_p = (uint_ptr_t)(disk + SIZEOF(blk_hdr)), lmap = (uint4 *)local;
			lcnt < mapsize;
			lcnt += SIZEOF(int4) * BITS_PER_UCHAR / BML_BITS_PER_BLK, dskmap_p++, lmap++)	/* # of bits / bits per blk */
		{
			GET_LONG(dskmap, dskmap_p);
			/* Quick check for anything wrong with the bitmap entries that fit into one int4:
			 * the free bit must match the in-memory copy and there must be no illegal
			 * combinations.  Each block has two bits (00 = busy, 01 = free, 10 = unused
			 * (invalid bitmap value), 11 = free block which was previously used).  The
			 * following computes whether any illegal combination exists within the int4:
			 *
			 *	    00011011	take original value from bitmap on disk.
			 *	and 01010101	mask off "reused" bit.
			 *	----------
			 *	    00010001	= free blocks.
			 *	xor 01010101	toggle free bit
			 *	----------
			 *	    01000100	= busy blocks
			 *	<< 1
			 *	----------
			 *	    10001000	= mask checking the "reused" bit for the busy blocks
			 *	and 00011011	original value from bitmap on disk.
			 *	----------
			 *	    00001000	non-zero indicates an illegal combination somewhere
			 *			in the int4.
			 */
			if ((dskmap & SIXTEEN_BLKS_FREE) != *lmap
				|| ((((dskmap & SIXTEEN_BLKS_FREE) ^ SIXTEEN_BLKS_FREE) << 1) & dskmap))
			{
				if (agree)
				{	/* first disagreement in this map: report the map-level error once */
					agree = FALSE;
					mu_int_path[0] = blkno;
					mu_int_plen = 1;
					if (mu_map_errs < disp_map_errors)
						mu_int_err(ERR_DBLOCMBINC, 0, 0, 0, 0, 0, 0, level);
					else
						mu_int_errknt++;
				}
				/* drill down to the individual block entries within this int4 */
				for (bcnt = 0; bcnt < SIZEOF(int4) * BITS_PER_UCHAR / BML_BITS_PER_BLK; bcnt++)
				{
					if (!(mu_int_isvalid_mask[bcnt] ^ (dskmap & mu_int_mask[bcnt])))
					{	/* entry holds the invalid "10" combination */
						mu_int_path[0] = blkno + lcnt + bcnt;
						mu_int_plen = 1;
						mu_int_err(ERR_DBBFSTAT, 0, 0, 0, 0, 0, 0, level);
					} else if ((lfree = mu_int_isfree_mask[bcnt] & *(lmap))
						^ mu_int_isfree_mask[bcnt] & dskmap)
					{	/* free bit on disk differs from the in-memory map */
						mu_int_path[0] = blkno + lcnt + bcnt;
						mu_int_plen = 1;
						/* for the following two mu_int_err(), we should actually be
						 * calculating the actual level of the mu_int_path[0].  But this
						 * would need a read() of the block, which might slow down the
						 * process.  We should consider this however at a later time.
						 */
						if (!lfree)
							mu_int_err(ERR_DBMRKFREE, 0, 0, 0, 0, 0, 0, LCL_MAP_LEVL);
						else if (mu_map_errs < disp_map_errors)
						{
							mu_int_err(ERR_DBMRKBUSY, 0, 0, 0, 0, 0, 0, LCL_MAP_LEVL);
							mu_map_errs++;
						} else
						{
							mu_int_errknt++;
							mu_map_errs++;
						}
					}
				}
			}
		}
		if (!agree)
		{	/* disk and in-memory maps disagree: cross-check fullness against the master map */
			local_full = (NO_FREE_SPACE == bml_find_free(0, local, mapsize));
			if (local_full || disk_full)
			{
				mu_int_path[0] = blkno;
				mu_int_plen = 1;
				if (mu_map_errs < disp_map_errors)
				{	/* pick the error matching the (master, local) fullness combination */
					mu_int_err(master_full ? (local_full ? ERR_DBMBPFLINT : ERR_DBMBPFLDLBM)
						: (local_full ? ERR_DBMBPFRDLBM : ERR_DBMBPFRINT),
						0, 0, 0, 0, 0, 0, level);
				} else
					mu_int_errknt++;
			} else if (master_full)
			{	/* neither copy is full yet master map says full */
				if (mu_map_errs < disp_map_errors)
				{
					mu_int_path[0] = blkno;
					mu_int_plen = 1;
					mu_int_err(ERR_DBMBPFLDIS, 0, 0, 0, 0, 0, 0, level);
				} else
					mu_int_errknt++;
			}
		} else if (disk_full ^ master_full)
		{	/* maps agree with each other but the master map bit contradicts them */
			if (mu_map_errs < disp_map_errors)
			{
				mu_int_path[0] = blkno;
				mu_int_plen = 1;
				mu_int_err(master_full ? ERR_DBMBPINCFL : ERR_DBMBMINCFRE, 0, 0, 0, 0, 0, 0, level);
			} else
				mu_int_errknt++;
		}
		free(disk);	/* buffer returned by mu_int_read */
	}
	if (mu_map_errs >= disp_map_errors)
	{	/* summarize the incorrectly-busy errors that were suppressed above */
		util_out_print("Maximum number of incorrectly busy errors to display: !UL, has been exceeded", TRUE,
			disp_map_errors);
		util_out_print("!UL incorrectly busy errors encountered", TRUE, mu_map_errs);
	}
	return;
}
/* dse_m_rest: recursively walk the block tree rooted at 'blk', marking every reachable
 * block busy in the caller's scratch bitmap list and decrementing the free-block count
 * (*blks_ptr) for each block newly marked.  Used by DSE MAPS -RESTORE_ALL (see dse_maps)
 * to rebuild the local bit maps from the actual tree structure.
 *
 * Recursion continues into children while the current block's level is above 1, and — when
 * in_dir_tree is set — also through level-1 and level-0 directory-tree blocks, whose level-0
 * records point at the roots of global variable trees.  Bad pointers (negative, beyond
 * total_blks, or pointing at a bitmap block) are reported and skipped.
 */
void dse_m_rest (
	block_id	blk,		/* block number */
	unsigned char	*bml_list,	/* start of local list of local bit maps */
	int4		bml_size,	/* size of each entry in *bml_list */
	sm_vuint_ptr_t	blks_ptr,	/* total free blocks */
	bool		in_dir_tree)
{
	sm_uc_ptr_t	bp, b_top, rp, r_top, bml_ptr, np, ptr;
	unsigned char	util_buff[MAX_UTIL_LEN];
	block_id	next;
	int		util_len;
	int4		dummy_int;
	cache_rec_ptr_t	dummy_cr;
	int4		bml_index;
	short		level, rsize;
	int4		bplmap;

	error_def(ERR_DSEBLKRDFAIL);

	if (!(bp = t_qread (blk, &dummy_int, &dummy_cr)))
		rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
	/* clamp the block top to sane bounds in case the header's bsiz is corrupt */
	if (((blk_hdr_ptr_t) bp)->bsiz > cs_addrs->hdr->blk_size)
		b_top = bp + cs_addrs->hdr->blk_size;
	else if (((blk_hdr_ptr_t) bp)->bsiz < sizeof(blk_hdr))
		b_top = bp + sizeof(blk_hdr);
	else
		b_top = bp + ((blk_hdr_ptr_t) bp)->bsiz;
	level = ((blk_hdr_ptr_t)bp)->levl;
	bplmap = cs_addrs->hdr->bplmap;
	for (rp = bp + sizeof (blk_hdr); rp < b_top ;rp = r_top)
	{
		if (in_dir_tree || level > 1)		/* reread block because it may have been flushed from read */
		{					/* cache due to LRU buffer scheme and reads in recursive   */
			if (!(np = t_qread(blk,&dummy_int,&dummy_cr)))	/* calls to dse_m_rest.             */
				rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
			if (np != bp)
			{	/* buffer moved: rebase all pointers into the block */
				b_top = np + (b_top - bp);
				rp = np + (rp - bp);
				r_top = np + (r_top - bp);
				bp = np;
			}
		}
		GET_SHORT(rsize,&((rec_hdr_ptr_t)rp)->rsiz);
		r_top = rp + rsize;
		if (r_top > b_top)
			r_top = b_top;
		/* record too small to carry a header plus a block pointer: stop scanning */
		if (r_top - rp < (sizeof (rec_hdr) + sizeof (block_id)))
			break;
		if (in_dir_tree && level == 0)
		{	/* directory-tree leaf: the root pointer follows the key, which is terminated
			 * by two consecutive zero bytes; scan past them
			 */
			for (ptr = rp + sizeof(rec_hdr); ; )
			{
				if (*ptr++ == 0 && *ptr++ == 0)
					break;
			}
			GET_LONG(next,ptr);
		} else
			GET_LONG(next,r_top - sizeof (block_id));	/* index record: pointer is the last 4 bytes */
		/* reject pointers that are out of range or target a bitmap block */
		if (next < 0 || next >= cs_addrs->ti->total_blks || (next / bplmap * bplmap == next))
		{
			memcpy(util_buff,"Invalid pointer in block ",25);
			util_len = 25;
			util_len += i2hex_nofill(blk, &util_buff[util_len], 8);
			memcpy(&util_buff[util_len], " record offset ",15);
			util_len += 15;
			util_len += i2hex_nofill((int)(rp - bp), &util_buff[util_len], 4);
			util_buff[util_len] = 0;
			util_out_print((char*)util_buff,TRUE);
			continue;
		}
		bml_index = next / bplmap;
		bml_ptr = bml_list + bml_index * bml_size;
		/* bml_busy marks the block busy in the scratch map; recurse only on first visit.
		 * NOTE(review): presumably bml_busy returns non-zero when the bit changed state —
		 * confirm against its declaration.
		 */
		if (bml_busy(next - next / bplmap * bplmap, bml_ptr + sizeof(blk_hdr)))
		{
			*blks_ptr = *blks_ptr - 1;
			if (((blk_hdr_ptr_t) bp)->levl > 1)
			{
				dse_m_rest (next, bml_list, bml_size, blks_ptr, in_dir_tree);
			} else if (in_dir_tree)
			{	/* level 0 or 1 in the directory tree: descend, passing the current level
				 * so a level-0 parent stops treating children as directory blocks
				 */
				assert(((blk_hdr_ptr_t) bp)->levl == 0 || ((blk_hdr_ptr_t) bp)->levl == 1);
				dse_m_rest (next, bml_list, bml_size, blks_ptr, ((blk_hdr_ptr_t)bp)->levl);
			}
		}
	}
	return;
}
/* dse_maps: implements the DSE MAPS command.  Dispatches on the qualifier present:
 *   -FREE / -BUSY     : set the given block free/busy in its local bit map (bm_setmap);
 *   -MASTER           : recompute the master-map bit for the map covering the block;
 *   -RESTORE_ALL      : rebuild every local bit map from the actual tree structure
 *                       (via dse_m_rest) and rewrite them all under crit;
 *   (none of the above): report whether the block is marked free or busy.
 * -BLOCK selects the target block (defaults to, and updates, patch_curr_blk).
 * Any map-updating qualifier on a read-only region raises DBRDONLY.  Returns nothing.
 */
void dse_maps(void)
{
	block_id		blk, bml_blk;
	blk_segment		*bs1, *bs_ptr;
	int4			blk_seg_cnt, blk_size;	/* needed for BLK_INIT, BLK_SEG and BLK_FINI macros */
	sm_uc_ptr_t		bp;
	char			util_buff[MAX_UTIL_LEN];
	int4			bml_size, bml_list_size, blk_index, bml_index;
	int4			total_blks, blks_in_bitmap;
	int4			bplmap, dummy_int;
	unsigned char		*bml_list;
	cache_rec_ptr_t		cr, dummy_cr;
	bt_rec_ptr_t		btr;
	int			util_len;
	uchar_ptr_t		blk_ptr;
	boolean_t		was_crit;
	uint4			jnl_status;
	srch_blk_status		blkhist;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;

	/* all map-modifying qualifiers require a writable region */
	if (CLI_PRESENT == cli_present("BUSY") || CLI_PRESENT == cli_present("FREE")
		|| CLI_PRESENT == cli_present("MASTER") || CLI_PRESENT == cli_present("RESTORE_ALL"))
	{
		if (gv_cur_region->read_only)
			rts_error(VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region));
	}
	CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
	csa = cs_addrs;
	assert(&FILE_INFO(gv_cur_region)->s_addrs == csa);
	was_crit = csa->now_crit;	/* remember so we only grab/release crit when we were not already in it */
	if (csa->critical)
		crash_count = csa->critical->crashcnt;
	csd = csa->hdr;
	bplmap = csd->bplmap;
	if (CLI_PRESENT == cli_present("BLOCK"))
	{
		if (!cli_get_hex("BLOCK", (uint4 *)&blk))
			return;
		if (blk < 0 || blk >= csa->ti->total_blks)
		{
			util_out_print("Error: invalid block number.", TRUE);
			return;
		}
		patch_curr_blk = blk;	/* remember for subsequent DSE commands */
	} else
		blk = patch_curr_blk;
	if (CLI_PRESENT == cli_present("FREE"))
	{	/* mark 'blk' free in its local bit map */
		if (0 == bplmap)
		{
			util_out_print("Cannot perform map updates: bplmap field of file header is zero.", TRUE);
			return;
		}
		if (blk / bplmap * bplmap == blk)
		{	/* bitmap blocks themselves cannot be marked free/busy */
			util_out_print("Cannot perform action on a map block.", TRUE);
			return;
		}
		bml_blk = blk / bplmap * bplmap;	/* bitmap block covering 'blk' */
		bm_setmap(bml_blk, blk, FALSE);
		return;
	}
	if (CLI_PRESENT == cli_present("BUSY"))
	{	/* mark 'blk' busy in its local bit map */
		if (0 == bplmap)
		{
			util_out_print("Cannot perform map updates: bplmap field of file header is zero.", TRUE);
			return;
		}
		if (blk / bplmap * bplmap == blk)
		{
			util_out_print("Cannot perform action on a map block.", TRUE);
			return;
		}
		bml_blk = blk / bplmap * bplmap;
		bm_setmap(bml_blk, blk, TRUE);
		return;
	}
	blk_size = csd->blk_size;
	if (CLI_PRESENT == cli_present("MASTER"))
	{	/* recompute the master-map bit for the local map covering 'blk' */
		if (0 == bplmap)
		{
			util_out_print("Cannot perform maps updates: bplmap field of file header is zero.", TRUE);
			return;
		}
		if (!was_crit)
			grab_crit(gv_cur_region);
		bml_blk = blk / bplmap * bplmap;
		if (dba_mm == csd->acc_meth)
			bp = MM_BASE_ADDR(csa) + (off_t)bml_blk * blk_size;	/* MM: map directly into the file */
		else
		{
			assert(dba_bg == csd->acc_meth);
			if (!(bp = t_qread(bml_blk, &dummy_int, &dummy_cr)))
				rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
		}
		/* the last local map covers only the remaining blocks */
		if ((csa->ti->total_blks / bplmap) * bplmap == bml_blk)
			total_blks = (csa->ti->total_blks - bml_blk);
		else
			total_blks = bplmap;
		/* master-map bit set means the local map has free space */
		if (NO_FREE_SPACE == bml_find_free(0, bp + SIZEOF(blk_hdr), total_blks))
			bit_clear(bml_blk / bplmap, csa->bmm);
		else
			bit_set(bml_blk / bplmap, csa->bmm);
		if (bml_blk > csa->nl->highest_lbm_blk_changed)
			csa->nl->highest_lbm_blk_changed = bml_blk;	/* so the master map gets flushed appropriately */
		if (!was_crit)
			rel_crit(gv_cur_region);
		return;
	}
	if (CLI_PRESENT == cli_present("RESTORE_ALL"))
	{	/* rebuild all local bit maps from the actual tree structure */
		if (0 == bplmap)
		{
			util_out_print("Cannot perform maps updates: bplmap field of file header is zero.", TRUE);
			return;
		}
		total_blks = csa->ti->total_blks;
		assert(ROUND_DOWN2(blk_size, 2 * SIZEOF(int4)) == blk_size);
		bml_size = BM_SIZE(bplmap);
		bml_list_size = (total_blks + bplmap - 1) / bplmap * bml_size;
		/* scratch copy of every local map, initialized all-free */
		bml_list = (unsigned char *)malloc(bml_list_size);
		for (blk_index = 0, bml_index = 0;  blk_index < total_blks; blk_index += bplmap, bml_index++)
			bml_newmap((blk_hdr_ptr_t)(bml_list + bml_index * bml_size), bml_size, csa->ti->curr_tn);
		if (!was_crit)
		{
			grab_crit(gv_cur_region);
			csa->hold_onto_crit = TRUE;	/* need to do this AFTER grab_crit */
		}
		blk = get_dir_root();
		assert(blk < bplmap);
		/* start from "everything free except the bitmap blocks themselves" ... */
		csa->ti->free_blocks = total_blks - DIVIDE_ROUND_UP(total_blks, bplmap);
		/* ... then mark the directory root busy and walk the tree marking the rest */
		bml_busy(blk, bml_list + SIZEOF(blk_hdr));
		csa->ti->free_blocks = csa->ti->free_blocks - 1;
		dse_m_rest(blk, bml_list, bml_size, &csa->ti->free_blocks, TRUE);
		/* write each rebuilt local map back to the database, one mini-transaction per map */
		for (blk_index = 0, bml_index = 0;  blk_index < total_blks; blk_index += bplmap, bml_index++)
		{
			t_begin_crit(ERR_DSEFAIL);
			CHECK_TN(csa, csd, csd->trans_hist.curr_tn);	/* can issue rts_error TNTOOLARGE */
			CWS_RESET;
			CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
			assert(csa->ti->early_tn == csa->ti->curr_tn);
			blk_ptr = bml_list + bml_index * bml_size;
			blkhist.blk_num = blk_index;
			if (!(blkhist.buffaddr = t_qread(blkhist.blk_num, &blkhist.cycle, &blkhist.cr)))
				rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
			BLK_INIT(bs_ptr, bs1);
			BLK_SEG(bs_ptr, blk_ptr + SIZEOF(blk_hdr), bml_size - SIZEOF(blk_hdr));
			BLK_FINI(bs_ptr, bs1);
			t_write(&blkhist, (unsigned char *)bs1, 0, 0, LCL_MAP_LEVL, TRUE, FALSE, GDS_WRITE_KILLTN);
			BUILD_AIMG_IF_JNL_ENABLED(csd, csa->ti->curr_tn);
			t_end(&dummy_hist, NULL, csa->ti->curr_tn);
		}
		/* Fill in master map */
		for (blk_index = 0, bml_index = 0;  blk_index < total_blks; blk_index += bplmap, bml_index++)
		{
			blks_in_bitmap = (blk_index + bplmap <= total_blks) ? bplmap : total_blks - blk_index;
			assert(1 < blks_in_bitmap);	/* the last valid block in the database should never be a bitmap block */
			if (NO_FREE_SPACE != bml_find_free(0, (bml_list + bml_index * bml_size) + SIZEOF(blk_hdr),
					blks_in_bitmap))
				bit_set(blk_index / bplmap, csa->bmm);
			else
				bit_clear(blk_index / bplmap, csa->bmm);
			if (blk_index > csa->nl->highest_lbm_blk_changed)
				csa->nl->highest_lbm_blk_changed = blk_index;	/* so the master map gets flushed */
		}
		if (!was_crit)
		{
			csa->hold_onto_crit = FALSE;	/* need to do this before the rel_crit */
			rel_crit(gv_cur_region);
		}
		if (unhandled_stale_timer_pop)
			process_deferred_stale();
		free(bml_list);
		csd->kill_in_prog = csd->abandoned_kills = 0;	/* maps rebuilt: any interrupted kills are now moot */
		return;
	}
	/* no action qualifier: just report the block's free/busy status */
	MEMCPY_LIT(util_buff, "!/Block ");
	util_len = SIZEOF("!/Block ") - 1;
	util_len += i2hex_nofill(blk, (uchar_ptr_t)&util_buff[util_len], 8);
	memcpy(&util_buff[util_len], " is marked !AD in its local bit map.!/",
		SIZEOF(" is marked !AD in its local bit map.!/") - 1);
	util_len += SIZEOF(" is marked !AD in its local bit map.!/") - 1;
	util_buff[util_len] = 0;
	if (!was_crit)
		grab_crit(gv_cur_region);
	util_out_print(util_buff, TRUE, 4, dse_is_blk_free(blk, &dummy_int, &dummy_cr) ? "free" : "busy");
	if (!was_crit)
		rel_crit(gv_cur_region);
	return;
}