/* Lay out the block-table (bt) structures that immediately follow the file header in the
 * shared segment and record their offsets in the node-local area, then initialize them.
 *
 * Layout recorded (offsets relative to the start of the file header):
 *   file header | bt hash buckets (bt_header_off) | th queue anchor (th_base_off) |
 *   bt records (bt_base_off)
 *
 * csa : segment address block for the region; csa->hdr and csa->nl must be valid.
 * Returns nothing; GTMASSERTs if the file header is not quad-word aligned.
 */
void bt_malloc(sgmnt_addrs *csa)
{
	unsigned int		n;
	sgmnt_data_ptr_t	csd;

	csd = csa->hdr;
	/* check that the file header is quad word aligned */
	if ((-(SIZEOF(uint4) * 2) & (sm_long_t)csd) != (sm_long_t)csd)
		GTMASSERT;
	if ((-(SIZEOF(uint4) * 2) & SIZEOF_FILE_HDR(csd)) != SIZEOF_FILE_HDR(csd))
		GTMASSERT;
	csa->nl->bt_header_off = (n = (uint4)(SIZEOF_FILE_HDR(csd)));
	csa->nl->th_base_off = (n += csd->bt_buckets * SIZEOF(bt_rec));	/* hash table */
	csa->nl->th_base_off += SIZEOF(que_ent);	/* tnque comes after fl and bl of blkque */
	csa->nl->bt_base_off = (n += SIZEOF(bt_rec));	/* th_queue anchor referenced above */
	/* Accumulate the final term OUTSIDE the assert: the original folded "n +=" into the
	 * assert() argument, a side effect that silently vanishes in non-debug builds where
	 * assert() compiles to nothing. The extra add in pro builds is harmless since n is
	 * not used past this point.
	 */
	n += csd->n_bts * SIZEOF(bt_rec);
	assert(n == (SIZEOF_FILE_HDR(csd)) + (BT_SIZE(csd)));	/* DON'T use n after this */
	bt_init(csa);
	bt_refresh(csa, TRUE);
	return;
}
/* AST (asynchronous system trap) completion routine for the cluster control program's
 * write-mode lock request on a database region (the assert on lib$ast_in_prog() confirms
 * it only runs at AST level).  Dispatches on the condition code of the completed lock
 * operation (db->wm_iosb.cond): retries on deadlock, bails out on cancel, and on success
 * decides whether the cached transaction history must be re-read from disk before
 * proceeding into write mode.
 *
 * pdb : pointer to the ccp_db_header pointer for the affected region.
 */
void ccp_reqwm_interrupt(ccp_db_header **pdb)
{
	ccp_db_header	*db;
	sgmnt_addrs	*csa;
	uint4		status;

	assert(lib$ast_in_prog());
	db = *pdb;
	csa = db->segment;
	if (csa == NULL || csa->nl->ccp_state == CCST_CLOSED)
		return;	/* region already detached or cluster state closed; nothing to do */
	switch (db->wm_iosb.cond)
	{
	case SS$_DEADLOCK:
		ccp_signal_cont(SS$_DEADLOCK);	/* Just try again */
		ccp_request_write_mode(db);
		return;
	case SS$_CANCEL:	/* Lock cancelled by close */
		return;
	case SS$_VALNOTVALID:	/* Force reads from disk */
		/* Lock value block is stale; zero the cached sequence numbers so the
		 * comparison below fails and the transaction history is re-read.
		 */
		db->wm_iosb.valblk[CCP_VALBLK_TRANS_HIST] = 0;
		db->last_lk_sequence = db->master_map_start_tn = 0;
		/* Drop through ... */
	case SS$_NORMAL:
		if (db->wm_iosb.valblk[CCP_VALBLK_TRANS_HIST] == csa->ti->curr_tn + csa->ti->lock_sequence)
		{	/* No change to current tn, do not need to update header */
			if (csa->now_crit)
			{	/* We hold crit; release it before continuing */
				assert (csa->nl->in_crit == process_id);
				csa->nl->in_crit = 0;
				(void)mutex_unlockw(csa->critical, csa->critical->crashcnt, &csa->now_crit);	/***** Check error status here? *****/
			}
			ccp_writedb5(db);
		}
		else
		{	/* Cached history is out of date; grab crit (if free) and re-read it */
			if (csa->nl->in_crit == 0)
			{
				if (mutex_lockwim(csa->critical, csa->critical->crashcnt, &csa->now_crit) == cdb_sc_normal)
					csa->nl->in_crit = process_id;	/* now_crit was set by mutex_lockwim */
				else if (csa->nl->in_crit == 0)	/***** Why is this re-tested? *****/
				{	/* Could not get crit immediately; reschedule this whole
					 * AST via a 100-msec timer and retry then.
					 */
					status = sys$setimr(0, delta_100_msec, ccp_reqwm_interrupt, &db->wmcrit_timer_id, 0);
					if (status != SS$_NORMAL)
						ccp_signal_cont(status);	/***** Is this reasonable? *****/
					return;
				}
			}
			/* Queue an async read of the transaction history plus block table from
			 * disk; ccp_writedb2 is the completion AST that continues processing.
			 */
			status = sys$qio(0, FILE_INFO(db->greg)->fab->fab$l_stv, IO$_READVBLK,
					&db->qio_iosb, ccp_writedb2, db,
					&db->glob_sec->trans_hist, BT_SIZE(csa->hdr) + SIZEOF(th_index),
					TH_BLOCK, 0, 0, 0);
			if (status != SS$_NORMAL)
				ccp_signal_cont(status);	/***** Is this reasonable? *****/
		}
		return;
	default:
		/* Unexpected lock status; report it and give up this attempt */
		ccp_signal_cont(db->wm_iosb.cond);	/***** Is this reasonable? *****/
		return;
	}
}
uint4 mur_block_count_correct(reg_ctl_list *rctl) { unsigned int native_size, size; sgmnt_data_ptr_t mu_data; int4 mu_int_ovrhd; uint4 total_blks; uint4 status; uint4 new_bit_maps, bplmap, new_blocks; MUR_CHANGE_REG(rctl); mu_data = cs_data; switch (mu_data->acc_meth) { default: GTMASSERT; break; #if defined(VMS) && defined(GT_CX_DEF) case dba_bg: /* necessary to do calculation in this manner to prevent double rounding causing an error */ if (mu_data->unbacked_cache) mu_int_ovrhd = DIVIDE_ROUND_UP(SIZEOF_FILE_HDR(mu_data) + mu_data->free_space + mu_data->lock_space_size, DISK_BLOCK_SIZE); else mu_int_ovrhd = DIVIDE_ROUND_UP(SIZEOF_FILE_HDR(mu_data) + BT_SIZE(mu_data) + mu_data->free_space + mu_data->lock_space_size, DISK_BLOCK_SIZE); break; #else case dba_bg: #endif case dba_mm: mu_int_ovrhd = (int4)DIVIDE_ROUND_UP(SIZEOF_FILE_HDR(mu_data) + mu_data->free_space, DISK_BLOCK_SIZE); break; } mu_int_ovrhd += 1; assert(mu_int_ovrhd == mu_data->start_vbn); size = mu_int_ovrhd + (mu_data->blk_size / DISK_BLOCK_SIZE) * mu_data->trans_hist.total_blks; native_size = gds_file_size(gv_cur_region->dyn.addr->file_cntl); /* In the following tests, the EOF block should always be 1 greater than the actual size of the file. * This is due to the GDS being allocated in even DISK_BLOCK_SIZE-byte blocks. */ if (native_size && (size < native_size)) { total_blks = (dba_mm == mu_data->acc_meth) ? cs_addrs->total_blks : cs_addrs->ti->total_blks; if (JNL_ENABLED(cs_addrs)) cs_addrs->jnl->pini_addr = 0; /* Stop simulation of GTM process journal record writing (if any active)*/ /* If journaling, gdsfilext will need to write an inctn record. The timestamp of that journal record will * need to be adjusted to the current system time to reflect that it is recovery itself writing that record * instead of simulating GT.M activity. Since the variable jgbl.dont_reset_gbl_jrec_time is still set, gdsfilext * will NOT modify jgbl.gbl_jrec_time. 
Temporarily reset it to allow for adjustments to gbl_jrec_time. */ assert(jgbl.dont_reset_gbl_jrec_time); jgbl.dont_reset_gbl_jrec_time = FALSE; /* Calculate the number of blocks to add based on the difference between the real file size and the file size * computed from the header->total_blks. Takes into account that gdsfilext() will automatically add new_bit_maps * to the amount of blocks we request. */ bplmap = cs_data->bplmap; new_blocks = (native_size - size)/(mu_data->blk_size / DISK_BLOCK_SIZE); new_bit_maps = DIVIDE_ROUND_UP(total_blks + new_blocks, bplmap) - DIVIDE_ROUND_UP(total_blks, bplmap); if (SS_NORMAL != (status = gdsfilext(new_blocks - new_bit_maps, total_blks))) { jgbl.dont_reset_gbl_jrec_time = TRUE; return (status); } jgbl.dont_reset_gbl_jrec_time = TRUE; DEBUG_ONLY( /* Check that the filesize and blockcount in the fileheader match now after the extend */ size = mu_int_ovrhd + (mu_data->blk_size / DISK_BLOCK_SIZE) * mu_data->trans_hist.total_blks; native_size = gds_file_size(gv_cur_region->dyn.addr->file_cntl); assert(size == native_size); ) }