/* Remove process block(s) for "pid" from the pending queue of lock node "d", returning each
 * freed block to the head of the ctl->prcfree list.  All links are relative pointers managed
 * with the R2A/A2R macros.  A pid of 0 is the sweep mode used by mlk_shrclean: it removes every
 * pending entry whose process_id was previously zeroed (dead waiters).  Caller is expected to
 * hold the lock-space critical section.
 */
void mlk_prcblk_delete(mlk_ctldata_ptr_t ctl, mlk_shrblk_ptr_t d, uint4 pid)
{
	mlk_prcblk_ptr_t	pr;	/* current process block under examination */
	sm_int_ptr_t		prpt;	/* address of the relative link that points at pr */

	for (prpt = &d->pending; *prpt; )
	{
		pr = (mlk_prcblk_ptr_t)R2A(*prpt);
		/* Short-circuit matters here: ref_cnt is decremented only when the pid matches */
		if ((pr->process_id == pid) && (--pr->ref_cnt <= 0))
		{
			pr->ref_cnt = 0;
			/* Unlink pr from the pending chain; *prpt now addresses pr's successor */
			if (pr->next == 0)
				*prpt = 0;
			else
				A2R(*prpt, R2A(pr->next));
			/* Scrub the block and push it onto the free list */
			memset(pr, 0, sizeof(*pr));
			A2R(pr->next, R2A(ctl->prcfree));
			A2R(ctl->prcfree, pr);
			assert(ctl->prcfree >= 0);
			ctl->prccnt++;
			if (0 != pid)	/* for a specific pid stop at the first drained entry;
					 * pid 0 keeps sweeping the whole chain */
				break;
		} else
			prpt = &pr->next;
	}
	return;
}
/* GT.CM server-side handler for a remote LKE SHOW request.  Takes a private snapshot of the
 * region's lock space (under crit, so the copy is consistent), walks the copied lock tree with
 * lke_showtree to stream the display back over the connection, then replies with CMMS_U_LKESHOW.
 * Returns CM_READ so the server resumes reading from the link.
 */
char gtcmtr_lke_showrep(struct CLB *lnk, show_request *sreq)
{
	gd_region	*cur_region;
	sgmnt_addrs	*cs_adr;
	mlk_ctldata	*lke_ctl;	/* heap copy of the lock space; freed before returning */
	ssize_t		ls_len;
	mstr		dnode;
	show_reply	srep;
	uint4		status;
	boolean_t	was_crit;

	cur_region = gv_cur_region = gtcm_find_region(curr_entry, sreq->rnum)->reghead->reg;
	if (dba_bg == cur_region->dyn.addr->acc_meth || dba_mm == cur_region->dyn.addr->acc_meth)
	{
		cs_adr = &FILE_INFO(cur_region)->s_addrs;
		/* lock_addrs[0]/[1] bound the lock space; copy the whole thing locally */
		ls_len = cs_adr->lock_addrs[1] - cs_adr->lock_addrs[0];
		lke_ctl = (mlk_ctldata *)malloc(ls_len);
		/* Prevent any modification of the lock space while we make a local copy of it */
		if (cs_adr->critical != NULL)
			crash_count = cs_adr->critical->crashcnt;
		was_crit = cs_adr->now_crit;
		if (!was_crit)
			grab_crit(cur_region);
		longcpy((uchar_ptr_t)lke_ctl, cs_adr->lock_addrs[0], ls_len);
		if (!was_crit)
			rel_crit(cur_region);
		util_cm_print(lnk, 0, NULL, RESET);
		dnode.len = sreq->nodelength;
		dnode.addr = sreq->node;
		if (lke_ctl->blkroot != 0)
			(void)lke_showtree(lnk, (mlk_shrblk_ptr_t)R2A(lke_ctl->blkroot), sreq->all, sreq->wait,
					sreq->pid, dnode, FALSE, NULL);
		free(lke_ctl);
	}
	/* Send the single-code reply back to the client */
	srep.code = CMMS_U_LKESHOW;
	lnk->cbl = SIZEOF(srep.code);
	lnk->ast = NULL;
#ifndef vinu_marker
	assert(0 == offsetof(show_reply, code));
	lnk->mbf = (unsigned char *)&srep;	/* no need since lnk->mbf can be re-used. vinu 06/27/01 */
	status = cmi_write(lnk);
	if (CMI_ERROR(status))
	{	/* This routine is a server routine; not sure why it does error processing similar to a client. vinu 06/27/01 */
		((link_info *)(lnk->usr))->neterr = TRUE;
		gvcmz_error(CMMS_U_LKESHOW, status);
	} else
		lnk->mbf = (unsigned char *)sreq;	/* don't restore if lnk->mbf isn't modified. vinu 06/27/01 */
#else
	/* server calls to cmi_* should do error processing as a callback. vinu 06/27/01 */
	*lnk->mbf = srep.code;
	cmi_write(lnk);
#endif
	return CM_READ;
}
/* Register process "pid" as a waiter on lock node "d".  If the pid is already queued, its
 * reference count is simply incremented.  Otherwise a block is popped from ctl->prcfree and
 * appended at the tail of d's pending chain; if the free list is empty, mlk_shrclean is run
 * first to reclaim blocks from dead processes (a silent return follows if that yields nothing).
 * The lcnt bound (lock_space_size / PRC_FACTOR) guards the walk against a corrupted/circular
 * pending chain; exhausting it is treated as fatal via GTMASSERT.
 */
void mlk_prcblk_add(gd_region *reg, mlk_ctldata_ptr_t ctl, mlk_shrblk_ptr_t d, uint4 pid)
{
	mlk_prcblk_ptr_t	pr;
	sm_int_ptr_t		prpt;	/* on loop exit, addresses the terminating (zero) link */
	int			lcnt;	/* loop bound against chain corruption */

	for (prpt = &d->pending, lcnt = FILE_INFO(reg)->s_addrs.hdr->lock_space_size / PRC_FACTOR;
		*prpt && lcnt; prpt = &pr->next, lcnt--)
	{
		pr = (mlk_prcblk_ptr_t)R2A(*prpt);
		if (pr->process_id == pid)
		{	/* already waiting; just bump the reference count */
			pr->ref_cnt++;
			return;
		}
	}
	if (!lcnt)
		GTMASSERT;
	if (ctl->prccnt < 1)
	{	/* free list exhausted: try to reclaim blocks left behind by dead processes */
		mlk_shrclean(reg, ctl, (mlk_shrblk_ptr_t)R2A(ctl->blkroot));
		if (ctl->prccnt < 1)
			return;		/* still nothing available; caller gets no pending entry */
	}
	/* Pop the head of the free list */
	ctl->prccnt--;
	pr = (mlk_prcblk_ptr_t)R2A(ctl->prcfree);
	if (0 == pr->next)
	{	/* that was the last free block */
		assert(0 == ctl->prccnt);
		ctl->prcfree = 0;
	} else
		A2R(ctl->prcfree, R2A(pr->next));
	/* Link the new waiter at the tail position found above and initialize it */
	A2R(*prpt, pr);
	pr->process_id = pid;
	pr->ref_cnt = 1;
	pr->next = pr->unlock = 0;
	return;
}
/* Garbage-collect the sibling ring containing lock node "d" (recursing into children first):
 * zero out pending entries whose waiting process has died, sweep those entries back to the free
 * list via mlk_prcblk_delete(..., 0), release locks whose owner has died, and delete any node
 * left completely empty.  max_loop_tries (an over-estimate of the number of shrblks that could
 * fit in the lock space) bounds the ring walk against corruption.
 * NOTE(review): the PENDING_PROC_ALIVE/PROC_ALIVE macros are treated here as true when the
 * process is GONE (per the original in-line comments); confirm against the macro definitions.
 */
void mlk_shrclean(gd_region *region, mlk_ctldata_ptr_t ctl, mlk_shrblk_ptr_t d)
{
	mlk_shrblk_ptr_t	d0, d1;		/* d0 = node being cleaned, d1 = its right sibling */
	mlk_prcblk_ptr_t	p;
	int4			status, lcnt = 0, max_loop_tries;
	unsigned int		time[2], icount;	/* scratch outputs for the *_ALIVE macros */
	bool			delete_status;
	sgmnt_addrs		*csa;

	max_loop_tries = (int4)(((sm_uc_ptr_t)R2A(ctl->subtop) - (sm_uc_ptr_t)ctl) / SIZEOF(mlk_shrblk));
		/* although more than the actual, it is better than underestimating */
	for (d = d0 = (mlk_shrblk_ptr_t)R2A(d->rsib), d1 = NULL; d != d1 && max_loop_tries > lcnt; lcnt++)
	{
		delete_status = FALSE;
		if (d0->children)
			mlk_shrclean(region, ctl, (mlk_shrblk_ptr_t)R2A(d0->children));
		d1 = (mlk_shrblk_ptr_t)R2A(d0->rsib);	/* capture successor before d0 may be deleted */
		if (d0->pending)
		{	/* zero the entries of dead waiters so the pid-0 sweep below reclaims them */
			for (p = (mlk_prcblk_ptr_t)R2A(d0->pending); ; p = (mlk_prcblk_ptr_t)R2A(p->next))
			{
				if (PENDING_PROC_ALIVE(p, time, icount, status))
				{	/* process pending does not exist, free prcblk */
					p->process_id = 0;
					p->ref_cnt = 0;
				}
				if (p->next == 0)
					break;
			}
		}
		mlk_prcblk_delete(ctl, d0, 0);
		if (d0->owner)
		{
			if (PROC_ALIVE(d0, time, icount, status))
			{	/* process that owned lock has left image, free lock */
				csa = &FILE_INFO(region)->s_addrs;
				d0->owner = 0;
				d0->sequence = csa->hdr->trans_hist.lock_sequence++;
				delete_status = mlk_shrblk_delete_if_empty(ctl, d0);
			}
		} else
			delete_status = mlk_shrblk_delete_if_empty(ctl, d0);
		if (delete_status && d0 == d)
		{	/* the ring's entry point was deleted; restart bookkeeping from the successor */
			d = d0 = (d0 == d1) ? NULL : d1;
			d1 = NULL;
		} else
			d0 = d1;
	}
	return;
}
/* GT.CM server-side handler for a remote LKE CLEAR request.  Unlike the SHOW handler, this
 * operates directly on the shared lock space (no snapshot), holding crit across the whole
 * lke_cleartree pass.  Replies with CMMS_U_LKEDELETE and returns CM_READ so the server resumes
 * reading from the link.
 * NOTE(review): crit is grabbed unconditionally here, with no was_crit/now_crit check as in
 * gtcmtr_lke_showrep — verify the server can never already hold crit on entry.
 */
char gtcmtr_lke_clearrep(struct CLB *lnk, clear_request *creq)
{
	gd_region	*cur_region;
	sgmnt_addrs	*cs_adr;
	mlk_ctldata_ptr_t	lke_ctl;	/* points directly at the shared lock space */
	mstr		dnode;
	show_reply	srep;			/* reply carries only the code field */
	uint4		status;

	cur_region = gv_cur_region = gtcm_find_region(curr_entry, creq->rnum)->reghead->reg;
	if (dba_bg == cur_region->dyn.addr->acc_meth || dba_mm == cur_region->dyn.addr->acc_meth)
	{
		cs_adr = &FILE_INFO(cur_region)->s_addrs;
		lke_ctl = (mlk_ctldata_ptr_t)cs_adr->lock_addrs[0];
		util_cm_print(lnk, 0, NULL, RESET);
		dnode.len = creq->nodelength;
		dnode.addr = creq->node;
		if (cs_adr->critical != NULL)
			crash_count = cs_adr->critical->crashcnt;
		/* Hold crit for the duration of the clear so the tree cannot change under us */
		grab_crit(cur_region);
		if (lke_ctl->blkroot != 0)
			lke_cleartree(cur_region, lnk, lke_ctl, (mlk_shrblk_ptr_t)R2A(lke_ctl->blkroot),
					creq->all, creq->interactive, creq->pid, dnode);
		rel_crit(cur_region);
	}
	/* Send the single-code reply back to the client */
	srep.code = CMMS_U_LKEDELETE;
	lnk->cbl = SIZEOF(srep.code);	/* SIZEOF (project macro) for consistency with gtcmtr_lke_showrep */
	lnk->ast = NULL;
#ifndef vinu_marker
	assert(0 == offsetof(show_reply, code));
	lnk->mbf = (unsigned char *)&srep;	/* no need since lnk->mbf can be re-used. vinu 06/27/01 */
	status = cmi_write(lnk);
	if (CMI_ERROR(status))
	{	/* This routine is a server routine; not sure why it does error processing similar to a client. vinu 06/27/01 */
		((link_info *)(lnk->usr))->neterr = TRUE;
		gvcmz_error(CMMS_U_LKEDELETE, status);
	} else
		lnk->mbf = (unsigned char *)creq;	/* don't restore if lnk->mbf isn't modified. vinu 06/27/01 */
#else
	/* server calls to cmi_* should do error processing as a callback. vinu 06/27/01 */
	*lnk->mbf = srep.code;
	cmi_write(lnk);
#endif
	return CM_READ;
}
/* Delete lock node "d" if it is completely empty (no children, no owner, no waiters): unlink it
 * from its sibling ring and from its parent's/root's child pointer, clear the back-pointer in
 * its subscript value block, then scrub the node and push it onto ctl->blkfree.
 * Returns TRUE when the node was deleted, FALSE when it was still in use.
 */
bool mlk_shrblk_delete_if_empty(mlk_ctldata_ptr_t ctl, mlk_shrblk_ptr_t d)
{
	mlk_shrblk_ptr_t	r, l, p;	/* right sibling, left sibling, parent (later reused for free head) */
	mlk_shrsub_ptr_t	sub;

	if (d->children != 0 || d->owner != 0 || d->pending != 0)
		return FALSE;
	if (d->parent == 0)
		p = NULL;
	else
		p = (mlk_shrblk_ptr_t)R2A(d->parent);
	l = (mlk_shrblk_ptr_t)R2A(d->lsib);
	r = (mlk_shrblk_ptr_t)R2A(d->rsib);
	/* Note: the else-less looking nesting below is correct — each "else" binds to the nearest if */
	if (d == r)
		/* d is the only node in its ring: detach the whole level */
		if (p == NULL)
			ctl->blkroot = 0;
		else
			p->children = 0;
	else
	{	/* splice d out of the ring, repointing parent/root if they referenced d */
		assert(d != l);
		A2R(r->lsib, l);
		A2R(l->rsib, r);
		if (p != NULL && (mlk_shrblk_ptr_t)R2A(p->children) == d)
			A2R(p->children, r);
		else if ((mlk_shrblk_ptr_t)R2A(ctl->blkroot) == d)
			A2R(ctl->blkroot, r);
	}
	/* Drop the subscript block's reference to this node */
	sub = (mlk_shrsub_ptr_t)R2A(d->value);
	PUT_ZERO(sub->backpointer);
	/* Scrub the node and push it onto the head of the free list (rsib doubles as free link) */
	p = (mlk_shrblk_ptr_t)R2A(ctl->blkfree);
	memset(d, 0, SIZEOF(mlk_shrblk));
	A2R(d->rsib, p);
	A2R(ctl->blkfree, d);
	++ctl->blkcnt;
	return TRUE;
}
/* LKE SHOW command driver (variant that also reports lock-space usage).  Parses the command
 * qualifiers, then for every matching open region takes a private snapshot of the lock space
 * (under crit unless -NOCRIT), displays the lock tree, and reports how full each lock-space
 * subspace (node blocks, process blocks, subscript strings) is, warning when nearly exhausted.
 * GT.CM regions are rejected unless built with LKE_WORKS_OK_WITH_CM.
 */
void lke_show(void)
{
	bool		locks, all = TRUE, wait = TRUE, interactive = FALSE, match = FALSE, memory = TRUE, nocrit = TRUE;
	boolean_t	exact = FALSE, was_crit;
	int4		pid;
	size_t		ls_len;
	int		n;
	char		regbuf[MAX_RN_LEN], nodebuf[32], one_lockbuf[MAX_KEY_SZ];
	mlk_ctldata_ptr_t	ctl;	/* heap snapshot of the region's lock space */
	mstr		reg, node, one_lock;
	int		shr_sub_len = 0;	/* total subscript-string bytes in use (filled by lke_showtree) */
	float		ls_free = 0;	/* Free space in bottleneck subspace */

	/* Get all command parameters */
	reg.addr = regbuf;
	reg.len = SIZEOF(regbuf);
	node.addr = nodebuf;
	node.len = SIZEOF(nodebuf);
	one_lock.addr = one_lockbuf;
	one_lock.len = SIZEOF(one_lockbuf);
	if (lke_getcli(&all, &wait, &interactive, &pid, &reg, &node, &one_lock, &memory, &nocrit, &exact) == 0)
		return;
	/* Search all regions specified on the command line */
	for (gv_cur_region = gd_header->regions, n = 0; n != gd_header->n_regions; ++gv_cur_region, ++n)
	{
		/* If region matches and is open */
		if ((reg.len == 0
				|| gv_cur_region->rname_len == reg.len && memcmp(gv_cur_region->rname, reg.addr, reg.len) == 0)
			&& gv_cur_region->open)
		{
			match = TRUE;
			util_out_print("!/!AD!/", NOFLUSH, REG_LEN_STR(gv_cur_region));
			/* If distributed database, the region is located on another node */
			if (gv_cur_region->dyn.addr->acc_meth == dba_cm)
			{
#if defined(LKE_WORKS_OK_WITH_CM)
				/* Obtain lock info from the remote node */
				locks = gtcmtr_lke_showreq(gv_cur_region->dyn.addr->cm_blk, gv_cur_region->cmx_regnum,
							all, wait, pid, &node);
#else
				gtm_putmsg(VARLSTCNT(10) ERR_UNIMPLOP, 0, ERR_TEXT, 2,
					LEN_AND_LIT("GT.CM region - locks must be displayed on the local node"),
					ERR_TEXT, 2, REG_LEN_STR(gv_cur_region));
				continue;
#endif
			} else if (gv_cur_region->dyn.addr->acc_meth == dba_bg || gv_cur_region->dyn.addr->acc_meth == dba_mm)
			{	/* Local region */
				cs_addrs = &FILE_INFO(gv_cur_region)->s_addrs;
				ls_len = (size_t)(cs_addrs->lock_addrs[1] - cs_addrs->lock_addrs[0]);
				ctl = (mlk_ctldata_ptr_t)malloc(ls_len);
				/* Prevent any modification of the lock space while we make a local copy of it */
				if (cs_addrs->critical != NULL)
					crash_count = cs_addrs->critical->crashcnt;
				was_crit = cs_addrs->now_crit;
				if (!nocrit && !was_crit)
					grab_crit(gv_cur_region);
				longcpy((uchar_ptr_t)ctl, (uchar_ptr_t)cs_addrs->lock_addrs[0], ls_len);
				assert((ctl->max_blkcnt > 0) && (ctl->max_prccnt > 0) && ((ctl->subtop - ctl->subbase) > 0));
				if (!nocrit && !was_crit)
					rel_crit(gv_cur_region);
				shr_sub_len = 0;
				locks = ctl->blkroot == 0 ? FALSE
					: lke_showtree(NULL, (mlk_shrblk_ptr_t)R2A(ctl->blkroot), all, wait, pid,
							one_lock, memory, &shr_sub_len);
				/* lock space usage consists of: control_block + nodes(locks) + processes + substrings */
				/* any of those subspaces can be bottleneck.
				 * Therefore we will report the subspace which is running out. */
				ls_free = MIN(((float)ctl->blkcnt) / ctl->max_blkcnt, ((float)ctl->prccnt) / ctl->max_prccnt);
				ls_free = MIN(1 - (((float)shr_sub_len) / (ctl->subtop - ctl->subbase)), ls_free);
				ls_free *= 100;	/* Scale to [0-100] range. (couldn't do this inside util_out_print) */
				if (ls_free < 1)	/* No memory? Notify user. */
					gtm_putmsg(VARLSTCNT(4) ERR_LOCKSPACEFULL, 2, DB_LEN_STR(gv_cur_region));
				if (ls_free < 1 || memory)
				{	/* the " not " / " " literal distinguishes whether subscript space ran out */
					if (ctl->subtop > ctl->subfree)
						gtm_putmsg(VARLSTCNT(10) ERR_LOCKSPACEINFO, 8, REG_LEN_STR(gv_cur_region),
							(ctl->max_prccnt - ctl->prccnt), ctl->max_prccnt,
							(ctl->max_blkcnt - ctl->blkcnt), ctl->max_blkcnt,
							LEN_AND_LIT(" not "));
					else
						gtm_putmsg(VARLSTCNT(10) ERR_LOCKSPACEINFO, 8, REG_LEN_STR(gv_cur_region),
							(ctl->max_prccnt - ctl->prccnt), ctl->max_prccnt,
							(ctl->max_blkcnt - ctl->blkcnt), ctl->max_blkcnt,
							LEN_AND_LIT(" "));
				}
				free(ctl);
			} else
			{
				gtm_putmsg(VARLSTCNT(2) ERR_BADREGION, 0);
				locks = TRUE;
			}
			if (!locks)
			{
				gtm_putmsg(VARLSTCNT(4) ERR_NOLOCKMATCH, 2, REG_LEN_STR(gv_cur_region));
			}
			/* NOTE(review): this usage message also runs for the BADREGION branch, where
			 * cs_addrs may be stale from a previous iteration — verify intended */
			assert((ls_free <= 100) && (ls_free >= 0));
			gtm_putmsg(VARLSTCNT(4) ERR_LOCKSPACEUSE, 2, ((int)ls_free),
				cs_addrs->hdr->lock_space_size / OS_PAGELET_SIZE);
		}
	}
	if (!match && reg.len != 0)
		rts_error(VARLSTCNT(4) ERR_NOREGION, 2, reg.len, reg.addr);
}
/* Walk the whole lock tree rooted at "tree" iteratively (start[]/subscript_offset[] act as an
 * explicit stack of up to KDIM levels), displaying each node with lke_showlock, clearing it via
 * lke_clearlock when it is held, waking its waiters, and deleting nodes that become empty as
 * the traversal backs out of them.  Returns TRUE if any held lock was encountered.
 * Deletion-during-iteration is handled by capturing a node's sibling before deleting it and by
 * repointing start[depth] when the ring's entry node itself is removed.
 * Note: "name" is static, so this routine is not reentrant.
 */
bool lke_cleartree(
	gd_region	*region,
	struct CLB	*lnk,
	mlk_ctldata_ptr_t ctl,
	mlk_shrblk_ptr_t tree,
	bool		all,
	bool		interactive,
	int4		pid,
	mstr		one_lock)
{
	mlk_shrblk_ptr_t	node, oldnode, start[KDIM];	/* start[d] = first node visited at depth d */
	unsigned char		subscript_offset[KDIM];		/* name length prefix at each depth */
	static char		name_buffer[SDIM];
	static MSTR_DEF(name, 0, name_buffer);
	int			depth = 0;
	bool			locks = FALSE, locked, deleted;
	error_def(ERR_CTRLC);

	node = start[0] = tree;
	subscript_offset[0] = 0;
	for (;;)
	{
		name.len = subscript_offset[depth];
		/* Display the lock node */
		locked = lke_showlock(lnk, node, &name, all, FALSE, interactive, pid, one_lock);
		locks |= locked;
		/* If it was locked, clear it and wake up any processes waiting for it */
		if (locked && lke_clearlock(region, lnk, ctl, node, &name, all, interactive, pid) && node->pending != 0)
			mlk_wake_pending(ctl, node, region);
		/* Move to the next node */
		if (node->children == 0)
		{	/* This node has no children, so move to the right */
			oldnode = node;
			node = (mlk_shrblk_ptr_t)R2A(node->rsib);
			while (node == start[depth])
			{	/* There are no more siblings to the right at this depth, so move up and then right */
				if (node->parent == 0)
				{	/* We're already at the top, so we're done */
					assert(depth == 0);
					(void)mlk_shrblk_delete_if_empty(ctl, node);
					return locks;
				}
				--depth;
				node = (mlk_shrblk_ptr_t)R2A(node->parent);
				(void)mlk_shrblk_delete_if_empty(ctl, oldnode);
				oldnode = node;
				node = (mlk_shrblk_ptr_t)R2A(node->rsib);
			}
			/* keep start[depth] valid if the level's entry node was just deleted */
			deleted = mlk_shrblk_delete_if_empty(ctl, oldnode);
			if (deleted && start[depth] == oldnode)
				start[depth] = node;
		} else
		{	/* This node has children, so move down */
			++depth;
			node = start[depth] = (mlk_shrblk_ptr_t)R2A(node->children);
			subscript_offset[depth] = name.len;
		}
		if (util_interrupt)
			rts_error(VARLSTCNT(1) ERR_CTRLC);
	}
}
/* LKE SHOW command driver (older variant without lock-space usage reporting).
 * NOTE(review): a second, newer definition of lke_show appears earlier in this file — these look
 * like two revisions concatenated; a single translation unit cannot contain both.
 * Parses the command qualifiers, then for every matching open region takes a private snapshot
 * of the lock space (under crit unless -NOCRIT) and displays the lock tree.  GT.CM regions are
 * rejected unless built with LKE_WORKS_OK_WITH_CM.
 */
void lke_show(void)
{
	bool		locks, all = TRUE, wait = TRUE, interactive = FALSE, match = FALSE, memory = TRUE, nocrit = TRUE;
	boolean_t	exact = FALSE, was_crit;
	int4		pid;
	size_t		ls_len;
	int		n;
	char		regbuf[MAX_RN_LEN], nodebuf[32], one_lockbuf[MAX_KEY_SZ];
	mlk_ctldata_ptr_t	ctl;	/* heap snapshot of the region's lock space */
	mstr		reg, node, one_lock;
	error_def(ERR_UNIMPLOP);
	error_def(ERR_TEXT);

	/* Get all command parameters */
	reg.addr = regbuf;
	reg.len = SIZEOF(regbuf);
	node.addr = nodebuf;
	node.len = SIZEOF(nodebuf);
	one_lock.addr = one_lockbuf;
	one_lock.len = SIZEOF(one_lockbuf);
	if (lke_getcli(&all, &wait, &interactive, &pid, &reg, &node, &one_lock, &memory, &nocrit, &exact) == 0)
		return;
	/* Search all regions specified on the command line */
	for (gv_cur_region = gd_header->regions, n = 0; n != gd_header->n_regions; ++gv_cur_region, ++n)
	{
		/* If region matches and is open */
		if ((reg.len == 0
				|| gv_cur_region->rname_len == reg.len && memcmp(gv_cur_region->rname, reg.addr, reg.len) == 0)
			&& gv_cur_region->open)
		{
			match = TRUE;
			util_out_print("!/!AD!/", NOFLUSH, REG_LEN_STR(gv_cur_region));
			/* If distributed database, the region is located on another node */
			if (gv_cur_region->dyn.addr->acc_meth == dba_cm)
			{
#if defined(LKE_WORKS_OK_WITH_CM)
				/* Obtain lock info from the remote node */
				locks = gtcmtr_lke_showreq(gv_cur_region->dyn.addr->cm_blk, gv_cur_region->cmx_regnum,
							all, wait, pid, &node);
#else
				gtm_putmsg(VARLSTCNT(10) ERR_UNIMPLOP, 0, ERR_TEXT, 2,
					LEN_AND_LIT("GT.CM region - locks must be displayed on the local node"),
					ERR_TEXT, 2, REG_LEN_STR(gv_cur_region));
				continue;
#endif
			} else if (gv_cur_region->dyn.addr->acc_meth == dba_bg || gv_cur_region->dyn.addr->acc_meth == dba_mm)
			{	/* Local region */
				cs_addrs = &FILE_INFO(gv_cur_region)->s_addrs;
				ls_len = (size_t)(cs_addrs->lock_addrs[1] - cs_addrs->lock_addrs[0]);
				ctl = (mlk_ctldata_ptr_t)malloc(ls_len);
				/* Prevent any modification of the lock space while we make a local copy of it */
				if (cs_addrs->critical != NULL)
					crash_count = cs_addrs->critical->crashcnt;
				was_crit = cs_addrs->now_crit;
				if (!nocrit && !was_crit)
					grab_crit(gv_cur_region);
				longcpy((uchar_ptr_t)ctl, (uchar_ptr_t)cs_addrs->lock_addrs[0], ls_len);
				if (!nocrit && !was_crit)
					rel_crit(gv_cur_region);
				locks = ctl->blkroot == 0 ? FALSE
					: lke_showtree(NULL, (mlk_shrblk_ptr_t)R2A(ctl->blkroot), all, wait, pid,
							one_lock, memory);
				free(ctl);
			} else
			{
				util_out_print(NULL, RESET);
				util_out_print("Region is not BG, MM, or CM", FLUSH);
				locks = TRUE;
			}
			if (!locks)
			{
				util_out_print(NULL, RESET);
				util_out_print("No locks were found in !AD", FLUSH, REG_LEN_STR(gv_cur_region));
			}
		}
	}
	if (!match && reg.len != 0)
		rts_error(VARLSTCNT(4) ERR_NOREGION, 2, reg.len, reg.addr);
}
/* Wake process(es) waiting on lock node "d" after its owner released it.  Bumps the node's
 * sequence so stale private copies are detected, then walks the pending chain: on clustered BG
 * databases, local-node waiters are woken via DO_CRIT_WAKE and remote pids are parked in
 * ctl->clus_pids for ccp_cluster_lock_wake; otherwise DO_CRIT_WAKE wakes one process at a time,
 * advancing only while the target is GONE.  The lcnt bound (lock_space_size / PRC_FACTOR)
 * guards against a corrupted/circular chain; exhausting it is fatal via GTMASSERT.
 * Note: DO_CRIT_WAKE operates on the locals declared here (pr, crit_wake_res, ...) by name.
 */
void mlk_wake_pending(mlk_ctldata_ptr_t ctl, mlk_shrblk_ptr_t d, gd_region *reg)
{
	mlk_prcblk_ptr_t	next, pr;
	sm_uint_ptr_t		empty_slot, ctop;	/* scan cursor/limit over the cluster pid table */
	sgmnt_addrs		*csa;
	boolean_t		remote_pid;
	int			crit_wake_res;	/* also used in macro DO_CRIT_WAKE */
	int			lcnt;		/* loop bound against chain corruption */

	csa = &FILE_INFO(reg)->s_addrs;
	if (!d->pending)
		return;
	ctl->wakeups++;
	/* Before updating d->sequence ensure there is no process owning this lock, since otherwise when the owner process attempts
	 * to release the lock it will fail as its private copy of "p->sequence" will not match the shared memory "d->sequence".
	 */
	assert(!d->owner);
	d->sequence = csa->hdr->trans_hist.lock_sequence++;	/* This node is being awakened (GTCM) */
	BG_TRACE_PRO_ANY(csa, mlock_wakeups);	/* Record halted slumbers */
	if (reg->dyn.addr->acc_meth == dba_bg && csa->hdr->clustered)
	{
		remote_pid = FALSE;
		/* find the first empty slot in the cluster pid table */
		for (empty_slot = ctl->clus_pids, ctop = &ctl->clus_pids[NUM_CLST_LCKS - 1];
			*empty_slot && empty_slot <= ctop; empty_slot++)
			;
		for (pr = (mlk_prcblk_ptr_t)R2A(d->pending), lcnt = csa->hdr->lock_space_size / PRC_FACTOR; lcnt; lcnt--)
		{
			next = (pr->next) ? (mlk_prcblk_ptr_t)R2A(pr->next) : 0;	/* in case it's deleted */
			if ((pr->process_id & NODENUMBER) == (process_id & NODENUMBER))
			{	/* waiter is on this cluster node: wake it directly */
				DO_CRIT_WAKE;
			} else if (empty_slot <= ctop)
			{	/* waiter is on another node: queue its pid for a cluster wake */
				remote_pid = TRUE;
				*empty_slot = pr->process_id;
				empty_slot++;
			}
			if (next)
				pr = next;
			else
				break;
		}
		if (remote_pid)
			ccp_cluster_lock_wake(reg);
	} else
	{
		for (pr = (mlk_prcblk_ptr_t)R2A(d->pending), lcnt = csa->hdr->lock_space_size / PRC_FACTOR; lcnt; lcnt--)
		{
			next = (pr->next) ? (mlk_prcblk_ptr_t)R2A(pr->next) : 0;	/* in case it's deleted */
			DO_CRIT_WAKE;	/* Wake one process to keep things orderly; if it loses its way, others
					 * will jump in after a timeout */
			if (GONE == crit_wake_res && next)
				pr = next;
			else
				break;
		}
	}
	if (!lcnt)
		GTMASSERT;
	return;
}
/* LKE CLEAR command driver.  Parses the command qualifiers, then for every matching open region
 * clears locks directly in the shared lock space (no snapshot), holding crit across the whole
 * lke_cleartree pass.  GT.CM regions are rejected unless built with LKE_WORKS_OK_WITH_CM.
 * NOTE(review): the lke_cleartree call below passes 9 arguments (including "exact"), but the
 * lke_cleartree definition earlier in this file declares 8 parameters — these appear to come
 * from different revisions; verify against the matching prototype.
 */
void lke_clear(void)
{
	bool		locks, all = TRUE, wait = FALSE, interactive = TRUE, match = FALSE, memory = FALSE, nocrit = FALSE;
	boolean_t	exact = TRUE, was_crit;
	int4		pid;
	int		n;
	char		regbuf[MAX_RN_LEN], nodebuf[32], one_lockbuf[MAX_KEY_SZ];
	mlk_ctldata_ptr_t	ctl;	/* points directly at the shared lock space */
	mstr		reg, node, one_lock;

	/* Get all command parameters */
	reg.addr = regbuf;
	reg.len = SIZEOF(regbuf);
	node.addr = nodebuf;
	node.len = SIZEOF(nodebuf);
	one_lock.addr = one_lockbuf;
	one_lock.len = SIZEOF(one_lockbuf);
	if (lke_getcli(&all, &wait, &interactive, &pid, &reg, &node, &one_lock, &memory, &nocrit, &exact) == 0)
		return;
	/* Search all regions specified on the command line */
	for (gv_cur_region = gd_header->regions, n = 0; n != gd_header->n_regions; ++gv_cur_region, ++n)
	{
		/* If region matches and is open */
		if ((reg.len == 0
				|| gv_cur_region->rname_len == reg.len && memcmp(gv_cur_region->rname, reg.addr, reg.len) == 0)
			&& gv_cur_region->open)
		{
			match = TRUE;
			util_out_print("!/!AD!/", NOFLUSH, REG_LEN_STR(gv_cur_region));
			/* If distributed database, the region is located on another node */
			if (gv_cur_region->dyn.addr->acc_meth == dba_cm)
			{
#if defined(LKE_WORKS_OK_WITH_CM)
				/* Remote lock clears are not supported, so LKE CLEAR -EXACT qualifier
				 * will not be supported on GT.CM.*/
				locks = gtcmtr_lke_clearreq(gv_cur_region->dyn.addr->cm_blk, gv_cur_region->cmx_regnum,
							all, interactive, pid, &node);
#else
				gtm_putmsg(VARLSTCNT(10) ERR_UNIMPLOP, 0, ERR_TEXT, 2,
					LEN_AND_LIT("GT.CM region - locks must be cleared on the local node"),
					ERR_TEXT, 2, REG_LEN_STR(gv_cur_region));
				continue;
#endif
			} else if ((dba_bg == gv_cur_region->dyn.addr->acc_meth) || (dba_mm == gv_cur_region->dyn.addr->acc_meth))
			{	/* Local region */
				cs_addrs = &FILE_INFO(gv_cur_region)->s_addrs;
				ctl = (mlk_ctldata_ptr_t)cs_addrs->lock_addrs[0];
				/* Prevent any modifications of locks while we are clearing */
				if (cs_addrs->critical != NULL)
					crash_count = cs_addrs->critical->crashcnt;
				was_crit = cs_addrs->now_crit;
				if (!was_crit)
					grab_crit(gv_cur_region);
				locks = ctl->blkroot == 0 ? FALSE
					: lke_cleartree(gv_cur_region, NULL, ctl, (mlk_shrblk_ptr_t)R2A(ctl->blkroot),
							all, interactive, pid, one_lock, exact);
				if (!was_crit)
					rel_crit(gv_cur_region);
			} else
			{
				gtm_putmsg(VARLSTCNT(2) ERR_BADREGION, 0);
				locks = TRUE;
			}
			if (!locks)
			{
				gtm_putmsg(VARLSTCNT(4) ERR_NOLOCKMATCH, 2, REG_LEN_STR(gv_cur_region));
			}
		}
	}
	if (!match && reg.len != 0)
		rts_error(VARLSTCNT(4) ERR_NOREGION, 2, reg.len, reg.addr);
}