/*
 * Cut and replace a section of a dynarray.
 * @pda: dynamic array to splice.
 * @off: offset into array to begin splice.
 * @nrmv: number of items to remove.
 * @base: start array to splice from.
 * @nadd: number of new items to splice into the array.
 */
int
psc_dynarray_splice(struct psc_dynarray *pda, int off, int nrmv,
    const void *base, int nadd)
{
	int oldlen, rc, rem;
	void **p;

	oldlen = psc_dynarray_len(pda);
	psc_assert(nadd >= 0);
	psc_assert(nrmv >= 0);
	psc_assert(off + nrmv <= oldlen);
	rc = psc_dynarray_ensurelen(pda, oldlen + nadd - nrmv);
	if (rc)
		return (rc);

	p = pda->pda_items + off;
	if (nadd != nrmv) {
		/*
		 * Shift the tail into place; the destination is
		 * p + nadd whether the array grows or shrinks.
		 */
		rem = oldlen - off - nrmv;
		memmove(p + nadd, p + nrmv, rem * sizeof(void *));
		pda->pda_pos += nadd - nrmv;
	}
	memcpy(p, base, nadd * sizeof(void *));
	return (0);
}
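/*
 * Usage sketch (illustrative only; 'da' and the item pointers are
 * hypothetical, not from the original source): splice { A, B, C, D }
 * into { A, X, D } by removing two items at offset 1 and inserting
 * one.
 */
static void
splice_example(struct psc_dynarray *da, void *x)
{
	void *repl[] = { x };
	int rc;

	/* remove 2 items at index 1, insert the 1 item in 'repl' */
	rc = psc_dynarray_splice(da, 1, 2, repl, 1);
	psc_assert(rc == 0);
}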
void
slm_fcmh_dtor(struct fidc_membh *f)
{
	struct fcmh_mds_info *fmi;
	int rc, vfsid;

	fmi = fcmh_2_fmi(f);

	if (fcmh_isreg(f)) {
		psc_assert(psc_dynarray_len(&fmi->fmi_ptrunc_clients) == 0);
		psc_dynarray_free(&fmi->fmi_ptrunc_clients);
	}

	if (fcmh_isreg(f) || fcmh_isdir(f)) {
		/* XXX Need to worry about other modes here */
		if (!fmi->fmi_ctor_rc) {
			slfid_to_vfsid(fcmh_2_fid(f), &vfsid);
			rc = mdsio_release(vfsid, &rootcreds,
			    fcmh_2_mfh(f));
			psc_assert(rc == 0);
		}
	}

	if (fcmh_isdir(f)) {
		slfid_to_vfsid(fcmh_2_fid(f), &vfsid);
		rc = mdsio_release(vfsid, &rootcreds,
		    fcmh_2_dino_mfh(f));
		psc_assert(rc == 0);
	}

	if (fmi->fmi_inodeh.inoh_extras)
		PSCFREE(fmi->fmi_inodeh.inoh_extras);
}
void
pfl_heap_remove(struct pfl_heap *ph, void *p)
{
	struct pfl_heap_entry *phe, *che;
	void *c, *minc;
	int idx, i;

	psc_assert(ph->ph_nitems > 0);
	psc_assert(p);
	phe = PSC_AGP(p, ph->ph_entoff);

	/* replace the removed item with the last item in the heap */
	p = ph->ph_base[idx = phe->phe_idx] = ph->ph_base[--ph->ph_nitems];
	phe = PSC_AGP(p, ph->ph_entoff);
	phe->phe_idx = idx;

	/* bubble down */
	for (;;) {
		/* find the smaller of the (up to two) children */
		for (minc = p, idx = phe->phe_idx * 2 + 1, i = 0;
		    i < 2 && idx < ph->ph_nitems; idx++, i++) {
			c = ph->ph_base[idx];
			if (ph->ph_cmpf(c, minc) == -1)
				minc = c;
		}
		if (minc == p)
			break;
		che = PSC_AGP(minc, ph->ph_entoff);
		_pfl_heap_swap(ph, phe, che);
	}
}
void
psc_dynarray_swap(struct psc_dynarray *da, int a, int b)
{
	void *tmp;

	psc_assert(a >= 0);
	psc_assert(b >= 0);
	psc_assert(a < psc_dynarray_len(da));
	psc_assert(b < psc_dynarray_len(da));
	if (a != b)
		SWAP(da->pda_items[a], da->pda_items[b], tmp);
}
void
_bmap_op_done(const struct pfl_callerinfo *pci, struct bmap *b,
    const char *fmt, ...)
{
	va_list ap;

	BMAP_LOCK_ENSURE(b);
	psc_assert(!(b->bcm_flags & BMAPF_TOFREE));

	va_start(ap, fmt);
	_psclogv_pci(pci, SLSS_BMAP, 0, fmt, ap);
	va_end(ap);

	if (!psc_atomic32_read(&b->bcm_opcnt)) {
		b->bcm_flags |= BMAPF_TOFREE;
		DEBUG_BMAP(PLL_DIAG, b, "free bmap now");
		BMAP_ULOCK(b);

		/*
		 * Invoke service specific bmap cleanup callbacks:
		 * mds_bmap_destroy(), iod_bmap_finalcleanup(), and
		 * msl_bmap_final_cleanup().
		 */
		if (sl_bmap_ops.bmo_final_cleanupf)
			sl_bmap_ops.bmo_final_cleanupf(b);

		bmap_remove(b);
	} else {
		bmap_wake_locked(b);
		BMAP_ULOCK(b);
	}
}
/**
 * pflnet_getifnfordst - Obtain an interface name (e.g. eth0) for the
 *	given destination address.
 * @ifa0: base of ifaddrs list to directly compare for localhost.
 * @sa: destination address.
 * @ifn: value-result interface name to fill in.
 */
void
pflnet_getifnfordst(const struct ifaddrs *ifa0,
    const struct sockaddr *sa, char ifn[IFNAMSIZ])
{
	const struct sockaddr_in *sin;
	const struct ifaddrs *ifa;

	psc_assert(sa->sa_family == AF_INET);

	sin = (void *)sa;

	/*
	 * Scan interfaces for addr since netlink
	 * will always give us the lo interface.
	 */
	for (ifa = ifa0; ifa; ifa = ifa->ifa_next)
		if (ifa->ifa_addr->sa_family == sa->sa_family &&
		    memcmp(&sin->sin_addr,
		    &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr,
		    sizeof(sin->sin_addr)) == 0) {
			strlcpy(ifn, ifa->ifa_name, IFNAMSIZ);
			return;
		}

#ifdef HAVE_RTNETLINK
	pflnet_getifnfordst_rtnetlink(sa, ifn);
#elif defined(RTM_GET)
	pflnet_getifnfordst_rtsock(sa, ifn);
#else
	errno = ENOTSUP;
	psc_fatal("getifnfordst");
#endif
}
int
bcr_update_inodeinfo(struct bcrcupd *bcr)
{
	struct fidc_membh *f;
	struct stat stb;
	struct bmap *b;

	b = bcr_2_bmap(bcr);
	f = b->bcm_fcmh;

	if (bcr->bcr_crcup.fg.fg_fid == FID_ANY)
		return (EINVAL);

	psc_assert(bcr->bcr_crcup.fg.fg_fid == f->fcmh_fg.fg_fid);

	if (bcr->bcr_crcup.fg.fg_gen != f->fcmh_fg.fg_gen) {
		OPSTAT_INCR("brcupdate-stale");
		return (ESTALE);
	}

	if ((f->fcmh_flags & FCMH_IOD_BACKFILE) == 0)
		return (EBADF);

	if (fstat(fcmh_2_fd(f), &stb) == -1)
		return (errno);

	/* Used by mds_bmap_crc_update() */
	bcr->bcr_crcup.fsize = stb.st_size;
	bcr->bcr_crcup.nblks = stb.st_blocks;
	bcr->bcr_crcup.utimgen = f->fcmh_sstb.sst_utimgen;

	return (0);
}
int
_mds_fcmh_setattr(int vfsid, struct fidc_membh *f, int to_set,
    const struct srt_stat *sstb, int log)
{
	struct srt_stat sstb_out;
	int rc;

	FCMH_LOCK_ENSURE(f);
	FCMH_BUSY_ENSURE(f);
	FCMH_ULOCK(f);

	if (log)
		mds_reserve_slot(1);
	rc = mdsio_setattr(vfsid, fcmh_2_mfid(f), sstb, to_set,
	    &rootcreds, &sstb_out, fcmh_2_mfh(f),
	    log ? mdslog_namespace : NULL);
	if (log)
		mds_unreserve_slot(1);

	if (!rc) {
		psc_assert(sstb_out.sst_fid == fcmh_2_fid(f));
		FCMH_LOCK(f);
		f->fcmh_sstb = sstb_out;
		FCMH_ULOCK(f);
	}
	return (rc);
}
void
psclist_sort(void **p, struct psclist_head *hd, int n, ptrdiff_t off,
    void (*sortf)(void *, size_t, size_t,
	int (*)(const void *, const void *)),
    int (*cmpf)(const void *, const void *))
{
	struct psc_listentry *e;
	void *next, *prev;
	int j = 0;

	psc_assert(n >= 0);
	if (n < 2)
		return;

	psclist_for_each(e, hd)
		p[j++] = ((char *)e) - off;
	sortf(p, n, sizeof(*p), cmpf);
	prev = hd;
	for (j = 0; j < n; j++, prev = e) {
		e = (void *)((char *)p[j] + off);
		if (j + 1 == n)
			next = hd;
		else
			next = (char *)p[j + 1] + off;
		psc_lentry_prev(e) = prev;
		psc_lentry_next(e) = next;
	}
	psc_listhd_first(hd) = (void *)((char *)p[0] + off);
	psc_listhd_last(hd) = prev;
}
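/*
 * Usage sketch (illustrative only; 'struct item', 'item_cmp', and
 * 'sort_example' are hypothetical; assumes <stdlib.h> and <stddef.h>).
 * qsort(3) matches the sortf signature exactly, and the comparison
 * callback receives pointers to array slots, so it must dereference
 * once to reach the objects.
 */
struct item {
	int			 val;
	struct psc_listentry	 lentry;
};

static int
item_cmp(const void *pa, const void *pb)
{
	const struct item *a = *(const struct item * const *)pa;
	const struct item *b = *(const struct item * const *)pb;

	if (a->val < b->val)
		return (-1);
	return (a->val > b->val);
}

static void
sort_example(struct psclist_head *hd, int nitems)
{
	void **scratch;

	/* caller supplies a scratch array sized to the list length */
	scratch = PSCALLOC(nitems * sizeof(*scratch));
	psclist_sort(scratch, hd, nitems,
	    offsetof(struct item, lentry), qsort, item_cmp);
	PSCFREE(scratch);
}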
int
slc_fcmh_ctor(struct fidc_membh *f, __unusedx int flags)
{
	struct fcmh_cli_info *fci;
	struct sl_resource *res;
	struct sl_site *s;
	sl_siteid_t siteid;
	int i;

	fci = fcmh_get_pri(f);
	slc_fcmh_refresh_age(f);
	INIT_PSC_LISTENTRY(&fci->fci_lentry);
	siteid = FID_GET_SITEID(fcmh_2_fid(f));

	psc_assert(f->fcmh_flags & FCMH_INITING);

	if (fcmh_2_fid(f) != SLFID_ROOT &&
	    siteid != msl_rmc_resm->resm_siteid) {
		s = libsl_siteid2site(siteid);
		if (s == NULL) {
			psclog_errorx("fid "SLPRI_FID" has "
			    "invalid site ID %d",
			    fcmh_2_fid(f), siteid);
			return (ESTALE);
		}
		SITE_FOREACH_RES(s, res, i)
			if (res->res_type == SLREST_MDS) {
				fci->fci_resm = psc_dynarray_getpos(
				    &res->res_members, 0);
				return (0);
			}
		psclog_errorx("fid "SLPRI_FID" has invalid site ID %d",
		    fcmh_2_fid(f), siteid);
		return (ESTALE);
	}

	/* local site or root fid: use the default MDS resource member */
	fci->fci_resm = msl_rmc_resm;
	return (0);
}
void
pfl_heap_add(struct pfl_heap *ph, void *c)
{
	struct pfl_heap_entry *che, *phe;
	size_t nalloc;
	void *p;

	psc_assert(c);

	che = PSC_AGP(c, ph->ph_entoff);
	if (ph->ph_nitems == ph->ph_nalloc) {
		/* grow the backing array geometrically */
		nalloc = MAX(8, 2 * ph->ph_nalloc);
		ph->ph_base = psc_realloc(ph->ph_base,
		    nalloc * sizeof(void *), 0);
		ph->ph_nalloc = nalloc;
	}
	ph->ph_base[che->phe_idx = ph->ph_nitems++] = c;

	/* bubble up */
	while (che->phe_idx > 0) {
		/* get parent */
		p = ph->ph_base[(che->phe_idx - 1) / 2];
		if (ph->ph_cmpf(p, c) != 1)
			break;
		phe = PSC_AGP(p, ph->ph_entoff);
		_pfl_heap_swap(ph, phe, che);
	}
}
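/*
 * Usage sketch (illustrative only; 'struct job', 'job_cmp', and
 * 'heap_example' are hypothetical; assumes <stddef.h>): a min-heap
 * keyed on priority.  The comparison routine must return -1/0/1, and
 * ph_entoff locates the embedded pfl_heap_entry inside each item.
 * Direct designated initialization is assumed here; the library may
 * provide a dedicated init routine instead.
 */
struct job {
	int			 job_prio;
	struct pfl_heap_entry	 job_phe;
};

static int
job_cmp(const void *a, const void *b)
{
	const struct job *x = a, *y = b;

	if (x->job_prio < y->job_prio)
		return (-1);
	return (x->job_prio > y->job_prio);
}

static void
heap_example(struct job *j1, struct job *j2)
{
	struct pfl_heap hp = {
		.ph_cmpf = job_cmp,
		.ph_entoff = offsetof(struct job, job_phe)
	};
	struct job *min;

	pfl_heap_add(&hp, j1);
	pfl_heap_add(&hp, j2);
	min = pfl_heap_peekidx(&hp, 0);	/* root is the smallest item */
	pfl_heap_remove(&hp, min);
}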
/*
 * Access an item in a dynamic array.
 * @pda: dynamic array to access.
 * @pos: item index.
 */
void *
psc_dynarray_getpos(const struct psc_dynarray *pda, int pos)
{
	psc_assert(pos >= 0);
	if (pos >= psc_dynarray_len(pda))
		psc_fatalx("out of bounds array access");
	return (pda->pda_items[pos]);
}
/*
 * Release a read lock on the PFL file system modules list.
 */
void
pflfs_modules_rdunpin(void)
{
	spinlock(&pflfs_modules_lock);
	psc_assert(pflfs_modules_pins > 0);
	pflfs_modules_pins--;
	freelock(&pflfs_modules_lock);
}
/*
 * A modification operation to the MDFS has ended.  If other operations
 * are ongoing, we need to re-wake the cursor thread to ensure a
 * transaction group is active, as it is not guaranteed to be awake.
 */
void
slm_zfs_cursor_end(void)
{
	spinlock(&slm_cursor_lock);
	psc_assert(slm_cursor_update_needed > 0);
	if (--slm_cursor_update_needed && !slm_cursor_update_inprog)
		psc_waitq_wakeall(&slm_cursor_waitq);
	freelock(&slm_cursor_lock);
}
/*
 * Set the item for a position in a dynamic array.
 * @pda: dynamic array to access.
 * @pos: item index.
 * @p: item.
 */
void
psc_dynarray_setpos(struct psc_dynarray *pda, int pos, void *p)
{
	psc_assert(pos >= 0);
	if (pos >= pda->pda_nalloc)
		psc_fatalx("out of bounds array access");
	pda->pda_items[pos] = p;
	if (pos >= pda->pda_pos)
		pda->pda_pos = pos + 1;
}
int
mdsio_fcmh_refreshattr(struct fidc_membh *f, struct srt_stat *out_sstb)
{
	int locked, rc, vfsid;
	pthread_t pthr;

	pthr = pthread_self();
	locked = FCMH_RLOCK(f);
	fcmh_wait_locked(f, (f->fcmh_flags & FCMH_BUSY) &&
	    f->fcmh_owner != pthr);
	rc = slfid_to_vfsid(fcmh_2_fid(f), &vfsid);
	psc_assert(rc == 0);
	rc = mdsio_getattr(vfsid, fcmh_2_mfid(f), fcmh_2_mfh(f),
	    &rootcreds, &f->fcmh_sstb);
	psc_assert(rc == 0);

	if (out_sstb)
		*out_sstb = f->fcmh_sstb;
	FCMH_URLOCK(f, locked);
	return (rc);
}
void *
startf(void *arg)
{
	struct thr *thr = arg;
	int32_t ov, mask;
	int i;

	pthread_barrier_wait(&barrier);
	mask = 1 << thr->pos;
	for (i = 0; i < niter; i++) {
		ov = psc_atomic32_setmask_getold(&v32, mask);
		psc_assert((ov & mask) == 0);

		ov = psc_atomic32_clearmask_getold(&v32, mask);
		psc_assert(ov & mask);

		sched_yield();
	}
	pthread_barrier_wait(&barrier);
	return (NULL);
}
/*
 * Remove the given position from the dynarray.  This API assumes the
 * dynarray is unordered so it will reposition the final element in the
 * emptied slot.  Use a different API if this is undesirable.
 */
void
psc_dynarray_removepos(struct psc_dynarray *pda, int pos)
{
	void **p;

	p = psc_dynarray_get_mutable(pda);
	psc_assert(pos >= 0 && pos < psc_dynarray_len(pda));
	if (pos != psc_dynarray_len(pda) - 1)
		p[pos] = p[psc_dynarray_len(pda) - 1];
	pda->pda_pos--;
}
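/*
 * Usage sketch (illustrative only; 'matches' and 'remove_matches' are
 * hypothetical): because removal swaps the tail element into the
 * emptied slot, iterate in reverse when deleting while scanning; any
 * swapped-in element has then already been examined.
 */
static void
remove_matches(struct psc_dynarray *da)
{
	int i;

	for (i = psc_dynarray_len(da) - 1; i >= 0; i--)
		if (matches(psc_dynarray_getpos(da, i)))
			psc_dynarray_removepos(da, i);
}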
/*
 * Initialize per-fcmh dircache structures.
 */
void
dircache_init(struct fidc_membh *d)
{
	struct fcmh_cli_info *fci = fcmh_2_fci(d);

	psc_assert(!(d->fcmh_flags & FCMHF_INIT_DIRCACHE));
	d->fcmh_flags |= FCMHF_INIT_DIRCACHE;

	pll_init(&fci->fci_dc_pages, struct dircache_page,
	    dcp_lentry, &d->fcmh_lock);
	pfl_rwlock_init(&fci->fcid_dircache_rwlock);
}
void *
pfl_heap_peekidx(struct pfl_heap *ph, int idx)
{
	struct pfl_heap_entry *phe;
	void *p;

	if (idx >= ph->ph_nitems)
		return (NULL);
	p = ph->ph_base[idx];
	phe = PSC_AGP(p, ph->ph_entoff);
	psc_assert(phe->phe_idx == idx);
	return (p);
}
/**
 * psc_fault_register - Register a new named fault point.
 */
struct psc_fault *
_psc_fault_register(const char *name)
{
	struct psc_fault *pflt;
	char *p;

	pflt = psc_fault_lookup(name);
	psc_assert(pflt == NULL);

	/* expected format: <daemon>_FAULT_<fault-name> */
	p = strstr(name, "_FAULT_");
	psc_assert(p);
	psc_assert(strlen(p + 7) < sizeof(pflt->pflt_name));

	pflt = &psc_faults[psc_nfaults++];
	INIT_SPINLOCK(&pflt->pflt_lock);
	/* 7 == strlen("_FAULT_") */
	strlcpy(pflt->pflt_name, p + 7, sizeof(pflt->pflt_name));
	for (p = pflt->pflt_name; *p; p++)
		*p = tolower(*p);
	pflt->pflt_chance = 100;
	pflt->pflt_count = -1;
	return (pflt);
}
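/*
 * Usage sketch (illustrative only; the daemon prefix, fault name, and
 * 'fault_example' are hypothetical): the stored fault name is
 * everything after "_FAULT_", lowercased.
 */
static void
fault_example(void)
{
	struct psc_fault *pflt;

	pflt = _psc_fault_register("SLIOD_FAULT_CRC_FAIL");
	/* the registered name is now "crc_fail" */
	psc_assert(strcmp(pflt->pflt_name, "crc_fail") == 0);
}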
/*
 * Handle a BMAPCHWRMODE request to upgrade a client bmap lease from
 * READ-only to READ+WRITE.
 * @rq: RPC request.
 */
int
slm_rmc_handle_bmap_chwrmode(struct pscrpc_request *rq)
{
	struct bmap_mds_lease *bml = NULL;
	struct srm_bmap_chwrmode_req *mq;
	struct srm_bmap_chwrmode_rep *mp;
	struct fidc_membh *f = NULL;
	struct bmapc_memb *b = NULL;
	struct bmap_mds_info *bmi;

	SL_RSX_ALLOCREP(rq, mq, mp);
	mp->rc = -slm_fcmh_get(&mq->sbd.sbd_fg, &f);
	if (mp->rc)
		PFL_GOTOERR(out, mp->rc);
	mp->rc = bmap_lookup(f, mq->sbd.sbd_bmapno, &b);
	if (mp->rc)
		PFL_GOTOERR(out, mp->rc);

	bmi = bmap_2_bmi(b);

	bml = mds_bmap_getbml(b, mq->sbd.sbd_seq, mq->sbd.sbd_nid,
	    mq->sbd.sbd_pid);
	if (bml == NULL)
		PFL_GOTOERR(out, mp->rc = -EINVAL);

	mp->rc = mds_bmap_bml_chwrmode(bml, mq->prefios[0]);
	if (mp->rc == -PFLERR_ALREADY)
		mp->rc = 0;
	else if (mp->rc)
		PFL_GOTOERR(out, mp->rc);

	mp->sbd = mq->sbd;
	mp->sbd.sbd_seq = bml->bml_seq;
	mp->sbd.sbd_key = bmi->bmi_assign->odtr_crc;

	psc_assert(bmi->bmi_wr_ion);
	mp->sbd.sbd_ios = rmmi2resm(bmi->bmi_wr_ion)->resm_res_id;

 out:
	if (bml)
		mds_bmap_bml_release(bml);
	if (b)
		bmap_op_done(b);
	if (f)
		fcmh_op_done(f);
	return (0);
}
void
_pll_remove(const struct pfl_callerinfo *pci,
    struct psc_lockedlist *pll, void *p)
{
	struct psc_listentry *e;
	int locked;

	e = _pll_obj2entry(pll, p);
	locked = PLL_RLOCK(pll);
	psclist_del(e, &pll->pll_listhd);
	psc_assert(pll->pll_nitems > 0);
	pll->pll_nitems--;
	PLL_URLOCK(pll, locked);
	if ((pll->pll_flags & PLLF_NOLOG) == 0)
		_psclog_pci(pci, PLL_DEBUG, 0,
		    "lockedlist %p remove item %p", pll, p);
}
int
pfl_vasprintf(char **p, const char *fmt, va_list ap)
{
	va_list apd;
	int sz;

	va_copy(apd, ap);
	sz = vsnprintf(NULL, 0, fmt, ap);
	psc_assert(sz != -1);
	sz++;

	*p = PSCALLOC(sz);
	vsnprintf(*p, sz, fmt, apd);
	va_end(apd);
	return (sz);
}
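/*
 * Usage sketch (illustrative only; 'my_asprintf' is hypothetical):
 * the typical caller is a varargs wrapper.  The result buffer is
 * allocated with PSCALLOC and should be released with PSCFREE.
 */
static int
my_asprintf(char **s, const char *fmt, ...)
{
	va_list ap;
	int sz;

	va_start(ap, fmt);
	sz = pfl_vasprintf(s, fmt, ap);
	va_end(ap);
	return (sz);
}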
/**
 * rsx_bulkclient - Set up a bulk source or sink on a client.
 * @rq: RPC request.
 * @type: BULK_GET_SOURCE lets the server pull our buffer;
 *	BULK_PUT_SINK sets up a buffer to be filled in by the server.
 * @ptl: portal to issue bulk xfer across.
 * @iov: iovec array of receive buffer.
 * @n: #iovecs.
 * Returns: 0 or negative errno on error.
 */
int
rsx_bulkclient(struct pscrpc_request *rq, int type, int ptl,
    struct iovec *iov, int n)
{
	struct pscrpc_bulk_desc *desc;
	int i;

	psc_assert(type == BULK_GET_SOURCE || type == BULK_PUT_SINK);
	desc = pscrpc_prep_bulk_imp(rq, n, type, ptl);
	if (desc == NULL)
		psc_fatal("NULL bulk descriptor");
	desc->bd_nob = 0;
	desc->bd_iov_count = n;
	memcpy(desc->bd_iov, iov, n * sizeof(*iov));
	for (i = 0; i < n; i++)
		desc->bd_nob += iov[i].iov_len;
	return (0);
}
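/*
 * Usage sketch (illustrative only; 'buf', 'len', 'bulk_portal', and
 * 'bulk_sink_example' are hypothetical): arrange for the server's
 * bulk reply to land in a local buffer.
 */
static int
bulk_sink_example(struct pscrpc_request *rq, void *buf, size_t len,
    int bulk_portal)
{
	struct iovec iov;

	iov.iov_base = buf;
	iov.iov_len = len;
	return (rsx_bulkclient(rq, BULK_PUT_SINK, bulk_portal,
	    &iov, 1));
}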
void
mds_brepls_check(uint8_t *repls, int nr)
{
	int val, off, i;

	psc_assert(nr > 0 && nr <= SL_MAX_REPLICAS);
	for (i = 0, off = 0; i < nr;
	    i++, off += SL_BITS_PER_REPLICA) {
		val = SL_REPL_GET_BMAP_IOS_STAT(repls, off);
		switch (val) {
		case BREPLST_VALID:
		case BREPLST_GARBAGE_QUEUED:
		case BREPLST_GARBAGE_SCHED:
		case BREPLST_TRUNC_QUEUED:
		case BREPLST_TRUNC_SCHED:
			return;
		}
	}
	psc_fatalx("no valid replica states exist");
}
void
slab_cache_init(void)
{
	size_t nbuf;

	psc_assert(SLASH_SLVR_SIZE <= LNET_MTU);
	if (slcfg_local->cfg_slab_cache_size < SLAB_CACHE_MIN)
		psc_fatalx("invalid slab_cache_size setting; "
		    "minimum allowed is %zu", SLAB_CACHE_MIN);
	nbuf = slcfg_local->cfg_slab_cache_size / SLASH_SLVR_SIZE;
	psc_poolmaster_init(&slab_poolmaster, struct slab,
	    slb_mgmt_lentry, PPMF_AUTO, nbuf, nbuf, nbuf,
	    slab_cache_reap, "slab", NULL);
	slab_pool = psc_poolmaster_getmgr(&slab_poolmaster);

	pscthr_init(SLITHRT_BREAP, slibreapthr_main, 0, "slibreapthr");
}
int
sli_fcmh_ctor(struct fidc_membh *f, __unusedx int flags)
{
	int rc;
	struct stat stb;
	struct fcmh_iod_info *fii;

	fii = fcmh_2_fii(f);
	INIT_PSC_LISTENTRY(&fii->fii_lentry);

	psc_assert(f->fcmh_flags & FCMH_INITING);

	if (f->fcmh_fg.fg_gen == FGEN_ANY) {
		DEBUG_FCMH(PLL_NOTICE, f,
		    "refusing to open backing file with FGEN_ANY");

		/*
		 * This is not an error, we just don't have enough info
		 * to create the backing file.
		 */
		return (0);
	}

	/* try to get a file descriptor for this backing obj */
	rc = sli_open_backing_file(f);
	if (rc == 0) {
		if (fstat(fcmh_2_fd(f), &stb) == -1) {
			rc = -errno;
			DEBUG_FCMH(PLL_WARN, f, "error during "
			    "getattr backing file rc=%d", rc);
			close(fcmh_2_fd(f));
		} else {
			sl_externalize_stat(&stb, &f->fcmh_sstb);
			// XXX get ptruncgen and gen
			f->fcmh_flags |= FCMH_HAVE_ATTRS;
		}
	}
	if (!rc)
		f->fcmh_flags |= FCMH_IOD_BACKFILE;
	return (rc);
}
/*
 * Get the specified bmap.
 * @f: fcmh.
 * @n: bmap number.
 * @rw: access mode.
 * @flags: retrieval parameters.
 * @bp: value-result bmap pointer.
 * Notes: returns the bmap referenced and locked.
 */
int
_bmap_get(const struct pfl_callerinfo *pci, struct fidc_membh *f,
    sl_bmapno_t n, enum rw rw, int flags, struct bmap **bp)
{
	int rc = 0, new_bmap, bmaprw = 0;
	struct bmap *b;

	if (bp)
		*bp = NULL;

	if (rw)
		bmaprw = rw == SL_WRITE ? BMAPF_WR : BMAPF_RD;

	new_bmap = flags & BMAPGETF_CREATE;
	b = bmap_lookup_cache(f, n, bmaprw, &new_bmap);
	if (b == NULL) {
		rc = ENOENT;
		goto out;
	}

	if (flags & BMAPGETF_NONBLOCK) {
		if (b->bcm_flags & BMAPF_LOADING)
			goto out;
	} else
		bmap_wait_locked(b, b->bcm_flags & BMAPF_LOADING);

	if (b->bcm_flags & BMAPF_LOADED)
		goto loaded;

	if (flags & BMAPGETF_NORETRIEVE) {
		if (b->bcm_flags & BMAPF_LOADED)
			OPSTAT_INCR("bmap-already-loaded");
		else
			OPSTAT_INCR("bmap-not-yet-loaded");
		goto out;
	}

	b->bcm_flags |= BMAPF_LOADING;
	DEBUG_BMAP(PLL_DIAG, b, "loading bmap; flags=%d", flags);
	BMAP_ULOCK(b);

	/* msl_bmap_retrieve(), iod_bmap_retrieve(), mds_bmap_read() */
	rc = sl_bmap_ops.bmo_retrievef(b, flags);

	BMAP_LOCK(b);
	if (flags & BMAPGETF_NONBLOCK) {
		if (rc)
			b->bcm_flags &= ~BMAPF_LOADING;
		goto out;
	}
	b->bcm_flags &= ~BMAPF_LOADING;
	if (!rc) {
		b->bcm_flags |= BMAPF_LOADED;
		bmap_wake_locked(b);
	}

 loaded:
	/*
	 * Early bail out should be safe.  There is only one place the
	 * client will do a bmap lookup.  In that code path, we just add
	 * the DIO flag to the bmap.  See msrcm_handle_bmapdio().
	 */
	if (rc || !bmaprw)
		goto out;

	/*
	 * Others wishing to access this bmap in the same mode must wait
	 * until MODECHNG ops have completed.  If the desired mode is
	 * present then a thread may proceed without blocking here so
	 * long as it only accesses structures which pertain to its
	 * mode.
	 */
	if (flags & BMAPGETF_NONBLOCK) {
		if (b->bcm_flags & BMAPF_MODECHNG)
			goto out;
	} else
		bmap_wait_locked(b, b->bcm_flags & BMAPF_MODECHNG);

	/*
	 * Not all lookups are done with the intent of changing the bmap
	 * mode, i.e. bmap_lookup() does not specify a rw value.
	 */
	if (!(bmaprw & b->bcm_flags) && sl_bmap_ops.bmo_mode_chngf) {
		psc_assert(!(b->bcm_flags & BMAPF_MODECHNG));
		b->bcm_flags |= BMAPF_MODECHNG;
		DEBUG_BMAP(PLL_DIAG, b, "mode change (rw=%d)", rw);
		BMAP_ULOCK(b);

		psc_assert(rw == SL_WRITE || rw == SL_READ);

		/* client only: call msl_bmap_modeset() */
		rc = sl_bmap_ops.bmo_mode_chngf(b, rw, flags);
		BMAP_LOCK(b);
	}

 out:
	if (b) {
		DEBUG_BMAP(rc && (rc != SLERR_BMAP_INVALID ||
		    (flags & BMAPGETF_NOAUTOINST) == 0) ?
		    PLL_ERROR : PLL_DIAG, b, "grabbed rc=%d", rc);
		if (rc)
			bmap_op_done(b);
		else
			*bp = b;
	}
	return (rc);
}