/*
 * Obtain a read lock on the PFL file system modules list.
 */
void
pflfs_modules_rdpin(void)
{
    spinlock(&pflfs_modules_lock);
    while (pflfs_modules_modifying) {
        freelock(&pflfs_modules_lock);
        usleep(1);
        spinlock(&pflfs_modules_lock);
    }
    pflfs_modules_pins++;
    freelock(&pflfs_modules_lock);
}
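/*
 * Usage sketch (illustrative, not from the source tree): a reader
 * pins the modules list before traversing it and drops the pin with
 * pflfs_modules_rdunpin() when done; the traversal body is
 * hypothetical.
 */
void
example_read_modules(void)
{
    pflfs_modules_rdpin();
    /* ... read-only traversal of the modules list ... */
    pflfs_modules_rdunpin();
}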
/*
 * Return the next SLASH2 FID to use. From the ZFS point of view, it
 * is perfectly okay to use the same SLASH2 FID to refer to different
 * files/directories. However, doing so can confuse our clients
 * (think identity theft), so we must make sure that we never reuse a
 * SLASH2 FID, even after a crash.
 *
 * The siteid has already been baked into the initial cursor file.
 */
int
slm_get_next_slashfid(slfid_t *fidp)
{
    uint64_t fid;

    spinlock(&slm_fid_lock);
    /*
     * This should never happen. If it does, we stop handing out FIDs
     * and return ENOSPC so the sysadmin knows to intervene; there may
     * still be room in the cycle bits, but only a manual bump of the
     * cycle bits can reclaim it.
     */
    if (FID_GET_INUM(slm_next_fid) >= FID_MAX_INUM) {
        psclog_warnx("max FID "SLPRI_FID" reached, manual "
            "intervention needed (bump the cycle bits)",
            slm_next_fid);
        freelock(&slm_fid_lock);
        return (ENOSPC);
    }
    fid = slm_next_fid++;
    freelock(&slm_fid_lock);

    psclog_diag("most recently allocated FID: "SLPRI_FID, fid);
    *fidp = fid;
    return (0);
}
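/*
 * Usage sketch (illustrative): allocate a FID for a file being
 * created and propagate ENOSPC when the FID space is exhausted. The
 * caller and the inode-stamping step are hypothetical.
 */
int
example_assign_fid(slfid_t *fidp)
{
    int rc;

    rc = slm_get_next_slashfid(fidp);
    if (rc)
        return (rc);    /* ENOSPC: cycle bits need a manual bump */
    /* ... stamp *fidp into the newly created inode ... */
    return (0);
}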
/*
 * Return the psc_memnode for the NUMA node the calling thread runs
 * on, creating it and caching it in thread-specific data on first
 * use.
 */
struct psc_memnode *
psc_memnode_get(void)
{
    struct psc_memnode *pmn, **pmnv;
    int memnid, rc;

    pmn = pthread_getspecific(psc_memnodes_key);
    if (pmn)
        return (pmn);

    memnid = psc_memnode_getid();
    spinlock(&psc_memnodes_lock);
    if (psc_dynarray_ensurelen(&psc_memnodes, memnid + 1) == -1)
        psc_fatalx("ensurelen");
    pmnv = psc_dynarray_get_mutable(&psc_memnodes);
    pmn = pmnv[memnid];
    if (pmn == NULL) {
        pmn = psc_alloc(sizeof(*pmn), PAF_NOLOG);
        INIT_SPINLOCK(&pmn->pmn_lock);
        psc_dynarray_init(&pmn->pmn_keys);
        rc = pthread_setspecific(psc_memnodes_key, pmn);
        if (rc)
            psc_fatalx("pthread_setspecific: %s", strerror(rc));
        psc_dynarray_setpos(&psc_memnodes, memnid, pmn);
    }
    freelock(&psc_memnodes_lock);
    return (pmn);
}
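/*
 * Usage sketch (hypothetical): fetch a per-memnode value under the
 * node's spinlock. The key index and the convention that pmn_keys
 * holds one pointer per registered key are assumptions for
 * illustration only.
 */
void *
example_memnode_lookup(int key)
{
    struct psc_memnode *pmn;
    void *val;

    pmn = psc_memnode_get();
    spinlock(&pmn->pmn_lock);
    /* assumes the caller previously registered this key slot */
    val = psc_dynarray_getpos(&pmn->pmn_keys, key);
    freelock(&pmn->pmn_lock);
    return (val);
}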
void AsyncSpinnerImpl::start()
{
  boost::mutex::scoped_lock lock(mutex_);
  if (continue_)
    return;

  boost::recursive_mutex::scoped_try_lock spinlock(spinmutex);
  if (!spinlock.owns_lock()) {
    ROS_WARN("AsyncSpinnerImpl: Attempt to start() an AsyncSpinner failed "
             "because another AsyncSpinner is already running. Note that the "
             "other AsyncSpinner might not be using the same callback queue "
             "as this AsyncSpinner, in which case no callbacks in your "
             "callback queue will be serviced.");
    return;
  }
  spinlock.swap(member_spinlock);

  continue_ = true;
  for (uint32_t i = 0; i < thread_count_; ++i) {
    threads_.create_thread(boost::bind(&AsyncSpinnerImpl::threadFunc, this));
  }
}
/*
 * Release a write lock on the PFL file system modules list.
 */
void
pflfs_modules_wrunpin(void)
{
    spinlock(&pflfs_modules_lock);
    pflfs_modules_modifying = 0;
    freelock(&pflfs_modules_lock);
}
void
psc_compl_destroy(struct psc_compl *pc)
{
    spinlock(&pc->pc_lock);
    pfl_waitq_destroy(&pc->pc_wq);
    freelock(&pc->pc_lock);
}
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
{
    /* XXX: IRQ in flags? */
    spinlock(Handle);
    return 0;
}
/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 * flags is *not* the result of save_flags - it is an ACPI-specific
 * flag variable that indicates whether we are at interrupt level.
 */
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
    unsigned long flags;

    flags = spinlock(lockp);
    return flags;
}
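/*
 * Usage sketch: in ACPICA, AcpiOsAcquireLock() (see above) is paired
 * with AcpiOsReleaseLock(), which takes back the returned flags so
 * ports that save interrupt state can restore it. The lock handle
 * and critical-section body here are illustrative.
 */
static void
example_acpi_critical(ACPI_SPINLOCK lock)
{
    ACPI_CPU_FLAGS flags;

    flags = AcpiOsAcquireLock(lock);
    /* ... touch state protected by this ACPICA lock ... */
    AcpiOsReleaseLock(lock, flags);
}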
struct pfl_opstat *
pfl_opstat_initf(int flags, const char *namefmt, ...)
{
    struct pfl_opstat *opst;
    int sz, pos;
    va_list ap;
    char *name = pfl_opstat_name;

    spinlock(&pfl_opstats_lock);

    va_start(ap, namefmt);
    sz = vsnprintf(name, 128, namefmt, ap) + 1;
    va_end(ap);

    /* (gdb) p ((struct pfl_opstat *)pfl_opstats.pda_items[74]).opst_name */
    pos = psc_dynarray_bsearch(&pfl_opstats, name, _pfl_opstat_cmp);
    if (pos < psc_dynarray_len(&pfl_opstats)) {
        opst = psc_dynarray_getpos(&pfl_opstats, pos);
        if (strcmp(name, opst->opst_name) == 0) {
            /* This name is already registered; return that entry. */
            pfl_assert((flags & OPSTF_EXCL) == 0);
            freelock(&pfl_opstats_lock);
            return (opst);
        }
    }
    pfl_opstats_sum++;
    opst = PSCALLOC(sizeof(*opst) + sz);
    strlcpy(opst->opst_name, name, 128);
    opst->opst_flags = flags;
    psc_dynarray_splice(&pfl_opstats, pos, 0, &opst, 1);
    freelock(&pfl_opstats_lock);
    return (opst);
}
size_t
pfl_odt_allocslot(struct pfl_odt *t)
{
    struct pfl_odt_hdr *h;
    size_t item;

    h = t->odt_hdr;
    spinlock(&t->odt_lock);
    if (psc_vbitmap_next(t->odt_bitmap, &item) <= 0) {
        ODT_STAT_INCR(t, full);
        freelock(&t->odt_lock);
        return (-1);
    }
    if (item >= h->odth_nitems) {
        ODT_STAT_INCR(t, extend);
        OPSTAT_INCR("pfl.odtable-resize");
        /*
         * psc_vbitmap_next() has enlarged the bitmap. Update
         * the number of items accordingly and write to the
         * disk.
         */
        h->odth_nitems = psc_vbitmap_getsize(t->odt_bitmap);
        t->odt_ops.odtop_resize(t);    /* slm_odt_resize() */
        PFLOG_ODT(PLL_WARN, t,
            "odtable now has %u items (used to be %zd)",
            h->odth_nitems, item);
    }
    freelock(&t->odt_lock);
    return (item);
}
void
slm_set_curr_slashfid(slfid_t slfid)
{
    spinlock(&slm_fid_lock);
    slm_next_fid = slfid;
    freelock(&slm_fid_lock);
}
/*
 * Obtain a write lock on the PFL file system modules list.
 */
void
pflfs_modules_wrpin(void)
{
    spinlock(&pflfs_modules_lock);
    while (pflfs_modules_modifying) {
        freelock(&pflfs_modules_lock);
        usleep(1);
        spinlock(&pflfs_modules_lock);
    }
    pflfs_modules_modifying = 1;
    while (pflfs_modules_pins) {
        freelock(&pflfs_modules_lock);
        usleep(1);
        spinlock(&pflfs_modules_lock);
    }
    freelock(&pflfs_modules_lock);
}
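/*
 * Usage sketch (illustrative): a writer takes the exclusive pin
 * while mutating the modules list and releases it with
 * pflfs_modules_wrunpin() above; the mutation body is hypothetical.
 */
void
example_modify_modules(void)
{
    pflfs_modules_wrpin();
    /* ... insert or remove an entry in the modules list ... */
    pflfs_modules_wrunpin();
}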
/*
 * A modification operation to the MDFS has begun. This means the
 * cursor thread must be woken to start a transaction group.
 */
void
slm_zfs_cursor_start(void)
{
    spinlock(&slm_cursor_lock);
    if (!slm_cursor_update_needed++ && !slm_cursor_update_inprog)
        psc_waitq_wakeall(&slm_cursor_waitq);
    freelock(&slm_cursor_lock);
}
/*
 * Release a read lock on the PFL file system modules list.
 */
void
pflfs_modules_rdunpin(void)
{
    spinlock(&pflfs_modules_lock);
    psc_assert(pflfs_modules_pins > 0);
    pflfs_modules_pins--;
    freelock(&pflfs_modules_lock);
}
/*
 * A modification operation to the MDFS has ended. If other
 * operations are still ongoing, re-wake the cursor thread to ensure
 * a transaction group stays active, since the thread is not
 * guaranteed to be awake.
 */
void
slm_zfs_cursor_end(void)
{
    spinlock(&slm_cursor_lock);
    psc_assert(slm_cursor_update_needed > 0);
    if (--slm_cursor_update_needed && !slm_cursor_update_inprog)
        psc_waitq_wakeall(&slm_cursor_waitq);
    freelock(&slm_cursor_lock);
}
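/*
 * Usage sketch (illustrative): bracket an MDFS update with the
 * cursor start/end calls above so the cursor thread keeps a
 * transaction group open for the duration of the modification. The
 * update body is hypothetical.
 */
void
example_mdfs_update(void)
{
    slm_zfs_cursor_start();
    /* ... perform the metadata modification ... */
    slm_zfs_cursor_end();
}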
slfid_t
slm_get_curr_slashfid(void)
{
    slfid_t fid;

    spinlock(&slm_fid_lock);
    fid = slm_next_fid;
    freelock(&slm_fid_lock);
    return (fid);
}
static inline ALWAYS_INLINE void
PIO_LOCK(int fd)
{
    ONCE_FLAG_RUN(pio_inited, pio_init_once);
    g_assert(fd >= 0);
    g_assert(UNSIGNED(fd) < pio_capacity);
    spinlock(&pio_locks[fd]);
}
void
slmbmaptimeothr_begin(struct psc_thread *thr)
{
    struct bmap_mds_lease *bml;
    int rc, nsecs = 0;

    while (pscthr_run(thr)) {
        spinlock(&mdsBmapTimeoTbl.btt_lock);
        bml = pll_peekhead(&mdsBmapTimeoTbl.btt_leases);
        if (!bml) {
            freelock(&mdsBmapTimeoTbl.btt_lock);
            nsecs = BMAP_TIMEO_MAX;
            goto sleep;
        }
        if (!BML_TRYLOCK(bml)) {
            freelock(&mdsBmapTimeoTbl.btt_lock);
            nsecs = 1;
            goto sleep;
        }
        if (bml->bml_refcnt) {
            BML_ULOCK(bml);
            freelock(&mdsBmapTimeoTbl.btt_lock);
            nsecs = 1;
            goto sleep;
        }
        if (!(bml->bml_flags & BML_FREEING)) {
            nsecs = bml->bml_expire - time(NULL);
            if (nsecs > 0) {
                BML_ULOCK(bml);
                freelock(&mdsBmapTimeoTbl.btt_lock);
                goto sleep;
            }
            bml->bml_flags |= BML_FREEING;
        }
        BML_ULOCK(bml);
        freelock(&mdsBmapTimeoTbl.btt_lock);

        rc = mds_bmap_bml_release(bml);
        if (rc) {
            DEBUG_BMAP(PLL_WARN, bml_2_bmap(bml),
                "rc=%d bml=%p fl=%d seq=%"PRId64,
                rc, bml, bml->bml_flags, bml->bml_seq);
            nsecs = 1;
        } else
            nsecs = 0;
 sleep:
        psclog_debug("nsecs=%d", nsecs);
        if (nsecs > 0)
            sleep((uint32_t)nsecs);
    }
}
void
pfl_opstat_destroy(struct pfl_opstat *opst)
{
    int pos;

    spinlock(&pfl_opstats_lock);
    pos = psc_dynarray_bsearch(&pfl_opstats, opst->opst_name,
        _pfl_opstat_cmp);
    pfl_assert(psc_dynarray_getpos(&pfl_opstats, pos) == opst);
    pfl_opstat_destroy_pos(pos);
    freelock(&pfl_opstats_lock);
}
uint8_t
pfndb_type(unsigned pfn)
{
    uint8_t t;
    ipfn_t *p = &pfndb[pfn];

    assert(pfn <= pfndb_max);
    spinlock(&pfndblock);
    t = p->type;
    spinunlock(&pfndblock);
    return t;
}
void *
pfndb_getptr(unsigned pfn)
{
    void *ptr;
    ipfn_t *p = &pfndb[pfn];

    assert(pfn <= pfndb_max);
    spinlock(&pfndblock);
    ptr = (void *)p->ptr;
    spinunlock(&pfndblock);
    return ptr;
}
void
pfndb_subst(uint8_t t1, uint8_t t2)
{
    unsigned i;

    spinlock(&pfndblock);
    for (i = 0; i < pfndb_max; i++)
        if (pfndb[i].type == t1) {
            pfndb[i].type = t2;
            pfndb_stats_dectype(t1);
            pfndb_stats_inctype(t2);
        }
    spinunlock(&pfndblock);
}
void MultiThreadedSpinner::spin(CallbackQueue* queue)
{
  boost::recursive_mutex::scoped_try_lock spinlock(spinmutex);
  if (!spinlock.owns_lock()) {
    ROS_ERROR("MultiThreadedSpinner: You've attempted to call ros::spin "
              "from multiple threads... "
              "but this spinner is already multithreaded.");
    return;
  }

  AsyncSpinner s(thread_count_, queue);
  s.start();

  ros::waitForShutdown();
}
void
_psc_compl_ready(struct psc_compl *pc, int rc, int one)
{
    spinlock(&pc->pc_lock);
    if (one)
        pfl_waitq_wakeone(&pc->pc_wq);
    else {
        pc->pc_rc = rc;
        pc->pc_done = 1;
        pfl_waitq_wakeall(&pc->pc_wq);
    }
    pc->pc_counter++;
    freelock(&pc->pc_lock);
}
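/*
 * Self-contained sketch of the same completion pattern using plain
 * pthreads rather than PFL primitives (all names here are
 * illustrative): a waiter blocks until a producer marks the
 * completion done, mirroring pc_done/pc_rc and the wakeall path
 * above. Initialize with PTHREAD_MUTEX_INITIALIZER and
 * PTHREAD_COND_INITIALIZER.
 */
#include <pthread.h>

struct example_compl {
    pthread_mutex_t mtx;
    pthread_cond_t  cond;
    int             done;
    int             rc;
};

void
example_compl_ready(struct example_compl *c, int rc)
{
    pthread_mutex_lock(&c->mtx);
    c->rc = rc;
    c->done = 1;
    pthread_cond_broadcast(&c->cond);   /* like pfl_waitq_wakeall() */
    pthread_mutex_unlock(&c->mtx);
}

int
example_compl_wait(struct example_compl *c)
{
    pthread_mutex_lock(&c->mtx);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->mtx);
    pthread_mutex_unlock(&c->mtx);
    return (c->rc);
}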
void
pmap_commit(struct pmap *pmap)
{
    if (pmap == NULL)
        pmap = pmap_current();

    spinlock(&pmap->lock);
    if (pmap->tlbflush & TLBF_GLOBAL)
        __flush_tlbs(-1, TLBF_GLOBAL);
    else if (pmap->tlbflush & TLBF_LOCAL)
        __flush_tlbs(pmap->cpumap, TLBF_LOCAL);
    pmap->tlbflush = 0;
    spinunlock(&pmap->lock);
}
/*
 * Free the odtable slot which corresponds to the provided item
 * number, clearing its bit in the allocation bitmap.
 */
void
pfl_odt_freeitem(struct pfl_odt *t, int64_t item)
{
    struct pfl_odt_slotftr f;

    _pfl_odt_doput(t, item, NULL, &f, 0);

    spinlock(&t->odt_lock);
    psc_vbitmap_unset(t->odt_bitmap, item);
    freelock(&t->odt_lock);

    PFLOG_ODT(PLL_DIAG, t, "slot=%"PRId64, item);
    ODT_STAT_INCR(t, free);
}
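/*
 * Usage sketch (illustrative): a slot obtained from
 * pfl_odt_allocslot() above is handed back with pfl_odt_freeitem()
 * once its payload is no longer needed; both serialize on the same
 * per-table spinlock around the bitmap. The payload step is
 * hypothetical.
 */
void
example_odt_cycle(struct pfl_odt *t)
{
    size_t item;

    item = pfl_odt_allocslot(t);
    if (item == (size_t)-1)
        return;    /* table is full */
    /* ... write the payload for this slot ... */
    pfl_odt_freeitem(t, item);
}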
void
pfndb_settype(unsigned pfn, uint8_t t)
{
    uint8_t ot;
    ipfn_t *p = &pfndb[pfn];

    assert(pfn <= pfndb_max);
    spinlock(&pfndblock);
    ot = p->type;
    p->type = t;
    spinunlock(&pfndblock);
    pfndb_stats_dectype(ot);
    pfndb_stats_inctype(t);
}
static void
pfl_odt_zerobuf_ensurelen(size_t len)
{
    static psc_spinlock_t zerobuf_lock = SPINLOCK_INIT;
    static size_t zerobuf_len;

    if (len <= zerobuf_len)
        return;

    spinlock(&zerobuf_lock);
    if (len > zerobuf_len) {
        pfl_odt_zerobuf = psc_realloc(pfl_odt_zerobuf, len, 0);
        zerobuf_len = len;
    }
    freelock(&zerobuf_lock);
}
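/*
 * The function above is an instance of double-checked locking: the
 * unlocked early return is cheap and tolerable only because the
 * buffer length grows monotonically, and the length is re-checked
 * under the lock before resizing. A generic self-contained sketch of
 * the same idiom with pthreads (all names illustrative):
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t example_buf_mtx = PTHREAD_MUTEX_INITIALIZER;
static char *example_buf;
static size_t example_buf_len;

void
example_buf_ensurelen(size_t len)
{
    char *nb;

    if (len <= example_buf_len)    /* racy fast path, as in the original */
        return;

    pthread_mutex_lock(&example_buf_mtx);
    if (len > example_buf_len) {   /* re-check under the lock */
        nb = realloc(example_buf, len);
        if (nb != NULL) {
            example_buf = nb;
            example_buf_len = len;
        }
    }
    pthread_mutex_unlock(&example_buf_mtx);
}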
/**
 * mds_bmap_timeotbl_mdsi - Manage a lease's membership in the bmap
 *	timeout table. Returns the bmap sequence number.
 */
uint64_t
mds_bmap_timeotbl_mdsi(struct bmap_mds_lease *bml, int flags)
{
    uint64_t seq = 0;

    if (flags & BTE_DEL) {
        bml->bml_flags &= ~BML_TIMEOQ;
        mds_bmap_timeotbl_remove(bml);
        return (BMAPSEQ_ANY);
    }

    if (flags & BTE_REATTACH) {
        /* BTE_REATTACH is only used from startup context. */
        spinlock(&mdsBmapTimeoTbl.btt_lock);
        if (mdsBmapTimeoTbl.btt_maxseq < bml->bml_seq)
            /*
             * A lease has been found in the odtable whose
             * issuance was after that of the last HWM
             * journal entry. (HWMs are journaled every
             * BMAP_SEQLOG_FACTOR times.)
             */
            seq = mdsBmapTimeoTbl.btt_maxseq = bml->bml_seq;
        else if (mdsBmapTimeoTbl.btt_minseq > bml->bml_seq)
            /* This lease has already expired. */
            seq = BMAPSEQ_ANY;
        else
            seq = bml->bml_seq;
        freelock(&mdsBmapTimeoTbl.btt_lock);
    } else {
        seq = mds_bmap_timeotbl_getnextseq();
    }

    BML_LOCK(bml);
    if (bml->bml_flags & BML_TIMEOQ) {
        /* Already queued: move the lease to the tail. */
        mds_bmap_timeotbl_remove(bml);
        pll_addtail(&mdsBmapTimeoTbl.btt_leases, bml);
    } else {
        bml->bml_flags |= BML_TIMEOQ;
        pll_addtail(&mdsBmapTimeoTbl.btt_leases, bml);
    }
    BML_ULOCK(bml);

    return (seq);
}
uint8_t* BufferStore::get_buffer()
{
#ifdef INCLUDEOS_SMP_ENABLE
  scoped_spinlock spinlock(this->plock);
#endif
  if (UNLIKELY(available_.empty())) {
    if (this->growth_enabled())
      this->create_new_pool();
    else
      throw std::runtime_error("This BufferStore has run out of buffers");
  }

  auto* addr = available_.back();
  available_.pop_back();
  BSD_PRINT("%d: Gave away %p, %zu buffers remain\n",
            this->index, addr, available());
  return addr;
}