int
ichiic_i2c_acquire_bus(void *cookie, int flags)
{
	struct ichiic_softc *sc = cookie;

	if (cold || sc->sc_poll || (flags & I2C_F_POLL))
		return (0);

	return (lockmgr(&sc->sc_i2c_lock, LK_EXCLUSIVE, NULL));
}
/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean, if the
 * caller cares what the lock status is the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}
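/*
 * Hedged usage sketch (not part of the source above): a caller that must
 * drop vp's lock around some operation and then restore the previous
 * state re-acquires the lock only when the saved status was LK_EXCLUSIVE,
 * using only the APIs shown above.
 */
static void
example_drop_and_restore(struct vnode *vp)
{
	int vpls;

	vpls = vn_islocked_unlock(vp);
	/* ... work that must not hold vp->v_lock exclusively ... */
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}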
/**
 * mrsas_cam_detach:	Deallocates and tears down CAM
 * input:		Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.
 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1);
	}
	lockmgr(&sc->sim_lock, LK_RELEASE);
}
static uint32_t
ua_mixer_setrecsrc(struct snd_mixer *m, uint32_t src)
{
	struct lock *lock = mixer_get_lock(m);
	int retval;
	uint8_t do_unlock;

	/*
	 * Take the mixer lock only if the caller does not already hold
	 * it, and remember whether we must release it on the way out.
	 */
	if (lockowned(lock)) {
		do_unlock = 0;
	} else {
		do_unlock = 1;
		lockmgr(lock, LK_EXCLUSIVE);
	}
	retval = uaudio_mixer_setrecsrc(mix_getdevinfo(m), src);
	if (do_unlock)
		lockmgr(lock, LK_RELEASE);
	return (retval);
}
void
cam_sim_cond_unlock(sim_lock *lock, int doun)
{
	if (doun) {
		if (lock == &sim_mplock)
			rel_mplock();
		else
			lockmgr(lock, LK_RELEASE);
	}
}
void
puffs_msgmem_release(struct puffs_msgpark *park)
{
	if (park == NULL)
		return;

	/* puffs_msgpark_release() consumes the reference and drops park_mtx. */
	lockmgr(&park->park_mtx, LK_EXCLUSIVE);
	puffs_msgpark_release(park);
}
void
ichiic_i2c_release_bus(void *cookie, int flags)
{
	struct ichiic_softc *sc = cookie;

	if (cold || sc->sc_poll || (flags & I2C_F_POLL))
		return;

	lockmgr(&sc->sc_i2c_lock, LK_RELEASE, NULL);
}
int
fusefs_unlock(void *v)
{
	struct vop_unlock_args *ap = v;
	struct vnode *vp = ap->a_vp;

	DPRINTF("fusefs_unlock\n");

	return (lockmgr(&VTOI(vp)->ufs_ino.i_lock, ap->a_flags | LK_RELEASE,
	    NULL));
}
static int
fuse_vnode_alloc(struct mount *mp,
    struct thread *td,
    uint64_t nodeid,
    enum vtype vtyp,
    struct vnode **vpp)
{
	struct fuse_vnode_data *fvdat;
	struct vnode *vp2;
	int err = 0;

	FS_DEBUG("been asked for vno #%ju\n", (uintmax_t)nodeid);

	if (vtyp == VNON)
		return (EINVAL);

	*vpp = NULL;
	err = vfs_hash_get(mp, fuse_vnode_hash(nodeid), LK_EXCLUSIVE, td, vpp,
	    fuse_vnode_cmp, &nodeid);
	if (err)
		return (err);

	if (*vpp) {
		MPASS((*vpp)->v_type == vtyp && (*vpp)->v_data != NULL);
		FS_DEBUG("vnode taken from hash\n");
		return (0);
	}
	fvdat = malloc(sizeof(*fvdat), M_FUSEVN, M_WAITOK | M_ZERO);
	err = getnewvnode("fuse", mp, &fuse_vnops, vpp);
	if (err) {
		free(fvdat, M_FUSEVN);
		return (err);
	}
	lockmgr((*vpp)->v_vnlock, LK_EXCLUSIVE, NULL);
	fuse_vnode_init(*vpp, fvdat, nodeid, vtyp);
	err = insmntque(*vpp, mp);
	ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc");
	if (err) {
		free(fvdat, M_FUSEVN);
		*vpp = NULL;
		return (err);
	}
	err = vfs_hash_insert(*vpp, fuse_vnode_hash(nodeid), LK_EXCLUSIVE,
	    td, &vp2, fuse_vnode_cmp, &nodeid);
	if (err)
		return (err);
	if (vp2 != NULL) {
		*vpp = vp2;
		return (0);
	}

	ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc");
	return (0);
}
static int
ng_btsocket_hci_raw_node_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	struct ng_mesg	*msg = NGI_MSG(item); /* item still has message */
	int		 error = 0;

	/*
	 * Checking for an empty sockets list creates a LOR when both the
	 * sender and receiver devices are connected to the same host, so
	 * that check is removed for now.
	 */

	if (msg != NULL &&
	    (msg->header.typecookie == NGM_HCI_COOKIE ||
	     msg->header.typecookie == NGM_GENERIC_COOKIE) &&
	    msg->header.flags & NGF_RESP) {
		if (msg->header.token == 0) {
			NG_FREE_ITEM(item);
			return (0);
		}

		lockmgr(&ng_btsocket_hci_raw_queue_lock, LK_EXCLUSIVE);
		if (NG_BT_ITEMQ_FULL(&ng_btsocket_hci_raw_queue)) {
			NG_BTSOCKET_HCI_RAW_ERR(
			    "%s: Input queue is full\n", __func__);

			NG_BT_ITEMQ_DROP(&ng_btsocket_hci_raw_queue);
			NG_FREE_ITEM(item);
			error = ENOBUFS;
		} else {
			ng_ref_item(item);
			NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_hci_raw_queue, item);
			error = ng_btsocket_hci_raw_wakeup_input_task();
		}
		lockmgr(&ng_btsocket_hci_raw_queue_lock, LK_RELEASE);
	} else {
		NG_FREE_ITEM(item);
		error = EINVAL;
	}

	return (error);
} /* ng_btsocket_hci_raw_node_rcvmsg */
/*
 * Obtain a new vnode.  The returned vnode is VX locked & vrefd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * We always allocate the vnode.  Attempting to recycle existing vnodes
 * here can lead to numerous deadlocks, particularly with softupdates.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Do not flag for synchronous recyclement unless there are enough
	 * freeable vnodes to recycle and the number of vnodes has
	 * significantly exceeded our target.  We want the normal vnlru
	 * process to handle the cleaning (at 9/10's) before we are forced
	 * to flag it here at 11/10's for userexit path processing.
	 */
	if (numvnodes >= maxvnodes * 11 / 10 &&
	    cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
		struct thread *td = curthread;
		if (td->td_lwp)
			atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
	}

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;

	vp = kmalloc(sizeof(*vp), M_VNODE, M_ZERO | M_WAITOK);

	lwkt_token_init(&vp->v_token, "vnode");
	lockinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	spin_init(&vp->v_spin, "allocvnode");

	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
	atomic_add_int(&numvnodes, 1);
	vp->v_refcnt = 1;
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));

	/* exclusive lock still held */

	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_state = VS_CACHED;
	_vactivate(vp);

	return (vp);
}
static int
intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
{
	struct intel_iic_softc *sc;
	struct drm_i915_private *dev_priv;
	u8 *buf;
	int error, i, reg_offset, unit;
	u32 val, loop;
	u16 len;

	sc = device_get_softc(idev);
	dev_priv = sc->drm_dev->dev_private;
	unit = device_get_unit(idev);

	lockmgr(&dev_priv->gmbus_lock, LK_EXCLUSIVE);
	if (sc->force_bit_dev) {
		error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs,
		    nmsgs);
		goto out;
	}
	reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;

	I915_WRITE(GMBUS0 + reg_offset, sc->reg0);

	for (i = 0; i < nmsgs; i++) {
		len = msgs[i].len;
		buf = msgs[i].buf;

		if ((msgs[i].flags & IIC_M_RD) != 0) {
			I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT |
			    (i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) |
			    (len << GMBUS_BYTE_COUNT_SHIFT) |
			    (msgs[i].slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) |
			    GMBUS_SLAVE_READ | GMBUS_SW_RDY);
			POSTING_READ(GMBUS2 + reg_offset);
			do {
				loop = 0;
				if (_intel_wait_for(sc->drm_dev,
				    (I915_READ(GMBUS2 + reg_offset) &
				     (GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
				    50, 1, "915gbr"))
					goto timeout;
				if ((I915_READ(GMBUS2 + reg_offset) &
				    GMBUS_SATOER) != 0)
					goto clear_err;

				val = I915_READ(GMBUS3 + reg_offset);
				do {
					*buf++ = val & 0xff;
					val >>= 8;
				} while (--len != 0 && ++loop < 4);
			} while (len != 0);
		} else {
static void
iomdiic_release_bus(void *cookie, int flags)
{
	struct iomdiic_softc *sc = cookie;

	/* XXX See above. */
	if (flags & I2C_F_POLL)
		return;

	(void) lockmgr(&sc->sc_buslock, LK_RELEASE, NULL);
}
static int
iomdiic_acquire_bus(void *cookie, int flags)
{
	struct iomdiic_softc *sc = cookie;

	/* XXX What should we do for the polling case? */
	if (flags & I2C_F_POLL)
		return (0);

	return (lockmgr(&sc->sc_buslock, LK_EXCLUSIVE, NULL));
}
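/*
 * Hedged caller sketch (not from the driver above): the i2c midlayer is
 * expected to bracket each transfer with acquire/release using the same
 * flags, so polled (I2C_F_POLL) paths skip the bus lock symmetrically.
 * iomdiic_exec_transfer() is a hypothetical placeholder for the actual
 * transfer routine.
 */
static int
example_i2c_transfer(void *cookie, int flags)
{
	int error;

	if ((error = iomdiic_acquire_bus(cookie, flags)) != 0)
		return (error);
	error = iomdiic_exec_transfer(cookie, flags);	/* hypothetical */
	iomdiic_release_bus(cookie, flags);
	return (error);
}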
/**
 * mrsas_get_mpt_cmd:	Get a cmd from free command pool
 * input:		Adapter instance soft state
 *
 * This function removes an MPT command from the command free list and
 * initializes it.  Returns NULL if the free list is empty.
 */
struct mrsas_mpt_cmd *
mrsas_get_mpt_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *cmd;

	lockmgr(&sc->mpt_cmd_pool_lock, LK_EXCLUSIVE);
	if (TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
		/* Pool is exhausted; the caller must handle a NULL return. */
		lockmgr(&sc->mpt_cmd_pool_lock, LK_RELEASE);
		return (NULL);
	}
	cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
	memset((uint8_t *)cmd->io_request, 0,
	    MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	cmd->data = NULL;
	cmd->length = 0;
	cmd->flags = 0;
	cmd->error_code = 0;
	cmd->load_balance = 0;
	cmd->ccb_ptr = NULL;
	lockmgr(&sc->mpt_cmd_pool_lock, LK_RELEASE);

	return (cmd);
}
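/*
 * Hedged usage sketch: a caller must check for pool exhaustion before
 * touching the returned command.  The CAM_REQUEUE_REQ handling below is
 * illustrative, not the driver's actual error path.
 */
static void
example_start_io(struct mrsas_softc *sc, union ccb *ccb)
{
	struct mrsas_mpt_cmd *cmd;

	cmd = mrsas_get_mpt_cmd(sc);
	if (cmd == NULL) {
		/* Pool exhausted: ask CAM to requeue the request. */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}
	cmd->ccb_ptr = ccb;
	/* ... build and fire the I/O ... */
}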
/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i], pmp,
			    "h2xop", i, j, hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}
/*
 * always FAF, we don't really care if the server wants to fail to
 * reclaim the node or not
 */
static int
puffs_vnop_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	struct puffs_node *pnode = VPTOPP(vp);
	boolean_t notifyserver = TRUE;

	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * first things first: check if someone is trying to reclaim the
	 * root vnode.  do not allow that to travel to userspace.
	 * Note that we don't need to take the lock similarly to
	 * puffs_root(), since there is only one of us.
	 */
	if (vp->v_flag & VROOT) {
		lockmgr(&pmp->pmp_lock, LK_EXCLUSIVE);
		KKASSERT(pmp->pmp_root != NULL);
		pmp->pmp_root = NULL;
		lockmgr(&pmp->pmp_lock, LK_RELEASE);
		notifyserver = FALSE;
	}

	/*
	 * purge info from kernel before issuing FAF, since we
	 * don't really know when we'll get around to it after
	 * that and someone might race us into node creation
	 */
	lockmgr(&pmp->pmp_lock, LK_EXCLUSIVE);
	LIST_REMOVE(pnode, pn_hashent);
	lockmgr(&pmp->pmp_lock, LK_RELEASE);

	if (notifyserver)
		callreclaim(MPTOPUFFSMP(vp->v_mount), VPTOPNC(vp));

	puffs_putvnode(vp);
	vp->v_data = NULL;

	return 0;
}
/*
 * Per-port thread helper.  This helper thread is responsible for
 * atomically retrieving and clearing the signal mask and calling
 * the machine-independent driver core.
 *
 * MPSAFE
 */
static void
ahci_port_thread(void *arg)
{
	struct ahci_port *ap = arg;
	int mask;

	/*
	 * The helper thread is responsible for the initial port init,
	 * so all the ports can be inited in parallel.
	 *
	 * We also run the state machine which should do all probes.
	 * Since CAM is not attached yet we will not get out-of-order
	 * SCSI attachments.
	 */
	ahci_os_lock_port(ap);
	ahci_port_init(ap);
	atomic_clear_int(&ap->ap_signal, AP_SIGF_THREAD_SYNC);
	wakeup(&ap->ap_signal);
	ahci_port_state_machine(ap, 1);
	ahci_os_unlock_port(ap);
	atomic_clear_int(&ap->ap_signal, AP_SIGF_INIT);
	wakeup(&ap->ap_signal);

	/*
	 * Then loop on the helper core.
	 */
	mask = ap->ap_signal;
	while ((mask & AP_SIGF_STOP) == 0) {
		atomic_clear_int(&ap->ap_signal, mask);
		ahci_port_thread_core(ap, mask);
		lockmgr(&ap->ap_sig_lock, LK_EXCLUSIVE);
		if (ap->ap_signal == 0) {
			lksleep(&ap->ap_thread, &ap->ap_sig_lock, 0,
				"ahport", 0);
		}
		lockmgr(&ap->ap_sig_lock, LK_RELEASE);
		mask = ap->ap_signal;
	}
	ap->ap_thread = NULL;
}
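/*
 * Hedged sketch of the producer side (inferred from the loop above, not
 * verbatim source): a signaller sets its bits under ap_sig_lock and wakes
 * the helper on the same channel it sleeps on (&ap->ap_thread), so a
 * signal posted while the helper is busy in ahci_port_thread_core() is
 * picked up on the next loop iteration instead of being lost.
 */
static void
example_signal_port_thread(struct ahci_port *ap, int mask)
{
	lockmgr(&ap->ap_sig_lock, LK_EXCLUSIVE);
	atomic_set_int(&ap->ap_signal, mask);
	wakeup(&ap->ap_thread);
	lockmgr(&ap->ap_sig_lock, LK_RELEASE);
}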
static int
ng_btsocket_hci_raw_node_rcvdata(hook_p hook, item_p item)
{
	struct mbuf *nam = NULL;
	int error;

	/*
	 * Checking for an empty sockets list creates a LOR when both the
	 * sender and receiver devices are connected to the same host, so
	 * that check is removed for now.
	 */

	MGET(nam, M_NOWAIT, MT_SONAME);
	if (nam != NULL) {
		struct sockaddr_hci *sa = mtod(nam, struct sockaddr_hci *);

		nam->m_len = sizeof(struct sockaddr_hci);

		sa->hci_len = sizeof(*sa);
		sa->hci_family = AF_BLUETOOTH;
		strlcpy(sa->hci_node, NG_PEER_NODE_NAME(hook),
		    sizeof(sa->hci_node));

		NGI_GET_M(item, nam->m_next);
		NGI_M(item) = nam;

		lockmgr(&ng_btsocket_hci_raw_queue_lock, LK_EXCLUSIVE);
		if (NG_BT_ITEMQ_FULL(&ng_btsocket_hci_raw_queue)) {
			NG_BTSOCKET_HCI_RAW_ERR(
			    "%s: Input queue is full\n", __func__);

			NG_BT_ITEMQ_DROP(&ng_btsocket_hci_raw_queue);
			NG_FREE_ITEM(item);
			error = ENOBUFS;
		} else {
			ng_ref_item(item);
			NG_BT_ITEMQ_ENQUEUE(&ng_btsocket_hci_raw_queue, item);
			error = ng_btsocket_hci_raw_wakeup_input_task();
		}
		lockmgr(&ng_btsocket_hci_raw_queue_lock, LK_RELEASE);
	} else {
int
ichsmb_recvb(device_t dev, u_char slave, char *byte)
{
	const sc_p sc = device_get_softc(dev);
	int smb_error;

	DBG("slave=0x%02x\n", slave);
	KASSERT(sc->ich_cmd == -1,
	    ("%s: ich_cmd=%d\n", __func__, sc->ich_cmd));
	lockmgr(&sc->mutex, LK_EXCLUSIVE);
	sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE;
	bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_READ);
	bus_write_1(sc->io_res, ICH_HST_CNT,
	    ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
	if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR)
		*byte = bus_read_1(sc->io_res, ICH_D0);
	lockmgr(&sc->mutex, LK_RELEASE);
	DBG("smb_error=%d byte=0x%02x\n", smb_error, (u_char)*byte);
	return (smb_error);
}
/**
 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics).
 * This function disables all interrupt sources on the GPU.
 */
void
radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned i;

	lockmgr(&rdev->irq.lock, LK_EXCLUSIVE);
	/* Disable *all* interrupts */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		atomic_set(&rdev->irq.ring_int[i], 0);
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] = false;
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
		atomic_set(&rdev->irq.pflip[i], 0);
		rdev->irq.afmt[i] = false;
	}
	radeon_irq_set(rdev);
	lockmgr(&rdev->irq.lock, LK_RELEASE);
	/* Clear bits */
	radeon_irq_process(rdev);
}
static int
g_modem_detach(device_t dev)
{
	struct g_modem_softc *sc = device_get_softc(dev);

	DPRINTF("\n");

	lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
	usb_callout_stop(&sc->sc_callout);
	usb_callout_stop(&sc->sc_watchdog);
	lockmgr(&sc->sc_lock, LK_RELEASE);

	usbd_transfer_unsetup(sc->sc_xfer, G_MODEM_N_TRANSFER);

	usb_callout_drain(&sc->sc_callout);
	usb_callout_drain(&sc->sc_watchdog);

	lockuninit(&sc->sc_lock);

	return (0);
}
int
cam_sim_cond_lock(sim_lock *lock)
{
	if (lock == &sim_mplock) {
		get_mplock();
		return(1);
	} else if (lockstatus(lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(lock, LK_EXCLUSIVE);
		return(1);
	}
	return(0);
}
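/*
 * Hedged usage sketch (not from the source above): the return value of
 * cam_sim_cond_lock() records whether the lock was actually taken and
 * must be fed back to cam_sim_cond_unlock(), so an already-held lock is
 * neither recursed on nor released out from under the caller.
 * example_sim_work() is a hypothetical placeholder.
 */
static void
example_locked_section(sim_lock *lock)
{
	int doun;

	doun = cam_sim_cond_lock(lock);
	example_sim_work();		/* hypothetical work under the lock */
	cam_sim_cond_unlock(lock, doun);
}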
int
drm_read(struct dev_read_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	int ioflag = ap->a_ioflag;
	struct drm_file *file_priv;
	struct drm_device *dev;
	struct drm_pending_event *e;
	int error;

	error = devfs_get_cdevpriv(ap->a_fp, (void **)&file_priv);
	if (error != 0) {
		DRM_ERROR("can't find authenticator\n");
		return (EINVAL);
	}
	dev = drm_get_device_from_kdev(kdev);

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	while (list_empty(&file_priv->event_list)) {
		if ((ioflag & O_NONBLOCK) != 0) {
			error = EAGAIN;
			goto out;
		}
		error = lksleep(&file_priv->event_space, &dev->event_lock,
		    PCATCH, "drmrea", 0);
		if (error != 0)
			goto out;
	}
	while (drm_dequeue_event(dev, file_priv, uio, &e)) {
		/* Drop the lock while copying the event out to userspace. */
		lockmgr(&dev->event_lock, LK_RELEASE);
		error = uiomove((caddr_t)e->event, e->event->length, uio);
		e->destroy(e);
		if (error != 0)
			return (error);
		lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	}
out:
	lockmgr(&dev->event_lock, LK_RELEASE);
	return (error);
}
static void
tws_drain_busy_queue(struct tws_softc *sc)
{
	struct tws_request *req;

	TWS_TRACE_DEBUG(sc, "entry", 0, 0);

	/* q_lock and sim_lock are never held together here, avoiding a LOR. */
	lockmgr(&sc->q_lock, LK_EXCLUSIVE);
	req = tws_q_remove_tail(sc, TWS_BUSY_Q);
	lockmgr(&sc->q_lock, LK_RELEASE);
	while (req) {
		callout_stop(req->ccb_ptr->ccb_h.timeout_ch);
		tws_unmap_request(req->sc, req);

		TWS_TRACE_DEBUG(sc, "drained", 0, req->request_id);

		lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
		req->ccb_ptr->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(req->ccb_ptr);
		lockmgr(&sc->sim_lock, LK_RELEASE);

		lockmgr(&sc->q_lock, LK_EXCLUSIVE);
		tws_q_insert_tail(sc, req, TWS_FREE_Q);
		req = tws_q_remove_tail(sc, TWS_BUSY_Q);
		lockmgr(&sc->q_lock, LK_RELEASE);
	}
}
static int
ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
		    struct ttm_buffer_object *bo,
		    const struct ttm_place *place,
		    struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *)man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
	unsigned long lpfn;
	int ret;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		aflags = DRM_MM_CREATE_TOP;

	lockmgr(&rman->lock, LK_EXCLUSIVE);
	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn,
					  DRM_MM_SEARCH_BEST,
					  aflags);
	lockmgr(&rman->lock, LK_RELEASE);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

	/*
	 * Allocation failure is reported to the caller through a NULL
	 * mem->mm_node, not through the return value.
	 */
	return 0;
}
int
ttm_base_object_init(struct ttm_object_file *tfile,
		     struct ttm_base_object *base,
		     bool shareable,
		     enum ttm_object_type object_type,
		     void (*refcount_release) (struct ttm_base_object **),
		     void (*ref_obj_release) (struct ttm_base_object *,
					      enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	lockinit(&tdev->object_lock, "ttmbao", 0, LK_CANRECURSE);
	lockmgr(&tdev->object_lock, LK_EXCLUSIVE);
	ret = drm_ht_just_insert_please(&tdev->object_hash,
					&base->hash,
					(unsigned long)base, 31, 0, 0);
	lockmgr(&tdev->object_lock, LK_RELEASE);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	lockmgr(&tdev->object_lock, LK_EXCLUSIVE);
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	lockmgr(&tdev->object_lock, LK_RELEASE);
out_err0:
	return ret;
}
linker_file_t
linker_make_file(const char* pathname, void* priv,
		 struct linker_file_ops* ops)
{
	linker_file_t lf = 0;
	int namelen;
	const char *filename;

	filename = rindex(pathname, '/');
	if (filename && filename[1])
		filename++;
	else
		filename = pathname;

	KLD_DPF(FILE, ("linker_make_file: new file, filename=%s\n", filename));
	lockmgr(&lock, LK_EXCLUSIVE|LK_RETRY, 0, curproc);
	namelen = strlen(filename) + 1;
	lf = malloc(sizeof(struct linker_file) + namelen, M_LINKER, M_WAITOK);
	if (!lf)
		goto out;
	bzero(lf, sizeof(*lf));
	lf->refs = 1;
	lf->userrefs = 0;
	lf->filename = (char*) (lf + 1);
	strcpy(lf->filename, filename);
	lf->id = next_file_id++;
	lf->ndeps = 0;
	lf->deps = NULL;
	STAILQ_INIT(&lf->common);
	TAILQ_INIT(&lf->modules);

	lf->priv = priv;
	lf->ops = ops;
	TAILQ_INSERT_TAIL(&files, lf, link);

out:
	lockmgr(&lock, LK_RELEASE, 0, curproc);
	return lf;
}
int
sim_lock_sleep(void *ident, int flags, const char *wmesg, int timo,
	       sim_lock *lock)
{
	int retval;

	if (lock != &sim_mplock) {
		/* lock should be held already */
		KKASSERT(lockstatus(lock, curthread) != 0);
		tsleep_interlock(ident, flags);
		lockmgr(lock, LK_RELEASE);
		retval = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	} else {
		retval = tsleep(ident, flags, wmesg, timo);
	}

	if (lock != &sim_mplock) {
		lockmgr(lock, LK_EXCLUSIVE);
	}

	return (retval);
}
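/*
 * Hedged usage sketch: tsleep_interlock() queues the thread on ident
 * before the lock is dropped, so a wakeup() arriving between the
 * LK_RELEASE and the tsleep() above is not lost.  A caller typically
 * wraps a predicate loop; "struct example_softc" and its fields are
 * hypothetical.
 */
static void
example_wait_not_busy(struct example_softc *sc)
{
	/* Re-test the predicate after every wakeup, as with any sleep. */
	while (sc->busy)
		sim_lock_sleep(&sc->busy, 0, "simslp", 0, sc->lock);
}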
/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
			 int notidx)
{
	hammer2_xop_group_t *xgrp;
	hammer2_thread_t *thr;
	hammer2_pfs_t *pmp;
	int g;
	int i;

	pmp = xop->ip->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	g = pmp->xop_iterator++;
	g = g & HAMMER2_XOPGROUPS_MASK;
	xgrp = &pmp->xop_groups[g];
	xop->func = func;
	xop->xgrp = xgrp;

	/*
	 * XXX do cluster_resolve or cluster_check here, only start
	 * synchronized elements.
	 */
	for (i = 0; i < xop->ip->cluster.nchains; ++i) {
		thr = &xgrp->thrs[i];
		if (thr->td && i != notidx) {
			lockmgr(&thr->lk, LK_EXCLUSIVE);
			if (thr->td &&
			    (thr->flags & HAMMER2_THREAD_STOP) == 0) {
				atomic_set_int(&xop->run_mask, 1U << i);
				atomic_set_int(&xop->chk_mask, 1U << i);
				TAILQ_INSERT_TAIL(&thr->xopq, xop,
						  collect[i].entry);
			}
			lockmgr(&thr->lk, LK_RELEASE);
			wakeup(&thr->flags);
		}
	}
}