static int
__zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv == NULL)
		return (ENXIO);

	if (zv->zv_open_count > 0)
		return (EBUSY);

	zvol_remove(zv);
	zvol_free(zv);

	return (0);
}
/*
 * Find the multicast address `addr', return B_TRUE if it is one that
 * we receive. If `remove', remove it from the set received.
 */
static boolean_t
xnbo_mcast_find(xnb_t *xnbp, ether_addr_t *addr, boolean_t remove)
{
	xnbo_t *xnbop = xnbp->xnb_flavour_data;
	xmca_t *prev, *del, *this;

	ASSERT(MUTEX_HELD(&xnbp->xnb_state_lock));
	ASSERT(xnbop->o_promiscuous == B_FALSE);

	prev = del = NULL;

	this = xnbop->o_mca;
	while (this != NULL) {
		if (bcmp(&this->addr, addr, sizeof (this->addr)) == 0) {
			del = this;
			if (remove) {
				if (prev == NULL)
					xnbop->o_mca = this->next;
				else
					prev->next = this->next;
			}
			break;
		}

		prev = this;
		this = this->next;
	}

	if (del == NULL)
		return (B_FALSE);

	if (remove) {
		DTRACE_PROBE3(mcast_remove,
		    (char *), "remove",
		    (void *), xnbp,
		    (etheraddr_t *), del->addr);
		mac_multicast_remove(xnbop->o_mch, del->addr);
		kmem_free(del, sizeof (*del));
	}

	return (B_TRUE);
}
/*
 * Rename a zfs_snapentry_t in the zfs_snapshots_by_name tree. The structure
 * is removed, renamed, and added back at the new, correct location in the
 * tree.
 */
static int
zfsctl_snapshot_rename(char *old_snapname, char *new_snapname)
{
	zfs_snapentry_t *se;

	ASSERT(MUTEX_HELD(&zfs_snapshot_lock));

	se = zfsctl_snapshot_find_by_name(old_snapname);
	if (se == NULL)
		return (ENOENT);

	zfsctl_snapshot_remove(se);
	strfree(se->se_name);
	se->se_name = strdup(new_snapname);
	zfsctl_snapshot_add(se);
	zfsctl_snapshot_rele(se);

	return (0);
}
/*
 * The port_remove_fd_object() function frees all resources associated with
 * the delivered portfd_t structure. Returns 1 if the port_kevent was found
 * and removed from the port queue.
 */
int
port_remove_fd_object(portfd_t *pfd, port_t *pp, port_fdcache_t *pcp)
{
	port_queue_t *portq;
	polldat_t *pdp = PFTOD(pfd);
	port_kevent_t *pkevp;
	int error;
	int removed = 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	if (pdp->pd_php != NULL) {
		pollhead_delete(pdp->pd_php, pdp);
		pdp->pd_php = NULL;
	}

	pkevp = pdp->pd_portev;
	portq = &pp->port_queue;
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		if (portq->portq_getn && portq->portq_tnent) {
			/*
			 * Move events from the temporary "get" queue
			 * back to the port queue.
			 */
			port_push_eventq(portq);
		}
		/* cleanup merged port queue */
		port_remove_event_doneq(pkevp, portq);
		removed = 1;
	}
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);

	if (pkevp->portkev_callback) {
		(void) (*pkevp->portkev_callback)(pkevp->portkev_arg,
		    &error, pkevp->portkev_pid, PORT_CALLBACK_DISSOCIATE,
		    pkevp);
	}
	port_free_event_local(pkevp, 0);

	/* remove polldat struct */
	port_pcache_remove_fd(pcp, pfd);

	return (removed);
}
/*
 * Search for the path with "offline:stop" status and the minimum path number.
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : M
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_search_min_stop_path(void)
{
	upath_t *upath, *min_upath = NULL;
	lpath_t *lpath;
	int min_no = UNDEFINED;
	int active_flag = 0;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
	ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));

	upath = oplmsu_uinst->first_upath;
	while (upath) {
		if ((upath->status == MSU_PSTAT_ACTIVE) &&
		    (upath->traditional_status == MSU_ACTIVE)) {
			active_flag = 1;
			break;
		} else if ((upath->status == MSU_PSTAT_STOP) &&
		    (upath->traditional_status == MSU_STOP)) {
			if (upath->lpath != NULL) {
				if ((min_no == UNDEFINED) ||
				    (upath->path_no < min_no)) {
					lpath = upath->lpath;
					mutex_enter(&oplmsu_uinst->l_lock);
					if (lpath->status == MSU_EXT_NOTUSED) {
						min_upath = upath;
						min_no = upath->path_no;
					}
					mutex_exit(&oplmsu_uinst->l_lock);
				}
			}
		}
		upath = upath->u_next;
	}

	/*
	 * Only promote a candidate when no active path exists and a usable
	 * stopped path was actually found; min_upath is NULL otherwise.
	 */
	if ((active_flag == 0) && (min_upath != NULL)) {
		lpath = min_upath->lpath;
		mutex_enter(&oplmsu_uinst->l_lock);
		lpath->src_upath = NULL;
		lpath->status = MSU_EXT_ACTIVE_CANDIDATE;
		mutex_exit(&oplmsu_uinst->l_lock);
	}
}
/*
 * Check whether lower path is usable by lower path info table address
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : M
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_check_lpath_usable(void)
{
	lpath_t *lpath;
	int rval = SUCCESS;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
	ASSERT(MUTEX_HELD(&oplmsu_uinst->l_lock));

	lpath = oplmsu_uinst->first_lpath;
	while (lpath) {
		if ((lpath->hndl_uqueue != NULL) || (lpath->hndl_mp != NULL)) {
			rval = BUSY;
			break;
		}
		lpath = lpath->l_next;
	}

	return (rval);
}
/*
 * Find the next available range of ZVOL_MINORS minor numbers. The
 * zvol_state_list is kept in ascending minor order so we simply need
 * to scan the list for the first gap in the sequence. This allows us
 * to recycle minor numbers as devices are created and removed.
 */
static int
zvol_find_minor(unsigned *minor)
{
	zvol_state_t *zv;

	*minor = 0;
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
		if (MINOR(zv->zv_dev) != MINOR(*minor))
			break;
	}

	/* All minors are in use */
	if (*minor >= (1 << MINORBITS))
		return (ENXIO);

	return (0);
}
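/*
 * A minimal userland sketch of the first-gap scan used by zvol_find_minor()
 * above: given IDs handed out in ascending order with a fixed stride, the
 * first position whose expected value is missing is the lowest reusable
 * slot. The names here (STRIDE, find_first_gap) are hypothetical and exist
 * only for this illustration; this is not part of the zvol code.
 */
#include <stdio.h>

#define	STRIDE	16	/* stands in for ZVOL_MINORS */

static unsigned
find_first_gap(const unsigned *used, int n)
{
	unsigned expected = 0;
	int i;

	for (i = 0; i < n; i++, expected += STRIDE) {
		if (used[i] != expected)
			break;	/* gap found: expected is free */
	}
	return (expected);
}

int
main(void)
{
	/* 16 and 48 are taken; 0 and 32 were recycled */
	unsigned used[] = { 16, 48 };

	/* prints 0: the first stride-aligned slot not in use */
	(void) printf("%u\n", find_first_gap(used, 2));
	return (0);
}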
/*
 * cyclic_expand() will cross call onto the CPU to perform the actual
 * expand operation.
 */
static void
cyclic_expand(cyc_cpu_t *cpu)
{
	cyc_index_t new_size, old_size;
	cyc_index_t *new_heap, *old_heap;
	cyclic_t *new_cyclics, *old_cyclics;
	cyc_xcallarg_t arg;
	cyc_backend_t *be = cpu->cyp_backend;

	ASSERT(MUTEX_HELD(&cpu_lock));

	old_heap = cpu->cyp_heap;
	old_cyclics = cpu->cyp_cyclics;

	if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) {
		new_size = CY_DEFAULT_PERCPU;
		ASSERT(old_heap == NULL && old_cyclics == NULL);
	}

	/*
	 * Check that the new_size is a power of 2.
	 */
	ASSERT(((new_size - 1) & new_size) == 0);

	new_heap = malloc(sizeof (cyc_index_t) * new_size, M_CYCLIC, M_WAITOK);
	new_cyclics = malloc(sizeof (cyclic_t) * new_size, M_CYCLIC,
	    M_ZERO | M_WAITOK);

	arg.cyx_cpu = cpu;
	arg.cyx_heap = new_heap;
	arg.cyx_cyclics = new_cyclics;
	arg.cyx_size = new_size;

	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
	    (cyc_func_t)cyclic_expand_xcall, &arg);

	if (old_cyclics != NULL) {
		ASSERT(old_heap != NULL);
		ASSERT(old_size != 0);
		free(old_cyclics, M_CYCLIC);
		free(old_heap, M_CYCLIC);
	}
}
/*
 * taskq_ent_alloc()
 *
 * Allocates a new taskq_ent_t structure either from the free list or from the
 * cache. Returns NULL if it can't be allocated.
 *
 * Assumes: tq->tq_lock is held.
 */
static taskq_ent_t *
taskq_ent_alloc(taskq_t *tq, int flags)
{
	int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	taskq_ent_t *tqe;

	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/*
	 * TQ_NOALLOC allocations are allowed to use the freelist, even if
	 * we are below tq_minalloc.
	 */
	if ((tqe = tq->tq_freelist) != NULL &&
	    ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
		tq->tq_freelist = tqe->tqent_next;
	} else {
		if (flags & TQ_NOALLOC)
			return (NULL);

		mutex_exit(&tq->tq_lock);
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (kmflags & KM_NOSLEEP) {
				mutex_enter(&tq->tq_lock);
				return (NULL);
			}
			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller. So, we just delay for one second
			 * to throttle the allocation rate.
			 */
			delay(hz);
		}
		tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);
		mutex_enter(&tq->tq_lock);
		if (tqe != NULL)
			tq->tq_nalloc++;
	}
	return (tqe);
}
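/*
 * A single-threaded userland sketch (hypothetical names: pool_t,
 * pool_ent_alloc) of the allocation-throttle pattern described in
 * taskq_ent_alloc() above: when a cap is reached and the caller may block,
 * drop the lock, sleep briefly to slow the allocation rate, and then
 * allocate anyway rather than risk deadlock by waiting for entries to be
 * freed. This is an illustration only, not the taskq implementation.
 */
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>

typedef struct pool {
	pthread_mutex_t lock;
	int nalloc;
	int maxalloc;
} pool_t;

static void *
pool_ent_alloc(pool_t *p)
{
	void *ent;

	(void) pthread_mutex_lock(&p->lock);
	if (p->nalloc >= p->maxalloc) {
		/*
		 * Over the cap: drop the lock and throttle instead of
		 * waiting for a free entry, which could deadlock if the
		 * caller is the one that must free entries.
		 */
		(void) pthread_mutex_unlock(&p->lock);
		(void) sleep(1);
		(void) pthread_mutex_lock(&p->lock);
	}
	ent = malloc(64);
	if (ent != NULL)
		p->nalloc++;
	(void) pthread_mutex_unlock(&p->lock);
	return (ent);
}

int
main(void)
{
	pool_t p = { PTHREAD_MUTEX_INITIALIZER, 0, 4 };
	int i;

	/* the fifth allocation sleeps ~1s before proceeding */
	for (i = 0; i < 5; i++)
		free(pool_ent_alloc(&p));
	return (0);
}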
/*
 * Write to the PCF8591 chip.
 * byteaddress = chip type base address | chip offset address.
 */
int
ehc_write_pcf8591(struct ehc_envcunit *ehcp, int byteaddress, int channel,
    int autoinc, int amode, int aenable, uint8_t *buf, int size)
{
	int i, status;
	register uint8_t control;

	ASSERT((byteaddress & 0x1) == 0);
	ASSERT(MUTEX_HELD(&ehcp->umutex));

	control = ((aenable << 6) | (amode << 4) | (autoinc << 2) | channel);

	status = ehc_start_pcf8584(ehcp, byteaddress);
	if (status != EHC_SUCCESS) {
		if (status == EHC_NO_SLAVE_ACK) {
			/*
			 * Send the "stop" condition.
			 */
			ehc_stop_pcf8584(ehcp);
		}
		return (EHC_FAILURE);
	}

	if ((status = ehc_write_pcf8584(ehcp, control)) != EHC_SUCCESS) {
		if (status == EHC_NO_SLAVE_ACK)
			ehc_stop_pcf8584(ehcp);
		return (EHC_FAILURE);
	}

	for (i = 0; i < size; i++) {
		status = ehc_write_pcf8584(ehcp, buf[i]);
		if (status != EHC_SUCCESS) {
			if (status == EHC_NO_SLAVE_ACK)
				ehc_stop_pcf8584(ehcp);
			return (EHC_FAILURE);
		}
	}

	ehc_stop_pcf8584(ehcp);

	return (EHC_SUCCESS);
}
/*
 * Ensure the ZAP is flushed, then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
	struct block_device *bdev;
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error)
		return (error);

	error = dmu_free_long_range(zv->zv_objset,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	if (error)
		return (error);

	zv->zv_volsize = volsize;
	zv->zv_changed = 1;

	bdev = bdget_disk(zv->zv_disk, 0);
	if (bdev == NULL)
		return (EIO);

	error = check_disk_change(bdev);
	ASSERT3U(error, !=, 0);
	bdput(bdev);

	return (0);
}
/*
 * Write to the PCF8574A chip.
 * byteaddress = chip type base address | chip offset address.
 */
int
ehc_write_pcf8574a(struct ehc_envcunit *ehcp, int byteaddress, uint8_t *buf,
    int size)
{
	int i;
	int status;

	ASSERT((byteaddress & 0x1) == 0);
	ASSERT(MUTEX_HELD(&ehcp->umutex));

	/*
	 * Put the bus into the start condition (write).
	 */
	if ((status = ehc_start_pcf8584(ehcp, byteaddress)) != EHC_SUCCESS) {
		if (status == EHC_NO_SLAVE_ACK) {
			/*
			 * Send the "stop" condition.
			 */
			ehc_stop_pcf8584(ehcp);
		}
		return (EHC_FAILURE);
	}

	/*
	 * Send the data - poll as needed.
	 */
	for (i = 0; i < size; i++) {
		if ((status = ehc_write_pcf8584(ehcp, buf[i])) !=
		    EHC_SUCCESS) {
			if (status == EHC_NO_SLAVE_ACK)
				ehc_stop_pcf8584(ehcp);
			return (EHC_FAILURE);
		}
	}

	/*
	 * Transmission complete - generate stop condition and
	 * put device back into slave receiver mode.
	 */
	ehc_stop_pcf8584(ehcp);

	return (EHC_SUCCESS);
}
/*
 * Similar to but more general than ip_sctp's conn_match().
 *
 * Matches sets of addresses as follows: if the argument addr set is
 * a complete subset of the corresponding addr set in the sctp_t, it
 * is a match.
 *
 * Caller must hold tf->tf_lock.
 *
 * Returns with a SCTP_REFHOLD sctp structure. Caller must do a SCTP_REFRELE.
 */
sctp_t *
sctp_lookup(sctp_t *sctp1, in6_addr_t *faddr, sctp_tf_t *tf, uint32_t *ports,
    int min_state)
{
	sctp_t *sctp;
	sctp_faddr_t *fp;

	ASSERT(MUTEX_HELD(&tf->tf_lock));

	for (sctp = tf->tf_sctp; sctp != NULL;
	    sctp = sctp->sctp_conn_hash_next) {
		if (*ports != sctp->sctp_ports ||
		    sctp->sctp_state < min_state) {
			continue;
		}

		/* check for faddr match */
		for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
			if (IN6_ARE_ADDR_EQUAL(faddr, &fp->faddr))
				break;
		}

		if (fp == NULL) {
			/* no faddr match; keep looking */
			continue;
		}

		/* check for laddr subset match */
		if (sctp_compare_saddrs(sctp1, sctp) <= SCTP_ADDR_SUBSET)
			goto done;

		/* no match; continue searching */
	}

done:
	if (sctp != NULL)
		SCTP_REFHOLD(sctp);
	return (sctp);
}
static void
trim_map_free_locked(trim_map_t *tm, uint64_t start, uint64_t end,
    uint64_t txg)
{
	zio_t zsearch, *zs;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	zsearch.io_offset = start;
	zsearch.io_size = end - start;

	zs = avl_find(&tm->tm_inflight_writes, &zsearch, NULL);
	if (zs == NULL) {
		trim_map_segment_add(tm, start, end, txg);
		return;
	}
	if (start < zs->io_offset)
		trim_map_free_locked(tm, start, zs->io_offset, txg);
	if (zs->io_offset + zs->io_size < end)
		trim_map_free_locked(tm, zs->io_offset + zs->io_size, end,
		    txg);
}
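/*
 * A self-contained userland sketch of the splitting recursion in
 * trim_map_free_locked() above: when a range to be freed overlaps a blocking
 * interval, recurse on the pieces to the left and right of the blocker and
 * record only the non-overlapping remainders. All names here (interval_t,
 * find_overlap, free_range) are hypothetical; find_overlap() stands in for
 * the AVL lookup of an in-flight write.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct interval {
	uint64_t start;
	uint64_t end;
} interval_t;

/* one hard-coded blocker; in the driver this is an AVL-tree lookup */
static const interval_t blocker = { 100, 200 };

static const interval_t *
find_overlap(uint64_t start, uint64_t end)
{
	if (start < blocker.end && blocker.start < end)
		return (&blocker);
	return (NULL);
}

static void
free_range(uint64_t start, uint64_t end)
{
	const interval_t *b = find_overlap(start, end);

	if (b == NULL) {
		(void) printf("record segment [%llu, %llu)\n",
		    (unsigned long long)start, (unsigned long long)end);
		return;
	}
	if (start < b->start)
		free_range(start, b->start);	/* left remainder */
	if (b->end < end)
		free_range(b->end, end);	/* right remainder */
}

int
main(void)
{
	/* records [0, 100) and [200, 300), skipping the blocker */
	free_range(0, 300);
	return (0);
}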
/*
 * The port_remove_portfd() function dissociates the port from the fd
 * and vice versa.
 */
static void
port_remove_portfd(polldat_t *pdp, port_fdcache_t *pcp)
{
	port_t *pp;
	file_t *fp;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	pp = pdp->pd_portev->portkev_port;
	fp = getf(pdp->pd_fd);
	/*
	 * If we did not get the fp for pd_fd but its portfd_t
	 * still exists in the cache, it means the pd_fd is being
	 * closed by some other thread which will also free the portfd_t.
	 */
	if (fp != NULL) {
		delfd_port(pdp->pd_fd, PDTOF(pdp));
		releasef(pdp->pd_fd);
		(void) port_remove_fd_object(PDTOF(pdp), pp, pcp);
	}
}
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_tree_t *qtt;

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	qtt = vdev_queue_type_tree(vq, zio->io_type);
	if (qtt != NULL)
		avl_add(qtt, zio);

#ifdef illumos
	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
#endif
}
/*
 * scf_timer_value_get()
 *
 * Description: Timer value get subroutine.
 */
uint32_t
scf_timer_value_get(int tmcd)
{
#undef	SCF_FUNC_NAME
#define	SCF_FUNC_NAME		"scf_timer_value_get() "
	uint32_t ret = 0;	/* Return value */

	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));

	SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start tmcd = %d",
	    tmcd);

	/* Check timer code */
	if (tmcd < SCF_TIMERCD_MAX) {
		/* Set timer value */
		ret = scf_timer[tmcd].value;
	}

	SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end return = %d", ret);
	return (ret);
}
void
fscache_list_gc(cachefscache_t *cachep)
{
	struct fscache *next, *fscp;

	ASSERT(MUTEX_HELD(&cachep->c_fslistlock));

	for (fscp = cachep->c_fslist; fscp != NULL; fscp = next) {
		next = fscp->fs_next;
		mutex_enter(&fscp->fs_fslock);
		if (((fscp->fs_flags & CFS_FS_MOUNTED) == 0) &&
		    (fscp->fs_ref == 0)) {
			mutex_exit(&fscp->fs_fslock);
			fscache_list_remove(cachep, fscp);
			fscache_destroy(fscp);
		} else {
			mutex_exit(&fscp->fs_fslock);
		}
	}
}
void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	range_seg_t *rs;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(rt->rt_lock));

	if (rt->rt_ops != NULL)
		rt->rt_ops->rtop_vacate(rt, rt->rt_arg);

	while ((rs = avl_destroy_nodes(&rt->rt_root, &cookie)) != NULL) {
		if (func != NULL)
			func(arg, rs->rs_start, rs->rs_end - rs->rs_start);
		kmem_cache_free(range_seg_cache, rs);
	}

	bzero(rt->rt_histogram, sizeof (rt->rt_histogram));
	rt->rt_space = 0;
}
/*
 * Rename a block device minor node for the specified volume.
 */
static void
zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
	int readonly = get_disk_ro(zv->zv_disk);

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));

	/*
	 * The block device's read-only state is briefly changed causing
	 * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
	 * the name change and fixes the symlinks. This does not change
	 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
	 * changes. This would normally be done using kobject_uevent() but
	 * that is a GPL-only symbol which is why we need this workaround.
	 */
	set_disk_ro(zv->zv_disk, !readonly);
	set_disk_ro(zv->zv_disk, readonly);
}
/*
 * Mask errors to continue dmu_objset_find() traversal.
 */
static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
	uint64_t snapdev;
	int error;

	ASSERT0(MUTEX_HELD(&spa_namespace_lock));

	error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
	if (error)
		return (0);

	/*
	 * Given the name and the 'snapdev' property, create device minor
	 * nodes with the linkages to zvols/snapshots as needed.
	 * If the name represents a zvol, create a minor node for the zvol,
	 * then check if its snapshots are 'visible', and if so, iterate
	 * over the snapshots and create device minor nodes for those.
	 */
	if (strchr(dsname, '@') == NULL) {
		/* create minor for the 'dsname' explicitly */
		error = zvol_create_minor_impl(dsname);
		if ((error == 0 || error == EEXIST) &&
		    (snapdev == ZFS_SNAPDEV_VISIBLE)) {
			fstrans_cookie_t cookie = spl_fstrans_mark();

			/*
			 * traverse snapshots only, do not traverse children,
			 * and skip the 'dsname'
			 */
			error = dmu_objset_find((char *)dsname,
			    zvol_create_snap_minor_cb, (void *)dsname,
			    DS_FIND_SNAPSHOTS);
			spl_fstrans_unmark(cookie);
		}
	} else {
		dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
		    dsname);
	}

	return (0);
}
int
cmt_pad_disable(pghw_type_t type)
{
	group_t *hwset;
	group_iter_t iter;
	pg_cmt_t *pg;
	pg_cmt_t *child;

	ASSERT(PGHW_IS_PM_DOMAIN(type));
	ASSERT(MUTEX_HELD(&cpu_lock));

	if ((hwset = pghw_set_lookup(type)) == NULL) {
		/*
		 * Unable to find any instances of the specified type
		 * of power domain.
		 */
		return (-1);
	}

	/*
	 * Iterate over the power domains, setting the default dispatcher
	 * policy for performance optimization (load balancing).
	 */
	group_iter_init(&iter);
	while ((pg = group_iterate(hwset, &iter)) != NULL) {
		/*
		 * If the power domain has a single child that implements
		 * a policy other than load balancing, promote the child
		 * above the power domain to ensure its policy dominates.
		 */
		if (pg->cmt_children != NULL &&
		    GROUP_SIZE(pg->cmt_children) == 1) {
			child = GROUP_ACCESS(pg->cmt_children, 0);
			if ((child->cmt_policy & CMT_BALANCE) == 0)
				cmt_hier_promote(child, NULL);
		}
		pg->cmt_policy = CMT_BALANCE;
	}

	return (0);
}
/*
 * Unbind all threads from the specified processor set, or from all
 * processor sets.
 */
static int
pset_unbind(psetid_t pset, void *projbuf, void *zonebuf, idtype_t idtype)
{
	psetid_t olbind;
	kthread_t *tp;
	int error = 0;
	int rval;
	proc_t *pp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (idtype == P_PSETID && cpupart_find(pset) == NULL)
		return (EINVAL);

	mutex_enter(&pidlock);
	for (pp = practive; pp != NULL; pp = pp->p_next) {
		mutex_enter(&pp->p_lock);
		tp = pp->p_tlist;
		/*
		 * Skip zombies and kernel processes, and processes in
		 * other zones, if called from a non-global zone.
		 */
		if (tp == NULL || (pp->p_flag & SSYS) ||
		    !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
			mutex_exit(&pp->p_lock);
			continue;
		}
		do {
			if ((idtype == P_PSETID && tp->t_bind_pset != pset) ||
			    (idtype == P_ALL && tp->t_bind_pset == PS_NONE))
				continue;
			rval = pset_bind_thread(tp, PS_NONE, &olbind,
			    projbuf, zonebuf);
			if (error == 0)
				error = rval;
		} while ((tp = tp->t_forw) != pp->p_tlist);
		mutex_exit(&pp->p_lock);
	}
	mutex_exit(&pidlock);
	return (error);
}
/*
 * hci1394_tlist_remove()
 *    This is an internal function which removes the given node from the list.
 *    The list MUST be locked before calling this function.
 */
static void
hci1394_tlist_remove(hci1394_tlist_t *list, hci1394_tlist_node_t *node)
{
	ASSERT(list != NULL);
	ASSERT(node != NULL);
	ASSERT(node->tln_on_list == B_TRUE);
	ASSERT(MUTEX_HELD(&list->tl_mutex));
	TNF_PROBE_0_DEBUG(hci1394_tlist_remove_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* if this is the only node on the list */
	if ((list->tl_head == node) && (list->tl_tail == node)) {
		list->tl_head = NULL;
		list->tl_tail = NULL;

	/* if the node is at the head of the list */
	} else if (list->tl_head == node) {
		list->tl_head = node->tln_next;
		node->tln_next->tln_prev = NULL;

	/* if the node is at the tail of the list */
	} else if (list->tl_tail == node) {
		list->tl_tail = node->tln_prev;
		node->tln_prev->tln_next = NULL;

	/* if the node is in the middle of the list */
	} else {
		node->tln_prev->tln_next = node->tln_next;
		node->tln_next->tln_prev = node->tln_prev;
	}

	/* Set state that this node has been removed from the list */
	node->tln_on_list = B_FALSE;

	/* cleanup the node's link pointers */
	node->tln_prev = NULL;
	node->tln_next = NULL;

	TNF_PROBE_0_DEBUG(hci1394_tlist_remove_exit,
	    HCI1394_TNF_HAL_STACK, "");
}
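/*
 * A minimal, self-contained version of the four-case unlink performed by
 * hci1394_tlist_remove() above (only node, head, tail, middle), on a toy
 * doubly-linked list. The types and names (dnode_t, dlist_t, dlist_remove)
 * are hypothetical and exist only for this illustration.
 */
#include <stddef.h>

typedef struct dnode {
	struct dnode *prev;
	struct dnode *next;
} dnode_t;

typedef struct dlist {
	dnode_t *head;
	dnode_t *tail;
} dlist_t;

static void
dlist_remove(dlist_t *list, dnode_t *node)
{
	if (list->head == node && list->tail == node) {
		/* only node on the list */
		list->head = NULL;
		list->tail = NULL;
	} else if (list->head == node) {
		/* node at the head */
		list->head = node->next;
		node->next->prev = NULL;
	} else if (list->tail == node) {
		/* node at the tail */
		list->tail = node->prev;
		node->prev->next = NULL;
	} else {
		/* node in the middle */
		node->prev->next = node->next;
		node->next->prev = node->prev;
	}
	node->prev = NULL;
	node->next = NULL;
}

int
main(void)
{
	dnode_t a = { NULL, NULL }, b = { NULL, NULL }, c = { NULL, NULL };
	dlist_t list = { &a, &c };

	a.next = &b; b.prev = &a;
	b.next = &c; c.prev = &b;

	dlist_remove(&list, &b);	/* middle case */
	dlist_remove(&list, &a);	/* head case */
	dlist_remove(&list, &c);	/* only-node case */
	return (0);
}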
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}
/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}
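/*
 * A userland sketch of the recursive name construction used by
 * dsl_dir_name() above: recursing to the parent first fills the buffer
 * root-first, and each level appends "/" plus its own component on the way
 * back out. The node type and names here (node_t, node_name) are
 * hypothetical, for illustration only.
 */
#include <stdio.h>
#include <string.h>

typedef struct node {
	struct node *parent;
	const char *name;
} node_t;

static void
node_name(const node_t *n, char *buf)
{
	if (n->parent != NULL) {
		node_name(n->parent, buf);	/* ancestors first */
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';			/* root: start empty */
	}
	(void) strcat(buf, n->name);
}

int
main(void)
{
	node_t root = { NULL, "pool" };
	node_t child = { &root, "fs" };
	node_t leaf = { &child, "snapdir" };
	char buf[64];

	node_name(&leaf, buf);
	(void) printf("%s\n", buf);	/* prints pool/fs/snapdir */
	return (0);
}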
/*
 * Mask errors to continue dmu_objset_find() traversal.
 */
static int
zvol_create_snap_minor_cb(const char *dsname, void *arg)
{
	const char *name = (const char *)arg;

	ASSERT0(MUTEX_HELD(&spa_namespace_lock));

	/* skip the designated dataset */
	if (name && strcmp(dsname, name) == 0)
		return (0);

	/* at this point, the dsname should name a snapshot */
	if (strchr(dsname, '@') == NULL) {
		dprintf("zvol_create_snap_minor_cb(): "
		    "%s is not a snapshot name\n", dsname);
	} else {
		(void) zvol_create_minor_impl(dsname);
	}

	return (0);
}
static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
#ifdef LINUX
	spa_t *spa = zio->io_spa;
	spa_stats_history_t *ssh = &spa->spa_stats.io_history;
#endif

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

#ifdef LINUX
	if (ssh->kstat != NULL) {
		mutex_enter(&ssh->lock);
		kstat_runq_enter(ssh->kstat->ks_data);
		mutex_exit(&ssh->lock);
	}
#endif
}
void *
thr1(void *arg)
{
	mutex_enter(MTX);

	/* Nondeterministically exercise one space-map entry point. */
	switch (__nondet_int()) {
	case 1:
		space_map_contains();
		break;
	case 2:
		space_map_walk();
		break;
	case 3:
		if (LOADING)
			space_map_load_wait();
		else if (!LOADED)
			space_map_load();
		else
			space_map_unload();
		break;
	case 6:
		space_map_alloc();
		break;
	case 7:
		space_map_sync();
		break;
	case 8:
		space_map_ref_generate_map();
		break;
	}

	ASSERT(MUTEX_HELD(MTX));
	mutex_exit(MTX);
	assert(1);
	return (0);
}
/*
 * find set in list
 */
static mhd_drive_set_t *
mhd_find_set(char *setname)
{
	uint_t i;

	/* check lock */
	assert(MUTEX_HELD(&mhd_set_mx));

	/* look for set */
	for (i = 0; (i < mhd_nset); ++i) {
		mhd_drive_set_t *sp = mhd_sets[i];

		if (strcmp(setname, sp->sr_name) == 0)
			return (sp);
	}

	/* not found */
	return (NULL);
}