/*
 * Take the memory-type manager's io_reserve mutex, unless the manager
 * uses the lock-free fast path for io reservations.
 *
 * Returns 0 on success, or -EINTR when an interruptible acquisition is
 * broken by a signal (sx_xlock_sig() returning nonzero).
 */
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{

	/* Fast path managers need no serialization here. */
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (!interruptible) {
		/* Uninterruptible sleep until the lock is ours. */
		sx_xlock(&man->io_reserve_mutex);
		return 0;
	}

	/* Interruptible acquire; map a signal interruption to -EINTR. */
	return (sx_xlock_sig(&man->io_reserve_mutex) ? -EINTR : 0);
}
/*
 * Debug/info helper: print every GEM object on one of the driver's
 * object lists into the supplied sbuf, one line per object, while
 * accumulating total object and GTT sizes.
 *
 * 'data' is an integer list selector (ACTIVE_LIST, INACTIVE_LIST,
 * PINNED_LIST, FLUSHING_LIST or DEFERRED_FREE_LIST) smuggled through a
 * void pointer.
 *
 * Returns EINTR if taking the device struct lock is interrupted by a
 * signal, EINVAL for an unknown list selector.
 *
 * NOTE(review): this definition appears truncated in this chunk — the
 * per-object loop closes, but the function's summary output, unlock and
 * final return are not visible here; verify against the full file.
 */
static int
i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	/* Recover the integer list selector from the opaque argument. */
	uintptr_t list = (uintptr_t)data;
	struct list_head *head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count;

	/* Interruptible lock; bail out with EINTR on signal. */
	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	/* Pick the list head matching the selector and print a heading. */
	switch (list) {
	case ACTIVE_LIST:
		sbuf_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		sbuf_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	case PINNED_LIST:
		sbuf_printf(m, "Pinned:\n");
		head = &dev_priv->mm.pinned_list;
		break;
	case FLUSHING_LIST:
		sbuf_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
	case DEFERRED_FREE_LIST:
		sbuf_printf(m, "Deferred free:\n");
		head = &dev_priv->mm.deferred_free_list;
		break;
	default:
		/* Unknown selector: drop the lock and report EINVAL. */
		DRM_UNLOCK(dev);
		return (EINVAL);
	}

	/* Walk the chosen list, describing each object and summing sizes. */
	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		sbuf_printf(m, "   ");
		describe_obj(m, obj);
		sbuf_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
int sblock(struct sockbuf *sb, int flags) { KASSERT((flags & SBL_VALID) == flags, ("sblock: flags invalid (0x%x)", flags)); if (flags & SBL_WAIT) { if ((sb->sb_flags & SB_NOINTR) || (flags & SBL_NOINTR)) { sx_xlock(&sb->sb_sx); return (0); } return (sx_xlock_sig(&sb->sb_sx)); } else { if (sx_try_xlock(&sb->sb_sx) == 0) return (EWOULDBLOCK); return (0); } }