/**
 * Create an object.
 **/
object_t *
object_create(object_t *parent)
{
	object_t *ret = xmalloc(sizeof(object_t));
	MATRIX_DECL_IDENT(ident);

	ret->parent = NULL;
	ret->type = OBJ_NODE;
	ret->name = NULL;
	ret->mat = NO_MATERIAL;
	ret->transform_cache = NULL;
	ret->private_transform = NULL;
	ret->meta = NULL;
	ret->meta_destructor = 0;
	ret->draw_distance = 0;
	ret->child_draw_distance = 0;

	ret->trans[0] = ret->trans[1] = ret->trans[2] = 0;
	ret->scale[0] = ret->scale[1] = ret->scale[2] = 1;
	quat_init(&ret->rot, 0, 1, 0, 0);

	ret->children = NULL;
	ret->child_count = 0;

	refcount_init(&ret->refcount);
	refcount_add_destructor(&ret->refcount, object_destructor, ret);

	object_apply_pretransform(ret, ident);

	if (parent)
		object_reparent(ret, parent);

	return ret;
}
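/*
 * A minimal usage sketch for object_create(). Only object_create() and
 * object_reparent() appear in this file; object_grab() is borrowed from the
 * draw-op code below, and object_ungrab() is a hypothetical release
 * counterpart assumed to drop the reference taken at creation.
 */
static void
object_create_example(void)
{
	object_t *root = object_create(NULL);
	object_t *child = object_create(root);	/* reparented under root */

	object_grab(child);			/* take an extra reference */
	/* ... build and traverse the scene graph ... */
	object_ungrab(child);			/* drop the extra reference */
	object_ungrab(root);			/* assumed release call */
}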
/**
 * Create a new buffer object.
 *
 * size: Number of indices the buffer should accommodate.
 **/
ebuf_t *
ebuf_create(size_t size)
{
	ebuf_t *ret;
	GLuint handle;
	int memfail;

	if (! size)
		return NULL;

	glGenBuffers(1, &handle);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, handle);
	glBufferData(GL_ELEMENT_ARRAY_BUFFER, size * sizeof(uint16_t), NULL,
		     GL_STATIC_DRAW);
	memfail = CHECK_GL_MEM;

	if (current_ebuf)
		ebuf_do_activate(current_ebuf);

	if (memfail) {
		/* Don't leak the GL buffer name on allocation failure */
		glDeleteBuffers(1, &handle);
		return NULL;
	}

	ret = xmalloc(sizeof(ebuf_t));
	ret->gl_handle = handle;
	ret->size = size;
	refcount_init(&ret->refcount);
	refcount_add_destructor(&ret->refcount, ebuf_destructor, ret);
	intervals_init(&ret->free);
	interval_set(&ret->free, 0, size);

	return ret;
}
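/*
 * Hypothetical caller sketch for ebuf_create(). Only the creation call is
 * grounded in this file; the interpretation of the free-interval state is
 * an inference from intervals_init()/interval_set() above.
 */
static ebuf_t *
ebuf_create_example(void)
{
	/* Room for 1024 16-bit element indices; NULL signals GL memory
	 * exhaustion rather than aborting, so callers must check. */
	ebuf_t *ebuf = ebuf_create(1024);

	if (!ebuf)
		return NULL;

	/* ebuf->free now covers [0, 1024). When the last reference is
	 * dropped, ebuf_destructor() (registered above) tears the GL
	 * buffer down. */
	return ebuf;
}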
static int
fticket_ctor(void *mem, int size, void *arg, int flags)
{
	struct fuse_ticket *ftick = mem;
	struct fuse_data *data = arg;

	debug_printf("ftick=%p data=%p\n", ftick, data);

	FUSE_ASSERT_MS_DONE(ftick);
	FUSE_ASSERT_AW_DONE(ftick);

	ftick->tk_data = data;

	if (ftick->tk_unique != 0)
		fticket_refresh(ftick);

	/* May be truncated to 32 bits */
	ftick->tk_unique = atomic_fetchadd_long(&data->ticketer, 1);
	if (ftick->tk_unique == 0)
		ftick->tk_unique = atomic_fetchadd_long(&data->ticketer, 1);

	refcount_init(&ftick->tk_refcount, 1);
	atomic_add_acq_int(&fuse_ticket_count, 1);

	return 0;
}
/**
 * Allocate and initialize a new instance of data class @p cls, copying and
 * parsing NVRAM data from @p io.
 *
 * The caller is responsible for releasing the returned parser instance
 * reference via bhnd_nvram_data_release().
 *
 * @param	cls	If non-NULL, the data class to be allocated. If NULL,
 *			bhnd_nvram_data_probe_classes() will be used to
 *			determine the data format.
 * @param[out]	nv	On success, a pointer to the newly allocated NVRAM
 *			data instance.
 * @param	io	An I/O context mapping the NVRAM data to be copied
 *			and parsed.
 *
 * @retval 0		success
 * @retval non-zero	if an error occurs during allocation or
 *			initialization, a regular unix error code will be
 *			returned.
 */
int
bhnd_nvram_data_new(bhnd_nvram_data_class *cls, struct bhnd_nvram_data **nv,
    struct bhnd_nvram_io *io)
{
	struct bhnd_nvram_data	*data;
	int			 error;

	/* If NULL, try to identify the appropriate class */
	if (cls == NULL)
		return (bhnd_nvram_data_probe_classes(nv, io, NULL, 0));

	/* Allocate new instance */
	BHND_NV_ASSERT(sizeof(struct bhnd_nvram_data) <= cls->size,
	    ("instance size %zu less than minimum %zu", cls->size,
	     sizeof(struct bhnd_nvram_data)));

	data = bhnd_nv_calloc(1, cls->size);
	data->cls = cls;
	refcount_init(&data->refs, 1);

	/* Let the class handle initialization */
	if ((error = cls->op_new(data, io))) {
		bhnd_nv_free(data);
		return (error);
	}

	*nv = data;
	return (0);
}
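/*
 * A hedged caller sketch for bhnd_nvram_data_new(). The release call is the
 * one the documentation block above names; passing cls as NULL exercises the
 * format-probe path.
 */
static int
nvram_parse_example(struct bhnd_nvram_io *io)
{
	struct bhnd_nvram_data	*nv;
	int			 error;

	/* NULL class: probe the registered classes for a matching format */
	if ((error = bhnd_nvram_data_new(NULL, &nv, io)))
		return (error);

	/* ... query the parsed NVRAM variables ... */

	/* Drop the reference taken on our behalf at allocation */
	bhnd_nvram_data_release(nv);
	return (0);
}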
/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	VM_OBJECT_LOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	refcount_init(&shmfd->shm_refs, 1);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}
/*
 * ksem object management including creation and reference counting
 * routines.
 */
static struct ksem *
ksem_alloc(struct ucred *ucred, mode_t mode, unsigned int value)
{
	struct ksem *ks;

	mtx_lock(&ksem_count_lock);
	if (nsems == p31b_getcfg(CTL_P1003_1B_SEM_NSEMS_MAX) || ksem_dead) {
		mtx_unlock(&ksem_count_lock);
		return (NULL);
	}
	nsems++;
	mtx_unlock(&ksem_count_lock);
	ks = malloc(sizeof(*ks), M_KSEM, M_WAITOK | M_ZERO);
	ks->ks_uid = ucred->cr_uid;
	ks->ks_gid = ucred->cr_gid;
	ks->ks_mode = mode;
	ks->ks_value = value;
	cv_init(&ks->ks_cv, "ksem");
	vfs_timestamp(&ks->ks_birthtime);
	ks->ks_atime = ks->ks_mtime = ks->ks_ctime = ks->ks_birthtime;
	refcount_init(&ks->ks_ref, 1);
#ifdef MAC
	mac_posixsem_init(ks);
	mac_posixsem_create(ucred, ks);
#endif
	return (ks);
}
/**
 * Create a new mesh object.
 **/
mesh_t *
mesh_create(size_t verts, const void *vert_data, size_t elems,
	    const uint16_t *elem_data, vbuf_fmt_t format, GLenum type)
{
	mesh_t *ret = xmalloc(sizeof(mesh_t));
	size_t data_size = vbuf_fmt_vert_size(format);

	data_size *= verts;

	ret->vert_data = xmalloc(data_size);
	memcpy(ret->vert_data, vert_data, data_size);
	ret->verts = verts;

	/* Element indices are 16-bit, i.e. 2 bytes each */
	ret->elem_data = xmalloc(2 * elems);
	memcpy(ret->elem_data, elem_data, 2 * elems);
	ret->elems = elems;

	ret->generation = NULL;
	ret->format = format;
	ret->type = type;

	ret->vbuf = NULL;
	ret->vbuf_pos = 0;
	ret->ebuf = NULL;
	ret->ebuf_pos = 0;

	refcount_init(&ret->refcount);
	refcount_add_destructor(&ret->refcount, mesh_destructor, ret);

	return ret;
}
static struct archive_file *
bz2_open(const char *pathname, GError **error_r)
{
	struct bz2_archive_file *context;
	int len;

	context = g_malloc(sizeof(*context));
	archive_file_init(&context->base, &bz2_archive_plugin);
	refcount_init(&context->ref);

	/* open the archive */
	static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
	context->istream = input_stream_open(pathname,
					     g_static_mutex_get_mutex(&mutex),
					     NULL, error_r);
	if (context->istream == NULL) {
		g_free(context);
		return NULL;
	}

	context->name = g_path_get_basename(pathname);

	/* remove the ".bz2" suffix */
	len = strlen(context->name);
	if (len > 4)
		context->name[len - 4] = 0;

	return &context->base;
}
/*
 * Allocate a zeroed cred structure.
 */
struct ucred *
crget(void)
{
	register struct ucred *cr;

	cr = malloc(sizeof(*cr), M_CRED, M_WAITOK | M_ZERO);
	refcount_init(&cr->cr_ref, 1);
	return (cr);
}
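/*
 * A sketch of the cred reference cycle implied above: crget() returns a
 * ucred with cr_ref at 1, crhold() (seen in filemon_open() below) takes an
 * additional reference, and crfree() drops one, freeing the structure when
 * the count hits zero. crfree() is the standard FreeBSD counterpart and is
 * not shown in this file.
 */
static void
crget_example(void)
{
	struct ucred *cr;

	cr = crget();		/* cr_ref == 1 */
	crhold(cr);		/* cr_ref == 2 */
	crfree(cr);		/* cr_ref == 1 */
	crfree(cr);		/* cr_ref == 0: structure is freed */
}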
struct processor_data *processor_data_new(void)
{
	struct processor_data *pd = g_malloc0(sizeof(struct processor_data));

	g_mutex_init(&pd->mutex);
	refcount_init(&pd->queued);
	pd->state = processor_continue;
	pd->processor = NULL;
	pd->filters = NULL;
	pd->bistream = bistream_new();

	return pd;
}
/**
 * Create a new state object.
 **/
state_t *
state_create(void)
{
	state_t *state = xcalloc(1, sizeof(state_t));

	state->blend_mode = STATE_BLEND_DONTCARE;
	state->num_materials = 0;
	state->materials = NULL;
	state->material_gen = material_backlog_subscribe();

	refcount_init(&state->refcount);
	refcount_add_destructor(&state->refcount, state_destructor, state);

	return state;
}
/**
 * Create a new draw operation.
 **/
draw_op_t *
draw_op_create(object_t *object, object_t *camera)
{
	draw_op_t *ret = xcalloc(1, sizeof(draw_op_t));

	object_grab(object);
	object_grab(camera);

	ret->object = object;
	ret->camera = camera;
	ret->material_gen = material_backlog_subscribe();

	refcount_init(&ret->refcount);
	refcount_add_destructor(&ret->refcount, draw_op_destructor, ret);

	return ret;
}
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = malloc(sizeof(struct radeon_fence), DRM_MEM_DRIVER,
	    M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	CTR2(KTR_DRM, "radeon fence: emit (ring=%d, seq=%d)", ring,
	    (*fence)->seq);
	return 0;
}
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}
/*
 * Return loginclass structure with a corresponding name. Not
 * performance critical, as it's used mainly by setloginclass(2),
 * which happens once per login session. Caller has to use
 * loginclass_free() on the returned value when it's no longer
 * needed.
 */
struct loginclass *
loginclass_find(const char *name)
{
	struct loginclass *lc, *new_lc;

	if (name[0] == '\0' || strlen(name) >= MAXLOGNAME)
		return (NULL);

	lc = curthread->td_ucred->cr_loginclass;
	if (strcmp(name, lc->lc_name) == 0) {
		loginclass_hold(lc);
		return (lc);
	}

	rw_rlock(&loginclasses_lock);
	lc = loginclass_lookup(name);
	rw_runlock(&loginclasses_lock);
	if (lc != NULL)
		return (lc);

	new_lc = malloc(sizeof(*new_lc), M_LOGINCLASS, M_ZERO | M_WAITOK);
	racct_create(&new_lc->lc_racct);
	refcount_init(&new_lc->lc_refcount, 1);
	strcpy(new_lc->lc_name, name);

	rw_wlock(&loginclasses_lock);
	/*
	 * There's a chance someone created our loginclass while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate loginclass.
	 */
	if ((lc = loginclass_lookup(name)) == NULL) {
		LIST_INSERT_HEAD(&loginclasses, new_lc, lc_next);
		rw_wunlock(&loginclasses_lock);
		lc = new_lc;
	} else {
		rw_wunlock(&loginclasses_lock);
		racct_destroy(&new_lc->lc_racct);
		free(new_lc, M_LOGINCLASS);
	}

	return (lc);
}
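/*
 * Caller-side sketch matching the contract stated in the comment above:
 * every successful loginclass_find() must be paired with loginclass_free()
 * once the reference is no longer needed. The loginclass_free() signature
 * is assumed from that comment; it is not shown in this file.
 */
static int
loginclass_example(void)
{
	struct loginclass *lc;

	lc = loginclass_find("daemon");
	if (lc == NULL)
		return (EINVAL);

	/* ... consult lc->lc_name, lc->lc_racct, etc. ... */

	loginclass_free(lc);
	return (0);
}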
static int
filemon_open(struct cdev *dev, int oflags __unused, int devtype __unused,
    struct thread *td)
{
	int error;
	struct filemon *filemon;

	filemon = malloc(sizeof(*filemon), M_FILEMON,
	    M_WAITOK | M_ZERO);
	sx_init(&filemon->lock, "filemon");
	refcount_init(&filemon->refcnt, 1);
	filemon->cred = crhold(td->td_ucred);

	error = devfs_set_cdevpriv(filemon, filemon_dtr);
	if (error != 0)
		filemon_release(filemon);

	return (error);
}
/**
 * Create a copy of an existing draw operation.
 **/
draw_op_t *
draw_op_clone(draw_op_t *op)
{
	draw_op_t *ret = xmemdup(op, sizeof(draw_op_t));

	draw_op_sync_mat_backlog(op);
	ret->material_gen = material_backlog_subscribe();

	object_grab(ret->object);
	object_grab(ret->camera);

	if (ret->state)
		ret->state = state_clone(ret->state);

	refcount_init(&ret->refcount);
	refcount_add_destructor(&ret->refcount, draw_op_destructor, ret);

	return ret;
}
void multiplex_strategy(struct bio *bio)
{
	struct device *dev = bio->bio_dev;
	devop_strategy_t strategy = *((devop_strategy_t *)dev->private_data);
	uint64_t len = bio->bio_bcount;
	bio->bio_offset += bio->bio_dev->offset;
	uint64_t offset = bio->bio_offset;
	void *buf = bio->bio_data;

	assert(strategy != NULL);

	if (len <= dev->max_io_size) {
		strategy(bio);
		return;
	}

	// It is better to initialize the refcounter beforehand, especially
	// because we can trivially determine what the count is going to be.
	// Otherwise, we could have a situation in which we bump the refcount
	// to 1, get scheduled out, the first sub-bio finishes, and when it
	// drops the refcount back to 0 the main bio is considered finished
	// prematurely.
	refcount_init(&bio->bio_refcnt,
		      (len / dev->max_io_size) + !!(len % dev->max_io_size));

	while (len > 0) {
		uint64_t req_size = MIN(len, dev->max_io_size);
		struct bio *b = alloc_bio();

		b->bio_bcount = req_size;
		b->bio_data = buf;
		b->bio_offset = offset;
		b->bio_cmd = bio->bio_cmd;
		b->bio_dev = bio->bio_dev;
		b->bio_caller1 = bio;
		b->bio_done = multiplex_bio_done;

		strategy(b);

		buf += req_size;
		offset += req_size;
		len -= req_size;
	}
}
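// A hypothetical sketch of the completion side (multiplex_bio_done's body is
// not shown in this file): each sub-bio completion drops one reference on the
// parent bio, and whoever drops the last one signals overall completion. The
// refcount_release(), destroy_bio(), and biodone() calls are assumptions.
static void multiplex_bio_done_sketch(struct bio *b)
{
	struct bio *parent = b->bio_caller1;

	destroy_bio(b);		// assumed sub-bio teardown helper

	// Because the parent's refcount was primed with the full sub-bio
	// count before any sub-bio was dispatched, it can only reach zero
	// after the last completion.
	if (refcount_release(&parent->bio_refcnt))
		biodone(parent);	// assumed completion notification
}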
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*rcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = rcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	refcount_init(&base->refcount, 1);
	rw_init(&tdev->object_lock, "ttmbao");
	rw_wlock(&tdev->object_lock);
	ret = drm_ht_just_insert_please(&tdev->object_hash, &base->hash,
					(unsigned long)base, 31, 0, 0);
	rw_wunlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	rw_wlock(&tdev->object_lock);
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	rw_wunlock(&tdev->object_lock);
out_err0:
	return ret;
}
/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref. May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}
/**
 * Create a new state with the same properties as an existing state.
 **/
state_t *
state_clone(state_t *in)
{
	state_t *state = xmalloc(sizeof(state_t));
	size_t i;

	memcpy(state, in, sizeof(state_t));
	state_sync_mat_backlog(in);
	state->material_gen = material_backlog_subscribe();

	if (state->colorbuf)
		colorbuf_grab(state->colorbuf);

	state->materials = vec_dup(in->materials, in->num_materials);

	for (i = 0; i < state->num_materials; i++)
		state_material_clone(&state->materials[i]);

	refcount_init(&state->refcount);
	refcount_add_destructor(&state->refcount, state_destructor, state);

	return state;
}
struct toepcb *
alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each. Reserve room for an ABORT_REQ so the driver never has to
	 * worry about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
	 * immediate payload, and firmware counts tx work request credits in
	 * units of 16 bytes. Calculate the maximum work requests possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);

	if (txqid < 0)
		txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq;
	KASSERT(txqid >= vi->first_ofld_txq &&
	    txqid < vi->first_ofld_txq + vi->nofldtxq,
	    ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
		vi->first_ofld_txq, vi->nofldtxq));

	if (rxqid < 0)
		rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq;
	KASSERT(rxqid >= vi->first_ofld_rxq &&
	    rxqid < vi->first_ofld_rxq + vi->nofldrxq,
	    ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
		vi->first_ofld_rxq, vi->nofldrxq));

	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tc_idx = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);
	ddp_init_toep(toep);

	return (toep);
}