/**
 * Create a process specific file.
 * @param pdir     Parent procfs directory vnode; must not be NULL.
 * @param pid      PID the new file describes.
 * @param filename Name of the file to create under pdir.
 * @param ftype    Procfs file type stored in the specinfo.
 * @return 0 on success; negative errno on failure (-ENOMEM if the
 *         specinfo pool is exhausted, otherwise the mknod error).
 */
static int create_proc_file(vnode_t * pdir, pid_t pid, const char * filename,
                            enum procfs_filetype ftype)
{
    vnode_t * vn;
    struct procfs_info * spec;
    int err;

    KASSERT(pdir != NULL, "pdir must be set");

    spec = mempool_get(specinfo_pool);
    if (!spec)
        return -ENOMEM;

    /* Create a specinfo */
    spec->ftype = ftype;
    spec->pid = pid;

    err = pdir->vnode_ops->mknod(pdir, filename, S_IFREG | PROCFS_PERMS,
                                 spec, &vn);
    if (err) {
        mempool_return(specinfo_pool, spec);
        /*
         * Propagate the actual mknod error instead of masking every
         * failure as -ENOTDIR (the old behavior hid e.g. -ENOMEM).
         */
        return err;
    }

    /* Wire the specinfo to the new vnode and switch it to procfs ops. */
    spec->vnode = vn;
    vn->vn_specinfo = spec;
    vn->vnode_ops = &procfs_vnode_ops;
    vrele(vn);

    return 0;
}
/**
 * Delete a procfs vnode.
 *
 * Returns the attached specinfo to its pool (only when one is actually
 * attached and carries a valid procfs file type), then delegates the
 * vnode deletion itself to ramfs.
 *
 * @param vnode Vnode to delete.
 * @return Result of ramfs_delete_vnode().
 */
static int procfs_delete_vnode(vnode_t * vnode)
{
    const struct procfs_info * spec = vnode->vn_specinfo;

    /*
     * Only return the specinfo when it exists; the old `!spec || ...`
     * guard handed a NULL pointer to mempool_return() whenever the
     * vnode had no specinfo attached.
     */
    if (spec && spec->ftype <= PROCFS_LAST)
        mempool_return(specinfo_pool, vnode->vn_specinfo);

    return ramfs_delete_vnode(vnode);
}
/* * Function: slapi_ch_free * * Returns: nothing * * Description: frees the pointer, and then sets it to NULL to * prevent free-memory writes. * Note: pass in the address of the pointer you want to free. * Note: you can pass in null pointers, it's cool. * * Implementation: get the size from the size space, and determine the behavior * based upon the size: * 1B ~ 1KB: call system free * 1KB + 1B ~ 64MB: return memory to mempool * 64MB + 1B ~ : call munmap */ void slapi_ch_free(void **ptr) { void *realptr; unsigned long size; if (ptr==NULL || *ptr == NULL){ return; } realptr = (void *)((char *)*ptr - sizeof(unsigned long)); size = *(unsigned long *)realptr; if (size <= 1024) { free (realptr); } else if (size <= 67108864) { /* return 2KB ~ 64MB memory to memory pool */ unsigned long roundup = 1; int n = 0; int rc = LDAP_SUCCESS; while (1) { roundup <<= 1; n++; if (roundup >= size) { break; } } PR_ASSERT(n >= 11 && n <= 26); rc = mempool_return(n-11, *ptr, (mempool_cleanup_callback)NULL); if (LDAP_SUCCESS != rc) { free (realptr); } } else { slapi_ch_munmap_no_roundup( ptr, size ); } *ptr = NULL; if(!counters_created) { create_counters(); counters_created= 1; } PR_INCREMENT_COUNTER(slapi_ch_counter_free); PR_DECREMENT_COUNTER(slapi_ch_counter_exist); return; }
/**
 * Frees all objects in a list.
 *
 * Walks the linked list, detaches each object from its type-specific
 * bookkeeping, recursively frees any inventory, and returns the object
 * to the object pool.
 *
 * @param op
 * Start of the list.
 */
void objects_free(object *op)
{
    object *following;

    while (op != NULL) {
        /* Detach from whichever subsystem tracks this object type. */
        switch (op->itype) {
        case TYPE_SPELL:
            spells_remove(op);
            break;

        case TYPE_SKILL:
            skills_remove(op);
            break;

        case TYPE_FORCE:
        case TYPE_POISONING:
            widget_active_effects_remove(cur_widget[ACTIVE_EFFECTS_ID], op);
            break;

        default:
            break;
        }

        /* Inventory is its own list; free it recursively first. */
        if (op->inv != NULL) {
            objects_free(op->inv);
        }

        following = op->next;
        mempool_return(pool_object, op);
        op = following;
    }
}
/**
 * Remove an object.
 *
 * Detaches the object from its type-specific bookkeeping, removes its
 * inventory, unlinks it from its sibling list / environment, and returns
 * it to the object pool. The player object and the below-list anchor are
 * never removed.
 *
 * @param op
 * What to remove.
 */
void object_remove(object *op)
{
    /* Guard: nothing to do for NULL or the protected anchor objects. */
    if (op == NULL || op == cpl.ob || op == cpl.below) {
        return;
    }

    /* Detach from whichever subsystem tracks this object type. */
    switch (op->itype) {
    case TYPE_SPELL:
        spells_remove(op);
        break;

    case TYPE_SKILL:
        skills_remove(op);
        break;

    case TYPE_FORCE:
    case TYPE_POISONING:
        widget_active_effects_remove(cur_widget[ACTIVE_EFFECTS_ID], op);
        break;

    default:
        break;
    }

    object_redraw(op);

    if (op->inv != NULL) {
        object_remove_inventory(op);
    }

    /* Unlink from the doubly-linked sibling chain (or the environment's
     * inventory head when this was the first child). */
    if (op->prev != NULL) {
        op->prev->next = op->next;
    } else if (op->env != NULL) {
        op->env->inv = op->next;
    }

    if (op->next != NULL) {
        op->next->prev = op->prev;
    }

    if (op->itype == TYPE_REGION_MAP) {
        region_map_fow_update(MapData.region_map);
        minimap_redraw_flag = 1;
    }

    mempool_return(pool_object, op);
}
/**
 * Go through the freelists and free puddles with no used chunks.
 *
 * A puddle is one expand_size-sized array of chunks; it can only be handed
 * back to the system once every chunk in it is free. As a side effect the
 * per-freelist chain is rebuilt so that chunks from the fullest puddles are
 * handed out first.
 *
 * @param pool
 * Pool to sweep; must not be NULL.
 * @return
 * Number of freed puddles.
 */
static size_t mempool_free_puddles (mempool_struct *pool)
{
    size_t chunksize_real, nrof_arrays, i, j, freed;
    mempool_chunk_struct *last_free, *chunk;
    mempool_puddle_struct *puddle, *next_puddle;

    HARD_ASSERT(pool != NULL);

    /* Bypass pools allocate straight from the system allocator, so there
     * are no puddles to sweep. */
    if (pool->flags & MEMPOOL_BYPASS_POOLS) {
        return 0;
    }

    freed = 0;

    for (i = 0; i < MEMPOOL_NROF_FREELISTS; i++) {
        /* Freelist i serves chunks of (chunksize << i) payload bytes, and
         * each of its puddles holds (expand_size >> i) chunks. */
        chunksize_real = sizeof(mempool_chunk_struct) + (pool->chunksize << i);
        nrof_arrays = pool->expand_size >> i;

        /* Free empty puddles and setup puddle-local freelists. The list is
         * detached first and surviving puddles are pushed back one by one. */
        for (puddle = pool->puddlelist[i], pool->puddlelist[i] = NULL;
             puddle != NULL; puddle = next_puddle) {
            next_puddle = puddle->next;

            /* Count free chunks in puddle, and set up a local freelist */
            puddle->first_free = puddle->last_free = NULL;
            puddle->nrof_free = 0;

            for (j = 0; j < nrof_arrays; j++) {
                chunk = (mempool_chunk_struct *)
                        (((char *) puddle->first_chunk) + chunksize_real * j);

                /* Find free chunks. */
                if (CHUNK_FREE(MEM_USERDATA(chunk))) {
                    if (puddle->nrof_free == 0) {
                        puddle->first_free = chunk;
                        puddle->last_free = chunk;
                        chunk->next = NULL;
                    } else {
                        /* Prepend; last_free stays at the first chunk found. */
                        chunk->next = puddle->first_free;
                        puddle->first_free = chunk;
                    }

                    puddle->nrof_free++;
                }
            }

            /* Can we actually free this puddle? During deinit of the puddle
             * pool itself, every puddle is freed unconditionally. */
            if (puddle->nrof_free == nrof_arrays ||
                (deiniting && pool == pool_puddle)) {
                /* Yup. Forget about it. */
                efree(puddle->first_chunk);

                /* NOTE(review): when deiniting pool_puddle, the puddle
                 * tracking struct presumably lives inside a chunk that is
                 * being freed here, which would explain why it is not
                 * returned to the pool in that case — confirm. */
                if (!deiniting || pool != pool_puddle) {
                    mempool_return(pool_puddle, puddle);
                }

                pool->nrof_free[i] -= nrof_arrays;
                pool->nrof_allocated[i] -= nrof_arrays;
                freed++;
            } else {
                /* Nope, keep this puddle: put it back into the tracking list */
                puddle->next = pool->puddlelist[i];
                pool->puddlelist[i] = puddle;
            }
        }

        /* Sort the puddles by amount of free chunks. It will let us set up the
         * freelist so that the chunks from the fullest puddles are used first.
         * This should (hopefully) help us free some of the lesser-used puddles
         * earlier. */
        pool->puddlelist[i] = sort_linked_list(pool->puddlelist[i], 0,
                sort_puddle_by_nrof_free, NULL, NULL, NULL);

        /* Finally: restore the global freelist by chaining together the
         * puddle-local freelists in sorted order, terminated by end_marker. */
        pool->freelist[i] = &end_marker;
        last_free = &end_marker;

        for (puddle = pool->puddlelist[i]; puddle != NULL;
             puddle = puddle->next) {
            if (puddle->nrof_free > 0) {
                if (pool->freelist[i] == &end_marker) {
                    pool->freelist[i] = puddle->first_free;
                } else {
                    last_free->next = puddle->first_free;
                }

                puddle->last_free->next = &end_marker;
                last_free = puddle->last_free;
            }
        }
    }

    return freed;
}