/*
 * Recursively insert (key, value) into the binary tree rooted at `root`,
 * allocating new nodes from `pool`.
 *
 * Field layout (per node): 0 and 1 are child references, 2 is the key,
 * 3 is the value.  Note the comparison orientation: keys greater than the
 * node's key descend through field 0, smaller keys through field 1.
 * An equal key simply overwrites the stored value.
 */
static void insert(pool_reference *pool, global_reference root,
                   uint64_t key, uint64_t value)
{
    uint64_t *node_key = get_field(root, 2);

    if (*node_key < key) {
        global_reference child = get_field_reference(root, 0);
        if (child != NULL_REF) {
            insert(pool, child, key, value);
        } else {
            /* No child on this side: create a fresh leaf. */
            child = pool_alloc(pool);
            set_field(child, 2, &key);
            set_field(child, 3, &value);
            set_field_reference(root, 0, child);
        }
    } else if (*node_key > key) {
        global_reference child = get_field_reference(root, 1);
        if (child != NULL_REF) {
            insert(pool, child, key, value);
        } else {
            child = pool_alloc(pool);
            set_field(child, 2, &key);
            set_field(child, 3, &value);
            set_field_reference(root, 1, child);
        }
    } else {
        /* Key already present: replace the stored value. */
        set_field(root, 3, &value);
    }
}
/**
 * Get a variable by its name and vendor GUID.
 *
 * Returns a pool-allocated copy of the variable's data (caller frees with
 * pool_free), or NULL on error.  On return, *size (if non-NULL) holds the
 * variable's data size and *attributes receives the variable attributes.
 */
static VOID *
get_efi_variable(const CHAR16 *name, const EFI_GUID *vendor, UINTN *size,
                 UINT32 *attributes)
{
    EFI_STATUS status;
    UINT8 localbuffer[1024];
    UINTN bufsize = sizeof(localbuffer), i;
    UINT8 *buffer = NULL;

    /* First attempt with a stack buffer; on EFI_BUFFER_TOO_SMALL the
     * firmware updates bufsize to the size actually required. */
    status = efi_call5(RT->GetVariable, name, vendor, attributes,
                       &bufsize, localbuffer);
    if (status == EFI_BUFFER_TOO_SMALL) {
        /* Too big for the stack buffer: allocate from pool and retry. */
        buffer = pool_alloc(bufsize);
        if (buffer) {
            status = efi_call5(RT->GetVariable, name, vendor, attributes,
                               &bufsize, buffer);
        }
    } else if (!EFI_ERROR(status) && bufsize <= sizeof(localbuffer)) {
        /* It fit on the stack: hand the caller a pool-allocated copy. */
        buffer = pool_alloc(bufsize);
        if (buffer) {
            for (i = 0; i < bufsize; i++) {
                buffer[i] = localbuffer[i];
            }
        }
    }
    if (EFI_ERROR(status)) {
        print(L"Error in GetVariable\n");
        if (buffer) {
            pool_free(buffer);
            /* BUGFIX: the original kept returning the freed pointer,
             * handing the caller a dangling reference. */
            buffer = NULL;
        }
    }
    if (size) {
        *size = bufsize;
    }
    return buffer;
}
void t_field_map(void) { pool_reference list_pool = pool_create(LIST_TYPE_ID); CU_ASSERT_NOT_EQUAL_FATAL(list_pool, NULL_POOL); global_reference head = pool_alloc(&list_pool); pool_iterator itr = iterator_new(&list_pool, &head); size_t list_size = 10000; for (size_t i = 0 ; i < list_size ; ++i) { iterator_set_field(itr, 1, &i); iterator_list_insert(itr, pool_alloc(&list_pool)); itr = iterator_next(list_pool, itr); } pool_reference long_pool = pool_create(LONG_TYPE_ID); CU_ASSERT_NOT_EQUAL_FATAL(long_pool, NULL_POOL); CU_ASSERT_EQUAL(field_map(list_pool, &long_pool, 1, square), 0); uint64_t *result = pool_to_array(long_pool); int cmp_error_count = 0; for (size_t i = 0 ; i < list_size ; ++i) { cmp_error_count += i*i != result[i]; } CU_ASSERT_EQUAL(cmp_error_count, 0); iterator_destroy(&itr); pool_destroy(&long_pool); pool_destroy(&list_pool); }
int hcct_init() { // initialize custom memory allocator node_pool = pool_init(PAGE_SIZE, sizeof(lss_hcct_node_t), &free_list); if (node_pool == NULL) { printf("[hcct] error while initializing allocator... Quitting!\n"); return -1; } // create dummy root node pool_alloc(node_pool, free_list, hcct_root, lss_hcct_node_t); if (hcct_root == NULL) { printf("[hcct] error while initializing hcct root node... Quitting!\n"); return -1; } hcct_root->first_child = NULL; hcct_root->next_sibling = NULL; hcct_root->counter = 1; hcct_root->routine_id = 0; hcct_root->call_site = 0; hcct_root->parent = NULL; SetMonitored(hcct_root); // initialize stack stack[0] = hcct_root; stack_idx = 0; // create lazy priority queue #if UPDATE_MIN_SENTINEL == 1 queue = (lss_hcct_node_t**)malloc((epsilon+1)*sizeof(lss_hcct_node_t*)); pool_alloc(node_pool, free_list, queue[epsilon], lss_hcct_node_t); if (queue[epsilon] == NULL) { printf("[hcct] error while initializing lazy priority queue... Quitting!\n"); return -1; } queue[epsilon]->counter = min = 0; #else queue = (lss_hcct_node_t**)malloc(epsilon*sizeof(lss_hcct_node_t*)); #endif if (queue == NULL) { printf("[hcct] error while initializing lazy priority queue... Quitting!\n"); return -1; } queue[0] = hcct_root; num_queue_items = 1; // goes from 0 to epsilon queue_full = 0; min_idx = epsilon-1; second_min_idx = 0; lss_enter_events=0; return 0; }
/*
 * Read the CKA_G_CREDENTIAL_TEMPLATE attribute from the prompt's object and
 * return it as an array of CK_ATTRIBUTE values allocated from the prompt's
 * pool (freed with the pool, not individually).
 *
 * Uses the standard three-call PKCS#11 template pattern: first query the
 * total template length, then the per-attribute value lengths, then fetch
 * the actual values.  On success *n_options holds the attribute count;
 * returns NULL on any failure (with *n_options possibly already set).
 */
static CK_ATTRIBUTE_PTR
get_unlock_options_from_object (GkmWrapPrompt *self, CK_ULONG_PTR n_options)
{
	CK_ATTRIBUTE_PTR options;
	CK_ATTRIBUTE attr;
	CK_ULONG i;
	CK_RV rv;

	g_assert (GKM_WRAP_IS_PROMPT (self));
	g_assert (self->module);
	g_assert (n_options);

	*n_options = 0;
	attr.type = CKA_G_CREDENTIAL_TEMPLATE;
	attr.ulValueLen = 0;
	attr.pValue = NULL;

	/* Get the length of the entire template */
	rv = (self->module->C_GetAttributeValue) (self->session, self->object, &attr, 1);
	if (rv != CKR_OK) {
		/* A missing template attribute is expected; only warn otherwise. */
		if (rv != CKR_ATTRIBUTE_TYPE_INVALID)
			g_warning ("couldn't get credential template for prompt: %s",
			           gkm_util_rv_to_string (rv));
		return NULL;
	}

	/* Number of attributes, rounded down */
	*n_options = (attr.ulValueLen / sizeof (CK_ATTRIBUTE));;
	attr.pValue = options = pool_alloc (self, attr.ulValueLen);

	/* Get the size of each value */
	rv = (self->module->C_GetAttributeValue) (self->session, self->object, &attr, 1);
	if (rv != CKR_OK) {
		g_warning ("couldn't read credential template for prompt: %s",
		           gkm_util_rv_to_string (rv));
		return NULL;
	}

	/* Allocate memory for each value; (CK_ULONG)-1 marks an invalid entry. */
	for (i = 0; i < *n_options; ++i) {
		if (options[i].ulValueLen != (CK_ULONG)-1)
			options[i].pValue = pool_alloc (self, options[i].ulValueLen);
	}

	/* Now get the actual values */
	rv = (self->module->C_GetAttributeValue) (self->session, self->object, &attr, 1);
	if (rv != CKR_OK) {
		g_warning ("couldn't retrieve credential template for prompt: %s",
		           gkm_util_rv_to_string (rv));
		return NULL;
	}

	return options;
}
/* Allocate a two-machine-word object.  Under MEMDEBUG everything is routed
 * through alloc_big for tracing; otherwise the request is served from the
 * size-class pool whose index depends on the pointer width. */
void *alloc_2w(void)
{
#ifdef MEMDEBUG
    return alloc_big(2 * sizeof(void*));
#else
#ifdef _P64
    return pool_alloc(&pools[2]);
#else
    return pool_alloc(&pools[0]);
#endif
#endif
}
/* Allocate a three-machine-word object.  MEMDEBUG builds trace through
 * alloc_big; normal builds pick the pool index by pointer width. */
DLLEXPORT void *alloc_3w(void)
{
#ifdef MEMDEBUG
    return alloc_big(3 * sizeof(void*));
#else
#ifdef _P64
    return pool_alloc(&pools[4]);
#else
    return pool_alloc(&pools[1]);
#endif
#endif
}
/* Allocate a four-machine-word object.  MEMDEBUG builds trace through
 * alloc_big; normal builds pick the pool index by pointer width. */
DLLEXPORT void *alloc_4w(void)
{
#ifdef MEMDEBUG
    return alloc_big(4 * sizeof(void*));
#else
#ifdef _P64
    return pool_alloc(&pools[6]);
#else
    return pool_alloc(&pools[2]);
#endif
#endif
}
void *alloc_4w(void) { #ifdef MEMDEBUG return alloc_big(4*sizeof(void*), 1); #endif allocd_bytes += (4*sizeof(void*)); #ifdef __LP64__ return pool_alloc(&pools[6]); #else return pool_alloc(&pools[2]); #endif }
void *alloc_3w(void) { #ifdef MEMDEBUG return alloc_big(3*sizeof(void*), 1); #endif allocd_bytes += (3*sizeof(void*)); #ifdef __LP64__ return pool_alloc(&pools[4]); #else return pool_alloc(&pools[1]); #endif }
/*
 * Create a new secure-memory Block of at least `size` bytes (rounded up to
 * DEFAULT_BLOCK_SIZE), tagged with `during_tag`, and link it onto the
 * global all_blocks list.  The block starts with a single free Cell
 * covering all of its pages.
 *
 * Returns NULL on any failure, or when the SECMEM_FORCE_FALLBACK
 * environment variable forces callers back to plain malloc.
 */
static Block*
sec_block_create (size_t size,
                  const char *during_tag)
{
	Block *block;
	Cell *cell;

	ASSERT (during_tag);

	/* We can force all memory to be malloced */
	if (getenv ("SECMEM_FORCE_FALLBACK"))
		return NULL;

	block = pool_alloc ();
	if (!block)
		return NULL;

	cell = pool_alloc ();
	if (!cell) {
		pool_free (block);
		return NULL;
	}

	/* The size above is a minimum, we're free to go bigger */
	if (size < DEFAULT_BLOCK_SIZE)
		size = DEFAULT_BLOCK_SIZE;

	/* sec_acquire_pages may adjust `size` to what was actually mapped. */
	block->words = sec_acquire_pages (&size, during_tag);
	block->n_words = size / sizeof (word_t);
	if (!block->words) {
		pool_free (block);
		pool_free (cell);
		return NULL;
	}

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (block->words, size);
#endif

	/* The first cell to allocate from */
	cell->words = block->words;
	cell->n_words = block->n_words;
	cell->requested = 0;
	sec_write_guards (cell);
	sec_insert_cell_ring (&block->unused_cells, cell);

	block->next = all_blocks;
	all_blocks = block;

	return block;
}
/* Initialize the static env_cfg config object, allocating its key and
 * value arrays from pool `p`.  Returns NULL if either allocation fails. */
static config_t *_config_init(pool_t *p)
{
    config_t *cfg = &env_cfg;

    cfg->keys = pool_alloc(p, sizeof(*cfg->keys) * CONFIG_MAX_KEYS);
    cfg->vals = pool_alloc(p, sizeof(*cfg->vals) * CONFIG_MAX_KEYS);
    if (cfg->keys == NULL || cfg->vals == NULL)
        return NULL;

    cfg->pool = p;
    cfg->pairs = 0;
    return cfg;
}
/* Insert `word` into the hash table, replacing any existing entry whose
 * text matches.  May trigger a rehash when density exceeds max_density. */
void add(Word *word)
{
    const unsigned int hash_val = hash(word->text, word->nbytes);
    unsigned int h = hash_val % n_bins;
    Entry *head = bins[h];

    if (head == NULL) {
        /* Empty bin: grow the table first if it has become too dense. */
        if (n_entries / n_bins > max_density) {
            rehash();
            h = hash_val % n_bins;
        }
        Entry *fresh = static_cast<Entry *>(pool_alloc(sizeof(Entry)));
        fresh->word = word;
        fresh->next = NULL;
        bins[h] = fresh;
        n_entries++;
        return;
    }

    /* Search the chain for an entry with identical text. */
    for (Entry *cur = head; cur != NULL; cur = cur->next) {
        if (word->nbytes == cur->word->nbytes &&
            strncmp(word->text, cur->word->text, word->nbytes) == 0) {
            /* Overwriting.  WARNING: the original Word object is
             * permanently lost.  This IS a memory leak, because the memory
             * is allocated by pool_alloc.  Instead of fixing this, tuning
             * the dictionary file is a better idea. */
            cur->word = word;
            return;
        }
    }

    /* No match: prepend a new entry to this bin's chain. */
    Entry *fresh = static_cast<Entry *>(pool_alloc(sizeof(Entry)));
    fresh->word = word;
    fresh->next = bins[h];
    bins[h] = fresh;
    n_entries++;
}
/* Mostly copied from fast-import.c's main(): one-time global setup of the
 * pack writer, lookup tables, mark set and recent-command free list, plus
 * the exit/die hooks. */
static void init()
{
	int i;

	reset_pack_idx_option(&pack_idx_opts);
	git_pack_config();
	/* Fall back to core.compression when pack.compression is unset. */
	if (!pack_compression_seen && core_compression_seen)
		pack_compression_level = core_compression_level;

	alloc_objects(object_entry_alloc);
	strbuf_init(&command_buf, 0);

	atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
	branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
	avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
	marks = pool_calloc(1, sizeof(struct mark_set));

	global_argc = 1;

	/* Thread the cmd_save slots into a singly linked free list. */
	rc_free = pool_alloc(cmd_save * sizeof(*rc_free));
	for (i = 0; i < (cmd_save - 1); i++)
		rc_free[i].next = &rc_free[i + 1];
	rc_free[cmd_save - 1].next = NULL;

	prepare_packed_git();
	start_packfile();
	set_die_routine(die_nicely);
	initialized = 1;
	atexit(cleanup);
}
/*
 * Creates a HacheItem for use with HacheTable h, drawing from the table's
 * item pool when HASH_POOL_ITEMS is set and from malloc otherwise.
 *
 * Returns:
 *    A pointer to the new, default-initialised HacheItem on success
 *    NULL on failure.
 */
static HacheItem *HacheItemCreate(HacheTable *h)
{
    HacheItem *item;

    item = (h->options & HASH_POOL_ITEMS)
        ? pool_alloc(h->hi_pool)
        : malloc(sizeof(*item));
    if (item == NULL)
        return NULL;

    /* Start with empty data, no key, one reference and no ordering. */
    item->data.p     = NULL;
    item->data.i     = 0;
    item->next       = NULL;
    item->key        = NULL;
    item->key_len    = 0;
    item->ref_count  = 1;
    item->order      = -1;
    item->h          = h;
    item->in_use_next = NULL;
    item->in_use_prev = NULL;

    h->nused++;

    return item;
}
/**
 * Submit a db 'delete' request.
 * The request is inserted in the appropriate db queue.
 * (always asynchronous)
 */
int handlemap_db_delete(nfs23_map_handle_t *p_in_nfs23_digest)
{
	unsigned int queue_idx;
	db_op_item_t *task;
	int rc;

	/* which thread is going to handle this inode ? */
	queue_idx = select_db_queue(p_in_nfs23_digest);

	/* grab a free operation descriptor from that thread's pool */
	pthread_mutex_lock(&db_thread[queue_idx].pool_mutex);
	task = pool_alloc(db_thread[queue_idx].dbop_pool, NULL);
	pthread_mutex_unlock(&db_thread[queue_idx].pool_mutex);

	if (task == NULL)
		return HANDLEMAP_SYSTEM_ERROR;

	/* fill the task info */
	task->op_type = DELETE;
	task->op_arg.fh_info.nfs23_digest = *p_in_nfs23_digest;

	rc = dbop_push(&db_thread[queue_idx].work_queue, task);
	return rc ? rc : HANDLEMAP_SUCCESS;
}
/*
 * Duplicate `length` bytes of `original` into memory owned by the prompt's
 * pool.  Returns the copy, or NULL if the pool allocation failed.
 */
static gpointer
pool_dup (GkmWrapPrompt *self, gconstpointer original, gsize length)
{
	gpointer memory = pool_alloc (self, length);

	/* BUGFIX: pool_alloc can fail; the original passed a possibly-NULL
	 * destination straight to memcpy (undefined behavior, CERT MEM32-C). */
	if (memory != NULL)
		memcpy (memory, original, length);

	return memory;
}
/*ARGSUSED*/
/* Resize a gmp allocation, migrating between the fixed-size pool and the
 * heap as the size crosses POOL_ELEM_SIZE.  A block that stays within a
 * single pool element is returned unchanged. */
static void*
gmp_realloc(void* ptr, size_t old_size, size_t new_size)
{
	const int was_pooled = (old_size <= POOL_ELEM_SIZE);
	const int now_pooled = (new_size <= POOL_ELEM_SIZE);
	void* p;

	/* Still fits in a pool element: nothing to move. */
	if (was_pooled && now_pooled)
		return ptr;

	if (was_pooled) {
		/* pool -> heap */
		assert(new_size > POOL_ELEM_SIZE);
		assert(new_size > old_size);
		p = malloc(new_size);
		memcpy(p, ptr, old_size);
		pool_free(ptr);
		return p;
	}

	if (now_pooled) {
		/* heap -> pool */
		assert(old_size > POOL_ELEM_SIZE);
		assert(old_size > new_size);
		p = pool_alloc();
		memcpy(p, ptr, new_size);
		free(ptr);
		return p;
	}

	/* heap -> heap */
	return realloc(ptr, new_size);
}
/*
 * Allocate a block of at least `size` bytes from memory pool `p`, storing
 * the result in `block`.  `timeout` is K_NO_WAIT, K_FOREVER, or a wait
 * budget in milliseconds; on -ENOMEM the caller pends on the pool's wait
 * queue and retries until the deadline passes.
 * Returns 0 on success, a pool_alloc error code, or -EAGAIN on timeout.
 */
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, s32_t timeout)
{
	int ret, key;
	s64_t end = 0;

	/* Blocking allocation is not allowed from ISR context. */
	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");

	if (timeout > 0) {
		/* Absolute deadline, in ticks. */
		end = _tick_get() + _ms_to_ticks(timeout);
	}

	while (1) {
		ret = pool_alloc(p, block, size);

		/* Return immediately on success, on a no-wait request, or on
		 * any failure other than -ENOMEM (only -ENOMEM is retried;
		 * note -EAGAIN from pool_alloc is also passed straight up). */
		if (ret == 0 || timeout == K_NO_WAIT ||
		    ret == -EAGAIN || (ret && ret != -ENOMEM)) {
			return ret;
		}

		/* Pool exhausted: pend until a block is freed or we time out. */
		key = irq_lock();
		_pend_current_thread(&p->wait_q, timeout);
		_Swap(key);

		if (timeout != K_FOREVER) {
			/* Recompute the remaining budget for the next retry. */
			timeout = end - _tick_get();

			if (timeout < 0) {
				break;
			}
		}
	}

	return -EAGAIN;
}
/* Try to replace the register at *LOC in INSN with the oldest equivalent
   register of class CL recorded in VD.  For debug insns the replacement is
   only queued (applied later), so debug info never constrains optimization;
   for normal insns the change is registered with validate_change.
   Returns true if a replacement was found/queued, false otherwise.  */
static bool
replace_oldest_value_reg (rtx *loc, enum reg_class cl, rtx insn,
			  struct value_data *vd)
{
  rtx new_rtx = find_oldest_value_reg (cl, *loc, vd);
  if (new_rtx)
    {
      if (DEBUG_INSN_P (insn))
	{
	  struct queued_debug_insn_change *change;

	  if (dump_file)
	    fprintf (dump_file, "debug_insn %u: queued replacing reg %u with %u\n",
		     INSN_UID (insn), REGNO (*loc), REGNO (new_rtx));

	  /* Queue the edit on the new register's change list; it is applied
	     (or dropped) once the pass knows the replacement is safe.  */
	  change = (struct queued_debug_insn_change *)
		   pool_alloc (debug_insn_changes_pool);
	  change->next = vd->e[REGNO (new_rtx)].debug_insn_changes;
	  change->insn = insn;
	  change->loc = loc;
	  change->new_rtx = new_rtx;
	  vd->e[REGNO (new_rtx)].debug_insn_changes = change;
	  ++vd->n_debug_insn_changes;
	  return true;
	}
      if (dump_file)
	fprintf (dump_file, "insn %u: replaced reg %u with %u\n",
		 INSN_UID (insn), REGNO (*loc), REGNO (new_rtx));

      validate_change (insn, loc, new_rtx, 1);
      return true;
    }
  return false;
}
/* This function executes the "capture" action and store the result in a * capture slot if exists. It executes a fetch expression, turns the result * into a string and puts it in a capture slot. It always returns 1. If an * error occurs the action is cancelled, but the rule processing continues. */ static enum act_return http_action_res_capture_by_id(struct act_rule *rule, struct proxy *px, struct session *sess, struct stream *s, int flags) { struct sample *key; struct cap_hdr *h; char **cap = s->res_cap; struct proxy *fe = strm_fe(s); int len; int i; /* Look for the original configuration. */ for (h = fe->rsp_cap, i = fe->nb_rsp_cap - 1; h != NULL && i != rule->arg.capid.idx ; i--, h = h->next); if (!h) return ACT_RET_CONT; key = sample_fetch_as_type(s->be, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->arg.capid.expr, SMP_T_STR); if (!key) return ACT_RET_CONT; if (cap[h->index] == NULL) cap[h->index] = pool_alloc(h->pool); if (cap[h->index] == NULL) /* no more capture memory */ return ACT_RET_CONT; len = key->data.u.str.data; if (len > h->len) len = h->len; memcpy(cap[h->index], key->data.u.str.area, len); cap[h->index][len] = 0; return ACT_RET_CONT; }
/* This function executes the "capture" action. It executes a fetch expression, * turns the result into a string and puts it in a capture slot. It always * returns 1. If an error occurs the action is cancelled, but the rule * processing continues. */ static enum act_return http_action_req_capture(struct act_rule *rule, struct proxy *px, struct session *sess, struct stream *s, int flags) { struct sample *key; struct cap_hdr *h = rule->arg.cap.hdr; char **cap = s->req_cap; int len; key = sample_fetch_as_type(s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.cap.expr, SMP_T_STR); if (!key) return ACT_RET_CONT; if (cap[h->index] == NULL) cap[h->index] = pool_alloc(h->pool); if (cap[h->index] == NULL) /* no more capture memory */ return ACT_RET_CONT; len = key->data.u.str.data; if (len > h->len) len = h->len; memcpy(cap[h->index], key->data.u.str.area, len); cap[h->index][len] = 0; return ACT_RET_CONT; }
/*
 * Load a config file into a config_t allocated from pool `proc`.
 * File format: "A=B\n" per line; lines starting with '#' are comments,
 * empty lines and leading whitespace are allowed.
 * Returns the config object, or NULL on open/read error.
 */
config_t *load_config(pool_t *proc, const char *conf_path)
{
	char *line = NULL;
	int rv;
	pool_t *tmp_pool;
	config_t *conf;
	FILE *fp;

	fp = fopen(conf_path, "r");
	if (!fp) {
		fprintf(stderr, "Unable to open config file %s. Error %s",
		        conf_path, strerror(errno));
		return NULL;
	}

	tmp_pool = pool_create(proc, "temporary config pool");
	conf = pool_alloc(proc, sizeof(*conf), "config object");

	/* format is A=B\n. Lines starting with # are comments.
	   Empty lines are allowed. WS is allowed */
	while ((rv = readline(tmp_pool, &line, fp)) > 0) {
		char *l = line;
		while (isspace(*l))
			l++;
		if ((l[0] != '#') && (l[0] != '\0'))
			process_config_line(proc, conf, l);
	}

	if (!feof(fp)) {
		/* BUGFIX: ferror() returns a boolean flag, not an errno value,
		 * so strerror(ferror(fp)) printed a meaningless message;
		 * report errno from the failed read instead. */
		fprintf(stderr, "Error while reading config file %s. Error %s",
		        conf_path, strerror(errno));
		/* BUGFIX: fp was never closed on any path (descriptor leak). */
		fclose(fp);
		pool_destroy(tmp_pool);
		return NULL;
	}

	fclose(fp);
	pool_destroy(tmp_pool);
	return conf;
}
int uart_register(struct uart *uart, const struct uart_params *uart_defparams) { extern const struct file_operations ttys_fops; struct device_module *cdev; cdev = pool_alloc(&cdev_serials_pool); if (!cdev) { return -ENOMEM; } if (uart_fill_name(uart)) { pool_free(&cdev_serials_pool, cdev); return -EBUSY; } if (uart_defparams) { memcpy(&uart->params, uart_defparams, sizeof(struct uart_params)); } else { memset(&uart->params, 0, sizeof(struct uart_params)); } memset(cdev, 0, sizeof(*cdev)); cdev->name = uart->dev_name; cdev->fops = (struct file_operations*)&ttys_fops; cdev->dev_data = uart; char_dev_register(cdev); return 0; }
/*
 * Allocate a packet from the socket's packet pool and (re)initialise its
 * bitstream.  Returns NULL if the pool is exhausted.
 */
net_packet_t net_packet_alloc(net_socket_t s)
{
	net_packet_t packet = (net_packet_t)pool_alloc(s->poolPackets); // Alloc from pool

	/* BUGFIX: pool_alloc may return NULL when the pool is exhausted; the
	 * original passed it straight to net_packet_init, dereferencing NULL. */
	if (packet != NULL)
		net_packet_init(packet);       // Set or Reset packet bitstream

	return packet;
}
static unsigned long long profile_palloc_single_chars(const unsigned long iterations) { struct timeval start; struct timeval stop; pool_reference char_ref_pool = pool_create(CHAR_REF_TYPE_ID); pool_reference char_pool = pool_create(CHAR_TYPE_ID); pool_grow(&char_ref_pool, iterations); global_reference *char_refs = pool_to_array(char_ref_pool); gettimeofday(&start, NULL); for (unsigned long i = 0 ; i < iterations ; ++i) { char_refs[i] = pool_alloc(&char_pool); } gettimeofday(&stop, NULL); pool_destroy(&char_ref_pool); pool_destroy(&char_pool); return ((stop.tv_sec - start.tv_sec) * 1000000LLU) + stop.tv_usec - start.tv_usec; }
struct usb_dev *usb_dev_alloc(struct usb_hcd *hcd) { struct usb_dev *dev = pool_alloc(&usb_devs); size_t idx; if (!dev) { return NULL; } idx = index_alloc(&hcd->enumerator, INDEX_MIN); assert(idx != INDEX_NONE); assert(idx < USB_HC_MAX_DEV); memset(dev, 0, sizeof(struct usb_dev)); dev->hcd = hcd; dev->idx = idx; dev->bus_idx = 0; if (!usb_endp_alloc(dev, &usb_desc_endp_control_default)) { usb_dev_free(dev); return NULL; } dlist_head_init(&dev->dev_link); return dev; }
/*ARGSUSED*/
/* Serve small gmp requests from the fixed-size pool, large ones from the heap. */
static void*
gmp_alloc(size_t size)
{
	return (size <= POOL_ELEM_SIZE) ? pool_alloc() : malloc(size);
}
/*
 * Allocate all working buffers for a mapping computation over
 * `nof_modules` modules and `nof_processors` processors.
 * Returns 0 on success, -1 if the mapping is already allocated or on any
 * allocation failure.  (On failure, partially allocated buffers are left
 * in place, matching the original cleanup-free behavior.)
 */
static int mapping_alloc(mapping_t *m, int nof_modules, int nof_processors)
{
	int i;

	/* BUGFIX: a pointer was printed with %x, which is undefined behavior
	 * on LP64 targets; use %p with a (void *) cast instead. */
	mdebug("addr=%p, nof_modules=%d\n", (void *) m, nof_modules);

	if (m->p_res)
		return -1;

	m->p_res = (int*) pool_alloc(nof_modules, sizeof(int));
	if (!m->p_res)
		return -1;

	memset(m->modules_x_node, 0, sizeof(int) * MAX(nodes));

	/* BUGFIX: every calloc result below was previously used unchecked;
	 * the per-row loops would have dereferenced NULL on failure. */
	join_function = calloc(1, sizeof (int) * nof_modules);
	joined_function = calloc(1, sizeof (int) * nof_modules);
	joined_function_inv = calloc(1, sizeof (int) * nof_modules);
	tmp_stages = calloc(1, sizeof (int) * nof_modules);
	tmp_c = calloc(1, sizeof (float) * nof_modules);
	wave.c = calloc(1, sizeof (float) * nof_modules);
	wave.force = calloc(1, sizeof (int) * nof_modules);
	wave.b = calloc(1, sizeof (float*) * nof_modules);
	if (!join_function || !joined_function || !joined_function_inv ||
	    !tmp_stages || !tmp_c || !wave.c || !wave.force || !wave.b)
		return -1;
	for (i = 0; i < nof_modules; i++) {
		wave.b[i] = calloc(1, sizeof (float) * nof_modules);
		if (!wave.b[i])
			return -1;
	}

	tmp_b = calloc(1, sizeof (float*) * nof_modules);
	if (!tmp_b)
		return -1;
	for (i = 0; i < nof_modules; i++) {
		tmp_b[i] = calloc(1, sizeof (float) * nof_modules);
		if (!tmp_b[i])
			return -1;
	}

	plat.C = calloc(1, sizeof (int) * nof_processors);
	plat.B = calloc(1, sizeof (float*) * nof_processors);
	if (!plat.C || !plat.B)
		return -1;
	for (i = 0; i < nof_processors; i++) {
		plat.B[i] = calloc(1, sizeof (float) * nof_processors);
		if (!plat.B[i])
			return -1;
	}

	result.P_m = m->p_res;
	return 0;
}
/*
 * Alloc the comp_ctx.
 *
 * Lazily creates the shared comp_ctx pool (double-checked under
 * COMP_POOL_LOCK) and allocates one context from it into *comp_ctx.
 * With zlib, refuses the allocation when it would exceed maxzlibmem and
 * accounts the context's size in zlib_used_memory; with SLZ, just clears
 * the direct/queued buffer state.
 * Returns 0 on success, -1 on memory limit or allocation failure.
 */
static inline int init_comp_ctx(struct comp_ctx **comp_ctx)
{
#ifdef USE_ZLIB
	z_stream *strm;

	/* Enforce the configured global zlib memory budget up front. */
	if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < sizeof(struct comp_ctx))
		return -1;
#endif

	/* Double-checked creation of the shared pool: recheck under the lock
	 * so concurrent callers create it only once. */
	if (unlikely(pool_comp_ctx == NULL)) {
		HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
		if (unlikely(pool_comp_ctx == NULL))
			pool_comp_ctx = create_pool("comp_ctx", sizeof(struct comp_ctx), MEM_F_SHARED);
		HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
	}

	*comp_ctx = pool_alloc(pool_comp_ctx);
	if (*comp_ctx == NULL)
		return -1;
#if defined(USE_SLZ)
	(*comp_ctx)->direct_ptr = NULL;
	(*comp_ctx)->direct_len = 0;
	(*comp_ctx)->queued = NULL;
#elif defined(USE_ZLIB)
	HA_ATOMIC_ADD(&zlib_used_memory, sizeof(struct comp_ctx));

	strm = &(*comp_ctx)->strm;
	strm->zalloc = alloc_zlib;
	strm->zfree = free_zlib;
	strm->opaque = *comp_ctx;
#endif
	return 0;
}