CryptoData_t *crypto_create (const SEC_CRYPTO *plugin,
			     size_t           data_size,
			     void             *owner,
			     int              endpoint)
{
	CryptoData_t	*dp;
	Crypto_t	h;
	unsigned	n;
	void		*nhandles;

	h = handle_alloc (handles);
	if (!h) {
		/* Handle space exhausted: grow the handle table, capped at
		   max_handles, and grow the crypto pointer table to match. */
		n = cur_handles + min_handles;
		if (n > max_handles)
			n = max_handles;
		n -= cur_handles;
		if (!n) {
			warn_printf ("Crypto: max. # of contexts reached (1)!");
			return (NULL);
		}
		nhandles = handle_extend (handles, n);
		if (!nhandles) {
			warn_printf ("Crypto: max. # of contexts reached (2)!");
			return (NULL);
		}
		handles = nhandles;
		h = handle_alloc (handles);
		if (!h) {
			fatal_printf ("Crypto: can't create a handle!");
			return (NULL);
		}
		crypto = xrealloc (crypto,
				   (cur_handles + 1 + n) * sizeof (CryptoData_t *));
		if (!crypto) {
			fatal_printf ("Crypto: can't extend crypto table!");
			return (NULL);
		}
		cur_handles += n;
		sec_crypt_alloc += n * sizeof (CryptoData_t *);
	}
	dp = xmalloc (sizeof (CryptoData_t) + data_size);
	if (!dp) {
		warn_printf ("Crypto: Out of memory for crypto data!");
		handle_free (handles, h);
		return (NULL);
	}
	sec_crypt_alloc += sizeof (CryptoData_t) + data_size;
	dp->handle = h;
	dp->plugin = plugin;
	if (endpoint)
		dp->parent.endpoint = owner;
	else
		dp->parent.participant = owner;
	dp->endpoint = endpoint;
	dp->data = dp + 1;	/* Plugin data lives directly after the header. */
	crypto [h] = dp;	/* crypto is an array of CryptoData_t *. */
	return (dp);
}
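/* Usage sketch (not from the original source): allocate a crypto context for
 * a participant and initialize the plugin-private state that follows the
 * header.  MyPluginState_t and the attach_crypto() wrapper are hypothetical;
 * only crypto_create() and the CryptoData_t fields it fills in come from the
 * function above. */
typedef struct { int dummy; } MyPluginState_t;	/* hypothetical plugin state */

static int attach_crypto (const SEC_CRYPTO *plugin, void *participant)
{
	CryptoData_t	*dp;

	dp = crypto_create (plugin, sizeof (MyPluginState_t),
			    participant, 0 /* participant, not endpoint */);
	if (!dp)
		return (-1);	/* Handle table full or out of memory. */

	memset (dp->data, 0, sizeof (MyPluginState_t));
	return ((int) dp->handle);	/* Callers refer to the context by handle. */
}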
static void _tcp_accept(uv_stream_t *master, int status, bool tls)
{
	if (status != 0) {
		return;
	}

	uv_stream_t *client = handle_alloc(master->loop);
	if (!client) {
		return;
	}
	memset(client, 0, sizeof(*client));
	io_create(master->loop, (uv_handle_t *)client, SOCK_STREAM);
	if (uv_accept(master, client) != 0) {
		uv_close((uv_handle_t *)client, io_free);
		return;
	}

	/* Set deadlines for TCP connection and start reading.
	 * It will re-check every half of a request time limit if the connection
	 * is idle and should be terminated, this is an educated guess. */
	struct session *session = client->data;
	session->has_tls = tls;
	if (tls && !session->tls_ctx) {
		session->tls_ctx = tls_new(master->loop->data);
	}
	uv_timer_t *timer = &session->timeout;
	uv_timer_init(master->loop, timer);
	timer->data = client;
	uv_timer_start(timer, tcp_timeout_trigger,
		       KR_CONN_RTT_MAX/2, KR_CONN_RTT_MAX/2);
	io_start_read((uv_handle_t *)client);
}
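/* Sketch (not from the original source) of the tcp_timeout_trigger() callback
 * referenced above: it fires every KR_CONN_RTT_MAX/2 and closes the connection
 * once it has gone idle.  The session->busy flag is an assumption; the real
 * session layout is not shown here. */
static void tcp_timeout_trigger(uv_timer_t *timer)
{
	uv_handle_t *client = timer->data;
	struct session *session = client->data;

	if (session->busy) {		/* assumed "request in flight" flag */
		uv_timer_again(timer);	/* still working: check again later */
	} else {
		uv_timer_stop(timer);
		uv_close(client, io_free);	/* same close callback as above */
	}
}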
int handle_mapping_hash_add(hash_table_t *p_hash, uint64_t object_id,
			    unsigned int handle_hash,
			    const void *data, uint32_t datalen)
{
	int rc;
	struct gsh_buffdesc buffkey;
	struct gsh_buffdesc buffval;
	digest_pool_entry_t *digest;
	handle_pool_entry_t *handle;

	if (datalen >= sizeof(handle->fh_data))
		return HANDLEMAP_INVALID_PARAM;

	digest = digest_alloc();
	if (!digest)
		return HANDLEMAP_SYSTEM_ERROR;

	handle = handle_alloc();
	if (!handle) {
		digest_free(digest);
		return HANDLEMAP_SYSTEM_ERROR;
	}

	digest->nfs23_digest.object_id = object_id;
	digest->nfs23_digest.handle_hash = handle_hash;
	memset(handle->fh_data, 0, sizeof(handle->fh_data));
	memcpy(handle->fh_data, data, datalen);
	handle->fh_len = datalen;

	buffkey.addr = (caddr_t) digest;
	buffkey.len = sizeof(digest_pool_entry_t);
	buffval.addr = (caddr_t) handle;
	buffval.len = sizeof(handle_pool_entry_t);

	rc = hashtable_test_and_set(handle_map_hash, &buffkey, &buffval,
				    HASHTABLE_SET_HOW_SET_NO_OVERWRITE);
	if (rc != HASHTABLE_SUCCESS) {
		digest_free(digest);
		handle_free(handle);

		if (rc != HASHTABLE_ERROR_KEY_ALREADY_EXISTS) {
			LogCrit(COMPONENT_FSAL,
				"ERROR %d inserting entry to handle mapping hash table",
				rc);
			return HANDLEMAP_HASHTABLE_ERROR;
		} else {
			return HANDLEMAP_EXISTS;
		}
	}

	return HANDLEMAP_SUCCESS;
}
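/* Usage sketch (not from the original source): insert a mapping and treat a
 * duplicate key as success, since HANDLEMAP_EXISTS means the entry is already
 * present.  Only names from the function above are used; the remember_handle()
 * wrapper itself is hypothetical. */
static int remember_handle(hash_table_t *p_hash, uint64_t object_id,
			   unsigned int handle_hash,
			   const void *fh_data, uint32_t fh_len)
{
	int rc = handle_mapping_hash_add(p_hash, object_id, handle_hash,
					 fh_data, fh_len);

	if (rc == HANDLEMAP_SUCCESS || rc == HANDLEMAP_EXISTS)
		return HANDLEMAP_SUCCESS;

	return rc;	/* invalid parameter, allocation or hashtable error */
}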
// Replace malloc
void* malloc(size_t sz)
{
	static __thread int no_hook = 0;

	if (initializing == -1)
		init();
	if (initializing != 0)	// Avoid circular dependency between malloc and dlsym
		return initMalloc(sz);

	void *ret = real_malloc(sz);
	if (no_hook == 0) {
		no_hook = 1;	// Block recursion: handle_alloc() may itself allocate.
		handle_alloc(sz, ret, "malloc");
		no_hook = 0;
	}
	return ret;
}
static void tcp_accept(uv_stream_t *master, int status)
{
	if (status != 0) {
		return;
	}

	uv_stream_t *client = handle_alloc(master->loop, sizeof(*client));
	if (!client) {
		return;
	}
	memset(client, 0, sizeof(*client));
	io_create(master->loop, (uv_handle_t *)client, SOCK_STREAM);
	if (uv_accept(master, client) != 0) {
		handle_free((uv_handle_t *)client);
		return;
	}
	io_start_read((uv_handle_t *)client);
}
// Replace calloc
void* calloc(size_t nmb, size_t sz)
{
	static __thread int no_hook = 0;
	void *ret = NULL;

	if (initializing == -1)
		init();
	if (initializing != 0) {	// Avoid circular dependency between calloc and dlsym
		ret = initMalloc(nmb * sz);
		if (ret)		// initMalloc() may fail; don't memset NULL.
			memset(ret, 0, nmb * sz);
		return ret;
	}

	ret = real_calloc(nmb, sz);
	if (no_hook == 0) {
		no_hook = 1;	// Block recursion: handle_alloc() may itself allocate.
		handle_alloc(nmb * sz, ret, "calloc");
		no_hook = 0;
	}
	return ret;
}
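/* Sketch (not from the original source) of the init()/initMalloc() pair that
 * the malloc and calloc interposers above rely on, assuming the usual
 * dlsym(RTLD_NEXT, ...) technique.  The static bootstrap arena and the exact
 * meaning of initializing (-1: not started, 1: resolving, 0: ready) are
 * assumptions consistent with how the hooks test it. */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stddef.h>

static int initializing = -1;	/* -1: not started, 1: resolving, 0: ready */
static void *(*real_malloc)(size_t);
static void *(*real_calloc)(size_t, size_t);

/* dlsym() itself may call calloc() before the real symbols are resolved,
 * so bootstrap allocations come from a small static arena (assumed here). */
static char init_heap[4096];
static size_t init_heap_used;

static void *initMalloc(size_t sz)
{
	void *p = init_heap + init_heap_used;
	init_heap_used += (sz + 15) & ~(size_t)15;	/* crude 16-byte alignment */
	return (init_heap_used <= sizeof(init_heap)) ? p : NULL;
}

static void init(void)
{
	initializing = 1;	/* hooks fall back to initMalloc() from here on */
	real_malloc = (void *(*)(size_t))dlsym(RTLD_NEXT, "malloc");
	real_calloc = (void *(*)(size_t, size_t))dlsym(RTLD_NEXT, "calloc");
	initializing = 0;
}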
int buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                    struct buflib_callbacks *ops)
{
    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    size = (size + sizeof(union buflib_data) - 1) / sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to handle table entry and
            * name length, and the ops pointer */
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again. */
        union buflib_data* last_block = find_block_before(ctx,
                                                          ctx->alloc_end, false);
        struct buflib_callbacks* ops = last_block[2].ops;
        unsigned hints = 0;
        if (!ops || !ops->shrink_callback)
        {   /* the last one isn't shrinkable:
             * make room in front of a shrinkable and move this alloc */
            hints = BUFLIB_SHRINK_POS_FRONT;
            hints |= last_block->val * sizeof(union buflib_data);
        }
        else
        {   /* the last is shrinkable, make room for handles directly */
            hints = BUFLIB_SHRINK_POS_BACK;
            hints |= 16*sizeof(union buflib_data);
        }
        /* buflib_compact_and_shrink() will compact and move last_block
         * if possible */
        if (buflib_compact_and_shrink(ctx, hints))
            goto handle_alloc;
        return -1;
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = find_first_free(ctx);; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if (block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if (block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        unsigned hint = BUFLIB_SHRINK_POS_FRONT |
                    ((size*sizeof(union buflib_data)) & BUFLIB_SHRINK_SIZE_MASK);
        if (buflib_compact_and_shrink(ctx, hint))
        {
            goto buffer_alloc;
        }
        else
        {
            handle->val = 1;
            handle_free(ctx, handle);
            return -2;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle. */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops;
    if (name_len > 0)   /* name may be NULL */
        strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
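/* Usage sketch (not from the original source) for buflib_alloc_ex(), covering
 * both the variant above and the older one below.  buflib_init() and
 * buflib_free() are assumed companions; buflib_get_data() appears in the older
 * variant.  Note the differing failure conventions: the variant above returns
 * a negative code, the one below returns 0, so handle <= 0 covers both. */
static char storage[32 * 1024];		/* backing buffer, size arbitrary */
static struct buflib_context demo_ctx;

static void buflib_demo(void)
{
    buflib_init(&demo_ctx, storage, sizeof(storage));	/* assumed initializer */

    /* NULL ops: no shrink callback here (the older variant below substitutes
     * default_callbacks instead). */
    int handle = buflib_alloc_ex(&demo_ctx, 512, "demo", NULL);
    if (handle <= 0)
        return;

    char *data = buflib_get_data(&demo_ctx, handle);	/* valid until compaction moves it */
    memset(data, 0, 512);

    buflib_free(&demo_ctx, handle);	/* assumed counterpart */
}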
int buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                    struct buflib_callbacks *ops)
{
    /* busy wait if there's a thread owning the lock */
    while (ctx->handle_lock != 0)
        YIELD();

    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    size = (size + sizeof(union buflib_data) - 1) / sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to handle table entry and
            * name length, and the ops pointer */
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again. */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
        {
            /* first try to shrink the alloc before the handle table
             * to make room for new handles */
            int handle = ctx->handle_table - ctx->last_handle;
            union buflib_data* last_block = handle_to_block(ctx, handle);
            struct buflib_callbacks* ops = last_block[2].ops;
            if (ops && ops->shrink_callback)
            {
                char *data = buflib_get_data(ctx, handle);
                unsigned hint = BUFLIB_SHRINK_POS_BACK |
                                10*sizeof(union buflib_data);
                if (ops->shrink_callback(handle, hint, data,
                        (char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK)
                {   /* retry one more time */
                    goto handle_alloc;
                }
            }
            return 0;
        }
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = ctx->first_free_block;; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if (block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if (block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        if (buflib_compact_and_shrink(ctx,
                (size*sizeof(union buflib_data)) & BUFLIB_SHRINK_SIZE_MASK))
        {
            goto buffer_alloc;
        }
        else
        {
            handle->val = 1;
            handle_free(ctx, handle);
            return 0;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle. */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops ?: &default_callbacks;   /* GCC ?: extension */
    if (name_len > 0)   /* name may be NULL */
        strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block. */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
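/* Sketch (not from the original source) of a cooperating shrink_callback,
 * with the prototype inferred from the call site above
 * (shrink_callback(handle, hints, start, size) == BUFLIB_CB_OK on success).
 * buflib_shrink() and the refusal return value are assumptions; demo_ctx is
 * the context from the usage sketch earlier. */
static int demo_shrink_callback(int handle, unsigned hints,
                                void *start, size_t old_size)
{
    size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;	/* bytes requested back */

    if ((hints & BUFLIB_SHRINK_POS_BACK) && wanted < old_size)
    {
        /* Give up 'wanted' bytes at the end of the allocation. */
        if (buflib_shrink(&demo_ctx, handle, start, old_size - wanted))
            return BUFLIB_CB_OK;
    }
    return -1;	/* anything but BUFLIB_CB_OK refuses (exact code assumed) */
}

static struct buflib_callbacks demo_shrinkable_ops = {
    .shrink_callback = demo_shrink_callback,	/* other callbacks left NULL */
};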