/*******************************************************************//**
Adds a new element to the locks cache, enlarging it if necessary.
Returns a pointer to the added row. If the row is already present then
no row is added and a pointer to the existing row is returned.
If the row cannot be allocated then NULL is returned.
@return	row */
static
i_s_locks_row_t*
add_lock_to_cache(
/*==============*/
	trx_i_s_cache_t*	cache,		/*!< in/out: cache */
	const lock_t*		lock,		/*!< in: the element to add */
	ulint			heap_no)	/*!< in: lock's record number
						or ULINT_UNDEFINED if the
						lock is a table lock */
{
	i_s_locks_row_t*	dst_row;

#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
	ulint	i;
	for (i = 0; i < 10000; i++) {
#endif
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
	/* quit if this lock is already present */
	dst_row = search_innodb_locks(cache, lock, heap_no);
	if (dst_row != NULL) {

		return(dst_row);
	}
#endif

	dst_row = (i_s_locks_row_t*)
		table_cache_create_empty_row(&cache->innodb_locks, cache);

	/* memory could not be allocated */
	if (dst_row == NULL) {

		return(NULL);
	}

	if (!fill_locks_row(dst_row, lock, heap_no, cache)) {

		/* memory could not be allocated */
		cache->innodb_locks.rows_used--;

		return(NULL);
	}

#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
	HASH_INSERT(
		/* the type used in the hash chain */
		i_s_hash_chain_t,
		/* hash_chain->"next" */
		next,
		/* the hash table */
		cache->locks_hash,
		/* fold */
		fold_lock(lock, heap_no),
		/* add this data to the hash */
		&dst_row->hash_chain);
#endif
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
	} /* for()-loop */
#endif

	return(dst_row);
}
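/*
For reference, the search-then-insert shape of add_lock_to_cache(), reduced
to a stand-alone sketch.  Everything here is hypothetical (row_t,
add_to_cache(), the modulo fold, the prepending insert) rather than InnoDB
code; the real HASH_INSERT links the row into the chain selected by the fold
in an equivalent way, and the position within the chain does not matter for
lookups. */

#include <stdio.h>
#include <stdlib.h>

#define N_CELLS	64			/* number of hash cells (chains) */

/* a cache row with an intrusive chain pointer, mirroring the way
i_s_locks_row_t embeds an i_s_hash_chain_t */
typedef struct row_struct	row_t;
struct row_struct {
	unsigned long	key;		/* stands in for (lock, heap_no) */
	row_t*		next;		/* hash chain link */
};

static row_t*	cells[N_CELLS];		/* the table: one chain per cell */

/* insert-if-absent: search the chain first, just as add_lock_to_cache()
calls search_innodb_locks() before HASH_INSERT */
static row_t*
add_to_cache(unsigned long key)
{
	unsigned long	fold = key % N_CELLS;
	row_t*		r;

	for (r = cells[fold]; r != NULL; r = r->next) {
		if (r->key == key) {

			return(r);	/* already present: return it */
		}
	}

	r = (row_t*) malloc(sizeof(*r));
	if (r == NULL) {

		return(NULL);		/* memory could not be allocated */
	}

	r->key = key;
	r->next = cells[fold];		/* link into the chain selected */
	cells[fold] = r;		/* by the fold */

	return(r);
}

int
main(void)
{
	row_t*	a = add_to_cache(42);
	row_t*	b = add_to_cache(42);	/* duplicate: no new row is added */

	printf("same row: %s\n", a == b ? "yes" : "no");
	return(0);
}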
/*******************************************************************//**
Copies data into the storage and returns a pointer to the copy. If the
same data chunk is already present, then a pointer to it is returned.
Data chunks are considered to be equal if len1 == len2 and
memcmp(data1, data2, len1) == 0. If "data" is not present (and thus
data_len bytes need to be allocated) and the size of storage is going
to become more than "memlim" then "data" is not added and NULL is
returned. To disable this behavior "memlim" can be set to 0, which
stands for "no limit". */
UNIV_INTERN
const void*
ha_storage_put_memlim(
/*==================*/
	ha_storage_t*	storage,	/*!< in/out: hash storage */
	const void*	data,		/*!< in: data to store */
	ulint		data_len,	/*!< in: data length */
	ulint		memlim)		/*!< in: memory limit to obey */
{
	void*			raw;
	ha_storage_node_t*	node;
	const void*		data_copy;
	ulint			fold;

	/* check if data chunk is already present */
	data_copy = ha_storage_get(storage, data, data_len);
	if (data_copy != NULL) {

		return(data_copy);
	}

	/* not present */

	/* check if we are allowed to allocate data_len bytes */
	if (memlim > 0
	    && ha_storage_get_size(storage) + data_len > memlim) {

		return(NULL);
	}

	/* we put the auxiliary node struct and the data itself in one
	contiguous block */
	raw = mem_heap_alloc(storage->heap,
			     sizeof(ha_storage_node_t) + data_len);

	node = (ha_storage_node_t*) raw;
	data_copy = (byte*) raw + sizeof(*node);
	memcpy((byte*) raw + sizeof(*node), data, data_len);

	node->data_len = data_len;
	node->data = data_copy;

	/* avoid repetitive calls to ut_fold_binary() in the HASH_INSERT
	macro */
	fold = ut_fold_binary(data, data_len);

	HASH_INSERT(
		ha_storage_node_t,	/* type used in the hash chain */
		next,			/* node->"next" */
		storage->hash,		/* the hash table */
		fold,			/* key */
		node);			/* add this data to the hash */

	/* the output should not be changed because it will spoil the
	hash table */
	return(data_copy);
}
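/*
A caller-side sketch of the contract documented above.  The helper below is
hypothetical (no such function exists in InnoDB): it exercises only
ha_storage_put_memlim() itself, on a storage assumed to have been created
elsewhere with the ha0storage.h constructor, and uses ut_a() for the
checks. */
static
void
dedup_example(
/*==========*/
	ha_storage_t*	storage)	/*!< in/out: already created storage */
{
	static const char	name[] = "trx_lock";
	static const char	other[] = "another chunk";
	const void*		p1;
	const void*		p2;

	/* the first call copies the chunk into the storage */
	p1 = ha_storage_put_memlim(storage, name, sizeof(name), 1024);

	/* the second call finds the existing chunk: no new allocation
	is made and the very same pointer comes back */
	p2 = ha_storage_put_memlim(storage, name, sizeof(name), 1024);
	ut_a(p1 == p2);

	/* with an absurdly small limit a new chunk is refused: NULL
	signals the caller that the limit would be exceeded, and
	nothing is added to the storage */
	ut_a(ha_storage_put_memlim(storage, other, sizeof(other), 1)
	     == NULL);
}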
static int callid_2_sdp_ctor(struct callid_2_sdp *c2s, char const *call_id, struct timeval const *now)
{
    SLOG(LOG_DEBUG, "Construct callid_2_sdp@%p for callid '%s'", c2s, call_id);

    c2s->sdp_parser = proto_sdp->ops->parser_new(proto_sdp);
    if (! c2s->sdp_parser) return -1;

    // Zero the whole buffer first: the entire array is used as a hash key,
    // and snprintf() NUL-terminates but does not zero the tail.
    memset(c2s->call_id, 0, sizeof c2s->call_id);
    snprintf(c2s->call_id, sizeof(c2s->call_id), "%s", call_id);
    c2s->last_used = *now;

    mutex_lock(&callids_2_sdps_mutex);
    callids_2_sdps_timeout(now);
    HASH_INSERT(&callids_2_sdps, c2s, &c2s->call_id, entry);
    TAILQ_INSERT_TAIL(&callids_2_sdps_used, c2s, used_entry);
    mutex_unlock(&callids_2_sdps_mutex);

    return 0;
}
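// The memset() above is load-bearing: the key is the whole call_id array,
// not just the C string inside it.  A stand-alone illustration of what goes
// wrong without the zero-padding (hash_bytes() is a hypothetical stand-in
// for whatever hash the table applies to the key bytes):

#include <stdio.h>
#include <string.h>

#define KEY_LEN 32

static unsigned hash_bytes(void const *p, size_t len)
{
    unsigned char const *b = p;
    unsigned h = 5381;
    while (len--) h = h * 33 + *b++;
    return h;
}

int main(void)
{
    char k1[KEY_LEN], k2[KEY_LEN];

    memset(k1, 0xAA, sizeof k1);    // simulate stale stack bytes
    snprintf(k1, sizeof k1, "%s", "abc123@host");

    memset(k2, 0, sizeof k2);       // zero-pad, as callid_2_sdp_ctor does
    snprintf(k2, sizeof k2, "%s", "abc123@host");

    // Hashing all KEY_LEN bytes: equal strings yield different digests
    // unless the buffer tail is zeroed consistently.
    printf("h(k1)=%u h(k2)=%u\n",
           hash_bytes(k1, sizeof k1), hash_bytes(k2, sizeof k2));
    return 0;
}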
/* Called by mylisten() to specify the number of pending connection
 * requests permitted for a listening socket.  A backlog of zero
 * specifies that at most one pending connection is permitted for the
 * socket.
 */
void _mysock_set_backlog(mysock_context_t *ctx, unsigned int backlog)
{
    unsigned int k, max_len = backlog + 1;
    uint16_t local_port;
    listen_queue_t *q;

    assert(ctx && ctx->listening && ctx->bound);

    local_port = ntohs(_network_get_port(&ctx->network_state));
    assert(local_port > 0);

    PTHREAD_CALL(pthread_rwlock_wrlock(&listen_lock));

    if ((q = _get_connection_queue(ctx)) == NULL)
    {
        /* first backlog specified for a new listening socket */
        DEBUG_LOG(("allocating connection queue for local port %hu\n",
                   local_port));

        q = (listen_queue_t *) calloc(1, sizeof(listen_queue_t));
        assert(q);

        q->local_port = local_port;
        PTHREAD_CALL(pthread_cond_init(&q->connection_cond, NULL));
        PTHREAD_CALL(pthread_mutex_init(&q->connection_lock, NULL));

        HASH_INSERT(listen_table, ctx->my_sd, q);
    }

    assert(q);
    assert(q->local_port == local_port);

    if (max_len > q->max_len)
    {
        q->connection_queue = (connect_request_t *)
            realloc(q->connection_queue,
                    max_len * sizeof(connect_request_t));
        assert(q->connection_queue);

        memset(q->connection_queue + q->max_len, 0,
               (max_len - q->max_len) * sizeof(connect_request_t));
    }

    for (k = q->max_len; k < max_len; ++k)
        q->connection_queue[k].sd = -1;

    q->max_len = max_len;

    PTHREAD_CALL(pthread_rwlock_unlock(&listen_lock));
}
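/* The resize logic above, extracted into a stand-alone sketch (all names
 * here are local to the sketch, not the mysock API): realloc() leaves the
 * new tail uninitialized, so the memset() and the sd == -1 sentinel loop
 * are what make freshly grown slots recognizably empty.
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int sd; } connect_request_t;

static connect_request_t *grow_queue(connect_request_t *queue,
                                     unsigned int old_len,
                                     unsigned int new_len)
{
    unsigned int k;

    if (new_len > old_len)
    {
        queue = (connect_request_t *)
            realloc(queue, new_len * sizeof(connect_request_t));
        assert(queue);

        memset(queue + old_len, 0,
               (new_len - old_len) * sizeof(connect_request_t));
    }

    for (k = old_len; k < new_len; ++k)
        queue[k].sd = -1;   /* -1 marks an unused slot */

    return queue;
}

int main(void)
{
    connect_request_t *q = grow_queue(NULL, 0, 5 + 1);  /* backlog of 5 */
    assert(q[0].sd == -1 && q[5].sd == -1);
    free(q);
    return 0;
}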
/**********************************************************************//**
Allocate a buffer block to the buddy allocator. */
static
void
buf_buddy_block_register(
/*=====================*/
	buf_block_t*	block)	/*!< in: buffer frame to allocate */
{
	buf_pool_t*	buf_pool = buf_pool_from_block(block);
	const ulint	fold = BUF_POOL_ZIP_FOLD(block);

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(!mutex_own(&buf_pool->zip_mutex));
	ut_ad(buf_block_get_state(block) == BUF_BLOCK_READY_FOR_USE);

	buf_block_set_state(block, BUF_BLOCK_MEMORY);

	ut_a(block->frame);
	ut_a(!ut_align_offset(block->frame, UNIV_PAGE_SIZE));

	ut_ad(!block->page.in_page_hash);
	ut_ad(!block->page.in_zip_hash);
	ut_d(block->page.in_zip_hash = TRUE);

	HASH_INSERT(buf_page_t, hash, buf_pool->zip_hash, fold, &block->page);

	ut_d(buf_pool->buddy_n_frames++);
}
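/*
buf_buddy_block_register() leans on InnoDB's debug machinery: ut_ad()
asserts only in UNIV_DEBUG builds, and ut_d() compiles its statement only
there, so the in_zip_hash membership flag costs nothing in release
binaries.  A minimal stand-alone imitation of that pattern; dbg_do(),
dbg_assert() and struct page are hypothetical, keyed off NDEBUG rather
than UNIV_DEBUG. */

#include <assert.h>

#ifndef NDEBUG
# define dbg_do(s)	s		/* executed in debug builds only */
# define dbg_assert(c)	assert(c)
#else
# define dbg_do(s)	((void) 0)	/* compiled out in release */
# define dbg_assert(c)	((void) 0)
#endif

struct page {
#ifndef NDEBUG
	int	in_zip_hash;	/* hash membership flag, debug builds only */
#endif
	int	payload;	/* stands in for the real fields */
};

static void
register_page(struct page* p)
{
	dbg_assert(!p->in_zip_hash);	/* must not be inserted twice */
	dbg_do(p->in_zip_hash = 1);	/* record membership, free in
					release builds */

	/* ... the actual hash insertion would go here ... */
}

int
main(void)
{
	struct page	p = {0};

	register_page(&p);
	return(0);
}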