static int binlog_mem_add(binlog *bl, void *buf, unsigned int len)
{
    binlog_entry *entry;

    /* Grow the in-memory cache if the write index has caught up with it. */
    if (bl->write_index >= bl->alloc && binlog_grow(bl) < 0)
        return BINLOG_EDROPPED;

    entry = malloc(sizeof(*entry));
    if (!entry)
        return BINLOG_EDROPPED;
    entry->data = malloc(len);
    if (!entry->data) {
        free(entry);
        return BINLOG_EDROPPED;
    }

    entry->size = len;
    memcpy(entry->data, buf, len);
    bl->cache[bl->write_index++] = entry;
    bl->mem_size += entry_size(entry);
    bl->mem_avail += len;
    return 0;
}
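/*
 * A minimal sketch of the entry_size() accounting used by binlog_mem_add(),
 * assuming it charges both the bookkeeping struct and the copied payload.
 * A plausible definition, not the verbatim one:
 */
#define entry_size(entry) ((entry)->size + sizeof(*(entry)))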
static void test_kdc_remove_lookaside_multiple(void **state)
{
    struct entry *e1;
    krb5_context context = *state;
    krb5_data req1 = string2data("I'm a test request");
    krb5_data rep1 = string2data("I'm a test response");
    krb5_data req2 = string2data("I'm a different test request");

    e1 = insert_entry(context, &req1, &rep1, 0);
    insert_entry(context, &req2, NULL, 0);

    kdc_remove_lookaside(context, &req2);

    assert_null(k5_hashtab_get(hash_table, req2.data, req2.length));
    assert_ptr_equal(k5_hashtab_get(hash_table, req1.data, req1.length), e1);
    assert_int_equal(num_entries, 1);
    assert_int_equal(total_size, entry_size(&req1, &rep1));

    kdc_remove_lookaside(context, &req1);

    assert_null(k5_hashtab_get(hash_table, req1.data, req1.length));
    assert_int_equal(num_entries, 0);
    assert_int_equal(total_size, 0);
}
static unsigned long long _avl_espacio(struct avl_nodo *nodo)
{
    if (nodo == NULL)
        return 0;

    /* explained in abb.c */
    unsigned long long tama_estructura =
        sizeof(struct avl_nodo) + entry_size(nodo->e);
    return tama_estructura + _avl_espacio(nodo->izq) + _avl_espacio(nodo->der);
}
void command_defragment(FILE *f)
{
    uint64 read_index, write_index, index;
    struct entry_t entry;
    int read_count = 0;
    int write_count = 0;

    read_index = write_index = jump_to_first_word(f);

    /* Copy each live entry back toward the start of the file, skipping
     * over entries that no longer exist. */
    while (write_count < header.actual_words) {
        index = read_entry(f, &entry, read_index, READ_ENTRY_ALL);
        read_index = index;
        if (!existent_entry(&entry))
            continue;
        read_count++;
        if (fseek(f, write_index, SEEK_SET) != 0)
            WTF();
        write_entry(f, &entry);
        write_count++;
        write_index += entry_size(&entry);
    }

    /* All live entries are packed; shrink the file to fit. */
    header.total_words = header.actual_words;
    write_header(&header, f);
    ftruncate(fileno(f), write_index);
}
static void test_kdc_insert_lookaside_cache_expire(void **state)
{
    struct entry *e;
    krb5_context context = *state;
    krb5_data req1 = string2data("I'm a test request");
    krb5_data rep1 = string2data("I'm a test response");
    size_t e1_size = entry_size(&req1, &rep1);
    krb5_data req2 = string2data("I'm a different test request");
    size_t e2_size = entry_size(&req2, NULL);
    struct entry *hash1_ent, *hash2_ent, *exp_ent;

    time_return(0, 0);
    kdc_insert_lookaside(context, &req1, &rep1);

    hash1_ent = k5_hashtab_get(hash_table, req1.data, req1.length);
    assert_non_null(hash1_ent);
    assert_true(data_eq(hash1_ent->req_packet, req1));
    assert_true(data_eq(hash1_ent->reply_packet, rep1));
    exp_ent = K5_TAILQ_FIRST(&expiration_queue);
    assert_true(data_eq(exp_ent->req_packet, req1));
    assert_true(data_eq(exp_ent->reply_packet, rep1));
    assert_int_equal(num_entries, 1);
    assert_int_equal(total_size, e1_size);

    /* Increase hits on entry */
    e = k5_hashtab_get(hash_table, req1.data, req1.length);
    assert_non_null(e);
    e->num_hits = 5;

    time_return(STALE_TIME + 1, 0);
    kdc_insert_lookaside(context, &req2, NULL);

    assert_null(k5_hashtab_get(hash_table, req1.data, req1.length));
    assert_int_equal(max_hits_per_entry, 5);
    hash2_ent = k5_hashtab_get(hash_table, req2.data, req2.length);
    assert_non_null(hash2_ent);
    assert_true(data_eq(hash2_ent->req_packet, req2));
    assert_int_equal(hash2_ent->reply_packet.length, 0);
    exp_ent = K5_TAILQ_FIRST(&expiration_queue);
    assert_true(data_eq(exp_ent->req_packet, req2));
    assert_int_equal(exp_ent->reply_packet.length, 0);
    assert_int_equal(num_entries, 1);
    assert_int_equal(total_size, e2_size);
}
static void test_entry_size_no_response(void **state)
{
    size_t result;
    const krb5_data req = string2data("I'm a test request");

    result = entry_size(&req, NULL);
    /* 18 is strlen("I'm a test request"); a NULL reply adds nothing. */
    assert_int_equal(result, sizeof(struct entry) + 18);
}
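/*
 * A minimal sketch of the entry_size() helper these lookaside tests
 * exercise, inferred from test_entry_size_no_response above: the accounted
 * size is assumed to be the entry struct plus the request length, plus the
 * reply length when a reply is present.  The real helper may differ.
 */
static size_t entry_size(const krb5_data *req, const krb5_data *rep)
{
    size_t size = sizeof(struct entry) + req->length;

    if (rep != NULL)
        size += rep->length;
    return size;
}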
VkResult tu_GetPipelineCacheData(VkDevice _device,
                                 VkPipelineCache _cache,
                                 size_t *pDataSize,
                                 void *pData)
{
    TU_FROM_HANDLE(tu_device, device, _device);
    TU_FROM_HANDLE(tu_pipeline_cache, cache, _cache);
    struct cache_header *header;
    VkResult result = VK_SUCCESS;

    pthread_mutex_lock(&cache->mutex);

    const size_t size = sizeof(*header) + cache->total_size;
    if (pData == NULL) {
        pthread_mutex_unlock(&cache->mutex);
        *pDataSize = size;
        return VK_SUCCESS;
    }
    if (*pDataSize < sizeof(*header)) {
        pthread_mutex_unlock(&cache->mutex);
        *pDataSize = 0;
        return VK_INCOMPLETE;
    }

    void *p = pData, *end = pData + *pDataSize;
    header = p;
    header->header_size = sizeof(*header);
    header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
    header->vendor_id = 0 /* TODO */;
    header->device_id = 0 /* TODO */;
    memcpy(header->uuid, device->physical_device->cache_uuid, VK_UUID_SIZE);
    p += header->header_size;

    struct cache_entry *entry;
    for (uint32_t i = 0; i < cache->table_size; i++) {
        if (!cache->hash_table[i])
            continue;
        entry = cache->hash_table[i];
        const uint32_t size = entry_size(entry);
        if (end < p + size) {
            result = VK_INCOMPLETE;
            break;
        }

        memcpy(p, entry, size);
        for (int j = 0; j < MESA_SHADER_STAGES; ++j)
            ((struct cache_entry *) p)->variants[j] = NULL;
        p += size;
    }
    *pDataSize = p - pData;

    pthread_mutex_unlock(&cache->mutex);
    return result;
}
//
// Slab_cache
//
PUBLIC inline
Slab_cache::Slab_cache(unsigned long slab_size,
                       unsigned elem_size,
                       unsigned alignment,
                       char const *name)
  : _slab_size(slab_size),
    _entry_size(entry_size(elem_size, alignment)),
    _num_empty(0),
    _name(name)
{
    lock.init();
    _elem_num = (_slab_size - sizeof(Slab)) / _entry_size;
}
int32_t block_size(block_t *l)
{
    entry_t *e;
    int32_t bsize;

    if (!l)
        return -1;

    /* Account for the block's leading uint64_t, then each entry. */
    bsize = sizeof(uint64_t);
    for (e = block_head(l); e != NULL; e = entry_next(e))
        bsize += entry_size(e);
    return bsize;
}
static void add_to_sync_queue(struct sync_queue *queue,
                              u32 sync_point_id, u32 sync_point_value,
                              u32 nr_slots, struct nvmap_client *user_nvmap,
                              struct nvmap_handle **handles, u32 nr_handles,
                              u32 first_get,
                              struct nvhost_userctx_timeout *timeout)
{
    struct nvhost_cdma *cdma;
    struct nvhost_master *host;
    u32 size, write = queue->write;
    u32 *p = queue->buffer + write;

    cdma = container_of(queue, struct nvhost_cdma, sync_queue);
    host = cdma_to_dev(cdma);

    BUG_ON(sync_point_id == NVSYNCPT_INVALID);
    BUG_ON(sync_queue_space(queue) < nr_handles);

    size = SQ_IDX_HANDLES;
    size += entry_size(nr_handles);

    write += size;
    BUG_ON(write > host->sync_queue_size);

    p[SQ_IDX_SYNCPT_ID] = sync_point_id;
    p[SQ_IDX_SYNCPT_VAL] = sync_point_value;
    p[SQ_IDX_FIRST_GET] = first_get;
    p[SQ_IDX_TIMEOUT] = timeout->timeout;
    p[SQ_IDX_NUM_SLOTS] = nr_slots;
    p[SQ_IDX_NUM_HANDLES] = nr_handles;
    *(void **)(&p[SQ_IDX_TIMEOUT_CTX]) = timeout;

    BUG_ON(!user_nvmap);
    *(struct nvmap_client **)(&p[SQ_IDX_NVMAP_CTX]) =
        nvmap_client_get(user_nvmap);

    if (nr_handles) {
        memcpy(&p[SQ_IDX_HANDLES], handles,
               nr_handles * sizeof(struct nvmap_handle *));
    }

    /* If there's not enough room for another entry, wrap to the start. */
    if ((write + SYNC_QUEUE_MIN_ENTRY) > host->sync_queue_size) {
        /*
         * It's an error for the read position to be zero, as that
         * would mean we emptied the queue while adding something.
         */
        BUG_ON(queue->read == 0);
        write = 0;
    }

    queue->write = write;
}
static void test_kdc_insert_lookaside_multiple(void **state)
{
    krb5_context context = *state;
    krb5_data req1 = string2data("I'm a test request");
    krb5_data rep1 = string2data("I'm a test response");
    size_t e1_size = entry_size(&req1, &rep1);
    krb5_data req2 = string2data("I'm a different test request");
    size_t e2_size = entry_size(&req2, NULL);
    struct entry *hash1_ent, *hash2_ent, *exp_first, *exp_last;

    time_return(0, 0);
    kdc_insert_lookaside(context, &req1, &rep1);

    hash1_ent = k5_hashtab_get(hash_table, req1.data, req1.length);
    assert_non_null(hash1_ent);
    assert_true(data_eq(hash1_ent->req_packet, req1));
    assert_true(data_eq(hash1_ent->reply_packet, rep1));
    exp_first = K5_TAILQ_FIRST(&expiration_queue);
    assert_true(data_eq(exp_first->req_packet, req1));
    assert_true(data_eq(exp_first->reply_packet, rep1));
    assert_int_equal(num_entries, 1);
    assert_int_equal(total_size, e1_size);

    time_return(0, 0);
    kdc_insert_lookaside(context, &req2, NULL);

    hash2_ent = k5_hashtab_get(hash_table, req2.data, req2.length);
    assert_non_null(hash2_ent);
    assert_true(data_eq(hash2_ent->req_packet, req2));
    assert_int_equal(hash2_ent->reply_packet.length, 0);
    exp_last = K5_TAILQ_LAST(&expiration_queue, entry_queue);
    assert_true(data_eq(exp_last->req_packet, req2));
    assert_int_equal(exp_last->reply_packet.length, 0);
    assert_int_equal(num_entries, 2);
    assert_int_equal(total_size, e1_size + e2_size);
}
bool store_entry(char *path, uint8_t *key, entry *entry)
{
    size_t size = -ENTRY_LEN(0);
    size += entry_size(entry);
    size += 1024 - (size % 1024);   /* pad to the next 1 KiB boundary */

    void *addr = mmfile(path, &size);
    box *box = addr;
    if (!addr)
        return false;

    write_entry(BOX_DATA(box), entry);
    encrypt_box(key, box, ENTRY_LEN(size));
    return mmsync(path, addr, size);
}
static u32 *advance_next_entry(struct nvhost_cdma *cdma, u32 *read)
{
    struct nvhost_master *host;
    u32 ridx;

    host = cdma_to_dev(cdma);

    /* move sync_queue read ptr to next entry */
    ridx = read - cdma->sync_queue.buffer;
    ridx += SQ_IDX_HANDLES + entry_size(read[SQ_IDX_NUM_HANDLES]);
    if ((ridx + SYNC_QUEUE_MIN_ENTRY) > host->sync_queue_size)
        ridx = 0;

    /* return sync_queue entry */
    return cdma->sync_queue.buffer + ridx;
}
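/*
 * A minimal sketch of the entry_size() helper assumed by the sync-queue code
 * above: it converts a handle count into the number of u32 words that many
 * nvmap_handle pointers occupy, rounded up.  Inferred from how
 * add_to_sync_queue() memcpys the handle array in at SQ_IDX_HANDLES; the
 * real helper may differ.
 */
static inline u32 entry_size(u32 nr_handles)
{
    return DIV_ROUND_UP(nr_handles * sizeof(struct nvmap_handle *),
                        sizeof(u32));
}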
Slab_cache::Slab_cache(unsigned elem_size,
                       unsigned alignment,
                       char const *name,
                       unsigned long min_size,
                       unsigned long max_size)
  : _entry_size(entry_size(elem_size, alignment)),
    _num_empty(0),
    _name(name)
{
    lock.init();

    // Double the slab size until it holds at least 8 elements
    // or reaches max_size.
    for (_slab_size = min_size;
         (_slab_size - sizeof(Slab)) / _entry_size < 8
           && _slab_size < max_size;
         _slab_size <<= 1)
        ;

    _elem_num = (_slab_size - sizeof(Slab)) / _entry_size;
}
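// A minimal sketch of the entry_size() helper both Slab_cache constructors
// rely on, assuming it rounds the element size up to the requested alignment
// and that the alignment is a power of two; an inferred implementation, not
// the verbatim one.
static inline unsigned entry_size(unsigned elem_size, unsigned alignment)
{
    return (elem_size + alignment - 1) & ~(alignment - 1);
}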
void tu_pipeline_cache_load(struct tu_pipeline_cache *cache,
                            const void *data, size_t size)
{
    struct tu_device *device = cache->device;
    struct cache_header header;

    if (size < sizeof(header))
        return;
    memcpy(&header, data, sizeof(header));
    if (header.header_size < sizeof(header))
        return;
    if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
        return;
    if (header.vendor_id != 0 /* TODO */)
        return;
    if (header.device_id != 0 /* TODO */)
        return;
    if (memcmp(header.uuid, device->physical_device->cache_uuid,
               VK_UUID_SIZE) != 0)
        return;

    char *end = (void *) data + size;
    char *p = (void *) data + header.header_size;

    while (end - p >= sizeof(struct cache_entry)) {
        struct cache_entry *entry = (struct cache_entry *) p;
        struct cache_entry *dest_entry;
        size_t size = entry_size(entry);

        if (end - p < size)
            break;

        dest_entry = vk_alloc(&cache->alloc, size, 8,
                              VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
        if (dest_entry) {
            memcpy(dest_entry, entry, size);
            for (int i = 0; i < MESA_SHADER_STAGES; ++i)
                dest_entry->variants[i] = NULL;
            tu_pipeline_cache_add_entry(cache, dest_entry);
        }
        p += size;
    }
}
static void test_kdc_insert_lookaside_no_reply(void **state)
{
    krb5_context context = *state;
    krb5_data req = string2data("I'm a test request");
    struct entry *hash_ent, *exp_ent;

    time_return(0, 0);
    kdc_insert_lookaside(context, &req, NULL);

    hash_ent = k5_hashtab_get(hash_table, req.data, req.length);
    assert_non_null(hash_ent);
    assert_true(data_eq(hash_ent->req_packet, req));
    assert_int_equal(hash_ent->reply_packet.length, 0);
    exp_ent = K5_TAILQ_FIRST(&expiration_queue);
    assert_true(data_eq(exp_ent->req_packet, req));
    assert_int_equal(exp_ent->reply_packet.length, 0);
    assert_int_equal(num_entries, 1);
    assert_int_equal(total_size, entry_size(&req, NULL));
}
static void tu_pipeline_cache_set_entry(struct tu_pipeline_cache *cache,
                                        struct cache_entry *entry)
{
    const uint32_t mask = cache->table_size - 1;
    const uint32_t start = entry->sha1_dw[0];

    /* We'll always be able to insert when we get here. */
    assert(cache->kernel_count < cache->table_size / 2);

    for (uint32_t i = 0; i < cache->table_size; i++) {
        const uint32_t index = (start + i) & mask;
        if (!cache->hash_table[index]) {
            cache->hash_table[index] = entry;
            break;
        }
    }

    cache->total_size += entry_size(entry);
    cache->kernel_count++;
}
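/*
 * A minimal sketch of entry_size() as the pipeline-cache code above appears
 * to use it: the serialized footprint of an entry is its fixed struct plus
 * any trailing per-stage shader code.  The code_sizes[] field is an
 * assumption about the cache_entry layout, not a confirmed one.
 */
static uint32_t entry_size(struct cache_entry *entry)
{
    size_t ret = sizeof(*entry);

    for (int i = 0; i < MESA_SHADER_STAGES; ++i)
        if (entry->code_sizes[i])   /* assumed field: per-stage code size */
            ret += entry->code_sizes[i];
    return ret;
}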
/**
 * Advances to the next queue entry, if you want to consume it.
 */
static void dequeue_sync_queue_head(struct sync_queue *queue)
{
    struct nvhost_cdma *cdma = container_of(queue, struct nvhost_cdma,
                                            sync_queue);
    struct nvhost_master *host = cdma_to_dev(cdma);
    u32 read = queue->read;
    u32 size;

    BUG_ON(read == queue->write);

    size = SQ_IDX_HANDLES;
    size += entry_size(queue->buffer[read + SQ_IDX_NUM_HANDLES]);

    read += size;
    BUG_ON(read > host->sync_queue_size);

    /* If there's not enough room for another entry, wrap to the start. */
    if ((read + SYNC_QUEUE_MIN_ENTRY) > host->sync_queue_size)
        read = 0;

    queue->read = read;
}