/* Print a human-readable fill report for every mipmap cache level to stdout:
 * MB used/quota for the byte-costed thumbnail levels, slot counts for the
 * F/FULL levels, and the scratch buffer when compression is enabled.
 * Diagnostic only; does not modify the cache. */
void dt_mipmap_cache_print(dt_mipmap_cache_t *cache)
{
  // levels below DT_MIPMAP_F account their cost in bytes
  for(int k = 0; k < (int)DT_MIPMAP_F; k++)
  {
    printf("[mipmap_cache] level %d fill %.2f/%.2f MB (%.2f%% in %u/%u buffers)\n", k,
           cache->mip[k].cache.cost / (1024.0 * 1024.0),
           cache->mip[k].cache.cost_quota / (1024.0 * 1024.0),
           100.0f * (float)cache->mip[k].cache.cost / (float)cache->mip[k].cache.cost_quota,
           dt_cache_size(&cache->mip[k].cache),
           dt_cache_capacity(&cache->mip[k].cache));
  }
  // F..FULL levels account their cost in slots.
  // fix: cost/cost_quota are wider than int — passing them to %d uncast is
  // undefined behavior (CERT FIO47-C); cast to uint32_t and print with %u.
  for(int k = (int)DT_MIPMAP_F; k <= (int)DT_MIPMAP_FULL; k++)
  {
    printf("[mipmap_cache] level [f%d] fill %u/%u slots (%.2f%% in %u/%u buffers)\n", k,
           (uint32_t)cache->mip[k].cache.cost,
           (uint32_t)cache->mip[k].cache.cost_quota,
           100.0f * (float)cache->mip[k].cache.cost / (float)cache->mip[k].cache.cost_quota,
           dt_cache_size(&cache->mip[k].cache),
           dt_cache_capacity(&cache->mip[k].cache));
  }
  if(cache->compression_type)
  {
    // scratch memory is only allocated when compressed thumbnails are in use
    printf("[mipmap_cache] scratch fill %.2f/%.2f MB (%.2f%% in %u/%u buffers)\n",
           cache->scratchmem.cache.cost / (1024.0 * 1024.0),
           cache->scratchmem.cache.cost_quota / (1024.0 * 1024.0),
           100.0f * (float)cache->scratchmem.cache.cost / (float)cache->scratchmem.cache.cost_quota,
           dt_cache_size(&cache->scratchmem.cache),
           dt_cache_capacity(&cache->scratchmem.cache));
  }
  printf("\n\n");
  // very verbose stats about locks/users
  // dt_cache_print(&cache->mip[DT_MIPMAP_3].cache);
}
/* Print a human-readable fill and hit-rate report for every mipmap cache
 * level to stdout: dimensions and MB usage for the thumbnail levels, slot
 * counts for the F/FULL levels, the scratch buffer when compression is on,
 * and a per-level request-statistics table. Diagnostic only. */
void dt_mipmap_cache_print(dt_mipmap_cache_t *cache)
{
  // levels below DT_MIPMAP_F account their cost in bytes
  for(int k = 0; k < (int)DT_MIPMAP_F; k++)
  {
    printf("[mipmap_cache] level [i%d] (%4dx%4d) fill %.2f/%.2f MB (%.2f%% in %u/%u buffers)\n", k,
           cache->mip[k].max_width, cache->mip[k].max_height,
           cache->mip[k].cache.cost / (1024.0 * 1024.0),
           cache->mip[k].cache.cost_quota / (1024.0 * 1024.0),
           100.0f * (float)cache->mip[k].cache.cost / (float)cache->mip[k].cache.cost_quota,
           dt_cache_size(&cache->mip[k].cache),
           dt_cache_capacity(&cache->mip[k].cache));
  }
  // F..FULL levels account their cost in slots.
  // fix: the values are cast to uint32_t, so the matching specifier is %u, not %d.
  for(int k = (int)DT_MIPMAP_F; k <= (int)DT_MIPMAP_FULL; k++)
  {
    printf("[mipmap_cache] level [f%d] fill %u/%u slots (%.2f%% in %u/%u buffers)\n", k,
           (uint32_t)cache->mip[k].cache.cost,
           (uint32_t)cache->mip[k].cache.cost_quota,
           100.0f * (float)cache->mip[k].cache.cost / (float)cache->mip[k].cache.cost_quota,
           dt_cache_size(&cache->mip[k].cache),
           dt_cache_capacity(&cache->mip[k].cache));
  }
  if(cache->compression_type)
  {
    // scratch memory is only allocated when compressed thumbnails are in use
    printf("[mipmap_cache] scratch fill %.2f/%.2f MB (%.2f%% in %u/%u buffers)\n",
           cache->scratchmem.cache.cost / (1024.0 * 1024.0),
           cache->scratchmem.cache.cost_quota / (1024.0 * 1024.0),
           100.0f * (float)cache->scratchmem.cache.cost / (float)cache->scratchmem.cache.cost_quota,
           dt_cache_size(&cache->scratchmem.cache),
           dt_cache_capacity(&cache->scratchmem.cache));
  }

  // totals across all levels, used as denominators for the percentage table
  uint64_t sum = 0;
  uint64_t sum_fetches = 0;
  uint64_t sum_standins = 0;
  for(int k = 0; k <= (int)DT_MIPMAP_FULL; k++)
  {
    sum += cache->mip[k].stats_requests;
    sum_fetches += cache->mip[k].stats_fetches;
    sum_standins += cache->mip[k].stats_standin;
  }
  printf("[mipmap_cache] level | near match | miss | stand-in | fetches | total rq\n");
  for(int k = 0; k <= (int)DT_MIPMAP_FULL; k++)
  {
    // robustness: a fresh cache has all-zero counters — guard each division
    // so we print 0.00%% instead of nan.
    const float rq = cache->mip[k].stats_requests ? (float)cache->mip[k].stats_requests : 0.0f;
    printf("[mipmap_cache] %c%d | %6.2f%% | %6.2f%% | %6.2f%% | %6.2f%% | %6.2f%%\n",
           // fix: replace magic number (k > 3) with the constant the two
           // fill loops above actually split on.
           k >= (int)DT_MIPMAP_F ? 'f' : 'i', k,
           rq > 0.0f ? 100.0 * cache->mip[k].stats_near_match / rq : 0.0,
           rq > 0.0f ? 100.0 * cache->mip[k].stats_misses / rq : 0.0,
           sum_standins ? 100.0 * cache->mip[k].stats_standin / (float)sum_standins : 0.0,
           sum_fetches ? 100.0 * cache->mip[k].stats_fetches / (float)sum_fetches : 0.0,
           sum ? 100.0 * cache->mip[k].stats_requests / (float)sum : 0.0);
  }
  printf("\n\n");
  // very verbose stats about locks/users
  // dt_cache_print(&cache->mip[DT_MIPMAP_3].cache);
}
int main(int argc, char *arg[]) { dt_cache_t cache; // dt_cache_init(&cache, 110000, 16, 64, 100000); // really hammer it, make quota insanely low: dt_cache_init(&cache, 110000, 16, 64, 100); dt_cache_set_allocate_callback(&cache, alloc_dummy, NULL); #ifdef _OPENMP # pragma omp parallel for default(none) schedule(guided) shared(cache, stderr) num_threads(16) #endif for(int k=0;k<100000;k++) { void *data = (void *)(long int)k; const int size = 0;//dt_cache_size(&cache); const int con1 = dt_cache_contains(&cache, k); const int val1 = (int)(long int)dt_cache_read_get(&cache, k); const int val2 = (int)(long int)dt_cache_read_get(&cache, k); // fprintf(stderr, "\rinserted number %d, size %d, value %d - %d, contains %d - %d", k, size, val1, val2, con1, con2); const int con2 = dt_cache_contains(&cache, k); assert (con1 == 0); assert (con2 == 1); assert (val2 == k); dt_cache_read_release(&cache, k); dt_cache_read_release(&cache, k); } dt_cache_print_locked(&cache); // fprintf(stderr, "\n"); fprintf(stderr, "[passed] inserting 100000 entries concurrently\n"); const int size = dt_cache_size(&cache); const int lru_cnt = lru_check_consistency(&cache); const int lru_cnt_r = lru_check_consistency_reverse(&cache); // fprintf(stderr, "lru list contains %d|%d/%d entries\n", lru_cnt, lru_cnt_r, size); assert(size == lru_cnt); assert(lru_cnt_r == lru_cnt); fprintf(stderr, "[passed] cache lru consistency after removals, have %d entries left.\n", size); dt_cache_cleanup(&cache); exit(0); }