int main(int argc, char * argv[]) { srand_file(); Parameters p; if(set_parameters(argc, argv, p)==false) { if (argc>1) cerr<<"Please, look at ReadMe.txt..."<<endl; return -1; } erase_file_if_exists("network.dat"); erase_file_if_exists("community.dat"); erase_file_if_exists("statistics.dat"); int sy0= system("rm network_*"); sy0= system("rm community_*"); sy0= system("rm network_layer_*"); int num_of_original_graphs=p.num_of_original_graphs; int num_of_layers=p.num_of_layers; int layer_index=0; cout<<"No. of original graphs: "<<num_of_original_graphs<<" No. of layers per graph: "<<num_of_layers<<endl; int max_assignment=0; ofstream comout("level_node_cluster.clu"); ofstream multiplexout("level_node_node_weight.edges"); multiplexout<<"*Intra"<<endl; for(int ori=0; ori<num_of_original_graphs; ori++) { // generating and printing graph benchmark(p.excess, p.defect, p.num_nodes, p.average_k, p.max_degree, p.tau, p.tau2, \ p.mixing_parameter, p.overlapping_nodes, p.overlap_membership, \ p.nmin, p.nmax, p.fixed_range, p.clustering_coeff); char buffer[1000]; sprintf(buffer, "mv network.dat network_%d", ori); int sy=system(buffer); sprintf(buffer, "mv community.dat community_%d", ori); sy=system(buffer); system("rm statistics.dat"); // creating layers max_assignment= create_layers(layer_index, num_of_layers, ori, comout, max_assignment, multiplexout); } comout.close(); multiplexout.close(); return 0; }
// Rebuild the main on-watch display list (s_display / s_num_display) from the
// configured time offsets in s_offset (0 == the watch's local time), then
// rebuild the popup list (s_p_display) and refresh both sets of layers.
//
// Main list: sort up to `usable_tz` configured timezones by offset (via an
// index array so s_offset itself is untouched) and splice local time into its
// correct sorted position, replacing a TZ slot when none of the configured
// offsets is itself local.
static void sort_times() { APP_LOG(APP_LOG_LEVEL_DEBUG, "sort_times...");
  // Determine if any of the first 4 offsets is local time,
  // if so then we can take 5 TZs as one will be local time.
  // NOTE(review): this loop scans DISPLAY_SIZE entries while the comment
  // says 4 — confirm DISPLAY_SIZE == 4.
  int usable_tz = 4;
  for (int i = 0; i < DISPLAY_SIZE; i++) {
    if (0 == s_offset[i]) {
      // Found a local time, so we can use the first 5 configured TZs
      usable_tz = 5;
      break;
    }
  }
  // Initialise indexes to unsorted offsets.
  int indexes[CONFIG_SIZE];
  for (int i = 0; i < CONFIG_SIZE; i++) {
    indexes[i] = i;
  }
  // Bubblesort offsets via indexes (only the first usable_tz entries matter).
  for (int i = 0; i < (usable_tz - 1); i++) {
    for (int j = 0; j < (usable_tz - 1 - i); j++) {
      compare_swap(indexes, j);
    }
  }
  // Iterate offsets (via indexes), inserting local time (replacing a TZ if needed).
  bool found_local = false;
  int d = 0;  // number of entries written to s_display so far
  for (int i = 0; i < usable_tz; i++) {
    int offset = s_offset[indexes[i]];
    if (OFFSET_NO_DISPLAY == offset) {
      // Sentinel: this and all following slots are unconfigured, so stop.
      APP_LOG(APP_LOG_LEVEL_DEBUG, "NO DISPLAY");
      break;
    }
    if (0 == offset) {
      if (found_local) {
        // Already found a local, so skip this one
        APP_LOG(APP_LOG_LEVEL_DEBUG, "Already found local");
        continue;
      }
      // This is the local time...
      APP_LOG(APP_LOG_LEVEL_DEBUG, "Found local");
      s_display[d++] = DISPLAY_LOCAL_TIME;
      found_local = true;
      continue;
    }
    // NOTE(review): `offset < 0` implies offsets are sorted with negatives
    // last (descending past zero) — once we see a negative offset, local
    // time's sorted slot has been passed. Confirm against compare_swap().
    if (!found_local && offset < 0) {
      APP_LOG(APP_LOG_LEVEL_DEBUG, "Missed local, adding");
      // We have moved past local time without finding it, so add it in.
      s_display[d++] = DISPLAY_LOCAL_TIME;
      found_local = true;
      // Fall through to add the current TZ
    }
    APP_LOG(APP_LOG_LEVEL_DEBUG, "Adding %d", indexes[i]);
    s_display[d++] = indexes[i];
  }
  if (!found_local) {
    APP_LOG(APP_LOG_LEVEL_DEBUG, "Missed local altogether, adding");
    // We did not find or insert a local time in the list at all, so add it last.
    s_display[d++] = DISPLAY_LOCAL_TIME;
    found_local = true;
  }
  s_num_display = d;
  // Debug dump of the final ordering (name + offset, or LOCAL).
  for (int i = 0; i < s_num_display; i++) {
    int x = s_display[i];
    APP_LOG(APP_LOG_LEVEL_DEBUG, "Ordered list %d: %s (%ld)", i,
            (x == DISPLAY_LOCAL_TIME) ? "LOCAL" : s_tz[x],
            (x == DISPLAY_LOCAL_TIME) ? 0 : s_offset[x]);
  }
  create_layers();

  // ----- Handle popup display ------
  // The popup shows all configured TZs sorted by offset, with no local-time
  // insertion.
  // Initialise indexes to unsorted offsets.
  int pindexes[CONFIG_SIZE];
  for (int i = 0; i < CONFIG_SIZE; i++) {
    pindexes[i] = i;
  }
  // Bubblesort offsets via indexes.
  for (int i = 0; i < (CONFIG_SIZE - 1); i++) {
    for (int j = 0; j < (CONFIG_SIZE - 1 - i); j++) {
      compare_swap(pindexes, j);
    }
  }
  for (int i = 0; i < CONFIG_SIZE; i++) {
    APP_LOG(APP_LOG_LEVEL_DEBUG, "Popup: %d", pindexes[i]);
    s_p_display[i] = pindexes[i];
  }
  create_popup_layers();
  APP_LOG(APP_LOG_LEVEL_DEBUG, "...sort_times");
}
// Exercises the public API of GrLayerCache, with particular attention to how
// layers interact with the resource cache as textures are locked & unlocked.
// TODO: need to add checks on VRAM usage!
DEF_GPUTEST(GpuLayerCache, reporter, factory) {
    static const int kInitialNumLayers = 5;

    // Run the whole scenario against every rendering GL context available.
    for (int ctxIdx = 0; ctxIdx < GrContextFactory::kGLContextTypeCnt; ++ctxIdx) {
        GrContextFactory::GLContextType glCtxType =
                (GrContextFactory::GLContextType) ctxIdx;
        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
            continue;
        }

        GrContext* context = factory->get(glCtxType);
        if (NULL == context) {
            continue;
        }

        // A trivial 1x1 picture is all we need to hang cached layers off of.
        SkPictureRecorder recorder;
        recorder.beginRecording(1, 1);
        SkAutoTUnref<const SkPicture> picture(recorder.endRecording());

        GrLayerCache cache(context);

        create_layers(reporter, &cache, *picture, kInitialNumLayers, 0);

        // Lock each initial layer; only the first four fit in the atlas.
        for (int idx = 0; idx < kInitialNumLayers; ++idx) {
            GrCachedLayer* layer = cache.findLayer(picture, idx+1, idx+2, SkMatrix::I());
            REPORTER_ASSERT(reporter, NULL != layer);

            lock_layer(reporter, &cache, layer);

            if (idx < 4) {
                // Atlased layers have non-empty rects.
                REPORTER_ASSERT(reporter, layer->isAtlased());
            } else {
                // The fifth layer overflowed the atlas.
                REPORTER_ASSERT(reporter, !layer->isAtlased());
            }
        }

        // Release every layer's texture.
        for (int idx = 0; idx < kInitialNumLayers; ++idx) {
            GrCachedLayer* layer = cache.findLayer(picture, idx+1, idx+2, SkMatrix::I());
            REPORTER_ASSERT(reporter, NULL != layer);
            cache.unlock(layer);
        }

        // Unlocking must not evict atlased layers; only the overflow layer
        // loses its backing texture.
        for (int idx = 0; idx < kInitialNumLayers; ++idx) {
            GrCachedLayer* layer = cache.findLayer(picture, idx+1, idx+2, SkMatrix::I());
            REPORTER_ASSERT(reporter, NULL != layer);
            REPORTER_ASSERT(reporter, !layer->locked());

            if (idx < 4) {
                REPORTER_ASSERT(reporter, NULL != layer->texture());
                REPORTER_ASSERT(reporter, layer->isAtlased());
            } else {
                REPORTER_ASSERT(reporter, NULL == layer->texture());
                REPORTER_ASSERT(reporter, !layer->isAtlased());
            }
        }

        {
            // Add one more layer. Every existing layer is unlocked, so this
            // evicts the first atlased layer to make room.
            create_layers(reporter, &cache, *picture, 1, kInitialNumLayers);

            GrCachedLayer* layer = cache.findLayer(picture,
                                                   kInitialNumLayers+1,
                                                   kInitialNumLayers+2,
                                                   SkMatrix::I());
            REPORTER_ASSERT(reporter, NULL != layer);

            lock_layer(reporter, &cache, layer);
            cache.unlock(layer);
        }

        // Post-eviction state: layers 1-3 plus the newcomer (5) are atlased,
        // layer 4 (never atlased) survives without a texture, layer 0 is gone.
        for (int idx = 0; idx < kInitialNumLayers+1; ++idx) {
            GrCachedLayer* layer = cache.findLayer(picture, idx+1, idx+2, SkMatrix::I());

            if (1 == idx || 2 == idx || 3 == idx || 5 == idx) {
                REPORTER_ASSERT(reporter, NULL != layer);
                REPORTER_ASSERT(reporter, !layer->locked());
                REPORTER_ASSERT(reporter, NULL != layer->texture());
                REPORTER_ASSERT(reporter, layer->isAtlased());
            } else if (4 == idx) {
                REPORTER_ASSERT(reporter, NULL != layer);
                REPORTER_ASSERT(reporter, NULL == layer->texture());
                REPORTER_ASSERT(reporter, !layer->isAtlased());
            } else {
                REPORTER_ASSERT(reporter, NULL == layer);
            }
        }

        //--------------------------------------------------------------------
        // SkGpuDevice-style purge: the atlas texture survives but every layer
        // is eliminated.
        TestingAccess::Purge(&cache, picture->uniqueID());

        REPORTER_ASSERT(reporter, TestingAccess::NumLayers(&cache) == 0);
        // TODO: add VRAM/resource cache check here

        //--------------------------------------------------------------------
        // GrContext-style purge: removes the layers AND the atlas.
        create_layers(reporter, &cache, *picture, kInitialNumLayers, 0);

        cache.freeAll();

        REPORTER_ASSERT(reporter, TestingAccess::NumLayers(&cache) == 0);
        // TODO: add VRAM/resource cache check here

        //--------------------------------------------------------------------
        // MessageBus-style purge: dropping the picture then processing the
        // deletion messages eliminates the free-floating layers (atlas stays).
        create_layers(reporter, &cache, *picture, kInitialNumLayers, 0);

        picture.reset(NULL);
        cache.processDeletedPictures();

        REPORTER_ASSERT(reporter, TestingAccess::NumLayers(&cache) == 0);
        // TODO: add VRAM/resource cache check here
    }
}
// This test case exercises the public API of the GrLayerCache class. // In particular it checks its interaction with the resource cache (w.r.t. // locking & unlocking textures). // TODO: need to add checks on VRAM usage! DEF_GPUTEST(GpuLayerCache, reporter, factory) { GrContext* context = factory->get(GrContextFactory::kNative_GLContextType); if (NULL == context) { return; } SkPicture picture; GrLayerCache cache(context); create_layers(reporter, &cache, picture); // Lock the layers making them all 512x512 GrTextureDesc desc; desc.fWidth = 512; desc.fHeight = 512; desc.fConfig = kSkia8888_GrPixelConfig; for (int i = 0; i < kNumLayers; ++i) { GrCachedLayer* layer = cache.findLayer(&picture, i); REPORTER_ASSERT(reporter, NULL != layer); bool foundInCache = cache.lock(layer, desc); REPORTER_ASSERT(reporter, !foundInCache); foundInCache = cache.lock(layer, desc); REPORTER_ASSERT(reporter, foundInCache); REPORTER_ASSERT(reporter, NULL != layer->texture()); #if USE_ATLAS // The first 4 layers should be in the atlas (and thus have non-empty // rects) if (i < 4) { REPORTER_ASSERT(reporter, !layer->rect().isEmpty()); } else { #endif REPORTER_ASSERT(reporter, layer->rect().isEmpty()); #if USE_ATLAS } #endif } // Unlock the textures for (int i = 0; i < kNumLayers; ++i) { GrCachedLayer* layer = cache.findLayer(&picture, i); REPORTER_ASSERT(reporter, NULL != layer); cache.unlock(layer); } for (int i = 0; i < kNumLayers; ++i) { GrCachedLayer* layer = cache.findLayer(&picture, i); REPORTER_ASSERT(reporter, NULL != layer); #if USE_ATLAS // The first 4 layers should be in the atlas (and thus do not // currently unlock). The final layer should be unlocked. if (i < 4) { REPORTER_ASSERT(reporter, NULL != layer->texture()); REPORTER_ASSERT(reporter, !layer->rect().isEmpty()); } else { #endif REPORTER_ASSERT(reporter, NULL == layer->texture()); REPORTER_ASSERT(reporter, layer->rect().isEmpty()); #if USE_ATLAS } #endif } // Free them all SkGpuDevice-style. 
This will not free up the // atlas' texture but will eliminate all the layers. cache.purge(&picture); REPORTER_ASSERT(reporter, GetNumLayers::NumLayers(&cache) == 0); // TODO: add VRAM/resource cache check here #if 0 // Re-create the layers create_layers(reporter, &cache, picture); // Free them again GrContext-style. This should free up everything. cache.freeAll(); REPORTER_ASSERT(reporter, GetNumLayers::NumLayers(&cache) == 0); // TODO: add VRAM/resource cache check here #endif }
// This test case exercises the public API of the GrLayerCache class.
// In particular it checks its interaction with the resource cache (w.r.t.
// locking & unlocking textures).
// TODO: need to add checks on VRAM usage!
DEF_GPUTEST(GpuLayerCache, reporter, factory) {
    // Five layers: the i < 4 checks below indicate the first four are expected
    // to fit the atlas and the fifth to spill (when hoisted-layer caching is on).
    static const unsigned kInitialNumLayers = 5;

    // Run the scenario against every rendering GL context the factory offers.
    for (int i= 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;

        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
            continue;
        }

        GrContext* context = factory->get(glCtxType);

        if (NULL == context) {
            continue;
        }

        SkPictureRecorder recorder;
        SkCanvas* c = recorder.beginRecording(1, 1);
        // Draw something, anything, to prevent an empty-picture optimization,
        // which is a singleton and never purged.
        c->drawRect(SkRect::MakeWH(1,1), SkPaint());
        SkAutoTUnref<const SkPicture> picture(recorder.endRecording());

        GrLayerCache cache(context);

        create_layers(reporter, &cache, *picture, kInitialNumLayers, 0);

        // Lock each layer and check its atlas placement. Layers are keyed by
        // (picture id, matrix, key-index array).
        for (unsigned i = 0; i < kInitialNumLayers; ++i) {
            unsigned indices[1] = { i + 1 };
            GrCachedLayer* layer = TestingAccess::Find(&cache, picture->uniqueID(),
                                                       SkMatrix::I(), indices, 1);
            REPORTER_ASSERT(reporter, layer);

            lock_layer(reporter, &cache, layer);

            // The first 4 layers should be in the atlas (and thus have non-empty
            // rects)
            if (i < 4) {
                REPORTER_ASSERT(reporter, layer->isAtlased());
            } else {
                // The 5th layer couldn't fit in the atlas
                REPORTER_ASSERT(reporter, !layer->isAtlased());
            }
        }

        // Unlock the textures
        for (unsigned i = 0; i < kInitialNumLayers; ++i) {
            unsigned indices[1] = { i+1 };

            GrCachedLayer* layer = TestingAccess::Find(&cache, picture->uniqueID(),
                                                       SkMatrix::I(), indices, 1);
            REPORTER_ASSERT(reporter, layer);
            cache.removeUse(layer);
        }

        // Re-find each layer and verify its post-unlock state.
        for (unsigned i = 0; i < kInitialNumLayers; ++i) {
            unsigned indices[1] = { i+1 };

            GrCachedLayer* layer = TestingAccess::Find(&cache, picture->uniqueID(),
                                                       SkMatrix::I(), indices, 1);
            REPORTER_ASSERT(reporter, layer);

            // All the layers should be unlocked
            REPORTER_ASSERT(reporter, !layer->locked());

            // When hoisted layers aren't cached they are aggressively removed
            // from the atlas
#if GR_CACHE_HOISTED_LAYERS
            // The first 4 layers should still be in the atlas.
            if (i < 4) {
                REPORTER_ASSERT(reporter, layer->texture());
                REPORTER_ASSERT(reporter, layer->isAtlased());
            } else {
#endif
                // The final layer should not be atlased.
                REPORTER_ASSERT(reporter, NULL == layer->texture());
                REPORTER_ASSERT(reporter, !layer->isAtlased());
#if GR_CACHE_HOISTED_LAYERS
            }
#endif
        }

        {
            unsigned indices[1] = { kInitialNumLayers+1 };

            // Add an additional layer. Since all the layers are unlocked this
            // will force out the first atlased layer
            create_layers(reporter, &cache, *picture, 1, kInitialNumLayers);
            GrCachedLayer* layer = TestingAccess::Find(&cache, picture->uniqueID(),
                                                       SkMatrix::I(), indices, 1);
            REPORTER_ASSERT(reporter, layer);

            lock_layer(reporter, &cache, layer);
            cache.removeUse(layer);
        }

        // Verify the post-eviction population of the cache.
        for (unsigned i = 0; i < kInitialNumLayers+1; ++i) {
            unsigned indices[1] = { i+1 };

            GrCachedLayer* layer = TestingAccess::Find(&cache, picture->uniqueID(),
                                                       SkMatrix::I(), indices, 1);
#if GR_CACHE_HOISTED_LAYERS
            // 3 old layers plus the new one should be in the atlas.
            if (1 == i || 2 == i || 3 == i || 5 == i) {
                REPORTER_ASSERT(reporter, layer);
                REPORTER_ASSERT(reporter, !layer->locked());
                REPORTER_ASSERT(reporter, layer->texture());
                REPORTER_ASSERT(reporter, layer->isAtlased());
            } else if (4 == i) {
#endif
                // The one that was never atlased should still be around
                REPORTER_ASSERT(reporter, layer);

                REPORTER_ASSERT(reporter, NULL == layer->texture());
                REPORTER_ASSERT(reporter, !layer->isAtlased());
#if GR_CACHE_HOISTED_LAYERS
            } else {
                // The one bumped out of the atlas (i.e., 0) should be gone
                REPORTER_ASSERT(reporter, NULL == layer);
            }
#endif
        }

        //--------------------------------------------------------------------
        // Free them all SkGpuDevice-style. This will not free up the
        // atlas' texture but will eliminate all the layers.
        TestingAccess::Purge(&cache, picture->uniqueID());

        REPORTER_ASSERT(reporter, TestingAccess::NumLayers(&cache) == 0);
        // TODO: add VRAM/resource cache check here

        //--------------------------------------------------------------------
        // Test out the GrContext-style purge. This should remove all the layers
        // and the atlas.

        // Re-create the layers
        create_layers(reporter, &cache, *picture, kInitialNumLayers, 0);

        // Free them again GrContext-style. This should free up everything.
        cache.freeAll();

        REPORTER_ASSERT(reporter, TestingAccess::NumLayers(&cache) == 0);
        // TODO: add VRAM/resource cache check here

        //--------------------------------------------------------------------
        // Test out the MessageBus-style purge. This will not free the atlas
        // but should eliminate the free-floating layers.
        create_layers(reporter, &cache, *picture, kInitialNumLayers, 0);

        picture.reset(NULL);
        cache.processDeletedPictures();

        REPORTER_ASSERT(reporter, TestingAccess::NumLayers(&cache) == 0);
        // TODO: add VRAM/resource cache check here
    }
}