void benchmark(Graf& cos, int poczatek, int koniec, std::ostream& out) {
    std::chrono::time_point<std::chrono::system_clock> start, end;
    std::chrono::duration<double> elapsed_time(0);

    start = std::chrono::system_clock::now();
    auto list = depth_first(cos, poczatek, koniec);
    end = std::chrono::system_clock::now();
    elapsed_time = end - start;
    out << "DEPTH-FIRST time: " << elapsed_time.count() << "s. ";
    for (auto t : list)
        std::cerr << t << " -> ";
    std::cerr << "\n";

    start = std::chrono::system_clock::now();
    list = breadth_first(cos, poczatek, koniec);
    end = std::chrono::system_clock::now();
    elapsed_time = end - start;
    out << "BREADTH-FIRST time: " << elapsed_time.count() << "s. ";
    for (auto t : list)
        std::cerr << t << " -> ";
    std::cerr << "\n";

    start = std::chrono::system_clock::now();
    list = a_star(cos, poczatek, koniec);
    end = std::chrono::system_clock::now();
    elapsed_time = end - start;
    out << "A* time: " << elapsed_time.count() << "s. ";
    for (auto t : list)
        std::cerr << t << " -> ";
    std::cerr << "\n";
}
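// The three timing blocks above differ only in the search call. As a hedged
// sketch (not part of the original code: the helper name "timed" and the use of
// steady_clock are assumptions of this example), the boilerplate could be
// factored into a small helper that times any callable and reports the result:
#include <chrono>
#include <ostream>
#include <string>
#include <utility>

// Times a callable with std::chrono::steady_clock (monotonic, hence usually a
// better benchmarking clock than system_clock), prints the elapsed seconds to
// `out`, and returns whatever the callable returned. Requires C++14.
template <typename F>
auto timed(const std::string& label, std::ostream& out, F&& f) {
    auto start = std::chrono::steady_clock::now();
    auto result = std::forward<F>(f)();
    std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
    out << label << " time: " << elapsed.count() << "s. ";
    return result;
}

// Possible usage against the benchmark above:
//   auto list = timed("DEPTH-FIRST", out, [&]{ return depth_first(cos, poczatek, koniec); });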
inline void PSPromotionManager::claim_or_forward_breadth(T* p) {
  assert(!depth_first(), "invariant");
  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(Universe::heap()->is_in(p), "pointer outside heap");

  if (UsePrefetchQueue) {
    claim_or_forward_internal_breadth((T*)_prefetch_queue.push_and_pop(p));
  } else {
    // This option is used for testing. The use of the prefetch
    // queue can delay the processing of the objects and thus
    // change the order of object scans. For example, remembered
    // set updates are typically the clearing of the remembered
    // set (the cards) followed by updates of the remembered set
    // for young-to-old pointers. In a situation where there
    // is an error in the sequence of clearing and updating
    // (e.g. clear card A, update card A, erroneously clear
    // card A again) the error can be obscured by a delay
    // in the update due to the use of the prefetch queue
    // (e.g., clear card A, erroneously clear card A again,
    // update card A that was pushed into the prefetch queue
    // and thus delayed until after the erroneous clear). The
    // length of the delay is random depending on the objects
    // in the queue and the delay can be zero.
    claim_or_forward_internal_breadth(p);
  }
}
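// The comment above explains how routing pointers through the prefetch queue
// delays their processing and can thereby mask ordering bugs. A rough,
// self-contained toy model of the push_and_pop idea (an illustration only, not
// the HotSpot PrefetchQueue implementation): a bounded ring buffer that returns
// nullptr while it warms up and afterwards returns the pointer pushed N calls
// earlier, so each element is handled with a delay of N pushes.
#include <cstddef>
#include <cstdio>

template <typename T, std::size_t N>
class ToyPrefetchQueue {
    T*          slots_[N] = {};
    std::size_t next_     = 0;
public:
    // Store p and hand back the pointer pushed N calls ago (nullptr at first).
    T* push_and_pop(T* p) {
        T* delayed = slots_[next_];
        slots_[next_] = p;
        next_ = (next_ + 1) % N;
        return delayed;
    }
};

int main() {
    ToyPrefetchQueue<int, 2> q;
    int a = 1, b = 2, c = 3;
    // The first two calls return nullptr; the third returns &a, i.e. processing
    // of &a is delayed by two pushes, which is the effect discussed above.
    std::printf("%p %p %p\n", (void*)q.push_and_pop(&a),
                              (void*)q.push_and_pop(&b),
                              (void*)q.push_and_pop(&c));
    return 0;
}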
int main() {
    int *store = NULL, *stor = NULL, i, j, n2, n_num, k, tmp, s;
    stk = NULL;
    findex = -1;

    printf("Enter number of vertices\n");
    scanf("%d", &n);
    n2 = n * n;

    /* the adjacency matrix is read as one flat block of n*n ints;
       input[i] is then pointed at row i */
    stor = (int*)malloc(sizeof(int) * n2);
    vertex = (ELE*)malloc(sizeof(ELE) * n);
    finished = (int*)malloc(sizeof(int) * n);
    input = (int**)malloc(sizeof(int*) * n);   /* n row pointers, not n*n ints */
    for (i = 0; i < n2; i++) {
        scanf("%d", &stor[i]);
    }
    for (i = 0; i < n; i++) {
        input[i] = stor + n * i;
    }

    /* make the matrix symmetric: an edge i->j implies j->i */
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            if (input[i][j] > 0) {
                input[j][i] = 1;
            }
        }
    }

    printf("Enter source\n");
    scanf("%d", &s);

    for (i = 0; i < n; i++) {
        vertex[i] = create_element(i);
    }

    /* build the child lists; n_num ends up as the index of the last neighbour */
    for (i = 0; i < n; i++) {
        n_num = -1;
        store = (int*)malloc(sizeof(int) * n);
        for (j = 0; j < n; j++) {
            if (input[i][j] > 0) {
                store[++n_num] = j;
            }
        }
        if (n_num > -1) {
            vertex[i]->child = (ELE*)malloc(sizeof(ELE) * (n_num + 1)); /* n_num+1 children */
            vertex[i]->n_child = n_num;
        } else {
            free(store);
            continue;
        }
        for (k = 0; k <= n_num; k++) {
            tmp = store[k];
            (vertex[i]->child)[k] = vertex[tmp];
        }
        free(store);
    }

    depth_first(vertex[s]);
    return 0;
}
inline void PSPromotionManager::claim_or_forward_depth(heapRef* p) {
  assert(depth_first(), "invariant");
  assert(PSScavenge::should_scavenge(ALWAYS_UNPOISON_OBJECTREF(*p), true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(Universe::heap()->is_in(p), "pointer outside heap");

  claim_or_forward_internal_depth(p);
}
/* Collect the ids of the leaves of comm_tree into proc_list, in
   depth-first (left-to-right) order; *i is the next free slot. */
void depth_first(tree_t *comm_tree, int *proc_list, int *i)
{
  int j;

  if(!comm_tree->child){
    proc_list[(*i)++] = comm_tree->id;
    return;
  }

  for( j = 0 ; j < comm_tree->arity ; j++ )
    depth_first(comm_tree->child[j], proc_list, i);
}
void depth_first(ELE tos)
{
    int i;

    /* skip vertices that have already been visited */
    if(in_array(finished, findex, tos->val)) {
        return;
    }
    printf("%d ", tos->val);
    stk = push_stack(stk, tos);
    push_array(finished, &findex, tos->val);

    /* n_child holds the index of the last child, hence <= */
    for(i = 0; i <= tos->n_child; i++) {
        depth_first(tos->child[i]);
    }
    stk = pop_stack(stk);
}
int main(int argc, char** argv) {
    if (argc != 2)
        return -1;

    int n = std::atoi(argv[1]);
    std::cout << n << " queens" << std::endl;

    Field field(n);
    FieldSet result;
    depth_first(field, result);

    std::cout << "found " << result.size() << " solutions." << std::endl;
    return 0;
}
int main(int argc, char *argv[]){
    if(argc != 5 && argc != 6){
        printf("Usage: ./genmaze OUTPUT_FILE ALGORITHM WIDTH HEIGHT [RANDOMNESS]\n"
               "\n"
               "ALGORITHM: rand OR dfs\n"
               "RANDOMNESS: odds of adding a(n extra, for dfs) connection, out of 256\n\n");
        return 0;
    }

    if(!strcmp(argv[1], "-")){
        of = stdout;
    } else {
        of = fopen(argv[1], "w");
    }

    mx = atoi(argv[3]);
    my = atoi(argv[4]);

    if(argc == 6){
        odds = atoi(argv[5]);
    } else {
        if(!strcmp(argv[2], "rand")){
            odds = 128;
        } else {
            odds = 0;
        }
    }

    rf = fopen("/dev/urandom", "r");

    fprintf(stderr, "Allocating...\n");
    alloc_maze();

    fprintf(stderr, "Generating...\n");
    if(!strcmp(argv[2], "rand")){
        random_connections();
    } else if(!strcmp(argv[2], "dfs")){
        depth_first();
    } else {
        printf("Invalid algorithm.\n");
        return 0;
    }

    fprintf(stderr, "Printing...\n");
    print_maze();
    fprintf(stderr, "Done.\n");
    return 0;
}
state::Enum depth_first(sooty::const_parseme_ptr_ref starting_node, PreFunc prefix_func, PostFunc postfix_func)
{
    if (!starting_node)
        return state::keep_going;

    // pre-order hook: visit the node before any of its children
    if ( prefix_func(starting_node) == state::stop )
        return state::stop;

    for (sooty::parseme_container::const_iterator i = starting_node->children.begin(); i != starting_node->children.end(); ++i) {
        state::Enum r = depth_first(*i, prefix_func, postfix_func);
        if (r == state::stop)
            return state::stop;
    }

    // post-order hook: visit the node after all of its children
    if ( postfix_func(starting_node) == state::stop )
        return state::stop;

    return state::keep_going;
}
void depth_first(const Field & field, FieldSet & result) {
    //std::cout << "descending: " << field.queens.size() << "/" << field.n << std::endl;
    if (field.queens.size() == field.n)
        result.insert(field);

    for (int x = 0; x < field.n; ++x) {
        if (field.x[x])
            continue;
        for (int y = 0; y < field.n; ++y) {
            if (field.is_free(x, y)) {
                Field variation = field.with_queen_at(x, y);
                depth_first(variation, result);
            }
        }
    }
}
inline void PSPromotionManager::flush_prefetch_queue() {
  assert(!depth_first(), "invariant");
  for (int i = 0; i < _prefetch_queue.length(); i++) {
    claim_or_forward_internal_breadth((oop*)_prefetch_queue.pop());
  }
}
void map_topology(tm_topology_t *topology, tree_t *comm_tree, int nb_compute_units,
                  int level, int *sigma, int nb_processes, int *k)
{
  int *nodes_id = NULL;
  int *proc_list = NULL;
  int i, N, M, block_size;
  unsigned int vl = get_verbose_level();

  M = nb_leaves(comm_tree);
  nodes_id = topology->node_id[level];
  N = topology->nb_nodes[level];

  if(vl >= INFO){
    printf("nb_leaves=%d\n", M);
    printf("level=%d, nodes_id=%p, N=%d\n", level, (void *)nodes_id, N);
    printf("N=%d,nb_compute_units=%d\n", N, nb_compute_units);
  }

  /* The number of nodes at level "level" in the tree should be equal to the number of processors */
  assert(N == nb_compute_units);

  proc_list = (int*)MALLOC(sizeof(int)*M);
  i = 0;
  depth_first(comm_tree, proc_list, &i);

  if(vl >= DEBUG)
    for(i = 0; i < M; i++){
      printf("%d\n", proc_list[i]);
    }

  block_size = M/N;

  if(k){ /* if we need the k vector */
    if(vl >= INFO)
      printf("M=%d, N=%d, BS=%d\n", M, N, block_size);
    for( i = 0 ; i < nb_processing_units(topology) ; i++ )
      k[i] = -1;
    for( i = 0 ; i < M ; i++ )
      if(proc_list[i] != -1){
        if(vl >= DEBUG)
          printf("%d->%d\n", proc_list[i], nodes_id[i/block_size]);
        if( proc_list[i] < nb_processes ){
          sigma[proc_list[i]] = nodes_id[i/block_size];
          k[nodes_id[i/block_size]] = proc_list[i];
        }
      }
  }else{
    if(vl >= INFO)
      printf("M=%d, N=%d, BS=%d\n", M, N, block_size);
    for( i = 0 ; i < M ; i++ )
      if(proc_list[i] != -1){
        if(vl >= DEBUG)
          printf("%d->%d\n", proc_list[i], nodes_id[i/block_size]);
        if( proc_list[i] < nb_processes )
          sigma[proc_list[i]] = nodes_id[i/block_size];
      }
  }

  if((vl >= DEBUG) && (k)){
    printf("k: ");
    for( i = 0 ; i < nb_processing_units(topology) ; i++ )
      printf("%d ", k[i]);
    printf("\n");
  }

  FREE(proc_list);
}
/* Map topology to cores:
   sigma_i is such that process i is mapped on core sigma_i
   k_i is such that core i executes process k_i

   size of sigma is the number of processes
   size of k is the number of cores/nodes

   We must have number of processes <= number of cores

   k_i = -1 if no process is mapped on core i
*/
void map_topology(tm_topology_t *topology, tree_t *comm_tree, int nb_proc, int level, int *sigma, int *k){
  int *nodes_id;
  int N;
  int *proc_list, i, l;
  int M;
  int block_size;

  M = nb_leaves(comm_tree);
  printf("nb_leaves=%d\n", M);
  nodes_id = topology->node_id[level];
  N = topology->nb_nodes[level];
  //printf("level=%d, nodes_id=%p, N=%d\n",level,nodes_id,N);
  //printf("N=%d,nb_proc=%d\n",N,nb_proc);

  /* The number of nodes at level "level" in the tree should be equal to the number of processors */
  assert(N == nb_proc);

  proc_list = (int*)malloc(sizeof(int)*M);
  i = 0;
  depth_first(comm_tree, proc_list, &i);
  l = 0;
  for(i = 0; i < M; i++){
    //printf ("%d\n",proc_list[i]);
  }

  block_size = M/N;

  if(k){ /* if we need the k vector */
    printf("M=%d, N=%d, BS=%d\n", M, N, block_size);
    for(i = 0; i < nb_nodes(topology); i++){
      k[i] = -1;
    }
    for(i = 0; i < M; i++){
      if(proc_list[i] != -1){
#ifdef DEBUG
        printf("%d->%d\n", proc_list[i], nodes_id[i/block_size]);
#endif
        sigma[proc_list[i]] = nodes_id[i/block_size];
        k[nodes_id[i/block_size]] = proc_list[i];
      }
    }
  }else{
    printf("M=%d, N=%d, BS=%d\n", M, N, block_size);
    for(i = 0; i < M; i++){
      if(proc_list[i] != -1){
#ifdef DEBUG
        printf("%d->%d\n", proc_list[i], nodes_id[i/block_size]);
#endif
        sigma[proc_list[i]] = nodes_id[i/block_size];
      }
    }
  }
  free(proc_list);
}
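/* To make the sigma/k convention above concrete, here is a small self-contained
 * sketch (not part of the treematch sources; all values are made up) that replays
 * the core assignment loop of map_topology on a toy case: M=4 leaves, N=2 cores,
 * so block_size = 2, with processes 1 and 0 placed by depth_first() on leaves 0
 * and 2 respectively and the remaining leaves unused (-1). */
#include <stdio.h>

int main(void) {
    int proc_list[4] = {1, -1, 0, -1}; /* leaf order produced by depth_first()  */
    int nodes_id[2]  = {0, 1};         /* core ids at the chosen topology level */
    int sigma[2], k[2];                /* sigma: per process, k: per core       */
    int M = 4, N = 2, block_size, i;

    block_size = M / N;                /* 2 leaves per core */
    for (i = 0; i < N; i++) k[i] = -1;

    for (i = 0; i < M; i++) {
        if (proc_list[i] != -1) {
            sigma[proc_list[i]] = nodes_id[i / block_size];
            k[nodes_id[i / block_size]] = proc_list[i];
        }
    }

    /* Prints sigma[0]=1 sigma[1]=0 and k[0]=1 k[1]=0: process 0 runs on core 1,
     * process 1 runs on core 0, and each core executes exactly one process. */
    for (i = 0; i < 2; i++) printf("sigma[%d]=%d\n", i, sigma[i]);
    for (i = 0; i < N; i++) printf("k[%d]=%d\n", i, k[i]);
    return 0;
}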