Example #1
void App::initial_load()
{
  build_femspace();
  //autoSetGrainInfo();
  readGrainInfo();

  Operator::L2Project(&phi_0, *phi_h, Operator::LOCAL_LEAST_SQUARE, 5);
  Operator::L2Project(&c_0, *c_h, Operator::LOCAL_LEAST_SQUARE, 5);

  for (int i=0;i<3;++i)
  {
    adapt_mesh(); 
    initOrientation();

//    Operator::L2Interpolate(&phi_0, *phi_h);
//    Operator::L2Interpolate(&c_0, *c_h);
  }

  Operator::L2Project(&u_0, *u_h, Operator::LOCAL_LEAST_SQUARE, 5);
  Operator::L2Project(&v_0, *v_h, Operator::LOCAL_LEAST_SQUARE, 5);
  Operator::L2Project(&p_0, *p_h, Operator::LOCAL_LEAST_SQUARE, 5);
  Operator::L2Project(&p_0, *last_p_h, Operator::LOCAL_LEAST_SQUARE, 5);

  save_data();

  load_balance(); 
}
Example #2
void SCL::run()
{
  std::cout.precision(6);
  std::cout.setf(std::ios::scientific, std::ios::floatfield);
  double start_time = MPI_Wtime();
  double last_time = start_time;
  double this_time = start_time;
  initialize();
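  // Main time-stepping loop: advance one step, adapt the mesh, rebalance the
  // load across processes, fetch the step control, then report timings on rank 0.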
  do {
    step_forward();
    adapt_mesh();
    load_balance(); /// perform load balancing
    getControl();

    if (htree.rank() == 0) {
      last_time = this_time;
      this_time = MPI_Wtime();
      std::cout << "t = " << t 
                << ", dt = " << dt
                << ", wall time = " << this_time - start_time
                << ", step time = " << this_time - last_time
                << std::endl;
    }
  } while(end_t - t > 2.0e-16);
}
Example #3
void DashcoinWallet::load_wallet_data(){
    if(!wallet_is_running){
        return;
    }

    load_balance();
    load_address();
    load_transactions();

}
Example #4
File: sched.c  Project: ferreiro/C
/* Main function for a per-CPU simulation thread
 * */
void* sched_cpu(void* arg)
{
    int this_cpu=(long)arg;
    int simulation_step=0;
    runqueue_t* cpu_rq=get_runqueue_cpu(this_cpu);

    /* Initial steps
     * (1) Process incoming tasks
     * (2) Call the scheduler to pick the next task the very first time
     * */

    process_sched_events(this_cpu,0);

    lock_rq(cpu_rq);
    active_sched_class->pick_next_task(cpu_rq,this_cpu);
    /* After this step the CPU may still be idle -> current may be the idle task */
    cpu_rq->cur_task->state=TS_ONPROC;
    unlock_rq(cpu_rq);

    while(cpus_completed<nr_cpus && simulation_step<max_simulation_steps) {
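        /* One simulated tick per iteration; loop until every CPU has completed
         * or the step limit is reached. */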

        /* The current task runs for one tick */
        usleep(tick_delay);

        /* Tick processing */
        scheduler_tick(this_cpu,simulation_step);

        /* Handle awoken tasks (for the next interval) */
        process_sched_events(this_cpu,simulation_step+1);

        /* load balance */
        load_balance(this_cpu,simulation_step);

        /* Select a new thread to run if necessary */
        schedule(this_cpu,simulation_step);

        /* Synchronize CPUs ...*/
        synchronize_cpus(this_cpu);

        simulation_step++;
    }

    write_cpu_log(this_cpu,"==================\n");
    return NULL;
}
Example #5
/**************************************************************************************************
	_SORT_A_RECS
	If the request is for 'A' or 'AAAA' and there are multiple A or AAAA records, sort them.
	Since this is an A or AAAA record, the answer section contains only addresses.
	If any of the RR's have nonzero "aux" values, do load balancing, else do round robin.
**************************************************************************************************/
static inline void
_sort_a_recs(TASK *t, RRLIST *rrlist, datasection_t section, int sort_level)
{
	register RR *node;
	register int nonzero_aux = 0;
	register int count = 0;										/* Number of nodes at this level */

	/* If any addresses have nonzero 'aux' values, do load balancing */
	for (count = 0, node = rrlist->head; node; node = node->next)
		if (RR_IS_ADDR(node) && node->sort_level == sort_level)
		{
			count++;
			if (((MYDNS_RR *)node->rr)->aux)
				nonzero_aux = 1;
		}

	if (count < 2)													/* Only one node here, don't bother */
		return;
	t->reply_cache_ok = 0;										/* Don't cache load-balanced replies */

	if (nonzero_aux)
	{
		load_balance(t, rrlist, section, sort_level);
	}
	else /* Round robin - for address records, set 'sort' to a random number */
	{
#if DEBUG_ENABLED && DEBUG_SORT
		Debug("%s: Sorting A records in %s section (round robin)", desctask(t), datasection_str[section]);
#endif

		for (node = rrlist->head; node; node = node->next)
			if (RR_IS_ADDR(node) && node->sort_level == sort_level)
				node->sort1 = RAND(4294967294U);
	}
}
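A note on the branch above: when any record carries a nonzero "aux" value the records are handed to load_balance(), otherwise every address simply gets a random sort key (round robin). The snippet below is only a minimal, self-contained sketch of that weighted-versus-random choice over a plain array of weights; it is not MyDNS's actual load_balance() implementation, whose body is not shown here.

/* Sketch only: choose one index given "aux"-style weights.  With all-zero
 * weights we fall back to a uniformly random choice, analogous to the
 * round-robin branch above that assigns random sort keys. */
#include <stdlib.h>

static size_t pick_record(const unsigned *aux, size_t n)
{
    unsigned long total = 0, r;
    size_t i;

    for (i = 0; i < n; i++)
        total += aux[i];

    if (total == 0)                        /* no weights -> round robin */
        return (size_t)(rand() % (int)n);

    r = (unsigned long)rand() % total;     /* weighted ("load-balanced") pick */
    for (i = 0; i < n; i++) {
        if (r < aux[i])
            return i;
        r -= aux[i];
    }
    return 0;                              /* not reached while total > 0 */
}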
Example #6
void graph_miner_mpi_dyn::project(Projected &projected, int dfs_level)
{

  if(is_min() == false) {
    return;
  }

  // Check if the pattern is frequent enough.
  unsigned int sup = support(projected);

  if(sup < minimal_support) return;

  DEBUG(*logger, "DFS level = " << dfs_level);

  DEBUG(*(graph_miner::logger), "executing project for code: " << DFS_CODE.to_string() << "; support: " << sup);

  // Output the frequent substructure
  report(projected, sup);

  // In case we have a valid upper bound and our graph already exceeds it,
  // return.  Note: we do not check for equality as the DFS exploration may
  // still add edges within an existing subgraph, without increasing the
  // number of nodes.
  //
  //if(maxpat_max > maxpat_min && DFS_CODE.nodeCount() > maxpat_max) return;

  // We just output a frequent subgraph. Since it is frequent enough, its
  // (n+1)-extension-graphs might be frequent as well, hence we enumerate them all.
  const RMPath &rmpath = DFS_CODE.buildRMPath();
  int minlabel = DFS_CODE[0].fromlabel;
  int maxtoc = DFS_CODE[rmpath[0]].to;

  Projected_map3 new_fwd_root;
  Projected_map2 new_bck_root;
  types::EdgeList edges;

  current_dfs_level = dfs_level;

  // Enumerate all possible one edge extensions of the current substructure.
  for(unsigned int n = 0; n < projected.size(); ++n) {

    unsigned int id = projected[n].id;
    PDFS *cur = &projected[n];
    History history(graph, cur);

    // XXX: do we have to change something here for directed edges?

    // backward
    for(int i = (int)rmpath.size() - 1; i >= 1; --i) {
      Edge *e = get_backward(graph, history[rmpath[i]], history[rmpath[0]], history);
      if(e)
        new_bck_root[DFS_CODE[rmpath[i]].from][e->elabel].push(id, e, cur);
    }

    // pure forward
    // FIXME: here we pass a too-large e->to (== history[rmpath[0]]->to)
    // into get_forward_pure, so that the assertion fails.
    //
    // The problem is:
    // history[rmpath[0]]->to > graph.size()
    if(get_forward_pure(graph, history[rmpath[0]], minlabel, history, edges)) {
      for(types::EdgeList::iterator it = edges.begin(); it != edges.end(); ++it) {
        new_fwd_root[maxtoc][(*it)->elabel][graph[(*it)->to].label].push(id, *it, cur);
      }
    }

    // backtracked forward
    for(int i = 0; i < (int)rmpath.size(); ++i) {
      if(get_forward_rmpath(graph, history[rmpath[i]], minlabel, history, edges)) {
        for(types::EdgeList::iterator it = edges.begin(); it != edges.end(); ++it) {
          new_fwd_root[DFS_CODE[rmpath[i]].from][(*it)->elabel][graph[(*it)->to].label].push(id, *it, cur);
        } // for it
      } // if
    } // for i
  } // for n


  std::deque<types::DFS> tmp;

  if(dfs_task_queue.size() <= dfs_level) {
    dfs_task_queue.push_back(tmp);
  }

  // Test all extended substructures.
  // backward
  for(Projected_iterator2 to = new_bck_root.begin(); to != new_bck_root.end(); ++to) {
    for(Projected_iterator1 elabel = to->second.begin(); elabel != to->second.end(); ++elabel) {

      DFS dfs(maxtoc, to->first, -1, elabel->first, -1);
      dfs_task_queue[dfs_level].push_back(dfs);

      load_balance();

    }
  }

  // forward
  for(Projected_riterator3 from = new_fwd_root.rbegin();
      from != new_fwd_root.rend(); ++from) {
    for(Projected_iterator2 elabel = from->second.begin();
        elabel != from->second.end(); ++elabel) {
      for(Projected_iterator1 tolabel = elabel->second.begin();
          tolabel != elabel->second.end(); ++tolabel) {

        DFS dfs(from->first, maxtoc + 1, -1, elabel->first, tolabel->first);
        dfs_task_queue[dfs_level].push_back(dfs);

        load_balance();

      }
    }
  }


  //current_dfs_level = dfs_level;
  //current_dfs_level = dfs_level + 1;

  while(dfs_task_queue[dfs_level].size() > 0) {
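    // Drain this level's task queue: each queued extension is appended to
    // DFS_CODE and explored one level deeper via project().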

    DFS dfs = dfs_task_queue[dfs_level].front();
    dfs_task_queue[dfs_level].pop_front();
    DEBUG(*logger, "popped dfs = " << dfs.to_string() );

    current_dfs_level = dfs_level;
    load_balance();

    DFS_CODE.push(dfs.from, dfs.to, dfs.fromlabel, dfs.elabel, dfs.tolabel);

    if(dfs.is_backward())
      project(new_bck_root[dfs.to][dfs.elabel], dfs_level + 1);      //Projected (PDFS vector): each entry contains graph id 0, edge pointer, null PDFS
    else
      project(new_fwd_root[dfs.from][dfs.elabel][dfs.tolabel], dfs_level + 1);      //Projected (PDFS vector): each entry contains graph id 0, edge pointer, null PDFS

    DFS_CODE.pop();
  }

  //current_dfs_level = dfs_level;

  return;
}
Example #7
void graph_miner_mpi_dyn::run_intern(void)
{

  types::EdgeList edges;
  Projected_map3 root;
  int single_edge_dfscodes = 0;


  for(unsigned int from = 0; from < graph.size(); ++from) {
    if(get_forward_root(graph, graph[from], edges)) {   // get the edge list of the node g[from] in graph g
      for(types::EdgeList::iterator it = edges.begin(); it != edges.end(); ++it) {
        //embeddings with a single edge
        if(root.count(graph[from].label) == 0 || root[graph[from].label].count((*it)->elabel) == 0 || root[graph[from].label][(*it)->elabel].count(graph[(*it)->to].label) == 0) {
          single_edge_dfscodes++;
          DEBUG(*logger, "single edge DFS code : (0,1," << graph[from].label << "," << (*it)->elabel << "," << graph[(*it)->to].label << ")" );
        }
        root[graph[from].label][(*it)->elabel][graph[(*it)->to].label].push(0, *it, 0);          //projected (PDFS vector) entry: graph id (always 0 for single graph), edge pointer and null PDFS
      }  //for
    }   // if
  }   // for from
  //} // for id


  int dfscodes_per_rank =  (int) ceil((single_edge_dfscodes * 1.0) / numtasks);
  int start_index = rank * dfscodes_per_rank;
  int end_index = start_index + dfscodes_per_rank - 1;
  if (end_index > single_edge_dfscodes - 1)
    end_index = single_edge_dfscodes - 1;
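  // Example of this block split: 10 single-edge codes on 4 ranks give
  // dfscodes_per_rank = ceil(10/4) = 3, so rank 0 owns indices [0,2] and
  // rank 3 owns only [9,9] after clamping.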

  DEBUG(*(graph_miner::logger), "start index = " << start_index << " , end index = " << end_index << endl);

  std::deque<types::DFS> tmp;
  dfs_task_queue.push_back(tmp);

  int index = 0;
  for(Projected_iterator3 fromlabel = root.begin(); fromlabel != root.end(); ++fromlabel) {
    for(Projected_iterator2 elabel = fromlabel->second.begin(); elabel != fromlabel->second.end(); ++elabel) {
      for(Projected_iterator1 tolabel = elabel->second.begin();
          tolabel != elabel->second.end(); ++tolabel) {

        if( index >= start_index && index <= end_index ) {
          // Build the initial two-node graph.  It will be grown recursively within project().

          DFS dfs(0, 1, fromlabel->first, elabel->first, tolabel->first);
          dfs_task_queue[0].push_back(dfs);
          //std::cout << dfs.to_string() << endl;
        }
        index++;

      } // for tolabel
    } // for elabel
  } // for fromlabel

  //std::cout<<"size = " << dfs_task_queue[0].size() << std::endl;

  //while(dfs_task_queue[0].size() > 0){
  while(computation_end == false) {

    if(dfs_task_queue[0].size() == 0) {
      is_working = false;
      embeddings_regeneration_level = 0;
      task_split_level = 0;
      load_balance();

    }else{
      //this is done in process_received_data, so not required here
      //is_working = true;

      DFS dfs = dfs_task_queue[0].front();
      dfs_task_queue[0].pop_front();
      DEBUG(*(graph_miner::logger), "popped dfs = " << dfs.to_string() );
      load_balance();

      DFS_CODE.push(0, 1, dfs.fromlabel, dfs.elabel, dfs.tolabel);
      current_dfs_level = 1;

      //INFO(*(graph_miner::logger), "embeddings regeneration level = " << embeddings_regeneration_level);
      if(embeddings_regeneration_level < 1)
        project(root[dfs.fromlabel][dfs.elabel][dfs.tolabel], 1);                    //Projected (PDFS vector): each entry contains graph id 0, edge pointer, null PDFS
      else
        regenerate_embeddings(root[dfs.fromlabel][dfs.elabel][dfs.tolabel], 1);

      current_dfs_level = 0;
      DFS_CODE.pop();
      if(dfs_task_queue[0].size() == 0) {
        DEBUG(*(graph_miner::logger),"processor " << rank << " is idle, has token = " << has_token);
        if(has_token == true)
          DEBUG(*(graph_miner::logger),"processor " << rank << " token color = " << get_token_color(token_color));
      }
    }
  }

} // void graph_miner_mpi_dyn::run_intern(void)
Example #8
void graph_miner_mpi_dyn::regenerate_embeddings(Projected &projected, int dfs_level)
{
  // We don't need to check if the pattern is frequent or minimal

  DEBUG(*(graph_miner::logger), "DFS level inside regenerate embeddings = " << dfs_level << " queue size = " << dfs_task_queue[dfs_level].size());

  //not necessary though, as task split is not done while regenerating embeddings

  //current_dfs_level = dfs_level + 1;

  //iterate for all in the task_queue

  while(dfs_task_queue[dfs_level].size() > 0) {

    types::DFS dfs = dfs_task_queue[dfs_level].front();
    dfs_task_queue[dfs_level].pop_front();

    current_dfs_level = dfs_level;
    load_balance();

    DFS_CODE.push(dfs.from, dfs.to, dfs.fromlabel, dfs.elabel, dfs.tolabel);

    DEBUG(*(graph_miner::logger), "*****regenerating embeddings for code: " << DFS_CODE.to_string() );

    //const RMPath &rmpath = DFS_CODE.buildRMPath();
    //int minlabel = DFS_CODE[0].fromlabel;
    //int maxtoc = DFS_CODE[rmpath[0]].to;

    Projected new_root;
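    // new_root re-collects the embeddings of DFS_CODE extended by the popped
    // dfs: one backward edge or the matching forward edges per projected entry.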

    for(unsigned int n = 0; n < projected.size(); ++n) {

      unsigned int id = projected[n].id;
      PDFS *cur = &projected[n];
      History history(graph, cur);

      if(dfs.is_backward() ) {
        Edge *e = get_backward(graph, DFS_CODE, history);
        if(e)
          new_root.push(id, e, cur);
      }else{
        types::EdgeList edges;
        if(get_forward(graph, DFS_CODE, history, edges)) {
          for(types::EdgeList::iterator it = edges.begin(); it != edges.end(); ++it) {
            new_root.push(id, *it, cur);
          }
        }
      }
    }

    if( embeddings_regeneration_level > dfs_level ) {
      regenerate_embeddings(new_root, dfs_level + 1);
    }else{
      //regeneration of embeddings ended
      //now perform regular extensions with project function
      //reset embeddings_regeneration_level
      //embeddings_regeneration_level = 0;
      project(new_root, dfs_level + 1);
    }

    DFS_CODE.pop();

  }
  //current_dfs_level = dfs_level;

  return;
}
Example #9
static int sparse_initialize(int *n, int *non_zero, int **row_ind,
                             int **col_ind, double **values, double **vec,
                             double **svec) {

    int i, j, rc, max, *row_ind_tmp=NULL, *tmp_indices=NULL;
    double *tmp_values=NULL;
    unsigned long len;
    FILE *fp=NULL;

    /* Broadcast order of matrix */
    if(me==0) {
        if((fp=fopen("Sparse-MPI/av41092.rua.data", "r")) == NULL)
            ARMCI_Error("Error: Input file not found", me);
        fortran_indexing = 1; /* This is 1 for Harwell-Boeing format matrices */
        fscanf(fp, "%d", n);
        if(*n%nproc)
            ARMCI_Error("# of rows is not divisible by # of processors", nproc);
        if(*n > ROW)
            ARMCI_Error("order is greater than defined variable ROW", ROW);
    }
    len = sizeof(int);
    armci_msg_brdcst(n, len, 0);

    /* Broadcast the number of non-zeros */
    if(me==0) fscanf(fp, "%d", non_zero);
    armci_msg_brdcst(non_zero, len, 0);

    /* Broadcast row indices */
    len = (*n+1)*sizeof(int);
    row_ind_tmp = (int *)malloc(len);
    if(me==0) {
        for(i=0; i<*n+1; i++) {
            fscanf(fp, "%d", &row_ind_tmp[i]);
            if(fortran_indexing) --row_ind_tmp[i];
        }
    }
    armci_msg_brdcst(row_ind_tmp, len, 0);

    load_balance(*n, *non_zero, row_ind_tmp);
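    /* load_balance() presumably partitions the non-zeros across processes;
       the per-process counts it fills in proc_nz_list[] are used below. */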

    /* find how much temporary storage is needed at the maximum */
    if(me==0) {
        for(max=-1,j=0; j<nproc; j++) if(max<proc_nz_list[j]) max=proc_nz_list[j];
        if(max<0) ARMCI_Error(" max cannot be negative", max);
    }

    /* Broadcast the maximum number of elements */
    len = sizeof(int);
    armci_msg_brdcst(&max, len, 0);

    /* create the Sparse Matrix Array */
    if(me==0) printf("  Creating ValueArray (CompressedSparseMatrix) ...\n\n");
    create_array((void**)col_ind, sizeof(int), 1, &max);

    /* create the column subscript array */
    if(me==0) printf("  Creating Column Subscript Array ... \n\n");
    create_array((void**)values, sizeof(double), 1, &max);

    /* create the x-vector and the solution vector */
    if(me==0) printf("  Creating Vectors ... \n\n");
    create_array((void**)vec,  sizeof(double),1, &max);
    create_array((void**)svec, sizeof(double),1, &max);
    armci_msg_barrier();


    /* Process 0 distributes the column indices and non_zero values to
       respective processors*/
    if(me == 0) {
        tmp_indices = (int *)malloc(max*sizeof(int));
        tmp_values  = (double *)malloc(max*sizeof(double));

        for(j=0; j<nproc; j++) {
            for(i=0; i<proc_nz_list[j]; i++) {
                fscanf(fp, "%d", &tmp_indices[i]);
                if(fortran_indexing) --tmp_indices[i];
            }
            /* rc = fread(tmp_indices, sizeof(int), proc_nz_list[j], fp); */
            if((rc=ARMCI_Put(tmp_indices, col_ind[j], proc_nz_list[j]*sizeof(int), j)))
                ARMCI_Error("armci_nbput failed\n",rc);
        }
        for(j=0; j<nproc; j++) {
            for(i=0; i<proc_nz_list[j]; i++) fscanf(fp, "%lf", &tmp_values[i]);
            if((rc=ARMCI_Put(tmp_values, values[j], proc_nz_list[j]*sizeof(double), j)))
                ARMCI_Error("armci_nbput failed\n",rc);
        }
    }
    ARMCI_AllFence();
    armci_msg_barrier();
    ARMCI_AllFence();

    /* initializing x-vector */
    if(me==0) for(i=0; i<proc_nz_list[me]; i++) vec[me][i] = (i+1);
    else for(i=0; i<proc_nz_list[me]; i++) vec[me][i]=me*proc_nz_list[me-1]+(i+1);

#if 0
    if(me==0) {
        printf("max = %d\n", max);
        for(i=0; i<max; i++)  printf("%.1f ", values[me][i]);
        printf("\n");
    }
#endif

    *row_ind = row_ind_tmp;
    if(me==0) {
        free(tmp_indices);
        free(tmp_values);
        fclose(fp);
    }
    return 0;
}