Example #1
int rip (cdrom_drive *drive, text_tag_s **text_tags, char **filenames)
{
  int i, len;
  rip_opts_s *rip_opts = parse_config (NULL);
  
  for (i = 0; text_tags[i] != NULL; i++)
    /* just counting */;
  len = i; 
  for (i = 0; text_tags[i] != NULL; i++) {
    char *filename;
    if (filenames == NULL) {
      filename = build_filename (text_tags[i]);
    }
    else {
      filename = filenames[i];
    }
    encode_ogg (drive, rip_opts, text_tags[i], i + 1, len, filename, filenames);
    free_text_tag (text_tags[i]);
  }
  free (text_tags);

  update_statistics (0, 0, 0, 0, len, 1, NULL);

  free (rip_opts);
  cdda_close (drive);
  return 0;
}
Example #2
 void update_statistics( NodeCursor const &n , node_statistics &stat )
 {
     stat.num_nodes++;
     if( n.size() == 0 ) stat.num_terminals++;
     else if( n.size() == 1 ) stat.num_unaries++;
     else if( n.size() == 2 ) stat.num_binaries++;
     else GPCXX_ASSERT( false );
     for( size_t i=0 ; i<n.size() ; ++i ) update_statistics( n.children(i) , stat );
 }
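
The `node_statistics` aggregate used above is not shown in this example. A minimal sketch of what it could contain, with field names taken directly from the counters updated above and everything else an assumption rather than the real gpcxx type:

#include <cstddef>

// Hypothetical sketch only: the field names mirror the counters above,
// the rest is an assumption.
struct node_statistics
{
    size_t num_nodes = 0;      // every node visited
    size_t num_terminals = 0;  // nodes with no children
    size_t num_unaries = 0;    // nodes with exactly one child
    size_t num_binaries = 0;   // nodes with exactly two children
};

// Typical use: start from the root cursor with zeroed statistics, e.g.
//   node_statistics stat;
//   update_statistics( tree.root() , stat );
// (tree.root() is illustrative, not a confirmed API call.)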
Example #3
File: thrd.c Project: dacav/soto
/* Each real-time thread of this project actually corresponds to the
 * execution of this routine. */
static
void * thread_routine (void * arg)
{
    /* Thread management information is transparently kept on the thread
     * stack. The external callback system provides the abstraction. */
    thrd_t *thrd = (thrd_t *)arg;
    struct timespec next_act;
    struct timespec finish_time;
    struct timespec arrival_time;
    void *context;

    context = thrd->info.context;

    /* If the user declared an initialization function we execute it. */
    if (thrd->info.init) {
        if (thrd->info.init(context)) {
            /* The user aborted thread startup. If there's a destructor
             * callback it's time to call it. */
            if (thrd->info.destroy) {
                thrd->info.destroy(context);
            }
            pthread_exit(NULL);
        }
    }

    /* Wait for the delayed activation. */
    rtutils_wait(&thrd->start);

    /* Periodic loop: at each cycle the next absolute activation time is
     * computed. */
    rtutils_get_now(&next_act);
    for (;;) {
        rtutils_time_copy(&arrival_time, &next_act);
        rtutils_time_increment(&next_act, &thrd->info.period);

        if (thrd->info.callback(context)) {
            /* Thread required to shut down. If there's a destructor
             * callback it shall be called now. */
            if (thrd->info.destroy) {
                thrd->info.destroy(context);
            }
            pthread_exit(NULL);
        }
        rtutils_get_now(&finish_time);

        update_statistics(&thrd->statistics,
                          rtutils_time2ns(&arrival_time),
                          rtutils_time2ns(&finish_time),
                          rtutils_time_cmp(&next_act, &finish_time) > 0);

        rtutils_wait(&next_act);
    }

    pthread_exit(NULL);
}
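
The `rtutils_*` helpers above are project-specific. The same drift-free periodic pattern (advance an absolute activation time by a fixed period, run the callback, then sleep until that absolute time) can be sketched with plain POSIX calls. This is a rough illustration under that assumption, not the soto API; `period_ns`, `callback` and `ctx` are illustrative parameters.

#include <time.h>

/* Sketch of an absolute-time periodic loop; not the soto thrd_t interface. */
static void periodic_loop (long period_ns, int (*callback) (void *), void *ctx)
{
    struct timespec next_act;
    clock_gettime(CLOCK_MONOTONIC, &next_act);

    for (;;) {
        /* Compute the next absolute activation time. */
        next_act.tv_nsec += period_ns;
        while (next_act.tv_nsec >= 1000000000L) {
            next_act.tv_nsec -= 1000000000L;
            next_act.tv_sec += 1;
        }

        if (callback(ctx)) {
            break;      /* the callback asked the loop to shut down */
        }

        /* Sleep until the absolute activation time, so jitter in the
         * callback does not accumulate into drift. */
        clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next_act, NULL);
    }
}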
Example #4
void TimerHandler::return_timer(Timer* timer, bool callback_successful)
{
  if (!callback_successful)
  {
    // The HTTP Callback failed, so we should set the alarm
    // We also wipe all the tags this timer had from our count, as the timer
    // will be destroyed
    update_statistics(std::vector<std::string>(), timer->tags);

    if ((_timer_pop_alarm) && (timer->is_last_replica()))
    {
      _timer_pop_alarm->set();
    }

    // Decrement global timer count on deleting timer
    TRC_DEBUG("Callback failed");
    _all_timers_table->decrement(1);

    delete timer;
    return;
  }

  // We succeeded but may need to tombstone the timer
  if ((timer->sequence_number + 1) * timer->interval_ms > timer->repeat_for)
  {
    // This timer won't pop again, so tombstone it and update statistics
    timer->become_tombstone();
    update_statistics(std::vector<std::string>(), timer->tags);

    // Decrement global timer count for tombstoned timer
    TRC_DEBUG("Timer won't pop again and is being tombstoned");
    _all_timers_table->decrement(1);
  }

  // Replicate and add the timer back to store
  _replicator->replicate(timer);

  // Timer will be re-added, but stats should not be updated, as
  // no stats were altered on it popping
  add_timer(timer, false);
}
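
In the calls above, the first argument is an empty "tags to add" vector and the second is the set of tags to remove from the per-tag counts. The real `update_statistics` implementation is not part of this example; a hypothetical sketch of the bookkeeping those calls imply (the `tag_counts` table is invented for illustration, not the project's actual data structure) might look like:

#include <map>
#include <string>
#include <vector>

// Hypothetical sketch only; not the project's real update_statistics.
static std::map<std::string, int> tag_counts;

static void update_statistics_sketch(const std::vector<std::string>& tags_to_add,
                                     const std::vector<std::string>& tags_to_remove)
{
  for (const std::string& tag : tags_to_add)
  {
    tag_counts[tag]++;            // one more active timer carries this tag
  }

  for (const std::string& tag : tags_to_remove)
  {
    if (tag_counts[tag] > 0)
    {
      tag_counts[tag]--;          // a timer carrying this tag went away
    }
  }
}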
Example #5
LoadMonitor::LoadMonitor(int init_target_latency, int max_bucket_size,
                         float init_token_rate, float init_min_token_rate,
                         SNMP::ContinuousAccumulatorTable* token_rate_table,
                         SNMP::U32Scalar* smoothed_latency_scalar,
                         SNMP::U32Scalar* target_latency_scalar,
                         SNMP::U32Scalar* penalties_scalar,
                         SNMP::U32Scalar* token_rate_scalar)
                         : bucket(max_bucket_size, init_token_rate),
                           _token_rate_table(token_rate_table),
                           _smoothed_latency_scalar(smoothed_latency_scalar),
                           _target_latency_scalar(target_latency_scalar),
                           _penalties_scalar(penalties_scalar),
                           _token_rate_scalar(token_rate_scalar)
{
  pthread_mutexattr_t attrs;
  pthread_mutexattr_init(&attrs);
  pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&_lock, &attrs);
  pthread_mutexattr_destroy(&attrs);

  TRC_STATUS("Constructing LoadMonitor");
  TRC_STATUS("   Target latency (usecs)   : %d", init_target_latency);
  TRC_STATUS("   Max bucket size          : %d", max_bucket_size);
  TRC_STATUS("   Initial token fill rate/s: %f", init_token_rate);
  TRC_STATUS("   Min token fill rate/s    : %f", init_min_token_rate);

  REQUESTS_BEFORE_ADJUSTMENT = 20;
  SECONDS_BEFORE_ADJUSTMENT = 2;

  // Adjustment parameters for token bucket
  DECREASE_THRESHOLD = 0.0;
  DECREASE_FACTOR = 1.2;
  INCREASE_THRESHOLD = -0.005;
  INCREASE_FACTOR = 0.5;

  accepted = 0;
  rejected = 0;
  penalties = 0;
  pending_count = 0;
  max_pending_count = 0;
  target_latency = init_target_latency;
  smoothed_latency = init_target_latency;
  adjust_count = 0;

  timespec current_time;
  clock_gettime(CLOCK_MONOTONIC_COARSE, &current_time);
  last_adjustment_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_nsec / 1000);
  min_token_rate = init_min_token_rate;

  // As this statistics reporting is continuous, we should
  // publish the statistics when initialised.
  update_statistics();
}
Example #6
int DcoBranchStrategyPseudo::createCandBranchObjects(BcpsTreeNode * node) {
  // get node
  DcoTreeNode * dco_node = dynamic_cast<DcoTreeNode*>(node);
  // update statistics
  update_statistics(dco_node);
  // get dco model and message stuff
  DcoModel * dco_model = dynamic_cast<DcoModel*>(model());
  CoinMessageHandler * message_handler = dco_model->dcoMessageHandler_;
  CoinMessages * messages = dco_model->dcoMessages_;
  // get number of relaxed columns
  // we assume all relaxed columns are integer variables.
  int num_relaxed = dco_model->numRelaxedCols();
  // get indices of relaxed object
  int const * relaxed = dco_model->relaxedCols();
  // store branch objects in bobjects
  std::vector<BcpsBranchObject*> bobjects;
  // iterate over relaxed columns and populate bobjects
  for (int i=0; i<num_relaxed; ++i) {
    int preferredDir;
    BcpsObject * curr_object = dco_model->getVariables()[relaxed[i]];
    double infeasibility = curr_object->infeasibility(dco_model, preferredDir);
    // check the amount of infeasibility
    if (infeasibility != 0.0) {
      double min = std::min(down_derivative_[i], up_derivative_[i]);
      double max = std::max(down_derivative_[i], up_derivative_[i]);
      // compute score
      double score = score_factor_*max + (1.0-score_factor_)*min;
      // create a branch object for this
      BcpsBranchObject * cb =
        curr_object->createBranchObject(dco_model, preferredDir);
      // set score
      cb->setScore(score);
      bobjects.push_back(cb);

      // debug stuff
      message_handler->message(DISCO_PSEUDO_REPORT, *messages)
        << dco_model->broker()->getProcRank()
        << relaxed[i]
        << score
        << CoinMessageEol;
    }
  }
  // add branch objects to branchObjects_
  setBranchObjects(bobjects);
  // bobjects are now owned by BcpsBranchStrategy, do not free them.
  bobjects.clear();
  // set the branch object member of the node
  dco_node->setBranchObject(new DcoBranchObject(bestBranchObject()));
  // compare branch objects and keep the best one at bestBranchObject_
  return 0;
}
Example #7
void TimePartitions::report_gc_phase_end(const Ticks& time, GCPhase::PhaseType type) {
  int phase_index = _active_phases.pop();
  GCPhase* phase = _phases->adr_at(phase_index);
  phase->set_end(time);
  update_statistics(phase);
}
Example #8
static int encode_ogg (cdrom_drive *drive, rip_opts_s *rip_opts,
		       text_tag_s *text_tag, int track,
		       int tracktot, char *filename, char **filenames)
{
  ogg_stream_state os;
  ogg_page og;
  ogg_packet op;

  vorbis_dsp_state vd;
  vorbis_block vb;
  vorbis_info vi;

  long samplesdone = 0;
  int sector = 0, last_sector = 0;
  long bytes_written = 0, packetsdone = 0;
  double time_elapsed = 0.0;
  int ret = 0;
  time_t *timer;
  double time = 0.0;
  
  int serialno = rand ();
  vorbis_comment vc;
  long total_samples_per_channel = 0;
  int channels = 2;
  int eos = 0;
  long rate = 44100;
  FILE *out = fopen (filename, "w+");

  timer = timer_start ();

  if (!rip_opts->managed && (rip_opts->min_bitrate > 0 || rip_opts->max_bitrate > 0)) {
    log_msg ("Min or max bitrate requires managed", FL, FN, LN);
    return -1;
  }

  if (rip_opts->bitrate < 0 && rip_opts->min_bitrate < 0 && rip_opts->max_bitrate < 0) {
    rip_opts->quality_set = 1;
  }
  
  start_func (filename, rip_opts->bitrate, rip_opts->quality, rip_opts->quality_set,
	      rip_opts->managed, rip_opts->min_bitrate, rip_opts->max_bitrate);
  
  vorbis_info_init (&vi);

  if (rip_opts->quality_set > 0) {
    if (vorbis_encode_setup_vbr (&vi, channels, rate, rip_opts->quality)) {
      log_msg ("Couldn't initialize vorbis_info", FL, FN, LN);
      vorbis_info_clear (&vi);
      return -1;
    }
    /* two options here, max or min bitrate */
    if (rip_opts->max_bitrate > 0 || rip_opts->min_bitrate > 0) {
      struct ovectl_ratemanage_arg ai;
      vorbis_encode_ctl (&vi, OV_ECTL_RATEMANAGE_GET, &ai);
      ai.bitrate_hard_min = rip_opts->min_bitrate;
      ai.bitrate_hard_max = rip_opts->max_bitrate;
      ai.management_active = 1;
      vorbis_encode_ctl (&vi, OV_ECTL_RATEMANAGE_SET, &ai);
    }
  } else {
    if (vorbis_encode_setup_managed (&vi, channels, rate,
				     rip_opts->max_bitrate > 0 ? rip_opts->max_bitrate * 1000 : -1,
				     rip_opts->bitrate * 1000,
				     rip_opts->min_bitrate > 0 ? rip_opts->min_bitrate * 1000 : -1)) {
      log_msg ("Mode init failed, encode setup managed", FL, FN, LN);
      vorbis_info_clear (&vi);
      return -1;
    }
  }

  if (rip_opts->managed && rip_opts->bitrate < 0) {
    vorbis_encode_ctl (&vi, OV_ECTL_RATEMANAGE_AVG, NULL);
  } else if (!rip_opts->managed) {
    vorbis_encode_ctl (&vi, OV_ECTL_RATEMANAGE_SET, NULL);
  }

  /* set advanced encoder options */

  vorbis_encode_setup_init (&vi);

  vorbis_analysis_init (&vd, &vi);
  vorbis_block_init (&vd, &vb);

  ogg_stream_init (&os, serialno);

  {
    ogg_packet header_main;
    ogg_packet header_comments;
    ogg_packet header_codebooks;
    int result;
    char buf[32];

    vorbis_comment_init (&vc);
    vorbis_comment_add_tag (&vc, "title", text_tag->songname);
    vorbis_comment_add_tag (&vc, "artist", text_tag->artistname);
    vorbis_comment_add_tag (&vc, "album", text_tag->albumname);
    vorbis_comment_add_tag (&vc, "genre", text_tag->genre);
    snprintf (buf, 32, "%d", text_tag->year);
    vorbis_comment_add_tag (&vc, "date", buf);
    snprintf (buf, 32, "%02d", text_tag->track);
    vorbis_comment_add_tag (&vc, "tracknumber", buf);
	
    vorbis_analysis_headerout (&vd, &vc, &header_main, &header_comments, &header_codebooks);

    ogg_stream_packetin (&os, &header_main);
    ogg_stream_packetin (&os, &header_comments);
    ogg_stream_packetin (&os, &header_codebooks);

    while ((result = ogg_stream_flush (&os, &og))) {
      if (result == 0)
	break;
      ret = write_page (&og, out);
      if (ret != og.header_len + og.body_len) {
	log_msg ("Failed writing data to output stream", FL, FN, LN);
	ret = -1;
      }
    }
	  
    sector = cdda_track_firstsector (drive, track);
    last_sector = cdda_track_lastsector (drive, track);
    total_samples_per_channel = (last_sector - sector) * (CD_FRAMESAMPLES / 2);
	
    while (!eos) {
      signed char *buffer = (signed char *)malloc (CD_FRAMESIZE_RAW * READ_SECTORS);
      /* reuse this variable: first the sectors remaining, then the count actually read */
      long sectors_read = last_sector - sector;
      if (sectors_read > READ_SECTORS)
	sectors_read = READ_SECTORS;

      sectors_read = cdda_read (drive, (signed char *)buffer, sector, sectors_read);
      int i;
	  
      if (sectors_read == 0) {
	vorbis_analysis_wrote (&vd, 0);
      } else {
	float **vorbbuf = vorbis_analysis_buffer (&vd, CD_FRAMESIZE_RAW * sectors_read);
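	/* Deinterleave the CD's 16-bit little-endian interleaved stereo PCM
	 * into two float channels scaled to [-1.0, 1.0). */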
	for (i = 0; i < (CD_FRAMESIZE_RAW * sectors_read) / 4; i++) {
	  vorbbuf[0][i] = ((buffer[i * 4 + 1] << 8) | (0x00ff&(int)buffer[i * 4])) / 32768.f;
	  vorbbuf[1][i] = ((buffer[i * 4 + 3] << 8) | (0x00ff&(int)buffer[i * 4 + 2])) / 32768.f;
	}

	int samples_read = sectors_read * (CD_FRAMESAMPLES / 2);
	samplesdone += samples_read;
	// emit a progress update roughly every 60 packets
	if (packetsdone >= 60) {
	  packetsdone = 0;
	  time = timer_time (timer);
	  update_statistics (total_samples_per_channel, samplesdone, time, track,
			     tracktot, 0, filenames);
	}
	vorbis_analysis_wrote (&vd, i);
      }
	  
      free (buffer);
      sector += sectors_read;
	  
      while (vorbis_analysis_blockout (&vd, &vb) == 1) {
	vorbis_analysis (&vb, &op);
	vorbis_bitrate_addblock (&vb);

	while (vorbis_bitrate_flushpacket (&vd, &op)) {
	  ogg_stream_packetin (&os, &op);
	  packetsdone++;

	  while (!eos) {
	    int result = ogg_stream_pageout (&os, &og);
	    if (result == 0) {
	      break;
	    }
	    ret = write_page (&og, out);
	    if (ret != og.header_len + og.body_len) {
	      log_msg ("Failed writing data to output stream", FL, FN, LN);
	      ret = -1;
	    } else
	      bytes_written += ret;

	    if (ogg_page_eos (&og)) {
	      eos = 1;
	    }
	  }
	}
      }
    }
  }
  ret = 0;

  update_statistics (total_samples_per_channel, samplesdone, time, track,
		     tracktot, 0, filenames);
  
  ogg_stream_clear (&os);
  vorbis_block_clear (&vb);
  vorbis_dsp_clear (&vd);
  vorbis_comment_clear (&vc);
  vorbis_info_clear (&vi);
  time_elapsed = timer_time (timer);
  end_func (time_elapsed, rate, samplesdone, bytes_written);
  timer_clear (timer);
  fclose (out);
  
  return ret;
}
Example #9
static inline int ping_v4(const char * hostname)
{
    int sockfd;
    struct sockaddr_in address;
    icmp4_packet packet;

    int success = get_ipv4(hostname, IPPROTO_ICMP, &address);
    printf("ping ");
    print_host_v4(&address);
    printf("\n");
    succeed_or_die(success, 0, create_raw_socket(AF_INET, SOCK_RAW, IPPROTO_ICMP, &sockfd));
    succeed_or_die(success, 0, icmp4_packet_init(&packet, extract_ipv4(&address)));
    succeed_or_die(success, 0, icmp4_packet_set_length(&packet, sizeof packet));

    int sent = 0;
    int i = 0;
    int gotten = 0;
    struct timeval wait_time = { 1, 0 };
    long double min = 0.;
    long double max = 0.;
    long double sum = .0;
    struct timeval ping_start = { 0, 0 };
    struct timeval ping_end = { 0, 0 };
    gettimeofday(&ping_start, NULL);
    while (success == 0 && fin_des_temps == 1)
    {
        struct timeval start = { 0, 0 };
        struct timeval end = { 0, 0 };

        succeed_or_die(success, 0, icmp4_packet_set_echo_seq(&packet, i));
        gettimeofday(&start, NULL);
        if (sendto(sockfd, &packet, sizeof packet, 0, (struct sockaddr *) &address, sizeof address) == sizeof packet)
        {
            sent++;
            icmp4_packet received;
            memset(&received, 0, sizeof received);
            int before = gotten;
            if (receive_icmp_v4(sockfd, &address, &wait_time, &received) == 0)
            {
                if (received.icmp_header.type == ICMP_ECHOREPLY
                    && received.icmp_header.un.echo.sequence == i
                    && received.icmp_header.un.echo.id == packet.icmp_header.un.echo.id
                    )
                {
                    gotten++;
                    gettimeofday(&end, NULL);
                    struct timeval diff = diff_timeval(start, end);
                    long double rtt = extract_time(diff);
                    update_statistics(&min, &max, &sum, rtt, gotten, before);
                    print_received(&received, &address, rtt);

                    if (rtt > sum / gotten * 2 || rtt < sum / gotten / 2)
                        success = -1;
                }
            }
            if ((float) gotten / sent < 0.7)
                success = -1;
        }
        i++;
        sleep(1);
    }
    gettimeofday(&ping_end, NULL);
    struct timeval total = diff_timeval(ping_start, ping_end);
    print_ping_statistics(sent, gotten, min, max, sum, total, &address);

    return success;

}
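
The `update_statistics(&min, &max, &sum, rtt, gotten, before)` call above maintains running RTT statistics, but its implementation is not shown in this example. A hypothetical sketch consistent with how it is called (where `before` is the reply count prior to this reply) might be:

/* Hypothetical sketch only; not this project's actual update_statistics. */
static void update_statistics_sketch(long double *min, long double *max,
                                     long double *sum, long double rtt,
                                     int gotten, int before)
{
    (void) gotten;           /* updated reply count; unused in this sketch */

    if (before == 0)
    {
        /* First reply: seed the running minimum and maximum. */
        *min = rtt;
        *max = rtt;
    }
    else
    {
        if (rtt < *min) *min = rtt;
        if (rtt > *max) *max = rtt;
    }

    *sum += rtt;             /* the mean is later recovered as sum / gotten */
}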
Example #10
void TimePartitions::report_gc_phase_end(jlong time) {
  int phase_index = _active_phases.pop();
  GCPhase* phase = _phases->adr_at(phase_index);
  phase->set_end(time);
  update_statistics(phase);
}
Example #11
void PFilter::predict(Sampled_predict_model& predict_model)
{
    SIR_kalman_scheme::predict(predict_model);
    update_statistics();
}
Example #12
void LoadMonitor::request_complete(int latency)
{
  pthread_mutex_lock(&_lock);
  pending_count -= 1;
  smoothed_latency = (7 * smoothed_latency + latency) / 8;
  adjust_count += 1;

  if (adjust_count >= REQUESTS_BEFORE_ADJUSTMENT)
  {
    // We've seen the right number of requests, but ensure
    // that an appropriate amount of time has passed, so the rate doesn't
    // fluctuate wildly if latency spikes for a few milliseconds
    timespec current_time;
    clock_gettime(CLOCK_MONOTONIC_COARSE, &current_time);
    unsigned long current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_nsec / 1000);
    if (current_time_ms >= (last_adjustment_time_ms + (SECONDS_BEFORE_ADJUSTMENT * 1000)))
    {
      // This algorithm is based on the Welsh and Culler "Adaptive Overload
      // Control for Busy Internet Servers" paper, although it uses a smoothed
      // mean latency rather than the 90th percentile as per the paper.
      // Also, the additive increase is scaled as a proportion of the maximum
      // bucket size, rather than an absolute number as per the paper.
      float err = ((float) (smoothed_latency - target_latency)) / target_latency;

      // Work out the percentage of accepted requests (for logs)
      float accepted_percent = (accepted + rejected == 0) ? 100.0 : 100 * (((float) accepted) / (accepted + rejected));

      TRC_INFO("Accepted %f%% of requests, latency error = %f, overload responses = %d",
          accepted_percent, err, penalties);

      // latency is above where we want it to be, or we are getting overload responses from
      // Homer/Homestead, so adjust the rate downwards by a multiplicative factor

      if (err > DECREASE_THRESHOLD || penalties > 0)
      {
        float new_rate = bucket.rate / DECREASE_FACTOR;
        if (new_rate < min_token_rate)
        {
          new_rate = min_token_rate;
        }
        bucket.update_rate(new_rate);
        TRC_STATUS("Maximum incoming request rate/second decreased to %f "
                   "(based on a smoothed mean latency of %d and %d upstream overload responses)",
                   bucket.rate,
                   smoothed_latency,
                   penalties);
      }
      else if (err < INCREASE_THRESHOLD)
      {
        // Our latency is below the threshold, so increasing our permitted request rate would be
        // sensible. Before doing that, we check that we're using a significant proportion of our
        // current rate - if we're allowing 100 requests/sec, and we get 1 request/sec because it's
        // a quiet period, then it's going to be handled quickly, but that's not sufficient evidence
        // to increase our rate.
        float maximum_permitted_requests = bucket.rate * (current_time_ms - last_adjustment_time_ms) / 1000;

        // Arbitrary threshold - require 50% of our current permitted rate to be used
        float minimum_threshold = maximum_permitted_requests * 0.5;

        if (accepted > minimum_threshold)
        {
          float new_rate = bucket.rate + (-1 * err * bucket.max_size * INCREASE_FACTOR);
          bucket.update_rate(new_rate);
          TRC_STATUS("Maximum incoming request rate/second increased to %f "
                     "(based on a smoothed mean latency of %d and %d upstream overload responses)",
                     bucket.rate,
                     smoothed_latency,
                     penalties);
        }
        else
        {
          TRC_STATUS("Maximum incoming request rate/second unchanged - only handled %d requests"
                     " recently, minimum threshold for a change is %f",
                     accepted,
                     minimum_threshold);
        }
      }
      else
      {
        TRC_DEBUG("Maximum incoming request rate/second is unchanged at %f",
                  bucket.rate);
      }

      update_statistics();

      // Reset counts
      last_adjustment_time_ms = current_time_ms;
      adjust_count = 0;
      accepted = 0;
      rejected = 0;
      penalties = 0;
    }
  }

  pthread_mutex_unlock(&_lock);
}
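
Stripped of the locking and logging, the adjustment described in the comments above applies a multiplicative decrease when the latency error is positive (or upstream penalties were seen) and an additive increase, scaled by the maximum bucket size, when latency is comfortably low. A rough sketch of just that step follows; the constants mirror the ones set in Example #5, everything else (the function name and parameters) is an assumption, and the real code additionally requires a minimum volume of accepted requests before increasing.

#include <algorithm>

// Hedged sketch of the rate-adjustment step only; not the real LoadMonitor code.
static float adjust_rate(float rate, float min_rate, float max_bucket_size,
                         int smoothed_latency, int target_latency, int penalties)
{
  // Relative latency error, as computed in the example above.
  float err = ((float)(smoothed_latency - target_latency)) / target_latency;

  if (err > 0.0f || penalties > 0)
  {
    // Latency too high or upstream overload: multiplicative decrease
    // (DECREASE_THRESHOLD = 0.0, DECREASE_FACTOR = 1.2), floored at the
    // minimum token rate.
    rate = std::max(rate / 1.2f, min_rate);
  }
  else if (err < -0.005f)  // INCREASE_THRESHOLD
  {
    // Latency comfortably low: additive increase scaled by the maximum
    // bucket size (INCREASE_FACTOR = 0.5).
    rate = rate + (-err * max_bucket_size * 0.5f);
  }

  return rate;
}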
Example #13
int analyze(char *trace,char *config,char *output,char *log)
{
	unsigned int i,chunk_num;
	unsigned int size_in_window=0,req_in_window=0;
	long double time_in_window=0;
	double i_non_access=0,i_inactive=0,i_seq_intensive=0,i_seq_less_intensive=0,i_random_intensive=0,i_random_less_intensive=0;

	struct pool_info *pool;
	pool=(struct pool_info *)malloc(sizeof(struct pool_info));
	alloc_assert(pool,"pool");
	memset(pool,0,sizeof(struct pool_info));

	load_parameters(pool,config);
	initialize(pool,trace,output,log);

#ifdef _NETAPP_TRACE_
	chunk_num=get_range_netapp(pool);
	fgets(pool->buffer,SIZE_BUFFER,pool->file_trace);	//read the first line out
	while(get_request_netapp(pool)!=FAILURE)
#else
	chunk_num=get_range_msr(pool);
	while(get_request_msr(pool)!=FAILURE)
#endif
	{
		if(pool->window_type==WINDOW_DATA)
		{
			seq_detection(pool);	//Sequential IO Detection
			update_statistics(pool);

			//update window info
			size_in_window+=pool->req->size;
			req_in_window++;
			if(req_in_window==1)
				pool->window_time_start=pool->req->time;
			pool->window_time_end=pool->req->time;
			
			//THE CURRENT WINDOW IS FULL
			if((size_in_window>=pool->window_size*2048)||((feof(pool->file_trace)!=0)&&(size_in_window>0)))
			{
				flush_stream(pool);	//Flush information in POOL->STREAMS into each chunk
				/*Pattern Detection*/
				time_in_window=(long double)(pool->window_time_end-pool->window_time_start)/(long double)1000000000;
				pool->window_time[pool->window_sum]=time_in_window;
				pool->chunk_access[pool->window_sum]=pool->chunk_win;

				for(i=pool->chunk_min;i<=pool->chunk_max;i++)
				{
					if(pool->chunk[i].req_sum_all==0)//no access
					{
						/*No Access*/
						if(pool->record_all[i].accessed!=0)
						{
							i_non_access++;
						}
						pool->chunk[i].pattern=PATTERN_NON_ACCESS;
					}
					else if(pool->chunk[i].req_sum_all<pool->threshold_inactive)//inactive
					{
						/*Inactive*/
						i_inactive++;
						if(((long double)pool->chunk[i].req_sum_read/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
						{
							/*Inactive Read*/
							pool->chunk[i].pattern=PATTERN_INACTIVE_R;
						}
						else if(((long double)pool->chunk[i].req_sum_write/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
						{
							/*Inactive Write*/
							pool->chunk[i].pattern=PATTERN_INACTIVE_W;
						}
						else{
							/*Inactive Hybrid*/
							pool->chunk[i].pattern=PATTERN_INACTIVE_H;
						}
					}
					else if((pool->chunk[i].seq_size_all/pool->chunk[i].req_size_all)>=pool->threshold_cbr &&
						((long double)pool->chunk[i].seq_sum_all/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_car)
					{
						/*SEQUENTIAL*/
						i_seq_intensive++;
						/*Sequential Intensive*/
						if(pool->chunk[i].req_sum_all>=(req_in_window/pool->chunk_win)*pool->threshold_intensive)
						{
							if(((long double)pool->chunk[i].req_sum_read/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
							{
								/*Sequential Intensive Read*/
								pool->chunk[i].pattern=PATTERN_SEQ_INTENSIVE_R;
							}
							else if(((long double)pool->chunk[i].req_sum_write/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
							{
								/*Sequential Intensive Write*/
								pool->chunk[i].pattern=PATTERN_SEQ_INTENSIVE_W;
							}
							else
							{
								/*Sequential Intensive Hybrid*/
								pool->chunk[i].pattern=PATTERN_SEQ_INTENSIVE_H;
							}
						}
						else{
							i_seq_less_intensive++;
							if(((long double)pool->chunk[i].req_sum_read/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
							{
								/*Sequential Less Intensive Read*/
								pool->chunk[i].pattern=PATTERN_SEQ_LESS_INTENSIVE_R;
							}
							else if(((long double)pool->chunk[i].req_sum_write/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
							{
								/*Sequential Less Intensive Write*/
								pool->chunk[i].pattern=PATTERN_SEQ_LESS_INTENSIVE_W;
							}
							else
							{
								/*Sequential Less Intensive Hybrid*/
								pool->chunk[i].pattern=PATTERN_SEQ_LESS_INTENSIVE_H;
							}
						}
					}
					else{
						/*Random*/
						i_random_intensive++;
						if(pool->chunk[i].req_sum_all>=(req_in_window/pool->chunk_win)*pool->threshold_intensive)
						{
							if(((long double)pool->chunk[i].req_sum_read/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
							{
								/*Random Intensive Read*/
								pool->chunk[i].pattern=PATTERN_RANDOM_INTENSIVE_R;
							}
							else if(((long double)pool->chunk[i].req_sum_write/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
							{
								/*Random Intensive Write*/
								pool->chunk[i].pattern=PATTERN_RANDOM_INTENSIVE_W;
							}
							else
							{
								/*Random Intensive Hybrid*/
								pool->chunk[i].pattern=PATTERN_RANDOM_INTENSIVE_H;
							}
						}
						else{
							i_random_less_intensive++;
							if(((long double)pool->chunk[i].req_sum_read/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
							{
								/*Random Less Intensive Read*/
								pool->chunk[i].pattern=PATTERN_RANDOM_LESS_INTENSIVE_R;
							}
							else if(((long double)pool->chunk[i].req_sum_write/(long double)pool->chunk[i].req_sum_all)>=pool->threshold_rw)
							{
								/*Random Less Intensive Write*/
								pool->chunk[i].pattern=PATTERN_RANDOM_LESS_INTENSIVE_W;
							}
							else
							{
								/*Random Less Intensive Hybrid*/
								pool->chunk[i].pattern=PATTERN_RANDOM_LESS_INTENSIVE_H;
							}
						}
					}
					//Only record limited information (the first SIZE_ARRAY windows)
					if(pool->window_sum<SIZE_ARRAY)
					{
						pool->chunk[i].history_pattern[pool->window_sum]=pool->chunk[i].pattern;

						pool->pattern_non_access[pool->window_sum]=i_non_access/(double)pool->chunk_all;
						pool->pattern_inactive[pool->window_sum]=i_inactive/(double)pool->chunk_all;
						pool->pattern_seq_intensive[pool->window_sum]=i_seq_intensive/(double)pool->chunk_all;
						pool->pattern_seq_less_intensive[pool->window_sum]=i_seq_less_intensive/(double)pool->chunk_all;
						pool->pattern_random_intensive[pool->window_sum]=i_random_intensive/(double)pool->chunk_all;
						pool->pattern_random_less_intensive[pool->window_sum]=i_random_less_intensive/(double)pool->chunk_all;
					}
					
					print_log(pool,i);	//print info of each chunk in this window to log file.
					/*Initialize the statistics in each chunk*/
					pool->chunk[i].req_sum_all=0;
					pool->chunk[i].req_sum_read=0;
					pool->chunk[i].req_sum_write=0;
					pool->chunk[i].req_size_all=0;
					pool->chunk[i].req_size_read=0;
					pool->chunk[i].req_size_write=0;

					pool->chunk[i].seq_sum_all=0;
					pool->chunk[i].seq_sum_read=0;
					pool->chunk[i].seq_sum_write=0;
					pool->chunk[i].seq_stream_all=0;
					pool->chunk[i].seq_stream_read=0;
					pool->chunk[i].seq_stream_write=0;
					pool->chunk[i].seq_size_all=0;
					pool->chunk[i].seq_size_read=0;
					pool->chunk[i].seq_size_write=0;
				}//for
				
				/*Update the pool info*/
				pool->window_sum++;
				if(pool->window_sum%20==0)
					printf("------pool->window_sum=%d---------\n",pool->window_sum);
				pool->window_time_start=0;
				pool->window_time_end=0;
				
				/*Start a new window*/
				size_in_window=0;
				req_in_window=0;
				time_in_window=0;
				
				i_non_access=0;
				i_inactive=0;
				i_seq_intensive=0;
				i_seq_less_intensive=0;
				i_random_intensive=0;
				i_random_less_intensive=0;

				//accessed chunks in each window
				memset(pool->record_win,0,sizeof(struct record_info)*pool->chunk_sum);
				printf("pool->chunk_win=%d\n",pool->chunk_win);
				pool->chunk_win=0;
			}//if
		}//if
	}//while

	print_statistics(pool);

	fclose(pool->file_trace);
	fclose(pool->file_output);
	fclose(pool->file_log);
	
	free(pool->chunk);
	free(pool->map);
	free(pool->req);
	free(pool);

	return SUCCESS;
}
Example #14
File: qvmc.cpp Project: sarahr/QVMC
/**
 * Runs the VMC algorithm
 * @param N - number of MC cycles
 * @param N_therm - number of thermalization steps
 * @param alpha - first variational parameter
 * @param beta - second variational parameter
 * @param myrank - MPI rank
 */
void VMC::run_algo(int N, int N_therm, double alpha, double beta, int myrank) {

    int i;
    double rat, eps;
    double del_E = 0;
    double del_Epot = 0;
    double del_Ekin = 0;

#if DENSITY
    ofstream ofile2;
    ostringstream ost;
    ost << "density" << myrank << ".dat";
    ofile2.open(ost.str().c_str(), ios::out);
#endif
#if PAIRCOR
    ofstream ofile4;
    ostringstream ost;
    ost << "paircorVMC" << myrank << ".dat";
    ofile4.open(ost.str().c_str(), ios::out);
#endif  
#if POSITION
    ofstream ofile5;
    ostringstream ost;
    ost << "position" << myrank << ".dat";
    ofile5.open(ost.str().c_str(), ios::out);
#endif


    //*************************  Thermalization  ******************************

    accepted = 0;

    initialize(alpha, beta); // Initialize the system


    for (i = 0; i < N_therm; i++) {

        for (int p = 0; p < numpart; p++) {// Loop over all particles

            // Calculate trial position
            trial_pos(p, alpha, beta);

            // Compute acceptance ratio
            rat = ratio(p, alpha, beta);

            // Check if move is accepted
            if (rat >= 1.0) { // accept if probability is greater
                accept(p, alpha, beta);
                accepted++;
            } else { // otherwise check against random number
                eps = ran3(&idum);
                if (eps < rat) {
                    accept(p, alpha, beta);
                    accepted++;
                } else
                    not_accept(p); // Do not accept
            }
        }
    }


    //********************** After thermalization *****************************


    if (N > 0) accepted = 0; // For function delta_opt()

    for (i = 0; i < N; i++) {

        for (int p = 0; p < numpart; p++) { // Loop over particles

            // Calculate trial position
            trial_pos(p, alpha, beta);

            // Compute acceptance ratio
            rat = ratio(p, alpha, beta);

            // Check if move is accepted
            if (rat >= 1.0) { // accept if probability is greater
                accept(p, alpha, beta);
                del_E = E_local(alpha, beta);
#if E_POT_KIN
                E_Pot_Kin(alpha, beta, del_Epot, del_Ekin);
#endif

#if MINIMIZE
                // part_psi(alpha, beta);
                part_psi_analytic(alpha, beta);
#endif  
                accepted++;
            } else { // otherwise check against random number

                eps = ran3(&idum);
                if (eps < rat) {
                    accept(p, alpha, beta);
                    del_E = E_local(alpha, beta);
#if E_POT_KIN
                    E_Pot_Kin(alpha, beta, del_Epot, del_Ekin);
#endif

#if MINIMIZE
                    //part_psi(alpha, beta);
                    part_psi_analytic(alpha, beta);
#endif   
                    accepted++;
                } else
                    not_accept(p); // Do not accept

            }

            // Updating statistics 
#if E_POT_KIN
            update_statistics(del_E, del_Epot, del_Ekin);
#else
            update_statistics(del_E);
#endif

        }

#if DENSITY
        if (i % 100 == 0) {
            for (int l = 0; l < numpart; l++) {
                ofile2 << sqrt(Trial->Pos->r(l)) << " ";
            }

            ofile2 << 1.0 << endl; // weight "1" for walker in VMC
        }
#endif
#if PAIRCOR
        if ((i % 500) == 0) {
            for (int h = 1; h < numpart; h++) {
                for (int m = 0; m < h; m++) {
                    ofile4 << Trial->Pos->r_int(m, h) << " ";
                }
            }

            ofile4 << 1 << endl;
        }

#endif
#if POSITION
        if ((i % 500) == 0)
            for (int part = 0; part < numpart; part++) {
                for (int m = 0; m < dim; m++) {
                    ofile5 << Trial->Pos->current(part, m) << " ";
                }
                ofile5 << endl;
            }
#endif

    }

    //cout << accepted/(N*numpart) << endl;

#if DENSITY
    ofile2.close();
#endif
#if PAIRCOR
    ofile4.close();
#endif
#if POSITION
    ofile5.close();
#endif


    return;

}
Example #15
void TimerHandler::add_timer(Timer* timer, bool update_stats)
{
  pthread_mutex_lock(&_mutex);
  bool will_add_timer = true;

  // Convert the new timer to a timer pair
  TimerPair new_tp;
  new_tp.active_timer = timer;

  // Pull out any existing timer pair from the timer store
  TimerPair existing_tp;
  bool timer_found = _store->fetch(timer->id, existing_tp);

  // We've found a timer.
  if (timer_found)
  {
    std::string cluster_view_id;
    __globals->get_cluster_view_id(cluster_view_id);

    if ((timer->is_matching_cluster_view_id(cluster_view_id)) &&
        !(existing_tp.active_timer->is_matching_cluster_view_id(cluster_view_id)))
    {
      // If the new timer matches the current cluster view ID, and the old timer
      // doesn't, always prioritise the new timer.
      TRC_DEBUG("Adding timer with current cluster view ID");
    }
    else if (timer->sequence_number == existing_tp.active_timer->sequence_number)
    {
      // If the new timer has the same sequence number as the old timer,
      // then check which timer is newer. If the existing timer is newer then we just
      // want to replace the timer and not change it
      if (Utils::overflow_less_than(timer->start_time_mono_ms,
                                    existing_tp.active_timer->start_time_mono_ms))
      {
        TRC_DEBUG("Timer sequence numbers the same, but timer is older than the "
                  "timer in the store");

        delete new_tp.active_timer;
        new_tp.active_timer = new Timer(*existing_tp.active_timer);

        if (existing_tp.information_timer)
        {
          new_tp.information_timer = new Timer(*existing_tp.information_timer);
        }

        will_add_timer = false;
      }
      else
      {
        TRC_DEBUG("Adding timer as it's newer than the timer in the store");
      }
    }
    else
    {
      // One of the sequence numbers is non-zero - at least one request is not
      // from the client
      if ((near_time(timer->start_time_mono_ms,
                     existing_tp.active_timer->start_time_mono_ms))            &&
          (timer->sequence_number < existing_tp.active_timer->sequence_number) &&
          (timer->sequence_number != 0))
      {
        // These are probably the same timer, and the timer we are trying to add is both
        // not from the client, and has a lower sequence number (so is less "informed")
        TRC_DEBUG("Not adding timer as it's older than the timer in the store");

        delete new_tp.active_timer;
        new_tp.active_timer = new Timer(*existing_tp.active_timer);

        if (existing_tp.information_timer)
        {
          new_tp.information_timer = new Timer(*existing_tp.information_timer);
        }

        will_add_timer = false;
      }
      else
      {
        TRC_DEBUG("Adding timer as it's newer than the timer in the store");
      }
    }

    // We're adding the new timer (not just replacing an existing timer)
    if (will_add_timer)
    {
      // If the new timer is a tombstone, make sure its interval is long enough
      save_tombstone_information(new_tp.active_timer, existing_tp.active_timer);

      // Decide whether we should save the old timer as an informational timer
      if (existing_tp.active_timer->cluster_view_id !=
          new_tp.active_timer->cluster_view_id)
      {
        // The cluster IDs on the new and existing timers are different.
        // This means that the cluster configuration has changed between
        // then and when the timer was last updated
        TRC_DEBUG("Saving existing timer as informational timer");

        if (existing_tp.information_timer)
        {
          // There's already a saved timer, but the new timer doesn't match the
          // existing timer. This is an error condition, and suggests that
          // a scaling operation has been started before an old scaling operation
          // finished, or there was a node failure during a scaling operation.
          // Either way, the saved timer information is out of date, and is
          // deleted (by not saving a copy of it when we delete the entire Timer
          // ID structure in the next step)
          TRC_WARNING("Deleting out of date timer from timer map");
        }

        new_tp.information_timer = new Timer(*existing_tp.active_timer);
      }
      else if (existing_tp.information_timer)
      {
        // If there's an existing informational timer save it off
        new_tp.information_timer = new Timer(*existing_tp.information_timer);
      }
    }
  }
  else
  {
    TRC_DEBUG("Adding new timer");
  }

  // It would be good in future work to pull all statistics logic out into a
  // separate statistics module, passing in new and old tags, and what is
  // happening to the timer (add, update, delete), to keep the timer_handler
  // scope of responsibility clear.

  // Update statistics 
  if (update_stats)
  {
    std::vector<std::string> tags_to_add = std::vector<std::string>();
    std::vector<std::string> tags_to_remove = std::vector<std::string>();

    if (new_tp.active_timer->is_tombstone())
    {
      // If the new timer is a tombstone, no new tags should be added
      // If it overwrites an existing active timer, the old tags should
      // be removed, and global count decremented
      if ((existing_tp.active_timer) &&
          !(existing_tp.active_timer->is_tombstone()))
      {
        tags_to_remove = existing_tp.active_timer->tags;
        TRC_DEBUG("new timer is a tombstone overwriting an existing timer");
        _all_timers_table->decrement(1);
      }
    }
    else
    {
      // Add new timer tags
      tags_to_add = new_tp.active_timer->tags;

      // If there was an old existing timer, its tags should be removed
      // Global count should only increment if there was not an old
      // timer, as otherwise it is only an update.
      if ((existing_tp.active_timer) &&
          !(existing_tp.active_timer->is_tombstone()))
      {
        tags_to_remove = existing_tp.active_timer->tags;
      }
      else
      {
        TRC_DEBUG("New timer being added, and no existing timer");
        _all_timers_table->increment(1);
      }
    }

    update_statistics(tags_to_add, tags_to_remove);
  }

  delete existing_tp.active_timer;
  delete existing_tp.information_timer;

  TimerID id = new_tp.active_timer->id;
  uint32_t next_pop_time = new_tp.active_timer->next_pop_time();

  std::vector<std::string> cluster_view_id_vector;
  cluster_view_id_vector.push_back(new_tp.active_timer->cluster_view_id);

  if (new_tp.information_timer)
  {
    cluster_view_id_vector.push_back(new_tp.information_timer->cluster_view_id);
  }

  TRC_DEBUG("Inserting the new timer with ID %llu", id);
  _store->insert(new_tp, id, next_pop_time, cluster_view_id_vector);
  pthread_mutex_unlock(&_mutex);
}