Example #1
/* Run MPI_Fetch_and_op latency with MPI_Win_lock/unlock */
void run_fop_with_lock(int rank, WINDOW type)
{
    int i;
    MPI_Aint disp = 0;
    MPI_Win     win;

    allocate_atomic_memory(rank, sbuf_original, rbuf_original,
                tbuf_original, NULL, (char **)&sbuf, (char **)&rbuf,
                (char **)&tbuf, NULL, (char **)&rbuf,  MAX_MSG_SIZE, type, &win);

    if (rank == 0) {
        if (type == WIN_DYNAMIC) {
            disp = disp_remote;
        }

        for (i = 0; i < skip + loop; i++) {
            /* start timing only after the warmup iterations */
            if (i == skip) {
                t_start = MPI_Wtime();
            }
            MPI_CHECK(MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 1, 0, win));
            MPI_CHECK(MPI_Fetch_and_op(sbuf, tbuf, MPI_LONG_LONG, 1, disp, MPI_SUM, win));
            MPI_CHECK(MPI_Win_unlock(1, win));
        }
        t_end = MPI_Wtime();
    }

    MPI_CHECK(MPI_Barrier(MPI_COMM_WORLD));

    print_latency(rank, 8);    /* the MPI_LONG_LONG payload is 8 bytes */

    free_atomic_memory(sbuf, rbuf, tbuf, NULL, win, rank);
}
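
The MPI_CHECK wrapper used in Examples #1, #3 and #4 is referenced but never defined here. A minimal sketch of such a macro, assuming the harness simply aborts the job on any MPI failure (the benchmark's real version may differ):

#include <mpi.h>
#include <stdio.h>

/* hedged sketch: abort the job on any MPI error code */
#define MPI_CHECK(stmt)                                               \
    do {                                                              \
        int mpi_errno = (stmt);                                       \
        if (mpi_errno != MPI_SUCCESS) {                               \
            fprintf(stderr, "[%s:%d] MPI call failed with code %d\n", \
                    __FILE__, __LINE__, mpi_errno);                   \
            MPI_Abort(MPI_COMM_WORLD, mpi_errno);                     \
        }                                                             \
    } while (0)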
Example #2
void report_scan_done(disk_t *pdisk)
{
	unsigned hist_idx;
	
	printf("Access time histogram:\n");
	for (hist_idx = 0; hist_idx < ARRAY_SIZE(pdisk->histogram); hist_idx++)
	{
		if (hist_idx != ARRAY_SIZE(pdisk->histogram) - 1)
			printf("%8" PRIu64 ": %" PRIu64 "\n", histogram_time[hist_idx].top_val, pdisk->histogram[hist_idx]);
		else
			/* the last bucket is the catch-all for everything larger */
			printf("%8s: %" PRIu64 "\n", "above that", pdisk->histogram[hist_idx]);
	}

	print_latency(pdisk->latency_graph, pdisk->latency_graph_len);

	printf("Conclusion: %s\n", conclusion_to_str(pdisk->conclusion));
}
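
Example #2 relies on two helpers defined elsewhere in the project. A plausible sketch, assuming a conventional ARRAY_SIZE macro and a table of bucket upper bounds; the bucket values below are illustrative, not the project's real ones, and the table must have the same length as pdisk->histogram:

#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* hypothetical bucket bounds; the final entry is the catch-all
 * bucket that the function prints as "above that" */
static const struct {
	uint64_t top_val;
} histogram_time[] = {
	{1000}, {5000}, {10000}, {50000}, {100000}, {0},
};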
Example #3
/* Run MPI_Get_accumulate latency with MPI_Win_flush_local */
void run_get_acc_with_flush_local(int rank, WINDOW type)
{
    int size, i;
    MPI_Aint disp = 0;
    MPI_Win     win;

    /* sweep message sizes: 0, then powers of two up to MAX_SIZE */
    for (size = 0; size <= MAX_SIZE; size = (size ? size * 2 : 1)) {
        allocate_memory(rank, rbuf, size, type, &win);

        if (type == WIN_DYNAMIC) {
            disp = sdisp_remote;
        }

        /* use fewer timed iterations and warmups for large messages */
        if (size > LARGE_MESSAGE_SIZE) {
            loop = LOOP_LARGE;
            skip = SKIP_LARGE;
        }

        if (rank == 0) {
            MPI_CHECK(MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 1, 0, win));
            for (i = 0; i < skip + loop; i++) {
                /* start timing only after the warmup iterations */
                if (i == skip) {
                    t_start = MPI_Wtime();
                }
                MPI_CHECK(MPI_Get_accumulate(sbuf, size, MPI_CHAR, cbuf, size, MPI_CHAR, 1, disp, size,
                    MPI_CHAR, MPI_SUM, win));
                /* local completion only: sbuf/cbuf are reusable, but the
                 * update may not yet be visible at the target */
                MPI_CHECK(MPI_Win_flush_local(1, win));
            }
            t_end = MPI_Wtime();
            MPI_CHECK(MPI_Win_unlock(1, win));
        }

        MPI_CHECK(MPI_Barrier(MPI_COMM_WORLD));
        
        print_latency(rank, size);

        MPI_CHECK(MPI_Win_free(&win));
    }
}
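
MPI_Win_flush_local only guarantees local completion: once it returns, sbuf and cbuf may be reused, but the accumulate need not have taken effect at the target until the closing MPI_Win_unlock. To time remote completion per iteration instead, the body of the loop would use MPI_Win_flush (a sketch, not part of the original benchmark):

    MPI_CHECK(MPI_Get_accumulate(sbuf, size, MPI_CHAR, cbuf, size, MPI_CHAR,
        1, disp, size, MPI_CHAR, MPI_SUM, win));
    MPI_CHECK(MPI_Win_flush(1, win)); /* also waits for completion at the target */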
Example #4
/* Run MPI_Accumulate latency with MPI_Win_lock/unlock */
void run_acc_with_lock(int rank, WINDOW type)
{
    int size, i;
    MPI_Aint disp = 0;
    MPI_Win     win;

    for (size = 0; size <= MAX_SIZE; size = (size ? size * 2 : 1)) {
        /* the window is exposed over the receive buffer, as in Example #1 */
        allocate_memory(rank, sbuf_original, rbuf_original, &sbuf, &rbuf, &rbuf, size, type, &win);

#if MPI_VERSION >= 3
        if (type == WIN_DYNAMIC) {
            disp = disp_remote;
        }
#endif
        /* use fewer timed iterations and warmups for large messages */
        if (size > LARGE_MESSAGE_SIZE) {
            loop = LOOP_LARGE;
            skip = SKIP_LARGE;
        }

        if (rank == 0) {
            for (i = 0; i < skip + loop; i++) {
                /* start timing only after the warmup iterations */
                if (i == skip) {
                    t_start = MPI_Wtime();
                }
                MPI_CHECK(MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win));
                MPI_CHECK(MPI_Accumulate(sbuf, size, MPI_CHAR, 1, disp, size, MPI_CHAR, MPI_SUM, win));
                MPI_CHECK(MPI_Win_unlock(1, win));
            }
            t_end = MPI_Wtime();
        }

        MPI_CHECK(MPI_Barrier(MPI_COMM_WORLD));

        print_latency(rank, size);

        free_memory(sbuf, rbuf, win, rank);
    }
}
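
The allocate_memory/free_memory helpers vary between benchmark versions and are not shown. For a non-dynamic window the core of such a helper reduces to one call; a minimal sketch for the MPI_Win_allocate case, with a hypothetical name and a byte-sized displacement unit:

/* hedged sketch: MPI allocates the window memory itself, which can
 * enable faster one-sided code paths than MPI_Win_create */
static void allocate_win_sketch(MPI_Aint size, void **base, MPI_Win *win)
{
    MPI_CHECK(MPI_Win_allocate(size, 1 /* disp_unit in bytes */,
                               MPI_INFO_NULL, MPI_COMM_WORLD, base, win));
}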
Example #5
/* this is the meat of the state machine.  There is a list of
 * active operation structs, and as each one finishes the required
 * io it is moved to a list of finished operations.  Once they have
 * all finished whatever stage they were in, they are given the chance
 * to restart and pick a different stage (read/write/random read etc)
 *
 * various timings are printed in between the stages, along with
 * thread synchronization if there is more than one thread.
 */
int worker(struct thread_info *t)
{
    struct io_oper *oper;
    char *this_stage = NULL;
    struct timeval stage_time;
    int status = 0;
    int iteration = 0;
    int cnt;

    /* create this thread's aio context with room for 512 in-flight events */
    aio_setup(&t->io_ctx, 512);

restart:
    if (num_threads > 1) {
        pthread_mutex_lock(&stage_mutex);
        threads_starting++;
        if (threads_starting == num_threads) {
            threads_ending = 0;
            gettimeofday(&global_stage_start_time, NULL);
            pthread_cond_broadcast(&stage_cond);
        }
        while (threads_starting != num_threads)
            pthread_cond_wait(&stage_cond, &stage_mutex);
        pthread_mutex_unlock(&stage_mutex);
    }
    if (t->active_opers) {
        this_stage = stage_name(t->active_opers->rw);
        gettimeofday(&stage_time, NULL);
        t->stage_mb_trans = 0;
    }

    cnt = 0;
    /* first we send everything through aio */
    while (t->active_opers && (cnt < iterations || iterations == RUN_FOREVER)) {
        if (stonewall && threads_ending) {
            /* another thread finished the stage; stonewall this oper */
            oper = t->active_opers;
            oper->stonewalled = 1;
            oper_list_del(oper, &t->active_opers);
            oper_list_add(oper, &t->finished_opers);
        } else {
            run_active_list(t, io_iter, max_io_submit);
        }
        cnt++;
    }
    if (latency_stats)
        print_latency(t);

    if (completion_latency_stats)
        print_completion_latency(t);

    /* then we wait for all the operations to finish */
    oper = t->finished_opers;
    do {
        if (!oper)
            break;
        io_oper_wait(t, oper);
        oper = oper->next;
    } while (oper != t->finished_opers);

    /* then we do an fsync to get the timing for any future operations
     * right, and check to see if any of these need to get restarted
     */
    oper = t->finished_opers;
    while (oper) {
        if (fsync_stages)
            fsync(oper->fd);
        t->stage_mb_trans += oper_mb_trans(oper);
        if (restart_oper(oper)) {
            oper_list_del(oper, &t->finished_opers);
            oper_list_add(oper, &t->active_opers);
            oper = t->finished_opers;
            continue;
        }
        oper = oper->next;
        if (oper == t->finished_opers)
            break;
    }

    if (t->stage_mb_trans && t->num_files > 0) {
        double seconds = time_since_now(&stage_time);
        fprintf(stderr, "thread %llu %s totals (%.2f MB/s) %.2f MB in %.2fs\n",
                (unsigned long long)(t - global_thread_info), this_stage,
                t->stage_mb_trans / seconds, t->stage_mb_trans, seconds);
    }

    if (num_threads > 1) {
        pthread_mutex_lock(&stage_mutex);
        threads_ending++;
        if (threads_ending == num_threads) {
            threads_starting = 0;
            pthread_cond_broadcast(&stage_cond);
            global_thread_throughput(t, this_stage);
        }
        while (threads_ending != num_threads)
            pthread_cond_wait(&stage_cond, &stage_mutex);
        pthread_mutex_unlock(&stage_mutex);
    }

    /* someone got restarted, go back to the beginning */
    if (t->active_opers && (cnt < iterations || iterations == RUN_FOREVER)) {
        iteration++;
        goto restart;
    }

    /* finally, free all the ram */
    while (t->finished_opers) {
        oper = t->finished_opers;
        oper_list_del(oper, &t->finished_opers);
        status = finish_oper(t, oper);
    }

    if (t->num_global_pending) {
        fprintf(stderr, "global num pending is %d\n", t->num_global_pending);
    }
    io_queue_release(t->io_ctx);
    
    return status;
}
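
The two mutex/condvar blocks in worker() hand-roll a reusable two-phase barrier: threads_starting counts threads into a stage and threads_ending counts them out, so one generation cannot lap the other. A sketch of the same rendezvous with a POSIX barrier, assuming stage_barrier is initialized once with pthread_barrier_init(&stage_barrier, NULL, num_threads):

#include <pthread.h>

static pthread_barrier_t stage_barrier;

/* hedged sketch: pthread_barrier_wait returns
 * PTHREAD_BARRIER_SERIAL_THREAD in exactly one thread, which can then
 * do the "last thread in" work (timestamping, throughput report) */
static void stage_sync(void)
{
    if (pthread_barrier_wait(&stage_barrier) == PTHREAD_BARRIER_SERIAL_THREAD) {
        /* e.g. gettimeofday(&global_stage_start_time, NULL); */
    }
}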
Example #6
static int
blockset_open_file(struct blockstore *blockStore,
                   struct blockset *bs)
{
   uint64 offset;
   mtime_t ts;
   int res;

   res = file_open(bs->filename, 0 /* R/O */, 0 /* !unbuf */, &bs->desc);
   if (res) {
      return res;
   }

   bs->filesize = file_getsize(bs->desc);
   if (bs->filesize < 0) {
      return errno;
   }

   if (bs->filesize > 0) {
      char *s = print_size(bs->filesize);
      char *name = file_getname(bs->filename);
      Log(LGPFX" reading file %s -- %s -- %llu headers.\n",
          name, s, bs->filesize / sizeof(btc_block_header));
      free(name);
      free(s);
   }

   ts = time_get();
   offset = 0;
   /* stream the file in chunks of up to 10000 headers */
   while (offset < bs->filesize) {
      btc_block_header buf[10000];
      size_t numRead;
      size_t numBytes;
      int numHeaders;
      int i;

      numBytes = MIN(bs->filesize - offset, sizeof buf);

      res = file_pread(bs->desc, offset, buf, numBytes, &numRead);
      if (res != 0) {
         break;
      }

      if (btc->stop != 0) {
         res = 1;
         NOT_TESTED();
         break;
      }

      numHeaders = numRead / sizeof(btc_block_header);
      for (i = 0; i < numHeaders; i++) {
         struct blockentry *be;
         uint256 hash;

         be = blockstore_alloc_entry(buf + i);
         be->written = 1;
         hash256_calc(buf + i, sizeof buf[0], &hash);

         if (!blockstore_validate_chkpt(&hash, blockStore->height + 1)) {
            return 1;
         }

         blockstore_add_entry(blockStore, be, &hash);

         /* refresh the progress UI once per chunk */
         if (i == numHeaders - 1) {
            bitcui_set_status("loading headers .. %llu%%",
                             (offset + numBytes) * 100 / bs->filesize);
         }
         if (i == numHeaders - 1 ||
             (numBytes < sizeof buf && i > numHeaders - 256)) {
            bitcui_set_last_block_info(&hash, blockStore->height,
                                      be->header.timestamp);
         }
      }

      offset += numRead;
   }

   ts = time_get() - ts;

   char hashStr[80];
   char *latStr;

   uint256_snprintf_reverse(hashStr, sizeof hashStr, &blockStore->best_hash);
   Log(LGPFX" loaded blocks up to %s\n", hashStr);
   latStr = print_latency(ts);
   Log(LGPFX" this took %s\n", latStr);
   free(latStr);

   return res;
}
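
Example #6 assumes a MIN macro from elsewhere in the tree; a conventional definition is shown below. Note the loop stays correct across chunk boundaries because offset advances by numRead, so a short read simply continues where it stopped; only whole headers (numRead / sizeof(btc_block_header)) are parsed from each chunk.

/* conventional definition, assuming the project's headers provide none */
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif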