Example #1
static
void
EMC_Delete( char *testFileName, IOR_param_t * param ) {

	if (param->verbose >= VERBOSE_2) {
		printf("-> EMC_Delete(%s)\n", testFileName);
	}

	/* maybe initialize curl */
	s3_connect( param );

#if 0
	// EMC BUG: If the file was written with appends and is then deleted,
	//      then any future re-creation will result in an object that can't be read.
	AWS4C_CHECK( s3_delete(param->io_buf, testFileName) );
#else
	// just replace with a zero-length object for now
	aws_iobuf_reset(param->io_buf);
	AWS4C_CHECK   ( s3_put(param->io_buf, testFileName) );
#endif

	AWS4C_CHECK_OK( param->io_buf );

	if (param->verbose >= VERBOSE_2)
		printf("<- EMC_Delete\n");
}
Example #2
int putObject ( char * name, IOBuf * bf )
{
  int i;
  for ( i = 0 ; i < 9000 ; i ++ )
    {
      char S[128];
      snprintf ( S,sizeof(S), "Ln %d \n" , i );
      aws_iobuf_append ( bf,S, strlen(S));
    }
  return s3_put ( bf, name );
}
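A minimal caller sketch for putObject() above, assuming the usual aws4c setup (aws_read_config(), s3_set_host(), s3_set_bucket()) has already been done; the object name and the error handling are illustrative, not part of the original example:

  IOBuf *bf = aws_iobuf_new();
  int rv = putObject ( "lines-9000.txt", bf );  /* rv is the CURL status returned by s3_put() */
  if ( rv != 0 || bf->code != 200 )             /* expect HTTP 200 on a successful PUT */
    fprintf ( stderr, "upload failed: curl=%d http=%d '%s'\n", rv, bf->code, bf->result );
  aws_iobuf_free ( bf );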
/*Compress the task file and copy it to the S3 bucket*/
static int makeflow_archive_s3_task(struct archive_instance *a, char *taskID, char *task_path){
	// Convert directory to a tar.gz file
	struct timeval start_time;
	struct timeval end_time;
	char *tarConvert = string_format("tar -czvf %s.tar.gz -C %s .",taskID,task_path);
	if(system(tarConvert) == -1){
		free(tarConvert);
		return 0;
	}
	free(tarConvert);

	// Add file to the s3 bucket
	char *tarFile = string_format("%s.tar.gz",taskID);
	FILE *fp = fopen(tarFile,"rb");
	gettimeofday(&start_time, NULL);
	if(s3_put(fp,taskID) != 0){
		gettimeofday(&end_time,NULL);
		float run_time = ((end_time.tv_sec*1000000 + end_time.tv_usec) - (start_time.tv_sec*1000000 + start_time.tv_usec)) / 1000000.0;
		total_up_time += run_time;
		debug(D_MAKEFLOW_HOOK," It took %f seconds for %s to fail uploading to %s",run_time, taskID, a->s3_dir);
		debug(D_MAKEFLOW_HOOK," The total upload time is %f second(s)",total_up_time);
		fclose(fp);
		free(tarFile);
		return 0;
	}
	gettimeofday(&end_time,NULL);
	float run_time = ((end_time.tv_sec*1000000 + end_time.tv_usec) - (start_time.tv_sec*1000000 + start_time.tv_usec)) / 1000000.0;
	total_up_time += run_time;
	printf("Upload %s to %s/%s\n",tarFile,a->s3_dir,taskID);
	debug(D_MAKEFLOW_HOOK," It took %f seconds for %s to upload to %s",run_time, taskID, a->s3_dir);
	debug(D_MAKEFLOW_HOOK," The total upload time is %f second(s)",total_up_time);
	fclose(fp);
	// Remove extra tar files on local directory
	char *removeTar = string_format("rm %s",tarFile);
	if(system(removeTar) == -1){
		free(removeTar);
		free(tarFile);
		return 0;
	}
	free(tarFile);
	free(removeTar);

	return 1;
}
/* Copy a file to the s3 bucket*/
static int makeflow_archive_s3_file(struct archive_instance *a, char *batchID, char *file_path){
	// Copy to s3 archive
	struct timeval start_time;
	struct timeval end_time;
	char *fileCopy;
	FILE *fp;
	//Tar directories before submitting them to s3 bucket
	if(path_is_dir(file_path) != 1){
		fp = fopen(file_path,"rb");
	}
	else{
		char *tarDir = string_format("tar -czvf %s.tar.gz -C %s .",file_path,file_path);
		if(system(tarDir) != 0){
			free(tarDir);
			return 0;
		}
		free(tarDir);
		fileCopy = string_format("%s.tar.gz",file_path);
		fp = fopen(fileCopy,"rb");
		free(fileCopy);
	}
	gettimeofday(&start_time, NULL);
	if(s3_put(fp,batchID) != 0){
		gettimeofday(&end_time,NULL);
		float run_time = ((end_time.tv_sec*1000000 + end_time.tv_usec) - (start_time.tv_sec*1000000 + start_time.tv_usec)) / 1000000.0;
		total_up_time += run_time;
		debug(D_MAKEFLOW_HOOK," It took %f seconds for %s to fail uploading to %s",run_time, batchID, a->s3_dir);
		debug(D_MAKEFLOW_HOOK," The total upload time is %f second(s)",total_up_time);
		fclose(fp);
		return 0;
	}
	gettimeofday(&end_time,NULL);
	float run_time = ((end_time.tv_sec*1000000 + end_time.tv_usec) - (start_time.tv_sec*1000000 + start_time.tv_usec)) / 1000000.0;
	total_up_time += run_time;
	hash_table_insert(s3_files_in_archive, batchID, batchID);
	fclose(fp);
	printf("Upload %s to %s/%s\n",file_path, a->s3_dir, batchID);
	debug(D_MAKEFLOW_HOOK," It took %f second(s) for %s to upload to %s\n",run_time, batchID, a->s3_dir);
	debug(D_MAKEFLOW_HOOK," The total upload time is %f second(s)",total_up_time);

	return 1;
}
Example #5
int s3_op_internal(ObjectStream* os) {
   IOBuf*        b  = &os->iob;
   __attribute__ ((unused)) AWSContext*   ctx = b->context;

   // run the GET or PUT
   int is_get = (os->flags & OSF_READING);
   if (is_get) {
      LOG(LOG_INFO, "GET  '%s/%s/%s'\n",
          (ctx ? ctx->S3Host : "*"),  (ctx ? ctx->Bucket : "*"), os->url);
      AWS4C_CHECK1( s3_get(b, os->url) ); /* read the object */
   }
   else {
      LOG(LOG_INFO, "PUT  '%s/%s/%s'\n",
          (ctx ? ctx->S3Host : "*"),  (ctx ? ctx->Bucket : "*"), os->url);
      // If you are getting errors here, the comments above the "#if
      // ((LIBCURL_VERSION ...", in stream_sync(), *might* be relevant.
      AWS4C_CHECK1( s3_put(b, os->url) ); /* create empty object with user metadata */
   }


   // s3_get with byte-range can leave streaming_writefunc() waiting for
   // a curl callback that never comes.  This happens if there is still writable
   // space in the buffer, when the last bytes in the request are processed.
   // This can happen because the caller (e.g. fuse) may ask for more bytes than are present,
   // and provide a buffer big enough to receive them.
   if (is_get && (b->code == 206)) {
      // should we do something with os->iob_full?  set os->flags & EOF?
      LOG(LOG_INFO, "GET complete\n");
      os->flags |= OSF_EOF;
      POST(&os->iob_full);
      return 0;
   }
   else if (AWS4C_OK(b) ) {
      LOG(LOG_INFO, "%s complete\n", ((is_get) ? "GET" : "PUT"));
      return 0;
   }
   LOG(LOG_ERR, "CURL ERROR: %lx %d '%s'\n", (size_t)b, b->code, b->result);
   return -1;
}
Example #6
int
put_file( IOBuf * aws_buf, char *name ) {
  FILE  * fp;
  char readbuf[BUF_SIZE];
  char * filename;
  
  // Read in file to upload
  if( (fp = fopen(name, "rb")) == NULL) {
      fprintf(stdout, "ERROR: The specified file doesn't exist. \n");
      return -1;
    }
  else {
    int n;
    while( !feof(fp) ) {
      n = fread(readbuf, sizeof(unsigned char), BUF_SIZE, fp);
      if(n != BUF_SIZE) {
        if(feof(fp)) {
          ; // Do Nothing
        }
        else {
          fprintf(stdout, "ERROR: Error reading from file. \n");
          return -1;
        }
      }
      aws_iobuf_append ( aws_buf, readbuf, n);
    }
    fclose(fp);
  }
  
  // Strip the full path from the name and replace spaces with %20
  filename = getFilenameFromPath(name);
  
  int rv = s3_put(aws_buf, filename);
  
  free(filename);
  return rv;
}
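The helper getFilenameFromPath() used above is not shown in this example; per the comment at its call site it strips the directory part of the name and replaces spaces with %20, and put_file() free()s its result. A hypothetical sketch of such a helper (not the original implementation):

#include <stdlib.h>
#include <string.h>

char * getFilenameFromPath( char *path ) {
  const char *base = strrchr(path, '/');      // keep only the last path component
  base = base ? base + 1 : path;

  char *out = malloc(3 * strlen(base) + 1);   // worst case: every char becomes "%20"
  if( out == NULL )
    return NULL;

  char *o = out;
  for (const char *p = base; *p; ++p) {
    if (*p == ' ') {                          // replace spaces with "%20"
      memcpy(o, "%20", 3);
      o += 3;
    }
    else
      *o++ = *p;
  }
  *o = '\0';
  return out;                                 // caller frees, as put_file() does
}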
Example #7
/******************************************************************************
* Name  pack_objects 
* 
* This function traverses the object and file linked lists and reads object
* data for repacking into a new object.   
******************************************************************************/
int pack_objects(File_Handles *file_info, repack_objects *objects)
{
   struct stat statbuf;
   char *path = "/";
//   repack_objects *objects; 

	 //struct stat statbuf;
   stat(path, &statbuf);
   size_t write_offset = 0;
   size_t obj_raw_size;
   size_t obj_size;
   size_t offset;
   //MarFS_XattrPre pre_struct;
   //MarFS_XattrPre* pre = &pre_struct;
   MarFS_XattrPre pre;
   IOBuf *nb = aws_iobuf_new();
   char test_obj[2048];
   obj_files *files;
   int ret;
   char *obj_ptr;
   CURLcode s3_return;
   char pre_str[MARFS_MAX_XATTR_SIZE];


   // Also, if file_count =1 do i make uni or?
   //
   //
   while (objects) { 
      // need inner loop to get files for each object
      // If chunk_count == file count no need to pack
      // and garbage collection took care of it
      if (objects->chunk_count == objects->pack_count) {
         objects=objects->next;
         continue;
      }
      //No need to pack if only one file specified in xattr and only
      //one file found
      if (objects->chunk_count == 1 && objects->pack_count ==1 ) {
         objects=objects->next;
         continue;
      }
      // Not quite sure how this next condition could happen
      // TO DO:  make only one condition chunk_count > file_count
      // all others continue
      if (objects->pack_count > objects->chunk_count) {
         objects=objects->next;
         continue;
      }

      LOG(LOG_INFO,"object = %s\n", objects->objid);
      LOG(LOG_INFO, "file count = %ld chunks = %ld\n", objects->pack_count, objects->chunk_count);
      files = objects->files_ptr;
      write_offset = 0;
      ret=str_2_pre(&pre, objects->objid, NULL);
      sprintf(test_obj,"%s.teste",objects->objid);

      //Make this a unique object since it is derived from an existing object
      pre.unique++;    


      LOG(LOG_INFO,"stdout,new object name =%s\n", test_obj);
  
      //aws_iobuf_reset(nb);

      while (files) {
         //fprintf(stdout, "file = %s offset=%ld\n", files->filename, files->offset);

         stat(files->filename, &statbuf);


         obj_raw_size = statbuf.st_size;
         obj_size = obj_raw_size + MARFS_REC_UNI_SIZE;
         files->size = obj_size;

         //fprintf(stdout, "obj_size = %ld REC SIZE = %d\n", obj_size,MARFS_REC_UNI_SIZE);
         //write_offset+=obj_size;
         if ((obj_ptr = (char *)malloc(obj_size))==NULL) {
            fprintf(stderr, "Error allocating memory\n");
            return -1;
         }

         check_security_access(&pre);
         update_pre(&pre);
         s3_set_host(pre.host);
         //offset = objects->files_ptr->offset;

         offset = files->original_offset;
         //fprintf(stdout, "file %s will get re-written at offset %ld\n",
         //        files->filename, write_offset);

         // get object_data
         // Using byte range to get data for particular offsets
         s3_set_byte_range(offset, obj_size);
         // Use extend to get more buffering capability on each get
         aws_iobuf_extend_dynamic(nb, obj_ptr, obj_size);
         LOG(LOG_INFO, "going to get file %s from object %s at offset %ld and size %ld\n", files->filename, objects->objid, offset, obj_size);
         fprintf(file_info->outfd, "Getting file %s from object %s at offset %ld and size %ld\n", files->filename, objects->objid, offset, obj_size);
         s3_return = s3_get(nb,objects->objid);
         check_S3_error(s3_return, nb, S3_GET);

         LOG(LOG_INFO, "Read buffer write count = %ld  len = %ld\n", nb->write_count, nb->len);
         // may have to copy nb to a new buffer 
         // then write 
     

         files->new_offset = write_offset;
         write_offset += obj_size; 
         files = files->next;
      }
      // create object string for put
      pre_2_str(pre_str, MARFS_MAX_XATTR_SIZE,&pre);

      strcpy(objects->new_objid, pre_str);
     
      LOG(LOG_INFO, "Going to write to object %s\n", pre_str);
      fprintf(file_info->outfd, "Writing file to object %s\n", pre_str);

      // Write data back to new object
      s3_return = s3_put(nb,pre_str);
      check_S3_error(s3_return, nb, S3_PUT); 

      aws_iobuf_reset_hard(nb);
      objects=objects->next;
   }
   return 0;
}
Example #8
static
IOR_offset_t
S3_Xfer_internal(int          access,
					  void*        file,
					  IOR_size_t*  buffer,
					  IOR_offset_t length,
					  IOR_param_t* param,
					  int          multi_part_upload_p ) {

	if (param->verbose >= VERBOSE_2) {
		printf("-> S3_Xfer(acc:%d, target:%s, buf:0x%llx, len:%llu, 0x%llx)\n",
				 access, (char*)file, buffer, length, param);
	}

	char*      fname = (char*)file; /* see NOTE above S3_Create_Or_Open() */
	size_t     remaining = (size_t)length;
	char*      data_ptr = (char *)buffer;
	off_t      offset = param->offset;

	// easier to think
	int        n_to_n    = param->filePerProc;
	int        n_to_1    = (! n_to_n);
	int        segmented = (param->segmentCount == 1);


	if (access == WRITE) {	/* WRITE */

		if (verbose >= VERBOSE_3) {
			fprintf( stdout, "rank %d writing length=%lld to offset %lld\n",
						rank,
                  remaining,
						param->offset + length - remaining);
		}


		if (multi_part_upload_p) {

			// For N:1, part-numbers must have a global ordering for the
			// components of the final object.  param->part_number is
			// incremented by 1 per write, on each rank.  This lets us use it
			// to compute a global part-numbering.
         //
         // In the N:N case, we only need to increment part-numbers within
			// each rank.
         //
         // In the N:1 case, the global order of part-numbers we're writing
         // depends on whether we're writing strided or segmented, in
         // other words, how <offset> and <remaining> are actually
         // positioning the parts being written. [See discussion at
         // S3_Close_internal().]
         //
			// NOTE: 's3curl.pl --debug' shows StringToSign having partNumber
			//       first, even if I put uploadId first in the URL.  Maybe
			//       that's what the server will do.  GetStringToSign() in
			//       aws4c is not clever about this, so we spoon-feed args in
			//       the proper order.
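			// Illustrative arithmetic (numbers are not from the original source):
			// with numTasks=4 and blockSize/transferSize=4 (parts_per_rank=4),
			// rank 1's third write (param->part_number == 2 at this point) gets
			//    segmented: (1*4)+2 = 6      strided: (2*4)+1 = 9
			// so every (rank, write) pair maps to a distinct global part number.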

			size_t part_number;
			if (n_to_1) {
            if (segmented) {      // segmented
               size_t parts_per_rank = param->blockSize / param->transferSize;
               part_number = (rank * parts_per_rank) + param->part_number;
            }
            else                // strided
               part_number = (param->part_number * param->numTasks) + rank;
         }
         else
				part_number = param->part_number;
         ++ param->part_number;


         //         if (verbose >= VERBOSE_3) {
         //            fprintf( stdout, "rank %d of %d writing (%s,%s) part_number %lld\n",
         //                     rank,
         //                     param->numTasks,
         //                     (n_to_1 ? "N:1" : "N:N"),
         //                     (segmented ? "segmented" : "strided"),
         //                     part_number);
         //         }

			snprintf(buff, BUFF_SIZE,
						"%s?partNumber=%zu&uploadId=%s",
						fname, part_number, param->UploadId);

			// For performance, we append <data_ptr> directly into the linked list
			// of data in param->io_buf.  We are "appending" rather than
			// "extending", so the added buffer is seen as written data, rather
			// than empty storage.
			//
			// aws4c parses some header-fields automatically for us (into members
			// of the IOBuf).  After s3_put2(), we can just read the etag from
			// param->io_buf->eTag.  The server actually returns literal
			// quote-marks, at both ends of the string.

			aws_iobuf_reset(param->io_buf);
			aws_iobuf_append_static(param->io_buf, data_ptr, remaining);
			AWS4C_CHECK( s3_put(param->io_buf, buff) );
			AWS4C_CHECK_OK( param->io_buf );

         //			if (verbose >= VERBOSE_3) {
         //				printf("rank %d: read ETag = '%s'\n", rank, param->io_buf->eTag);
         //				if (strlen(param->io_buf->eTag) != ETAG_SIZE+2) { /* quotes at both ends */
         //					fprintf(stderr, "Rank %d: ERROR: expected ETag to be %d hex digits\n",
         //							  rank, ETAG_SIZE);
         //					exit(1);
         //				}
         //			}

         if (verbose >= VERBOSE_3) {
            fprintf( stdout, "rank %d of %d (%s,%s) offset %lld, part# %lld --> ETag %s\n",
                     rank,
                     param->numTasks,
                     (n_to_1 ? "N:1" : "N:N"),
                     (segmented ? "segmented" : "strided"),
                     offset,
                     part_number,
                     param->io_buf->eTag); // incl quote-marks at [0] and [len-1]
         }
         if (strlen(param->io_buf->eTag) != ETAG_SIZE+2) { /* quotes at both ends */
            fprintf(stderr, "Rank %d: ERROR: expected ETag to be %d hex digits\n",
                    rank, ETAG_SIZE);
            exit(1);
         }

			// save the eTag for later
			//
			//		memcpy(etag, param->io_buf->eTag +1, strlen(param->io_buf->eTag) -2);
			//		etag[ETAG_SIZE] = 0;
			aws_iobuf_append(param->etags,
								  param->io_buf->eTag +1,
								  strlen(param->io_buf->eTag) -2);
			// DEBUGGING
			if (verbose >= VERBOSE_4) {
				printf("rank %d: part %d = ETag %s\n", rank, part_number, param->io_buf->eTag);
			}

			// drop ptrs to <data_ptr>, in param->io_buf
			aws_iobuf_reset(param->io_buf);
		}
		else {	 // use EMC's byte-range write-support, instead of MPU


			// NOTE: You must call 's3_enable_EMC_extensions(1)' for
			//       byte-ranges to work for writes.
			if (n_to_n)
				s3_set_byte_range(-1,-1); // EMC header "Range: bytes=-1-" means "append"
			else
				s3_set_byte_range(offset, remaining);

			// For performance, we append <data_ptr> directly into the linked list
			// of data in param->io_buf.  We are "appending" rather than
			// "extending", so the added buffer is seen as written data, rather
			// than empty storage.
			aws_iobuf_reset(param->io_buf);
			aws_iobuf_append_static(param->io_buf, data_ptr, remaining);
			AWS4C_CHECK   ( s3_put(param->io_buf, file) );
			AWS4C_CHECK_OK( param->io_buf );

			// drop ptrs to <data_ptr>, in param->io_buf
			aws_iobuf_reset(param->io_buf);
		}


		if ( param->fsyncPerWrite == TRUE ) {
			WARN("S3 doesn't support 'fsync'" ); /* does it? */
		}

	}
	else {				/* READ or CHECK */

		if (verbose >= VERBOSE_3) {
			fprintf( stdout, "rank %d reading from offset %lld\n",
						rank,
						param->offset + length - remaining );
		}

		// read specific byte-range from the object
      // [This is included in the "pure" S3 spec.]
		s3_set_byte_range(offset, remaining);

		// For performance, we append <data_ptr> directly into the linked
		// list of data in param->io_buf.  In this case (i.e. reading),
		// we're "extending" rather than "appending".  That means the
		// buffer represents empty storage, which will be filled by the
		// libcurl writefunction, invoked via aws4c.
		aws_iobuf_reset(param->io_buf);
		aws_iobuf_extend_static(param->io_buf, data_ptr, remaining);
		AWS4C_CHECK( s3_get(param->io_buf, file) );
		if (param->io_buf->code != 206) { /* '206 Partial Content' */
			snprintf(buff, BUFF_SIZE,
						"Unexpected result (%d, '%s')",
						param->io_buf->code, param->io_buf->result);
			ERR_SIMPLE(buff);
		}

		// drop refs to <data_ptr>, in param->io_buf
		aws_iobuf_reset(param->io_buf);
	}


	if (param->verbose >= VERBOSE_2) {
		printf("<- S3_Xfer\n");
	}
	return ( length );
}
Example #9
static
void *
S3_Create_Or_Open_internal(char*         testFileName,
                           IOR_param_t*  param,
                           unsigned char createFile,
									int           multi_part_upload_p ) {

	if (param->verbose >= VERBOSE_2) {
		printf("-> S3_Create_Or_Open('%s', ,%d, %d)\n",
				 testFileName, createFile, multi_part_upload_p);
	}

	/* initialize curl, if needed */
	s3_connect( param );

	/* Check for unsupported flags */
	if ( param->openFlags & IOR_EXCL ) {
		fprintf( stdout, "Opening in Exclusive mode is not implemented in S3\n" );
	}
	if ( param->useO_DIRECT == TRUE ) {
		fprintf( stdout, "Direct I/O mode is not implemented in S3\n" );
	}

	// easier to think
	int n_to_n = param->filePerProc;
	int n_to_1 = ! n_to_n;

	/* check whether object needs reset to zero-length */
	int needs_reset = 0;
	if (! multi_part_upload_p)
		needs_reset = 1;			  /* so "append" can work */
	else if ( param->openFlags & IOR_TRUNC )
		needs_reset = 1;			  /* so "append" can work */
	else if (createFile) {
		// AWS4C_CHECK( s3_head(param->io_buf, testFileName) );
		// if ( ! AWS4C_OK(param->io_buf) )
			needs_reset = 1;
	}

	if ( param->open == WRITE ) {

		/* initializations for N:1 or N:N writes using multi-part upload */
		if (multi_part_upload_p) {

			// For N:N, all ranks do their own MPU open/close.  For N:1, only
			// rank0 does that. Either way, the response from the server
			// includes an "uploadId", which must be used to upload parts to
			// the same object.
			if ( n_to_n || (rank == 0) ) {

				// rank0 handles truncate
				if ( needs_reset) {
					aws_iobuf_reset(param->io_buf);
					AWS4C_CHECK( s3_put(param->io_buf, testFileName) ); /* 0-length write */
					AWS4C_CHECK_OK( param->io_buf );
				}

				// POST request with URL+"?uploads" initiates multi-part upload
				snprintf(buff, BUFF_SIZE, "%s?uploads", testFileName);
				IOBuf* response = aws_iobuf_new();
				AWS4C_CHECK( s3_post2(param->io_buf, buff, NULL, response) );
				AWS4C_CHECK_OK( param->io_buf );

				// parse XML returned from server, into a tree structure
				aws_iobuf_realloc(response);
				xmlDocPtr doc = xmlReadMemory(response->first->buf,
														response->first->len,
														NULL, NULL, 0);
				if (doc == NULL)
					ERR_SIMPLE("Rank0 Failed to find POST response\n");

				// navigate parsed XML-tree to find UploadId
				xmlNode* root_element = xmlDocGetRootElement(doc);
				const char* upload_id = find_element_named(root_element, (char*)"UploadId");
				if (! upload_id)
					ERR_SIMPLE("couldn't find 'UploadId' in returned XML\n");

				if (param->verbose >= VERBOSE_3)
					printf("got UploadId = '%s'\n", upload_id);

				const size_t upload_id_len = strlen(upload_id);
				if (upload_id_len > MAX_UPLOAD_ID_SIZE) {
					snprintf(buff, BUFF_SIZE,
								"UploadId length %zu exceeds expected max (%d)",
								upload_id_len, MAX_UPLOAD_ID_SIZE);
					ERR_SIMPLE(buff);
				}

				// save the UploadId we found
				memcpy(param->UploadId, upload_id, upload_id_len);
				param->UploadId[upload_id_len] = 0;

				// free storage for parsed XML tree
				xmlFreeDoc(doc);
				aws_iobuf_free(response);

				// For N:1, share UploadId across all ranks
				if (n_to_1)
					MPI_Bcast(param->UploadId, MAX_UPLOAD_ID_SIZE, MPI_BYTE, 0, param->testComm);
			}
			else
				// N:1, and we're not rank0. recv UploadID from Rank 0
				MPI_Bcast(param->UploadId, MAX_UPLOAD_ID_SIZE, MPI_BYTE, 0, param->testComm);
		}

		/* initializations for N:N or N:1 writes using EMC byte-range extensions */
		else {

			/* maybe reset to zero-length, so "append" can work */
			if (needs_reset) {

            if (verbose >= VERBOSE_3) {
               fprintf( stdout, "rank %d resetting\n",
                        rank);
            }

				aws_iobuf_reset(param->io_buf);
				AWS4C_CHECK( s3_put(param->io_buf, testFileName) );
				AWS4C_CHECK_OK( param->io_buf );
			}
		}
	}


	if (param->verbose >= VERBOSE_2) {
		printf("<- S3_Create_Or_Open\n");
	}
	return ((void *) testFileName );
}
Example #10
static void s3_connect( IOR_param_t* param ) {
	if (param->verbose >= VERBOSE_2) {
		printf("-> s3_connect\n"); /* DEBUGGING */
	}

	if ( param->curl_flags & IOR_CURL_INIT ) {
		if (param->verbose >= VERBOSE_2) {
			printf("<- s3_connect  [nothing to do]\n"); /* DEBUGGING */
		}
		return;
	}

	// --- Done once-only (per rank).  Perform all first-time inits.
	//
	// The aws library requires a config file, as illustrated below.  We
	// assume that the user running the test has an entry in this file,
	// using their login moniker (i.e. `echo $USER`) as the key, as
	// suggested in the example:
	//
	//     <user>:<s3_login_id>:<s3_private_key>
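	//     e.g. (hypothetical credentials; AWS's documented example key pair):
	//     jdoe:AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY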
	//
	// This file must not be readable by other than user.
	//
	// NOTE: These inits could be done in init_IORParam_t(), in ior.c, but
	//       would require conditional compilation, there.

	aws_set_debug(param->verbose >= 4);
	aws_read_config(getenv("USER"));  // requires ~/.awsAuth
	aws_reuse_connections(1);

	// initialize IOBufs.  These are basically dynamically-extensible
	// linked-lists.  "growth size" controls the increment of new memory
	// allocated, whenever storage is used up.
	param->io_buf = aws_iobuf_new();
	aws_iobuf_growth_size(param->io_buf, 1024*1024*1);

	param->etags = aws_iobuf_new();
	aws_iobuf_growth_size(param->etags, 1024*1024*8);

   // WARNING: if you have http_proxy set in your environment, you may need
   //          to override it here.  TBD: add a command-line variable to
   //          allow you to define a proxy.
   //
	// our hosts are currently 10.140.0.15 - 10.140.0.18
	// TBD: Try DNS-round-robin server at vi-lb.ccstar.lanl.gov
   // TBD: try HAProxy round-robin at 10.143.0.1

#if 1
   //   snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
   //   s3_set_proxy(buff);
   //
   //   snprintf(buff, BUFF_SIZE, "10.140.0.%d", 15 + (rank % 4));
   //	s3_set_host(buff);

   snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
   s3_set_host(buff);

#else
/*
 * If you just want to go to one of the ECS nodes, put that IP
 * address in here directly with port 9020.
 *
 */
//   s3_set_host("10.140.0.15:9020");

/*
 * If you want to go to haproxy.ccstar.lanl.gov, this is its IP
 * address.
 *
 */
//   s3_set_proxy("10.143.0.1:80");
//   s3_set_host( "10.143.0.1:80");
#endif

	// make sure test-bucket exists
	s3_set_bucket((char*)bucket_name);

   if (rank == 0) {
      AWS4C_CHECK( s3_head(param->io_buf, "") );
      if ( param->io_buf->code == 404 ) {					// "404 Not Found"
         printf("  bucket '%s' doesn't exist\n", bucket_name);

         AWS4C_CHECK( s3_put(param->io_buf, "") );	/* creates URL as bucket + obj */
         AWS4C_CHECK_OK(     param->io_buf );		// assure "200 OK"
         printf("created bucket '%s'\n", bucket_name);
      }
      else {														// assure "200 OK"
         AWS4C_CHECK_OK( param->io_buf );
      }
   }
   MPI_CHECK(MPI_Barrier(param->testComm), "barrier error");


	// Maybe allow EMC extensions to S3
	s3_enable_EMC_extensions(param->curl_flags & IOR_CURL_S3_EMC_EXT);

	// don't perform these inits more than once
	param->curl_flags |= IOR_CURL_INIT;


	if (param->verbose >= VERBOSE_2) {
		printf("<- s3_connect  [success]\n");
	}
}