Example #1
File: s3_put.c Project: cheah/aws4c
int main ( int argc, char * argv[] )
{
  int rv;

  aws_init ();
  aws_set_debug  ( 0 );
  int rc = aws_read_config  ( "sample" );
  if ( rc )
    {
      puts ( "Could not find a credential in the config file" );
      puts ( "Make sure your ~/.awsAuth file is correct" );
      exit ( 1 );
    }

  s3_set_bucket ("aws4c.samples");
  s3_set_mime ("text/plain");
  s3_set_acl ("public-read");

  IOBuf * bf = aws_iobuf_new ();

  rv = putObject ( "aws4c.samplefile", bf );
  printf ( "RV %d\n", rv );

  printf ( "CODE    [%d] \n", bf->code );
  printf ( "RESULT  [%s] \n", bf->result );
  printf ( "LEN     [%d] \n", bf->len );
  printf ( "LASTMOD [%s] \n", bf->lastMod );
  printf ( "ETAG    [%s] \n", bf->eTag );

  while ( 1 )
    {
      char Ln[1024];
      int sz = aws_iobuf_getdata ( bf, Ln, sizeof(Ln) );
      if ( Ln[0] == 0 ) break;
      printf ( "S[%3d] %s", sz, Ln );
    }

  /// Now repeat the PUT using Reduced Redundancy Storage (RRS)
  aws_iobuf_free ( bf );
  bf = aws_iobuf_new ();
  aws_set_rrs ( 1 );
  rv = putObject ( "aws4c.samplefile.rrs", bf );
  printf ( "RV %d\n", rv );
  printf ( "CODE    [%d] \n", bf->code );
  printf ( "RESULT  [%s] \n", bf->result );
  printf ( "LEN     [%d] \n", bf->len );
  printf ( "LASTMOD [%s] \n", bf->lastMod );
  printf ( "ETAG    [%s] \n", bf->eTag );

  aws_deinit ();
  return 0;
}
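As shown, this sample PUTs an empty IOBuf. A minimal sketch of how the buffer could be populated before the upload, assuming aws4c's aws_iobuf_append(); the object name and line contents below are placeholders, not part of the original sample:

  /* Sketch: fill the IOBuf with some text lines, then upload it. */
  IOBuf * buf = aws_iobuf_new ();
  int i;
  for ( i = 0; i < 100; i++ )
    {
      char S[64];
      snprintf ( S, sizeof(S), "Ln %d\n", i );
      aws_iobuf_append ( buf, S, strlen(S) );   /* append S to the buffer */
    }
  int rv = putObject ( "aws4c.samplefile", buf );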
Example #2
File: s3util.c Project: cheah/aws4c
int
main (int argc, char *argv[]) {
  aws_init();
  if(argc > 3 && argv[3] != NULL) {
    aws_set_debug(atoi(argv[3]));
  }
  IOBuf * aws_buf = aws_iobuf_new();
  
  // Read credential file
  int rv = aws_read_config("myteksi");
  if ( rv )
  {
    fprintf(stderr, "Could not find a credential in the config file \n" );
    fprintf(stderr, "Make sure your ~/.awsAuth file is correct \n" );
    exit (1);
  }
  
  
  // Read config file
  FILE *fp = NULL;
  
  char getline[ LINE_MAX * sizeof(char) ];
  if( (fp = fopen("s3config", "r")) == NULL) {
    //File does not exist. Initialize it
    if( (fp = fopen("s3config", "w+")) == NULL) {
      fprintf(stderr, "ERROR: Unable to create config file.\n");
      exit(1);
    }
    
    // Ask for bucket_name
    fprintf(stdout, "Config file doesn't exist yet! Creating one now. \n");
    fprintf(stdout, "Please specify the AWS S3 base address "
                    "[default s3.amazonaws.com] :");
    char getInput[ LINE_MAX * sizeof(char) ];
    if( fgets( getInput, sizeof(getInput) , stdin ) != NULL ) {
      if( strcmp(getInput, "\n") != 0 ) {
        S3_host = strndup(getInput, strlen(getInput) -1); // Remove trailing NL
      }
      else {
        S3_host = strdup("s3.amazonaws.com");
      }
    }
    
    int validbucketname = 0;
    while( !validbucketname ) {
      fprintf(stdout, "Please specify the bucket name: ");
      if( fgets( getInput, sizeof(getInput) , stdin ) != NULL ) {
        bucketname = strndup(getInput, strlen(getInput) -1);
        validbucketname = 1;
      }
    }
    
    char * buf = malloc( snprintf(NULL, 0, "S3_Base_Address=\"%s\"\n"
                                  "bucket_name=\"%s\"\n", S3_host, bucketname) + 1 );
    sprintf(buf, "S3_Base_Address=\"%s\"\n"
                 "bucket_name=\"%s\"\n", S3_host, bucketname );
    
    if( fputs( buf, fp ) == EOF ) {
      fprintf(stderr, "ERROR: Unable to create config file.\n");
    }
  }
  // Config file exists, parse it
  else {
    char    delim[4] = {'=', '\"', '\n', '\0'};
    char*   left;
    char*   right;
    
    while( fgets( getline, sizeof(getline) , fp ) != NULL ) {
      if( (left = strtok(getline, delim)) != NULL ) {
        right = strtok(NULL, delim);
      }
      else {
        // Empty line, nothing to parse
        continue;
      }
      
      // Match the strings
      char* comparison = "S3_Base_Address";
      if( strcmp(left, comparison) == 0) {
        if(right != NULL) {
          S3_host = strdup(right);
        }
        else {
          S3_host = strdup("s3.amazonaws.com");
        }
      }
      
      comparison = "bucket_name";
      if( strcmp(left, comparison) == 0 && right != NULL) {
          bucketname = strdup(right);
      }
    }  // End while
    
    if( S3_host == NULL || bucketname == NULL ) {
      fprintf(stderr, "ERROR: Invalid entry in config file.\n");
      exit(1);
    }
  }
  fclose(fp);
  
  // Set parameters in S3 library
  s3_set_host(S3_host);
  s3_set_bucket(bucketname);
  s3_set_acl(S3_acl);
  
  // Check for valid arguments
  if ( argc != 3 && argc != 4 ) {
    fprintf(stderr, "Usage: s3util <operation> <filename>\n");
    fprintf(stderr, "Operation can be one of {PUT, GET, DELETE}\n");
    exit(1);
  }
  // Check if operation is valid
  operation = strdup(argv[1]);
  filename  = strdup(argv[2]);
  
  // PUT file
  if( strcmp(operation, "PUT") == 0 ) {
    int rc;
    char s3replyMD5[33];
    
    rv = put_file( aws_buf, filename );
    rc = -1;
    if( aws_buf->eTag != NULL && strlen(aws_buf->eTag) > 2 ) {
      memset(s3replyMD5, 0, 33);
      memcpy(s3replyMD5, aws_buf->eTag + 1, 32);
      rc = verifyMD5(filename, s3replyMD5);
    }
    if(rv != 0 || rc != 0) {
      printf ( "PUT operation was unsuccessful \n" );
      return rc;
    }
    printf ( "MD5SUM matches, file uploaded successfully \n" );
  }
  
  // GET file
  else if( strcmp(operation, "GET") == 0 ) {
    rv = get_file( aws_buf, filename );
    if(rv == 0 && aws_buf->code == 200) {
      printf ( "File was successfully downloaded \n" );
    }
    else {
      printf ( "GET operation was unsuccessful \n" );
      return(-1);
    }
  }
  
  // DELETE FILE
  else if( strcmp(operation, "DELETE") == 0 ) {
    rv = delete_file( aws_buf, filename );
    if(rv == 0 && aws_buf->code == 204) {
      printf ( "File was successfully deleted \n" );
    }
    else {
      printf ( "DELETE operation was unsuccessful \n" );
      return(-1);
    }
  }
  else {
    fprintf(stderr, "Invalid operation, operation must be one of "
    "{PUT, GET, DELETE}\n");
    return(1);
  }
  
  /*
  printf ( "RV %d\n", rv );
  printf ( "CODE    [%d] \n", aws_buf->code );
  printf ( "RESULT  [%s] \n", aws_buf->result );
  printf ( "LEN     [%d] \n", aws_buf->len );
  printf ( "LASTMOD [%s] \n", aws_buf->lastMod );
  printf ( "ETAG    [%s] \n", aws_buf->eTag );
  */
  
  aws_iobuf_free(aws_buf);
  
  global_free();
  return 0;
}
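For reference, the s3config file that this utility writes (and later parses with strtok) holds two quoted key/value lines. A hypothetical example, with placeholder host and bucket names:

  S3_Base_Address="s3.amazonaws.com"
  bucket_name="my-sample-bucket"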
Example #3
static void s3_connect( IOR_param_t* param ) {
	if (param->verbose >= VERBOSE_2) {
		printf("-> s3_connect\n"); /* DEBUGGING */
	}

	if ( param->curl_flags & IOR_CURL_INIT ) {
		if (param->verbose >= VERBOSE_2) {
			printf("<- s3_connect  [nothing to do]\n"); /* DEBUGGING */
		}
		return;
	}

	// --- Done once-only (per rank).  Perform all first-time inits.
	//
	// The aws library requires a config file, as illustrated below.  We
	// assume that the user running the test has an entry in this file,
	// using their login moniker (i.e. `echo $USER`) as the key, as
	// suggested in the example:
	//
	//     <user>:<s3_login_id>:<s3_private_key>
	//
	// This file must not be readable by other than user.
	//
	// NOTE: These inits could be done in init_IORParam_t(), in ior.c, but
	//       would require conditional compilation, there.
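	//
	// A hypothetical ~/.awsAuth entry (placeholder values only, not real
	// credentials):
	//
	//     jdoe:AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY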

	aws_set_debug(param->verbose >= 4);
	aws_read_config(getenv("USER"));  // requires ~/.awsAuth
	aws_reuse_connections(1);

	// initialize IOBufs.  These are basically dynamically-extensible
	// linked-lists.  "growth size" controls the increment of new memory
	// allocated, whenever storage is used up.
	param->io_buf = aws_iobuf_new();
	aws_iobuf_growth_size(param->io_buf, 1024*1024*1);

	param->etags = aws_iobuf_new();
	aws_iobuf_growth_size(param->etags, 1024*1024*8);

   // WARNING: if you have http_proxy set in your environment, you may need
   //          to override it here.  TBD: add a command-line variable to
   //          allow you to define a proxy.
   //
	// our hosts are currently 10.140.0.15 - 10.140.0.18
	// TBD: Try DNS-round-robin server at vi-lb.ccstar.lanl.gov
   // TBD: try HAProxy round-robin at 10.143.0.1

#if 1
   //   snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
   //   s3_set_proxy(buff);
   //
   //   snprintf(buff, BUFF_SIZE, "10.140.0.%d", 15 + (rank % 4));
   //	s3_set_host(buff);

   snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
   s3_set_host(buff);

#else
/*
 * If you just want to go to one of the ECS nodes, put that IP
 * address in here directly with port 9020.
 *
 */
//   s3_set_host("10.140.0.15:9020");

/*
 * If you want to go to haproxy.ccstar.lanl.gov, this is its IP
 * address.
 *
 */
//   s3_set_proxy("10.143.0.1:80");
//   s3_set_host( "10.143.0.1:80");
#endif

	// make sure test-bucket exists
	s3_set_bucket((char*)bucket_name);

   if (rank == 0) {
      AWS4C_CHECK( s3_head(param->io_buf, "") );
      if ( param->io_buf->code == 404 ) {					// "404 Not Found"
         printf("  bucket '%s' doesn't exist\n", bucket_name);

         AWS4C_CHECK( s3_put(param->io_buf, "") );	/* creates URL as bucket + obj */
         AWS4C_CHECK_OK(     param->io_buf );		// assure "200 OK"
         printf("created bucket '%s'\n", bucket_name);
      }
      else {														// assure "200 OK"
         AWS4C_CHECK_OK( param->io_buf );
      }
   }
   MPI_CHECK(MPI_Barrier(param->testComm), "barrier error");


	// Maybe allow EMC extensions to S3
	s3_enable_EMC_extensions(param->curl_flags & IOR_CURL_S3_EMC_EXT);

	// don't perform these inits more than once
	param->curl_flags |= IOR_CURL_INIT;


	if (param->verbose >= VERBOSE_2) {
		printf("<- s3_connect  [success]\n");
	}
}
static int create( void ** instance_struct, struct jx *hook_args )
{	
	aws_init ();
	aws_set_debug (0);
	char line[256];

	if(s3_files_in_archive == NULL){
		s3_files_in_archive = hash_table_create(0,0);
	}

	struct archive_instance *a = archive_instance_create();
	*instance_struct = a;

	if(jx_lookup_string(hook_args, "archive_dir")){
		a->dir = xxstrdup(jx_lookup_string(hook_args, "archive_dir"));	
	} else {
		a->dir = string_format("%s%d", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY, getuid());
	}

	if(jx_lookup_boolean(hook_args, "archive_s3_no_check")){
		a->s3_check = 0;
	}
	else{
		a->s3_check = 1;
	}

	if(jx_lookup_string(hook_args, "archive_s3_arg")){
		a->s3 = 1;
		a->s3_dir = xxstrdup(jx_lookup_string(hook_args, "archive_s3_arg"));
	}
	else if(jx_lookup_string(hook_args, "archive_s3_no_arg")) {
		a->s3 = 1;
		a->s3_dir = string_format("%s",MAKEFLOW_ARCHIVE_DEFAULT_S3_BUCKET);
	}
	else{
		a->s3 = 0;
	}

	if(jx_lookup_string(hook_args, "s3_hostname")){
		s3_set_host(xxstrdup(jx_lookup_string(hook_args, "s3_hostname")));
	}

	if(jx_lookup_string(hook_args, "s3_keyid")){
		aws_set_keyid(xxstrdup(jx_lookup_string(hook_args, "s3_keyid")));	
	}
	else{
		FILE *fp = popen("grep aws_access_key_id ~/.aws/credentials | cut -d = -f 2 | tr -d ' '", "r");
		if(fp && fgets(line, sizeof(line), fp) != NULL){
			line[strcspn(line, "\n")] = '\0';   // strip trailing newline
			aws_set_keyid(line);
		}
		if(fp) pclose(fp);
	}

	if(jx_lookup_string(hook_args, "s3_secretkey")){
		aws_set_key(xxstrdup(jx_lookup_string(hook_args, "s3_secretkey")));
	}
	else{
		FILE *ft = popen("grep aws_secret_access_key ~/.aws/credentials | cut -d = -f 2 | tr -d ' '", "r");
		if(ft && fgets(line, sizeof(line), ft) != NULL){
			line[strcspn(line, "\n")] = '\0';   // strip trailing newline
			aws_set_key(line);
		}
		if(ft) pclose(ft);
	}

	if(jx_lookup_boolean(hook_args, "archive_read")){
		a->read = 1;
	}

	if(jx_lookup_boolean(hook_args, "archive_write")){
		a->write = 1;
	}

	if (!create_dir(a->dir, 0777) && errno != EEXIST){
		debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create base archiving directory %s: %d %s\n", 
			a->dir, errno, strerror(errno));
		return MAKEFLOW_HOOK_FAILURE;
	}

	char *files_dir = string_format("%s/files", a->dir);
	if (!create_dir(files_dir, 0777) && errno != EEXIST){
		debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create files archiving directory %s: %d %s\n", 
			files_dir, errno, strerror(errno));
		free(files_dir);
		return MAKEFLOW_HOOK_FAILURE;
	}
	free(files_dir);

	char *tasks_dir = string_format("%s/tasks", a->dir);
	if (!create_dir(tasks_dir, 0777) && errno != EEXIST){
		debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create tasks archiving directory %s: %d %s\n", 
			tasks_dir, errno, strerror(errno));
		free(tasks_dir);
		return MAKEFLOW_HOOK_FAILURE;
	}
	free(tasks_dir);

	if(a->s3_dir) s3_set_bucket (a->s3_dir);

	return MAKEFLOW_HOOK_SUCCESS;
}
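For context, this hook reads its settings from the JX/JSON hook_args object. A hypothetical configuration using the keys handled above (all values are placeholders):

  {
    "archive_dir"    : "/tmp/makeflow.archive",
    "archive_s3_arg" : "my-archive-bucket",
    "s3_hostname"    : "s3.amazonaws.com",
    "s3_keyid"       : "AKIAIOSFODNN7EXAMPLE",
    "s3_secretkey"   : "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
    "archive_read"   : true,
    "archive_write"  : true
  }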