Code Example #1
File: s3util.c  Project: cheah/aws4c
int
main (int argc, char *argv[]) {
  aws_init();
  // argv[3] is an optional debug level; check argc before touching it
  if (argc > 3) {
    aws_set_debug(atoi(argv[3]));
  }
  IOBuf * aws_buf = aws_iobuf_new();
  
  // Read credential file
  int rv = aws_read_config("myteksi");
  if ( rv )
  {
    fprintf(stderr, "Could not find a credential in the config file \n" );
    fprintf(stderr, "Make sure your ~/.awsAuth file is correct \n" );
    exit (1);
  }
  
  
  // Read config file
  FILE *fp = NULL;
  
  char configline[ LINE_MAX ];
  if( (fp = fopen("s3config", "r")) == NULL) {
    //File does not exist. Initialize it
    if( (fp = fopen("s3config", "w+")) == NULL) {
      fprintf(stderr, "ERROR: Unable to create config file.\n");
      exit(1);  // exit non-zero on failure
    }
    
    // Ask for bucket_name
    fprintf(stdout, "Config file doesn't exist yet! Creating one now. \n");
    fprintf(stdout, "Please specify the AWS S3 base address "
                    "[default s3.amazonaws.com] :");
    char getInput[ LINE_MAX ];
    if( fgets( getInput, sizeof(getInput) , stdin ) != NULL ) {
      if( strcmp(getInput, "\n") != 0 ) {
        S3_host = strndup(getInput, strlen(getInput) -1); // Remove trailing NL
      }
      else {
        S3_host = strdup("s3.amazonaws.com");
      }
    }
    
    int validbucketname = 0;
    while( !validbucketname ) {
      fprintf(stdout, "Please specify the bucket name: ");
      if( fgets( getInput, sizeof(getInput) , stdin ) != NULL ) {
        bucketname = strndup(getInput, strlen(getInput) -1);
        validbucketname = 1;
      }
    }
    
    // NOTE: snprintf(NULL, 0, ...) returns the formatted length *excluding*
    //       the NUL terminator, so allocate one extra byte for it
    size_t buflen = snprintf(NULL, 0, "S3_Base_Address=\"%s\"\n"
                             "bucket_name=\"%s\"\n", S3_host, bucketname) + 1;
    char * buf = malloc(buflen);
    snprintf(buf, buflen, "S3_Base_Address=\"%s\"\n"
             "bucket_name=\"%s\"\n", S3_host, bucketname);
    
    if( fputs( buf, fp ) == EOF ) {
      fprintf(stderr, "ERROR: Unable to create config file.\n");
    }
  }
  // Config file exists; parse it
  else {
    char    delim[4] = {'=', '\"', '\n', '\0'};
    char*   left;
    char*   right;
    
    while( fgets( configline, sizeof(configline), fp ) != NULL ) {
      if( (left = strtok(configline, delim)) != NULL ) {
        right = strtok(NULL, delim);
      }
      else {
        continue;  // empty line; skip so 'left' is never dereferenced as NULL
      }
      
      // Match the strings
      char* comparison = "S3_Base_Address";
      if( strcmp(left, comparison) == 0) {
        if(right != NULL) {
          S3_host = strdup(right);
        }
        else {
          S3_host = strdup("s3.amazonaws.com");
        }
      }
      
      comparison = "bucket_name";
      if( strcmp(left, comparison) == 0 && right != NULL) {
          bucketname = strdup(right);
      }
    }  // End while
    
    if( S3_host == NULL || bucketname == NULL ) {
      fprintf(stderr, "ERROR: Invalid entry in config file.\n");
      exit(1);  // the S3 calls below need both a host and a bucket
    }
  }
  fclose(fp);  // flush and close the config file in both branches
  
  // Set parameters in S3 library
  s3_set_host(S3_host);
  s3_set_bucket(bucketname);
  s3_set_acl(S3_acl);
  
  // Check for valid arguments
  if ( argc != 3 && argc != 4 ) {
    fprintf(stderr, "Usage: s3util <operation> <filename>\n");
    fprintf(stderr, "Operation can be one of {PUT, GET, DELETE}\n");
    exit(1);
  }
  // Copy operation and filename; the operation string is validated below
  operation = strdup(argv[1]);
  filename  = strdup(argv[2]);
  
  // PUT file
  if( strcmp(operation, "PUT") == 0 ) {
    int rc;
    char s3replyMD5[33];  // 32 hex chars of the MD5 digest + NUL terminator
    
    rv = put_file( aws_buf, filename );
    rc = -1;
    if( aws_buf->eTag != NULL && strlen(aws_buf->eTag) > 2 ) {
      memset(s3replyMD5, 0, 33);
      memcpy(s3replyMD5, aws_buf->eTag + 1, 32);
      rc = verifyMD5(filename, s3replyMD5);
    }
    if(rv != 0 || rc != 0) {
      printf ( "PUT operation was unsuccessful \n" );
      return rc;
    }
    printf ( "MD5SUM matches, file uploaded successfully \n" );
  }
  
  // GET file
  else if( strcmp(operation, "GET") == 0 ) {
    rv = get_file( aws_buf, filename );
    if(rv == 0 && aws_buf->code == 200) {
      printf ( "File was successfully downloaded \n" );
    }
    else {
      printf ( "GET operation was unsuccessful \n" );
      return(-1);
    }
  }
  
  // DELETE FILE
  else if( strcmp(operation, "DELETE") == 0 ) {
    rv = delete_file( aws_buf, filename );
    if(rv == 0 && aws_buf->code == 204) {
      printf ( "File was successfully deleted \n" );
    }
    else {
      printf ( "DELETE operation was unsuccessful \n" );
      return(-1);
    }
  }
  else {
    fprintf(stderr, "Invalid operation, operation must be one of "
    "{PUT, GET, DELETE}\n");
    return(1);
  }
  
  /*
  printf ( "RV %d\n", rv );
  printf ( "CODE    [%d] \n", aws_buf->code );
  printf ( "RESULT  [%s] \n", aws_buf->result );
  printf ( "LEN     [%d] \n", aws_buf->len );
  printf ( "LASTMOD [%s] \n", aws_buf->lastMod );
  printf ( "ETAG    [%s] \n", aws_buf->eTag );
  */
  
  aws_iobuf_free(aws_buf);
  
  global_free();
  return 0;
}
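
For reference, the s3config file this example writes and then parses holds two quoted key/value lines, matching the format string above. A minimal sample follows; the bucket name is a hypothetical placeholder.

S3_Base_Address="s3.amazonaws.com"
bucket_name="my-test-bucket"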
Code Example #2
File: aiori-S3.c  Project: roblatham00/ior
static void s3_connect( IOR_param_t* param ) {
	if (param->verbose >= VERBOSE_2) {
		printf("-> s3_connect\n"); /* DEBUGGING */
	}

	if ( param->curl_flags & IOR_CURL_INIT ) {
		if (param->verbose >= VERBOSE_2) {
			printf("<- s3_connect  [nothing to do]\n"); /* DEBUGGING */
		}
		return;
	}

	// --- Done once-only (per rank).  Perform all first-time inits.
	//
	// The aws library requires a config file, as illustrated below.  We
	// assume that the user running the test has an entry in this file,
	// using their login moniker (i.e. `echo $USER`) as the key, as
	// suggested in the example:
	//
	//     <user>:<s3_login_id>:<s3_private_key>
	//
	// This file must not be readable by anyone other than the user.
	//
	// NOTE: These inits could be done in init_IORParam_t(), in ior.c, but
	//       would require conditional compilation, there.

	aws_set_debug(param->verbose >= 4);
	aws_read_config(getenv("USER"));  // requires ~/.awsAuth
	aws_reuse_connections(1);

	// Initialize IOBufs.  These are basically dynamically-extensible
	// linked-lists.  "growth size" controls the increment of new memory
	// allocated, whenever storage is used up.
	param->io_buf = aws_iobuf_new();
	aws_iobuf_growth_size(param->io_buf, 1024*1024*1);

	param->etags = aws_iobuf_new();
	aws_iobuf_growth_size(param->etags, 1024*1024*8);

   // WARNING: if you have http_proxy set in your environment, you may need
   //          to override it here.  TBD: add a command-line variable to
   //          allow you to define a proxy.
   //
	// our hosts are currently 10.140.0.15 - 10.140.0.18
	// TBD: Try DNS-round-robin server at vi-lb.ccstar.lanl.gov
   // TBD: try HAProxy round-robin at 10.143.0.1

#if 1
   //   snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
   //   s3_set_proxy(buff);
   //
   //   snprintf(buff, BUFF_SIZE, "10.140.0.%d", 15 + (rank % 4));
   //	s3_set_host(buff);

   snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
   s3_set_host(buff);

#else
/*
 * If you just want to go to one of the ECS nodes, put that IP
 * address in here directly with port 9020.
 *
 */
//   s3_set_host("10.140.0.15:9020");

/*
 * If you want to go to haproxy.ccstar.lanl.gov, this is its IP
 * address.
 *
 */
//   s3_set_proxy("10.143.0.1:80");
//   s3_set_host( "10.143.0.1:80");
#endif

	// make sure test-bucket exists
	s3_set_bucket((char*)bucket_name);

   if (rank == 0) {
      AWS4C_CHECK( s3_head(param->io_buf, "") );
      if ( param->io_buf->code == 404 ) {					// "404 Not Found"
         printf("  bucket '%s' doesn't exist\n", bucket_name);

         AWS4C_CHECK( s3_put(param->io_buf, "") );	/* creates URL as bucket + obj */
         AWS4C_CHECK_OK(     param->io_buf );		// assure "200 OK"
         printf("created bucket '%s'\n", bucket_name);
      }
      else {														// assure "200 OK"
         AWS4C_CHECK_OK( param->io_buf );
      }
   }
   MPI_CHECK(MPI_Barrier(param->testComm), "barrier error");


	// Maybe allow EMC extensions to S3
	s3_enable_EMC_extensions(param->curl_flags & IOR_CURL_S3_EMC_EXT);

	// don't perform these inits more than once
	param->curl_flags |= IOR_CURL_INIT;


	if (param->verbose >= VERBOSE_2) {
		printf("<- s3_connect  [success]\n");
	}
}
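
As the comment block above explains, aws_read_config() looks up the caller's entry in ~/.awsAuth, keyed by login name, and the file must be readable only by its owner. A hypothetical entry, using AWS's well-known documentation placeholder credentials, would look like:

jdoe:AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY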
Code Example #3
File: marfs_repack.c  Project: wfvining/marfs
/******************************************************************************
* Name  pack_objects 
* 
* This function traverses the object and file link lists and reads object 
* data for repacking into a new object.   
******************************************************************************/
int pack_objects(File_Handles *file_info, repack_objects *objects)
{
   struct stat statbuf;
   char *path = "/";

   stat(path, &statbuf);
   size_t write_offset = 0;
   size_t obj_raw_size;
   size_t obj_size;
   size_t offset;
   MarFS_XattrPre pre;
   IOBuf *nb = aws_iobuf_new();
   char test_obj[2048];
   obj_files *files;
   int ret;
   char *obj_ptr;
   CURLcode s3_return;
   char pre_str[MARFS_MAX_XATTR_SIZE];


   // TBD: if file_count == 1, should this be rewritten as a "uni" object?
   while (objects) { 
      // need inner loop to get files for each object
      // If chunk_count == file count no need to pack
      // and garbage collection took care of it
      if (objects->chunk_count == objects->pack_count) {
         objects=objects->next;
         continue;
      }
      //No need to pack if only one file specified in xattr and only
      //one file found
      if (objects->chunk_count == 1 && objects->pack_count ==1 ) {
         objects=objects->next;
         continue;
      }
      // Not quite sure how this next condition could happen
      // TODO: reduce to a single condition, chunk_count > file_count;
      // all others continue
      if (objects->pack_count > objects->chunk_count) {
         objects=objects->next;
         continue;
      }

      LOG(LOG_INFO,"object = %s\n", objects->objid);
      LOG(LOG_INFO, "file count = %ld chunks = %ld\n", objects->pack_count, objects->chunk_count);
      files = objects->files_ptr;
      write_offset = 0;
      ret=str_2_pre(&pre, objects->objid, NULL);
      sprintf(test_obj,"%s.teste",objects->objid);

      //Make this a unique object, since it is derived from an existing object
      pre.unique++;    


      LOG(LOG_INFO,"stdout,new object name =%s\n", test_obj);
  
      //aws_iobuf_reset(nb);

      while (files) {
         //fprintf(stdout, "file = %s offset=%ld\n", files->filename, files->offset);

         stat(files->filename, &statbuf);


         obj_raw_size = statbuf.st_size;
         obj_size = obj_raw_size + MARFS_REC_UNI_SIZE;
         files->size = obj_size;

         //fprintf(stdout, "obj_size = %ld REC SIZE = %d\n", obj_size,MARFS_REC_UNI_SIZE);
         //write_offset+=obj_size;
         if ((obj_ptr = (char *)malloc(obj_size))==NULL) {
            fprintf(stderr, "Error allocating memory\n");
            return -1;
         }

         check_security_access(&pre);
         update_pre(&pre);
         s3_set_host(pre.host);
         //offset = objects->files_ptr->offset;

         offset = files->original_offset;
         //fprintf(stdout, "file %s will get re-written at offset %ld\n",
         //        files->filename, write_offset);

         // get object_data
         // Using byte range to get data for particular offsets
         s3_set_byte_range(offset, obj_size);
         // Use extend to get more buffering capability on each get
         aws_iobuf_extend_dynamic(nb, obj_ptr, obj_size);
         LOG(LOG_INFO, "going to get file %s from object %s at offset %ld and size %ld\n", files->filename, objects->objid, offset, obj_size);
         fprintf(file_info->outfd, "Getting file %s from object %s at offset %ld and size %ld\n", files->filename, objects->objid, offset, obj_size);
         s3_return = s3_get(nb,objects->objid);
         check_S3_error(s3_return, nb, S3_GET);

         LOG(LOG_INFO, "Read buffer write count = %ld  len = %ld\n", nb->write_count, nb->len);
         // may have to copy nb to a new buffer 
         // then write 
     

         files->new_offset = write_offset;
         write_offset += obj_size;
         files = files->next;
      }
      // create object string for put
      pre_2_str(pre_str, MARFS_MAX_XATTR_SIZE,&pre);

      strcpy(objects->new_objid, pre_str);
     
      LOG(LOG_INFO, "Going to write to object %s\n", pre_str);
      fprintf(file_info->outfd, "Writing file to object %s\n", pre_str);

      // Write data back to new object
      s3_return = s3_put(nb, pre_str);   // capture the return code so the error check is meaningful
      check_S3_error(s3_return, nb, S3_PUT);

      aws_iobuf_reset_hard(nb);
      objects=objects->next;
   }
   return 0;
}
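
The core aws4c pattern in pack_objects() is a byte-range GET into an IOBuf. Below is a minimal sketch of that step in isolation, assuming the library and host/bucket are already configured; fetch_range() and its parameters are hypothetical names, while the aws4c calls are the same ones used above.

#include <stdlib.h>
#include "aws4c.h"

static int fetch_range(char *objid, size_t offset, size_t len)
{
   IOBuf *b   = aws_iobuf_new();
   char  *dst = malloc(len);
   if (dst == NULL)
      return -1;

   s3_set_byte_range(offset, len);         // limit the next GET to [offset, offset+len)
   aws_iobuf_extend_dynamic(b, dst, len);  // hand the IOBuf a destination buffer
   CURLcode rc = s3_get(b, objid);         // read that slice of the object
   // ... on success, b->len bytes are available in the IOBuf chain ...

   int ok = (rc == CURLE_OK && b->code == 206) ? 0 : -1;  // 206 = Partial Content
   aws_iobuf_free(b);
   return ok;
}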
Code Example #4
static int create( void ** instance_struct, struct jx *hook_args )
{
	aws_init();
	aws_set_debug(0);
	char line[256];

	if(s3_files_in_archive == NULL){
		s3_files_in_archive = hash_table_create(0,0);
	}

	struct archive_instance *a = archive_instance_create();
	*instance_struct = a;

	if(jx_lookup_string(hook_args, "archive_dir")){
		a->dir = xxstrdup(jx_lookup_string(hook_args, "archive_dir"));	
	} else {
		a->dir = string_format("%s%d", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY, getuid());
	}

	if(jx_lookup_boolean(hook_args, "archive_s3_no_check")){
		a->s3_check = 0;
	}
	else{
		a->s3_check = 1;
	}

	if(jx_lookup_string(hook_args, "archive_s3_arg")){
		a->s3 = 1;
		a->s3_dir = xxstrdup(jx_lookup_string(hook_args, "archive_s3_arg"));
	}
	else if(jx_lookup_string(hook_args, "archive_s3_no_arg")) {
		a->s3 = 1;
		a->s3_dir = string_format("%s",MAKEFLOW_ARCHIVE_DEFAULT_S3_BUCKET);
	}
	else{
		a->s3 = 0;
	}

	if(jx_lookup_string(hook_args, "s3_hostname")){
		s3_set_host(xxstrdup(jx_lookup_string(hook_args, "s3_hostname")));
	}

	if(jx_lookup_string(hook_args, "s3_keyid")){
		aws_set_keyid(xxstrdup(jx_lookup_string(hook_args, "s3_keyid")));	
	}
	else{
		// Fall back to parsing ~/.aws/credentials for the access key id
		FILE *fp = popen("grep aws_access_key_id ~/.aws/credentials | cut -d '=' -f 2 | tr -d ' '", "r");
		if(fp != NULL && fgets(line, sizeof(line), fp) != NULL){
			line[strcspn(line, "\n")] = '\0';  // strip the newline without underflowing on empty input
			aws_set_keyid(line);
		}
		if(fp != NULL) pclose(fp);
	}

	if(jx_lookup_string(hook_args, "s3_secretkey")){
		aws_set_key(xxstrdup(jx_lookup_string(hook_args, "s3_secretkey")));
	}
	else{
		// Fall back to parsing ~/.aws/credentials for the secret key
		FILE *ft = popen("grep aws_secret_access_key ~/.aws/credentials | cut -d '=' -f 2 | tr -d ' '", "r");
		if(ft != NULL && fgets(line, sizeof(line), ft) != NULL){
			line[strcspn(line, "\n")] = '\0';
			aws_set_key(line);
		}
		if(ft != NULL) pclose(ft);
	}

	if(jx_lookup_boolean(hook_args, "archive_read")){
		a->read = 1;
	}

	if(jx_lookup_boolean(hook_args, "archive_write")){
		a->write = 1;
	}

	if (!create_dir(a->dir, 0777) && errno != EEXIST){
		debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create base archiving directory %s: %d %s\n", 
			a->dir, errno, strerror(errno));
		return MAKEFLOW_HOOK_FAILURE;
	}

	char *files_dir = string_format("%s/files", a->dir);
	if (!create_dir(files_dir, 0777) && errno != EEXIST){
		debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create files archiving directory %s: %d %s\n", 
			files_dir, errno, strerror(errno));
		free(files_dir);
		return MAKEFLOW_HOOK_FAILURE;
	}
	free(files_dir);

	char *tasks_dir = string_format("%s/tasks", a->dir);
	if (!create_dir(tasks_dir, 0777) && errno != EEXIST){
		debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create tasks archiving directory %s: %d %s\n", 
			tasks_dir, errno, strerror(errno));
		free(tasks_dir);
		return MAKEFLOW_HOOK_FAILURE;
	}
	free(tasks_dir);

	if(a->s3){
		s3_set_bucket(a->s3_dir);  // a->s3_dir is only set when S3 archiving is enabled
	}

	return MAKEFLOW_HOOK_SUCCESS;
}
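
The hook arguments read above are JX (JSON-like) keys. A hypothetical hook_args document exercising the S3 path might look like the following; every value is a placeholder, and "archive_s3_arg" names the destination bucket.

{
  "archive_dir"    : "/tmp/makeflow.archive",
  "archive_s3_arg" : "my-archive-bucket",
  "s3_hostname"    : "s3.amazonaws.com",
  "s3_keyid"       : "AKIAIOSFODNN7EXAMPLE",
  "s3_secretkey"   : "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
  "archive_read"   : true,
  "archive_write"  : true
}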