/*
 * Tar up and upload each extra input file to the S3 bucket, skipping files
 * that were already submitted for an earlier job.  Returns 1 on success,
 * 0 if any upload failed.  (jobname is currently unused.)
 */
static int upload_input_files_to_s3(char* files, char* jobname)
{
	int success = 1;
	char* env_var = initialized_data.master_env_prefix;
	struct list* file_list = extract_file_names_from_list(files);
	debug(D_BATCH, "extra input files list: %s, len: %i", files, list_size(file_list));

	list_first_item(file_list);
	char* cur_file = NULL;
	while((cur_file = list_next_item(file_list)) != NULL) {
		/* Skip files that have already been uploaded. */
		if(hash_table_lookup(submitted_files, cur_file) == &HAS_SUBMITTED_VALUE) {
			continue;
		}

		debug(D_BATCH, "Submitting file: %s", cur_file);
		char* put_file_command = string_format("tar -cvf %s.txz %s && %s aws s3 cp %s.txz s3://%s/%s.txz", cur_file, cur_file, env_var, cur_file, bucket_name, cur_file);
		int ret = sh_system(put_file_command);
		if(ret != 0) {
			debug(D_BATCH, "File Submission: %s FAILURE return code: %i", cur_file, ret);
			success = 0;
		} else {
			debug(D_BATCH, "File Submission: %s SUCCESS return code: %i", cur_file, ret);
		}
		free(put_file_command);

		/* Remove the local tarball now that it has been copied up. */
		put_file_command = string_format("rm %s.txz", cur_file);
		sh_system(put_file_command);
		free(put_file_command);

		/* Mark the file as submitted even on failure, so it is not retried;
		   the caller sees the failure through the return value instead. */
		hash_table_insert(submitted_files, cur_file, &HAS_SUBMITTED_VALUE);
	}
	list_free(file_list);
	list_delete(file_list);
	return success;
}
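/*
 * Build the wrapper script that the Amazon Batch job actually runs: copy
 * the input files down from S3, chmod them, run the user's command, then
 * copy the declared output files back to the bucket.  The finished script
 * is uploaded as s3://<bucket>/COMAND_FILE_<jobid>.sh for the job to fetch.
 */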
static void upload_cmd_file(char* bucket_name, char* input_files, char* output_files, char* cmd, unsigned int jobid)
{
	char* env_var = initialized_data.master_env_prefix;

	/* Create commands to pull the input files from S3 into local space. */
	char* bucket = string_format("s3://%s", bucket_name);
	char* cpy_in = generate_s3_cp_cmds(input_files, bucket, "./");
	char* chmod = chmod_all(input_files);

	/* Run the actual command. */
	char* cmd_tmp = string_format("%s\n%s\n%s\n", cpy_in, chmod, cmd);
	free(cpy_in);
	free(chmod);

	/* Copy out any external files. */
	char* cpy_out = generate_s3_cp_cmds(output_files, "./", bucket);
	char* old_cmd_tmp = cmd_tmp;
	cmd_tmp = string_format("%s\n%s\n", old_cmd_tmp, cpy_out);
	free(old_cmd_tmp);
	free(cpy_out);
	free(bucket);

	/* Add header. */
	char* final_cmd = string_format("#!/bin/sh\n%s", cmd_tmp);
	free(cmd_tmp);

	/* Write out to a temporary script file. */
	unsigned int tempuid = gen_guid();
	char* tmpfile_string = string_format("TEMPFILE-%u.sh", tempuid);
	FILE* tmpfile = fopen(tmpfile_string, "w+");
	if(!tmpfile) {
		debug(D_BATCH, "Unable to open %s for writing", tmpfile_string);
		free(final_cmd);
		free(tmpfile_string);
		return;
	}
	fwrite(final_cmd, sizeof(char), strlen(final_cmd), tmpfile);
	fclose(tmpfile);
	free(final_cmd);

	/* Make it executable and put it into S3.  The "COMAND_FILE" spelling is
	   kept as-is, since the same object key is presumably referenced where
	   the job is submitted. */
	cmd_tmp = string_format("chmod +x %s", tmpfile_string);
	sh_system(cmd_tmp);
	free(cmd_tmp);
	cmd_tmp = string_format("%s aws s3 cp %s s3://%s/COMAND_FILE_%u.sh", env_var, tmpfile_string, bucket_name, jobid);
	sh_system(cmd_tmp);
	free(cmd_tmp);

	remove(tmpfile_string);
	free(tmpfile_string);
}
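/*
 * Interactive read-eval loop: linenoise supplies line editing and history,
 * and each entered line is handed to sh_system() to execute.  The expansion
 * pass (sh_expand_string) is left commented out below.
 */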
int main(int argc, const string_t* argv)
{
	sh_prompt_update();
	//linenoiseHistorySetMaxLen(10);
	sh_initAutoComplete();

	string_t line;
	//sds prcdLn;
	while((line = linenoise(sh_prompt())) != NULL) {
		linenoiseHistoryAdd(line);
		//prcdLn = sh_expand_string(line);
		sh_system(line);
		free(cPTR line);
		//sh_system(prcdLn);
		//sdsfree(prcdLn);
	}
	return 0;
}
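/*
 * Cancel a job: mark it as done locally, then ask AWS Batch to terminate
 * the corresponding remote job if we still have its Amazon job id.
 */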
static int batch_job_amazon_batch_remove(struct batch_queue *q, batch_job_id_t jobid)
{
	struct internal_amazon_batch_amazon_ids amazon_ids = initialize(q);
	char* env_var = amazon_ids.master_env_prefix;

	/* done_jobs is keyed on jobid+1, since itable keys must be nonzero. */
	if(itable_lookup(done_jobs, jobid+1) == NULL) {
		char* name = string_format("%s_%i", queue_name, (int)jobid);
		itable_insert(done_jobs, jobid+1, name);
	}

	char* amazon_id;
	if((amazon_id = itable_lookup(amazon_job_ids, jobid)) == NULL) {
		return -1;
	}

	char* cmd = string_format("%s aws batch terminate-job --job-id %s --reason \"Makeflow Killed\"", env_var, amazon_id);
	debug(D_BATCH, "Terminating the job: %s\n", cmd);
	sh_system(cmd);
	free(cmd);
	return 0;
}
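/*
 * Poll every outstanding Amazon Batch job.  The first job found in a
 * terminal state is recorded in done_jobs, its output files are pulled
 * back from S3 (on success), its batch_job_info is copied out to the
 * caller, and its job definition is deregistered.  Returns that job's id,
 * or -1 if nothing has finished yet.
 */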
static batch_job_id_t batch_job_amazon_batch_wait(struct batch_queue *q, struct batch_job_info *info_out, time_t stoptime)
{
	struct internal_amazon_batch_amazon_ids amazon_ids = initialize(q);
	int done = 0;
	char* env_var = amazon_ids.master_env_prefix;
	char* jaid;
	UINT64_T jobid;

	itable_firstkey(amazon_job_ids);
	while(itable_nextkey(amazon_job_ids, &jobid, (void**)&jaid)) {
		done = describe_aws_job(jaid, env_var);
		char* jobname = string_format("%s_%u", queue_name, (unsigned int)jobid);
		unsigned int id = (unsigned int)jobid;
		if(done == DESCRIBE_AWS_JOB_SUCCESS) {
			if(itable_lookup(done_jobs, id+1) == NULL) {
				/* id is done, returning here */
				debug(D_BATCH, "Inserting id: %u into done_jobs", id);
				itable_insert(done_jobs, id+1, jobname);
				itable_remove(amazon_job_ids, jobid);

				/* Pull output files from S3. */
				char* output_files = itable_lookup(done_files, id);
				struct list* file_list = extract_file_names_from_list(output_files);
				if(list_size(file_list) > 0) {
					list_first_item(file_list);
					char* cur_file = NULL;
					while((cur_file = list_next_item(file_list)) != NULL) {
						debug(D_BATCH, "Copying over %s", cur_file);
						char* get_from_s3_cmd = string_format("%s aws s3 cp s3://%s/%s.txz ./%s.txz && tar -xvf %s.txz && rm %s.txz", env_var, bucket_name, cur_file, cur_file, cur_file, cur_file);
						int outputcode = sh_system(get_from_s3_cmd);
						debug(D_BATCH, "output code from calling S3 to pull file %s: %i", cur_file, outputcode);
						FILE* tmpOut = fopen(cur_file, "r");
						if(tmpOut) {
							debug(D_BATCH, "File does indeed exist: %s", cur_file);
							fclose(tmpOut);
						} else {
							debug(D_BATCH, "File doesn't exist: %s", cur_file);
						}
						free(get_from_s3_cmd);
					}
				}
				list_free(file_list);
				list_delete(file_list);

				/* Let Makeflow know we're all done! */
				debug(D_BATCH, "Removing the job from the job_table");
				struct batch_job_info* info = itable_remove(q->job_table, id); /* got from batch_job_amazon.c */
				info->finished = time(0); /* get now */
				info->exited_normally = 1;
				info->exit_code = finished_aws_job_exit_code(jaid, env_var);
				debug(D_BATCH, "copying over the data to info_out");
				memcpy(info_out, info, sizeof(struct batch_job_info));
				free(info);

				char* jobdef = aws_job_def(jaid);
				del_job_def(jobdef);
				free(jobdef);
				return id;
			}
		} else if(done == DESCRIBE_AWS_JOB_FAILED || done == DESCRIBE_AWS_JOB_NON_EXIST) {
			if(itable_lookup(done_jobs, id+1) == NULL) {
				/* id is done (failed), returning here */
				itable_insert(done_jobs, id+1, jobname);
				itable_remove(amazon_job_ids, jobid);
				debug(D_BATCH, "Failed job: %i", id);

				struct batch_job_info* info = itable_remove(q->job_table, id); /* got from batch_job_amazon.c */
				info->finished = time(0); /* get now */
				info->exited_normally = 0;
				int exc = finished_aws_job_exit_code(jaid, env_var);
				info->exit_code = exc == 0 ? -1 : exc;
				memcpy(info_out, info, sizeof(*info));
				free(info);

				char* jobdef = aws_job_def(jaid);
				del_job_def(jobdef);
				free(jobdef);
				return id;
			}
		}
		/* Job is still running or already recorded; jobname was not stored,
		   so release it before moving to the next entry. */
		free(jobname);
	}
	return -1;
}
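/*
 * Deregister an AWS Batch job definition once its job has completed, so
 * that one-off definitions do not accumulate in the account.
 */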
static int del_job_def(char* jobdef)
{
	char* cmd = string_format("aws batch deregister-job-definition --job-definition %s", jobdef);
	int ret = sh_system(cmd);
	free(cmd);
	return ret;
}