static int checkpoint_read( struct deltadb *db, const char *filename )
{
	FILE * file = fopen(filename,"r");
	if(!file) return 0;

	/* Load the entire checkpoint into one json object */
	struct jx *jcheckpoint = jx_parse_stream(file);

	fclose(file);

	if(!jcheckpoint || jcheckpoint->type!=JX_OBJECT) {
		jx_delete(jcheckpoint);
		return compat_checkpoint_read(db,filename);
	}

	/* For each key and value, move the value over to the hash table. */
	/* Skip objects that don't match the filter. */

	struct jx_pair *p;
	for(p=jcheckpoint->u.pairs;p;p=p->next) {
		if(p->key->type!=JX_STRING) continue;
		if(!deltadb_boolean_expr(db->filter_expr,p->value)) continue;
		hash_table_insert(db->table,p->key->u.string_value,p->value);
		p->value = 0;
	}

	/* Delete the leftover object with empty pairs. */
	jx_delete(jcheckpoint);

	return 1;
}
static int server_table_load(time_t stoptime)
{
	struct catalog_query *q;
	struct jx *j;
	char *key;
	void *item;

	if((last_update + update_interval) > time(0)) {
		return 1;
	}

	if(!server_table) {
		server_table = hash_table_create(0, 0);
		if(!server_table)
			return 0;
	}

	if(inhibit_catalog_queries) {
		debug(D_CHIRP, "catalog queries disabled");
		return 1;
	}

	/* Discard stale entries left over from the previous query. */
	hash_table_firstkey(server_table);
	while(hash_table_nextkey(server_table, &key, &item)) {
		hash_table_remove(server_table, key);
		jx_delete(item);
	}

	debug(D_CHIRP, "querying catalog at %s:%d", CATALOG_HOST, CATALOG_PORT);

	q = catalog_query_create(CATALOG_HOST, CATALOG_PORT, stoptime);
	if(!q)
		return 0;

	while((j = catalog_query_read(q, stoptime))) {
		char name[CHIRP_PATH_MAX];
		const char *type, *hname;
		int port;

		type = jx_lookup_string(j, "type");
		if(type && !strcmp(type, "chirp")) {
			hname = jx_lookup_string(j, "name");
			if(hname) {
				port = jx_lookup_integer(j, "port");
				if(!port)
					port = CHIRP_PORT;
				/* snprintf rather than sprintf, to avoid overflowing name */
				snprintf(name, sizeof(name), "%s:%d", hname, port);
				hash_table_insert(server_table, name, j);
			} else {
				jx_delete(j);
			}
		} else {
			jx_delete(j);
		}
	}

	catalog_query_delete(q);
	last_update = time(0);
	return 1;
}
static int checkpoint_read( struct jx_database *db, const char *filename )
{
	FILE * file = fopen(filename,"r");
	if(!file) return 0;

	/* Load the entire checkpoint into one json object */
	struct jx *jcheckpoint = jx_parse_stream(file);

	fclose(file);

	if(!jcheckpoint || jcheckpoint->type!=JX_OBJECT) {
		debug(D_NOTICE, "could not parse checkpoint file, falling back to compatibility mode");
		jx_delete(jcheckpoint);
		return compat_checkpoint_read(db,filename);
	}

	/* For each key and value, move the value over to the hash table. */

	struct jx_pair *p;
	for(p=jcheckpoint->u.pairs;p;p=p->next) {
		if(p->key->type!=JX_STRING) continue;
		hash_table_insert(db->table,p->key->u.string_value,p->value);
		p->value = 0;
	}

	/* Delete the leftover object with empty pairs. */
	jx_delete(jcheckpoint);

	return 1;
}
void jx_delete( struct jx *j )
{
	if(!j) return;

	switch(j->type) {
		case JX_DOUBLE:
		case JX_BOOLEAN:
		case JX_INTEGER:
		case JX_NULL:
			break;
		case JX_SYMBOL:
			free(j->u.symbol_name);
			break;
		case JX_STRING:
			free(j->u.string_value);
			break;
		case JX_ARRAY:
			jx_item_delete(j->u.items);
			break;
		case JX_OBJECT:
			jx_pair_delete(j->u.pairs);
			break;
		case JX_OPERATOR:
			jx_delete(j->u.oper.left);
			jx_delete(j->u.oper.right);
			break;
		case JX_FUNCTION:
			jx_delete(j->u.func.arguments);
			break;
		case JX_ERROR:
			jx_delete(j->u.err);
			break;
	}
	free(j);
}
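/*
 * Usage sketch (illustrative, not part of the original source): jx_delete
 * frees an entire jx tree recursively, so a caller frees only the root.
 * Values inserted into an object are owned by that object thereafter.
 */
static void jx_delete_example(void)
{
	struct jx *j = jx_object(0);
	jx_insert_string(j, "type", "example");
	jx_insert_integer(j, "port", 9094);

	char *text = jx_print_string(j);	/* printed text is a separate allocation */
	free(text);

	jx_delete(j);	/* frees the object plus all of its pairs, keys, and values */
}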
static char* aws_submit_job(char* job_name, char* properties_string)
{
	char* queue = queue_name;
	char* env_var = initialized_data.master_env_prefix;

	/* Register the job definition. */
	char* tmp = string_format("%s aws batch register-job-definition --job-definition-name %s_def --type container --container-properties \"%s\"", env_var, job_name, properties_string);
	debug(D_BATCH,"Creating the Job Definition: %s",tmp);
	struct jx* jx = run_command(tmp);
	free(tmp);

	const char* arn = jx_lookup_string(jx,"jobDefinitionArn");
	if(arn == NULL){
		fatal("Fatal error when trying to create the job definition!");
	}
	jx_delete(jx);

	/* Now that we have created a job definition, we can submit the job. */
	tmp = string_format("%s aws batch submit-job --job-name %s --job-queue %s --job-definition %s_def", env_var, job_name, queue, job_name);
	debug(D_BATCH,"Submitting the job: %s",tmp);
	jx = run_command(tmp);
	free(tmp);

	/* Check the lookup before copying: strdup(NULL) is undefined behavior. */
	const char* jobid = jx_lookup_string(jx,"jobId");
	if(!jobid) fatal("NO JOB ID FROM AMAZON GIVEN");
	char* jaid = strdup(jobid);
	jx_delete(jx);

	return jaid;
}
void jx_pair_delete( struct jx_pair *pair )
{
	if(!pair) return;
	jx_delete(pair->key);
	jx_delete(pair->value);
	jx_pair_delete(pair->next);
	free(pair);
}
static void remove_expired_records()
{
	struct jx *j;
	char *key;

	time_t current = time(0);

	// Only clean every clean_interval seconds.
	if((current-last_clean_time)<clean_interval) return;

	// After restarting, all records will appear to be stale.
	// Run for a minimum of lifetime seconds before cleaning anything up.
	if((current-starttime)<lifetime) return;

	jx_database_firstkey(table);
	while(jx_database_nextkey(table, &key, &j)) {
		time_t lastheardfrom = jx_lookup_integer(j,"lastheardfrom");

		// A record may advertise its own lifetime, but never longer than ours.
		int this_lifetime = jx_lookup_integer(j,"lifetime");
		if(this_lifetime>0) {
			this_lifetime = MIN(lifetime,this_lifetime);
		} else {
			this_lifetime = lifetime;
		}

		if( (current-lastheardfrom) > this_lifetime ) {
			j = jx_database_remove(table,key);
			if(j) jx_delete(j);
		}
	}

	last_clean_time = current;
}
void jx_item_delete( struct jx_item *item )
{
	if(!item) return;
	jx_delete(item->value);
	jx_item_delete(item->next);
	free(item);
}
static int compat_checkpoint_read( struct deltadb *db, const char *filename )
{
	FILE * file = fopen(filename,"r");
	if(!file) return 0;

	while(1) {
		struct nvpair *nv = nvpair_create();
		if(nvpair_parse_stream(nv,file)) {
			const char *key = nvpair_lookup_string(nv,"key");
			if(key) {
				nvpair_delete(hash_table_remove(db->table,key));
				struct jx *j = nvpair_to_jx(nv);
				/* skip objects that don't match the filter */
				if(deltadb_boolean_expr(db->filter_expr,j)) {
					hash_table_insert(db->table,key,j);
				} else {
					jx_delete(j);
				}
			}
			nvpair_delete(nv);
		} else {
			nvpair_delete(nv);
			break;
		}
	}

	fclose(file);
	return 1;
}
/** Deletes task struct and frees contained data. */
void batch_task_delete(struct batch_task *t)
{
	if (!t)
		return;

	free(t->command);

	struct batch_file *f;
	list_first_item(t->input_files);
	while((f = list_next_item(t->input_files))) {
		batch_file_delete(f);
	}
	list_delete(t->input_files);

	list_first_item(t->output_files);
	while((f = list_next_item(t->output_files))) {
		batch_file_delete(f);
	}
	list_delete(t->output_files);

	rmsummary_delete(t->resources);
	jx_delete(t->envlist);
	batch_job_info_delete(t->info);

	free(t);
}
struct jx *catalog_query_read(struct catalog_query *q, time_t stoptime)
{
	while(q && q->current) {
		int keepit = 1;

		if(q->filter_expr) {
			struct jx * b;

			b = jx_eval(q->filter_expr,q->current->value);
			if(jx_istype(b, JX_BOOLEAN) && b->u.boolean_value) {
				keepit = 1;
			} else {
				keepit = 0;
			}
			jx_delete(b);
		} else {
			keepit = 1;
		}

		if(keepit) {
			struct jx *result = jx_copy(q->current->value);
			q->current = q->current->next;
			return result;
		}

		q->current = q->current->next;
	}
	return 0;
}
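/*
 * Usage sketch (illustrative, not part of the original source): each record
 * returned by catalog_query_read() is a fresh copy that the caller owns and
 * must release with jx_delete(), as server_table_load() above demonstrates.
 */
static void catalog_query_read_example(time_t stoptime)
{
	struct catalog_query *q = catalog_query_create(CATALOG_HOST, CATALOG_PORT, stoptime);
	if(!q) return;

	struct jx *j;
	while((j = catalog_query_read(q, stoptime))) {
		const char *name = jx_lookup_string(j, "name");
		if(name) printf("%s\n", name);
		jx_delete(j);	/* caller owns each returned copy */
	}

	catalog_query_delete(q);
}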
int makeflow_catalog_summary(struct dag* d, char* name, batch_queue_type_t type, timestamp_t start)
{
	struct dag_node *n;
	dag_node_state_t state;

	int tasks_completed = 0;
	int tasks_aborted   = 0;
	int tasks_waiting   = 0;
	int tasks_running   = 0;
	int tasks_failed    = 0;

	for (n = d->nodes; n; n = n->next) {
		state = n->state;
		if (state == DAG_NODE_STATE_FAILED)
			tasks_failed++;
		else if (state == DAG_NODE_STATE_ABORTED)
			tasks_aborted++;
		else if (state == DAG_NODE_STATE_COMPLETE)
			tasks_completed++;
		else if (state == DAG_NODE_STATE_RUNNING)
			tasks_running++;
		else if (state == DAG_NODE_STATE_WAITING)
			tasks_waiting++;
	}

	/* Transmit the report to the catalog server. */
	const char* host = CATALOG_HOST;

	char username[USERNAME_MAX];
	username_get(username);

	const char* batch_type = batch_queue_type_to_string(type);

	struct jx *j = jx_object(0);

	jx_insert_string(j,"type","makeflow");
	jx_insert_integer(j,"total",itable_size(d->node_table));
	jx_insert_integer(j,"running",tasks_running);
	jx_insert_integer(j,"waiting",tasks_waiting);
	jx_insert_integer(j,"aborted",tasks_aborted);
	jx_insert_integer(j,"completed",tasks_completed);
	jx_insert_integer(j,"failed",tasks_failed);
	jx_insert_string(j,"project",name);
	jx_insert_string(j,"owner",username);

	char* timestring = string_format("%" PRIu64 "", start);
	jx_insert_string(j,"time_started",timestring);
	jx_insert_string(j,"batch_type",batch_type);

	/* jx_print_string allocates; we must free the result after sending it. */
	char* text = jx_print_string(j);
	int resp = catalog_query_send_update(host, text);

	free(text);
	free(timestring);
	jx_delete(j);

	return resp;
}
static void makeflow_node_export_variables( struct dag *d, struct dag_node *n )
{
	struct jx *j = dag_node_env_create(d,n);
	if(j) {
		jx_export(j);
		jx_delete(j);
	}
}
int deltadb_remove_event( struct deltadb *db, const char *key, const char *name )
{
	struct jx *jobject = hash_table_lookup(db->table,key);
	if(!jobject) return 1;

	struct jx *jname = jx_string(name);
	jx_delete(jx_remove(jobject,jname));
	jx_delete(jname);

	if(display_mode==MODE_STREAM) {
		display_deferred_time(db);
		printf("R %s %s\n",key,name);
	}

	return 1;
}
int deltadb_boolean_expr( struct jx *expr, struct jx *data )
{
	if(!expr) return 1;

	struct jx *j = jx_eval(expr,data);
	int result = j && !jx_istype(j, JX_ERROR) && j->type==JX_BOOLEAN && j->u.boolean_value;
	jx_delete(j);

	return result;
}
static void display_reduce_exprs( struct deltadb *db, time_t current )
{
	struct list_node *n;

	/* Reset all reductions. */
	for(n=db->reduce_exprs->head;n;n=n->next) {
		deltadb_reduction_reset(n->data);
	}

	/* For each item in the hash table: */
	char *key;
	struct jx *jobject;
	hash_table_firstkey(db->table);
	while(hash_table_nextkey(db->table,&key,(void**)&jobject)) {

		/* Skip if the where expression doesn't match */
		if(!deltadb_boolean_expr(db->where_expr,jobject)) continue;

		/* Update each reduction with its value. */
		for(n=db->reduce_exprs->head;n;n=n->next) {
			struct deltadb_reduction *r = n->data;
			struct jx *value = jx_eval(r->expr,jobject);
			if(value && !jx_istype(value, JX_ERROR)) {
				if(value->type==JX_INTEGER) {
					deltadb_reduction_update(n->data,(double)value->u.integer_value);
				} else if(value->type==JX_DOUBLE) {
					deltadb_reduction_update(n->data,value->u.double_value);
				} else {
					/* treat non-numerics as 1, to facilitate operations like COUNT */
					deltadb_reduction_update(n->data,1);
				}
				jx_delete(value);
			}
		}
	}

	/* Emit the current time */
	if(db->epoch_mode) {
		printf("%lld\t",(long long) current);
	} else {
		char str[32];
		strftime(str,sizeof(str),"%F %T",localtime(&current));
		printf("%s\t",str);
	}

	/* For each reduction, display the final value. */
	for(n=db->reduce_exprs->head;n;n=n->next) {
		printf("%lf\t",deltadb_reduction_value(n->data));
	}

	printf("\n");
}
void delete_projects_list(struct list *l)
{
	if(l) {
		struct jx *j;
		while((j=list_pop_head(l))) {
			jx_delete(j);
		}
		list_delete(l);
	}
}
int jx_insert_unless_empty( struct jx *object, struct jx *key, struct jx *value )
{
	switch(value->type) {
		case JX_OBJECT:
		case JX_ARRAY:
			/* C99 says union members have the same start address, so
			 * just pick one, they're both pointers. */
			if(value->u.pairs == NULL) {
				jx_delete(key);
				jx_delete(value);
				return -1;
			} else {
				return jx_insert(object, key, value);
			}
			break;
		default:
			return jx_insert(object, key, value);
			break;
	}
}
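/*
 * Usage sketch (illustrative, not part of the original source): an empty
 * object or array is deleted rather than inserted, and the function consumes
 * the key and value on either path, so the caller never frees them directly.
 */
static void jx_insert_unless_empty_example(void)
{
	struct jx *j = jx_object(0);

	/* Dropped: the empty inner object and its key are deleted, returning -1. */
	jx_insert_unless_empty(j, jx_string("extra"), jx_object(0));

	jx_delete(j);
}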
static struct jx * jx_parse_finish( struct jx_parser *p )
{
	struct jx * j = jx_parse(p);
	if(jx_parser_errors(p)) {
		jx_parser_delete(p);
		jx_delete(j);
		return 0;
	}
	jx_parser_delete(p);
	return j;
}
/* Parse the stream for the next summary */
struct rmsummary *rmsummary_parse_next(FILE *stream)
{
	struct jx *j = jx_parse_stream(stream);
	if(!j)
		return NULL;

	struct rmsummary *s = json_to_rmsummary(j);
	jx_delete(j);

	return s;
}
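/*
 * Usage sketch (illustrative, not part of the original source): reading a
 * stream of summaries until EOF or a parse error, freeing each one. The
 * file name here is hypothetical.
 */
static void rmsummary_parse_next_example(void)
{
	FILE *stream = fopen("summaries.json", "r");
	if(!stream) return;

	struct rmsummary *s;
	while((s = rmsummary_parse_next(stream))) {
		/* process the summary, then free it */
		rmsummary_delete(s);
	}

	fclose(stream);
}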
int main( int argc, char *argv[] )
{
	struct jx_parser *p = jx_parser_create(0);
	jx_parser_read_file(p,stdin);
	struct jx *j = jx_parse(p);

	if(!jx_parser_errors(p)) {
		jx_print_stream(j,stdout);
		printf("\n");
		jx_delete(j);
		jx_parser_delete(p);
		return 0;
	} else {
		printf("\"jx parse error: %s\"\n",jx_parser_error_string(p));
		jx_delete(j);
		jx_parser_delete(p);
		return 1;
	}
}
struct jx *jx_merge(struct jx *j, ...)
{
	va_list ap;
	va_start(ap, j);
	struct jx *result = jx_object(NULL);

	for (struct jx *next = j; jx_istype(next, JX_OBJECT); next = va_arg(ap, struct jx *)) {
		for (struct jx_pair *p = next->u.pairs; p; p = p->next) {
			jx_delete(jx_remove(result, p->key));
			jx_insert(result, jx_copy(p->key), jx_copy(p->value));
		}
	}

	va_end(ap);
	return result;
}
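/*
 * Usage sketch (illustrative, not part of the original source): jx_merge
 * walks its arguments until the first value that is not a JX_OBJECT, so the
 * variadic list must end with a terminator such as NULL. Later objects
 * override earlier keys, and the result is a new object the caller owns;
 * the inputs are only copied, never consumed.
 */
static void jx_merge_example(void)
{
	struct jx *defaults = jx_object(0);
	jx_insert_integer(defaults, "port", 9094);

	struct jx *overrides = jx_object(0);
	jx_insert_integer(overrides, "port", 9095);

	struct jx *merged = jx_merge(defaults, overrides, NULL);	/* "port" == 9095 */

	jx_delete(defaults);
	jx_delete(overrides);
	jx_delete(merged);
}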
int deltadb_delete_event( struct deltadb *db, const char *key )
{
	struct jx *jobject = hash_table_remove(db->table,key);
	if(jobject) {
		jx_delete(jobject);
		if(display_mode==MODE_STREAM) {
			display_deferred_time(db);
			printf("D %s\n",key);
		}
	}
	return 1;
}
static char* aws_job_def(char* aws_jobid)
{
	char* cmd = string_format("aws batch describe-jobs --jobs %s",aws_jobid);
	struct jx* jx = run_command(cmd);
	free(cmd);

	struct jx* jobs_array = jx_lookup(jx,"jobs");
	if(!jobs_array){
		debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
		jx_delete(jx);	/* don't leak the response on the error path */
		return NULL;
	}

	struct jx* first_item = jx_array_index(jobs_array,0);
	if(!first_item){
		debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
		jx_delete(jx);
		return NULL;
	}

	/* Copy the string out before deleting the response that owns it,
	   and guard against a missing field. */
	const char* job_def = jx_lookup_string(first_item,"jobDefinition");
	char* ret = job_def ? strdup(job_def) : NULL;

	jx_delete(jx);
	return ret;
}
static int finished_aws_job_exit_code(char* aws_jobid, char* env_var)
{
	char* cmd = string_format("aws batch describe-jobs --jobs %s",aws_jobid);
	struct jx* jx = run_command(cmd);
	free(cmd);

	struct jx* jobs_array = jx_lookup(jx,"jobs");
	if(!jobs_array){
		debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
		jx_delete(jx);	/* don't leak the response on the error path */
		return DESCRIBE_AWS_JOB_NON_EXIST;
	}

	struct jx* first_item = jx_array_index(jobs_array,0);
	if(!first_item){
		debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
		jx_delete(jx);
		return DESCRIBE_AWS_JOB_NON_EXIST;
	}

	int ret = (int)jx_lookup_integer(first_item,"exitCode");
	jx_delete(jx);
	return ret;
}
void jx_database_insert( struct jx_database *db, const char *key, struct jx *nv )
{
	struct jx *old = hash_table_remove(db->table,key);

	hash_table_insert(db->table,key,nv);

	if(db->logdir) {
		if(old) {
			log_updates(db,key,old,nv);
		} else {
			log_create(db,key,nv);
		}
	}

	if(old) jx_delete(old);

	log_flush(db);
}
int deltadb_update_event( struct deltadb *db, const char *key, const char *name, struct jx *jvalue )
{
	struct jx * jobject = hash_table_lookup(db->table,key);
	if(!jobject) return 1;

	/* jx_insert takes ownership of jname, so it is not deleted here. */
	struct jx *jname = jx_string(name);
	jx_delete(jx_remove(jobject,jname));
	jx_insert(jobject,jname,jvalue);

	if(display_mode==MODE_STREAM) {
		display_deferred_time(db);
		char *str = jx_print_string(jvalue);
		printf("U %s %s %s\n",key,name,str);
		free(str);
	}

	return 1;
}
void rmsummary_print(FILE *stream, struct rmsummary *s, struct jx *verbatim_fields)
{
	struct jx *jsum = rmsummary_to_json(s, 0);

	if(verbatim_fields) {
		if(!jx_istype(verbatim_fields, JX_OBJECT)) {
			fatal("Verbatim fields are not a json object.");
		}

		struct jx_pair *head = verbatim_fields->u.pairs;
		while(head) {
			jx_insert(jsum, jx_copy(head->key), jx_copy(head->value));
			head = head->next;
		}
	}

	jx_pretty_print_stream(jsum, stream);
	jx_delete(jsum);
}
static void update_all_catalogs()
{
	struct jx *j = jx_object(0);
	jx_insert_string(j,"type","catalog");
	jx_insert(j, jx_string("version"), jx_format("%d.%d.%d", CCTOOLS_VERSION_MAJOR, CCTOOLS_VERSION_MINOR, CCTOOLS_VERSION_MICRO));
	jx_insert_string(j,"owner",owner);
	jx_insert_integer(j,"starttime",starttime);
	jx_insert_integer(j,"port",port);
	jx_insert(j, jx_string("url"), jx_format("http://%s:%d",preferred_hostname,port));

	char *text = jx_print_string(j);
	jx_delete(j);

	list_iterate(outgoing_host_list, (list_op_t) catalog_query_send_update, text);

	free(text);
}
static int describe_aws_job(char* aws_jobid, char* env_var)
{
	char* cmd = string_format("aws batch describe-jobs --jobs %s",aws_jobid);
	struct jx* jx = run_command(cmd);
	free(cmd);

	int succeed = DESCRIBE_AWS_JOB_NON_FINAL; /* default status */

	struct jx* jobs_array = jx_lookup(jx,"jobs");
	if(!jobs_array){
		debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
		jx_delete(jx);	/* don't leak the response on the error path */
		return DESCRIBE_AWS_JOB_NON_EXIST;
	}

	struct jx* first_item = jx_array_index(jobs_array,0);
	if(!first_item){
		debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
		jx_delete(jx);
		return DESCRIBE_AWS_JOB_NON_EXIST;
	}

	/* Guard against a missing status field before calling strstr. */
	const char* status = jx_lookup_string(first_item,"status");
	if(status && strstr(status,"SUCCEEDED")){
		succeed = DESCRIBE_AWS_JOB_SUCCESS;
	}
	if(status && strstr(status,"FAILED")){
		succeed = DESCRIBE_AWS_JOB_FAILED;
	}

	/* Log the created/started/stopped timestamps of a finished job. */
	if(succeed == DESCRIBE_AWS_JOB_SUCCESS || succeed == DESCRIBE_AWS_JOB_FAILED){
		int64_t created_at = (int64_t) jx_lookup_integer(first_item,"createdAt");
		int64_t started_at = (int64_t) jx_lookup_integer(first_item,"startedAt");
		int64_t stopped_at = (int64_t) jx_lookup_integer(first_item,"stoppedAt");
		if(created_at != 0){
			debug(D_BATCH,"Job %s was created at: %"PRIi64"",aws_jobid,created_at);
		}
		if(started_at != 0){
			debug(D_BATCH,"Job %s started at: %"PRIi64"",aws_jobid,started_at);
		}
		if(stopped_at != 0){
			debug(D_BATCH,"Job %s ended at: %"PRIi64"",aws_jobid,stopped_at);
		}
	}

	jx_delete(jx);
	return succeed;
}