int state8()
/* Do what needs to be done in state 8.

   pre:  State 8 means that the variator needs to reset and get ready to
         start again in state 0.
   post: Get ready to start again in state 0.
         Return value == 0 if successful,
                      == 1 if unspecified errors happened,
                      == 2 if file reading failed. */
{
    /**********| added for DTLZ |**************/
    int result;

    gen = 1;
    result = read_arc();
    if (0 == result) /* arc file correctly read;
                        this means it was not read before */
    {
        write_output_file();
    }
    /**********| addition for DTLZ end |*******/

    return (0);
}
int state4()
/* Do what needs to be done in state 4.

   pre:  State 4 means the variator has to terminate.
   post: Free all memory.
         Return value == 0 if successful,
                      == 1 if unspecified errors happened,
                      == 2 if file reading failed. */
{
    /**********| added for DTLZ |**************/
    int result;

    result = read_arc();
    if (0 == result) /* arc file correctly read;
                        this means it was not read before, e.g., in a reset. */
    {
        write_output_file();
    }
    /**********| addition for DTLZ end |*******/

    return (0);
}
int main(int argc, char *argv[])
{
    int i;
    int total;
    struct input_t **input;

    if (argc != 3) {
        printf("./codejam inputfile outputfile\n");
        return -1;
    }

    // read in file
    input = read_input_file(argv[1], &total);

    // solve
    for (i = 0; i < total; i++) {
        solution(input[i]);
    }

    // write output file
    write_output_file(argv[2], input, total);

    return 0;   /* return 0, not 1, on success */
}
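/* The harness above calls read_input_file(), solution(), and write_output_file(),
   none of which appear in the snippet.  The sketch below is a hypothetical,
   minimal pair of I/O helpers in the usual "Case #k:" style; struct input_t and
   its fields are assumptions, not taken from the original code. */
#include <stdio.h>
#include <stdlib.h>

struct input_t {        /* hypothetical per-case record */
    int n;              /* problem input for one case */
    long long answer;   /* filled in by solution() */
};

struct input_t **read_input_file(const char *path, int *total)
{
    FILE *fp = fopen(path, "r");
    struct input_t **cases;
    int i;

    if (fp == NULL || fscanf(fp, "%d", total) != 1) {
        if (fp) fclose(fp);
        return NULL;
    }
    cases = malloc(*total * sizeof(*cases));
    for (i = 0; i < *total; i++) {
        cases[i] = malloc(sizeof(**cases));
        if (fscanf(fp, "%d", &cases[i]->n) != 1)
            cases[i]->n = 0;
    }
    fclose(fp);
    return cases;
}

void write_output_file(const char *path, struct input_t **cases, int total)
{
    FILE *fp = fopen(path, "w");
    int i;

    if (fp == NULL)
        return;
    for (i = 0; i < total; i++)
        fprintf(fp, "Case #%d: %lld\n", i + 1, cases[i]->answer);
    fclose(fp);
}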
static void output_interesting_cursors(FILE *file)
{
    int i;
    int n = sizeof(interesting_cursors) / sizeof(interesting_cursors[0]);
    struct reconstructed_glyph *glyphs = malloc(n * sizeof(*glyphs));

    for (i = 0; i < n; ++i) {
        struct glyph *cursor, *mask;

        find_cursor_and_mask(interesting_cursors[i].source_name,
                             &cursor, &mask);
        if (!cursor) {
            fprintf(stderr, "no cursor for %s\n",
                    interesting_cursors[i].source_name);
            abort();
        }
        if (!mask) {
            fprintf(stderr, "no mask for %s\n",
                    interesting_cursors[i].source_name);
            abort();
        }

        reconstruct_glyph(cursor, mask,
                          interesting_cursors[i].target_name, &glyphs[i]);
    }

    write_output_file(file, glyphs, n);
}
int main(int argc, char const *argv[])
{
    Vector v = read_input_file("input");

    selection(v);
    write_output_file(v, "output");
    free_vector(v);

    return 0;
}
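/* For reference, a minimal selection() in the same spirit as the pipeline above:
   an in-place selection sort.  The Vector layout used here (a data pointer plus a
   size field) is an assumption for the sketch; the original snippet's Vector type
   is not shown. */
#include <stddef.h>

typedef struct {
    int *data;
    size_t size;
} Vector;

void selection(Vector v)
{
    size_t i, j, min;

    for (i = 0; i + 1 < v.size; i++) {
        /* find the smallest remaining element */
        min = i;
        for (j = i + 1; j < v.size; j++)
            if (v.data[j] < v.data[min])
                min = j;
        /* move it into position i */
        if (min != i) {
            int tmp = v.data[i];
            v.data[i] = v.data[min];
            v.data[min] = tmp;
        }
    }
}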
int main ( int argc, char **argv ) {
    struct input_file input;
    struct output_file output;
    struct zinfo_file zinfo;
    unsigned int i;

    if ( argc != 3 ) {
        fprintf ( stderr, "Syntax: %s file.bin file.zinfo "
                  "> file.zbin\n", argv[0] );
        exit ( 1 );
    }

    if ( read_input_file ( argv[1], &input ) < 0 )
        exit ( 1 );
    if ( read_zinfo_file ( argv[2], &zinfo ) < 0 )
        exit ( 1 );
    if ( alloc_output_file ( ( input.len * 4 ), &output ) < 0 )
        exit ( 1 );

    for ( i = 0 ; i < zinfo.num_entries ; i++ ) {
        if ( process_zinfo ( &input, &output, &zinfo.zinfo[i] ) < 0 )
            exit ( 1 );
    }

    if ( write_output_file ( &output ) < 0 )
        exit ( 1 );

    return 0;
}
static void on_download_finished (SoupMessage *message, gpointer user_data)
{
    SoupDownload *download = SOUP_DOWNLOAD (user_data);

    g_assert (download != NULL);

    write_output_file (download);
    g_downloadable_set_status (G_DOWNLOADABLE (download),
                               G_DOWNLOADABLE_COMPLETED);
}
void reforge_plot_t::analyze()
{
    if ( sim->is_canceled() )
        return;

    if ( reforge_plot_stat_str.empty() )
        return;

    analyze_stats();
    write_output_file();
}
int main(void)
{
    int rank, size;                 // MPI parameters
    long long int vect_length;      // User-specified vector length
    int *vector = NULL;             // The master vector -- allocated on rank 0 only
    int *chunk_vector;              // Each rank's chunk -- allocated on every rank
    char infilename[100], outfilename[100];

    MPI_Init(NULL, NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &size);   // gets size
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);   // gets rank

    // Get user input: total length of vector, filenames, and read vector
    if (rank == 0) {
        printf("Vector length:\n");
        scanf("%lld", &vect_length);
        printf("Name of input file:\n");
        scanf("%99s", infilename);
        printf("Name of output file:\n");
        scanf("%99s", outfilename);
        printf("\n");

        vector = (int *) malloc(vect_length * sizeof(int));
        read_input_file(infilename, vect_length, vector);
    }

    // Every rank needs the length before it can size its chunk.
    MPI_Bcast(&vect_length, 1, MPI_LONG_LONG_INT, 0, MPI_COMM_WORLD);

    int chunk = vect_length / size;   // assumes size divides vect_length evenly
    chunk_vector = (int *) malloc(chunk * sizeof(int));

    // Collective calls must be made by every rank, not only rank 0.
    MPI_Scatter(vector, chunk, MPI_INT, chunk_vector, chunk, MPI_INT,
                0, MPI_COMM_WORLD);

    printf("rank %d of %d received chunk size of %d\n", rank, size, chunk);

    for (int k = 0; k < chunk; k++) {
        chunk_vector[k]++;
    }

    // Gather each rank's chunk back into the master vector on rank 0.
    MPI_Gather(chunk_vector, chunk, MPI_INT, vector, chunk, MPI_INT,
               0, MPI_COMM_WORLD);

    if (rank == 0) {
        write_output_file(outfilename, vect_length, vector);
        free(vector);
    }
    free(chunk_vector);

    MPI_Finalize();
    return 0;
}
static void soup_download_finalize (GObject *object)
{
    /* TODO: Add deinitialization code here */
    SoupDownload *download = SOUP_DOWNLOAD (object);

    if (download->priv->need_to_write) {
        rookie_debug ("writing file ...");
        write_output_file (download);
    }

    g_object_unref (SOUP_DOWNLOAD (object)->priv->session);

    G_OBJECT_CLASS (soup_download_parent_class)->finalize (object);
}
/* SHCodecs_Encoder_Output callback for writing encoded data to the output file */
static int write_output(SHCodecs_Encoder *encoder,
                        unsigned char *data, int length, void *user_data)
{
    struct shenc *shenc = (struct shenc *)user_data;
    double ifps, mfps;

    if (shcodecs_encoder_get_frame_num_delta(encoder) > 0 &&
        shenc->enc_framerate != NULL) {
        framerate_mark (shenc->enc_framerate);
        ifps = framerate_instantaneous_fps (shenc->enc_framerate);
        mfps = framerate_mean_fps (shenc->enc_framerate);
        if (shenc->enc_framerate->nr_handled % 10 == 0) {
            fprintf (stderr, "  Encoding @ %4.2f fps \t(avg %4.2f fps)\r",
                     ifps, mfps);
        }
    }

    return write_output_file(&shenc->ainfo, data, length);
}
static void output_all_cursors()
{
    int i, j;
    struct reconstructed_glyph *glyphs =
        malloc(sizeof(struct reconstructed_glyph) * extracted_font.count / 2);

    j = 0;
    for (i = 0; i < extracted_font.count; ++i) {
        struct glyph *g = &extracted_font.glyphs[i];

        if (strstr(g->name, "_mask"))
            continue;

        struct glyph *mask = find_mask_glyph(g->name);

        reconstruct_glyph(g, mask, g->name, &glyphs[j]);
        j++;
    }

    write_output_file(glyphs, extracted_font.count / 2);
}
static void recordcb(ParseDelimitedText *parser, char eol)
{ /* {{{ */
  if(field_count != record->length) {
    error("Wrong field count on row %zu, found %i expected %i\n",
          parser->record_count + 1, field_count, record->length);
    valid = 0;
    parser->m->stop(parser);
  }
  else {
    record->m->reset_each(record);
    while(record->m->each(record) != null_ArrayElement) {
      if((output_buffer->length
          + ((String *)record->current_element->data)->length + 2)
         >= output_buffer->size) {
        write_output_file();
      }
      output_buffer->m->append(output_buffer,
                               (String *)record->current_element->data);
      if(record->position <= (record->length - 2)) {
        output_buffer->m->append_cstr(output_buffer, "\x1f", 1);
      }
    }
    output_buffer->m->append_cstr(output_buffer, "\x0a", 1);
    field_count = 0;
  }
} /* }}} */
static rc_t gater_and_write( context *ctx )
{
    KDirectory *dir;
    rc_t rc = KDirectoryNativeDir( &dir );

    if ( rc != 0 )
        LogErr( klogInt, rc, "KDirectoryNativeDir() failed\n" );
    else
    {
        statistic data;
        rc = make_statistic( &data, ctx->gc_window, ctx->ignore_mismatch );
        if ( rc == 0 )
        {
            rc = gather_statistic( &data, dir, ctx ); /* <--- the meat */
            if ( rc == 0 )
            {
                uint64_t written;

                if ( ctx->show_progress )
                {
                    OUTMSG(( "%lu statistic-entries gathered\n", data.entries ));
                    OUTMSG(( "max. cycles per read = %u\n", data.max_cycle ));
                }

                switch( ctx->output_mode[ 0 ] )
                {
                case 'f' :
                    if ( ctx->output_file_path != NULL )
                    {
                        rc = write_output_file( dir, &data,
                                                ctx->output_file_path, &written );
                        if ( rc == 0 && ctx->info )
                        {
                            OUTMSG(( "%lu lines written to '%s'\n",
                                     written, ctx->output_file_path ));
                        }
                    }
                    else
                        OUTMSG(( "the output-path is missing!\n" ));
                    break;

                case 'd' :
                    rc = write_statistic_into_db( dir, &data, ctx->src_schema_list,
                                                  ctx->src_path, &written,
                                                  ctx->show_progress );
                    if ( rc == 0 && ctx->info )
                    {
                        OUTMSG(( "%lu rows written to database\n", written ));
                    }
                    break;

                case 't' :
                    if ( ctx->output_file_path != NULL )
                    {
                        rc = write_statistic_into_tab( dir, &data,
                                                       ctx->src_schema_list,
                                                       ctx->output_file_path,
                                                       &written, ctx->show_progress );
                        if ( rc == 0 && ctx->info )
                        {
                            OUTMSG(( "%lu rows written to table\n", written ));
                        }
                    }
                    else
                    {
                        OUTMSG(( "the output-path is missing!\n" ));
                    }
                    break;
                }
            }
            whack_statistic( &data );
        }
        KDirectoryRelease( dir );
    }
    return rc;
}
int main(int argc, char *argv[], char *envp[])
{
  ParseDelimitedText *parser;
  int input_file_fd;
  char input_file_buffer[PDT_BLK_SIZE];
  size_t bytes_read;

  /* Initialize the classes we're using */
  class_Array();
  class_String();
  class_PDTFormat();
  class_PDTColumn();
  class_PDTFormatFile();
  class_ParseDelimitedText();

  /* Parse command line parameters */
  set_params(argc, argv);

  /* Load the format file */
  format = new_PDTFormat();
  format->m->read_file(format, format_file->string, format_file->length);

  /* Open the files */
  input_file_fd = open_input_file();
  output_file_fd = open_output_file();

  /* Set the parser's options */
  parser = new_ParseDelimitedText(0);
  parser->m->apply_format(parser, format);
  parser->m->set_block_size(parser, PDT_BLK_SIZE);
  parser->m->set_field_callback(parser, fieldcb);
  parser->m->set_record_callback(parser, recordcb);

  /* Initialize globals */
  record = new_Array();
  record->auto_free = null_String->m->Array_free;
  format->columns->m->reset_each(format->columns);
  while(format->columns->m->each(format->columns) != null_ArrayElement) {
    record->m->append(record, new_String("", 0));
  }
  field_count = 0;
  valid = 1;
  output_buffer = new_String("", 0);
  output_buffer->m->extend(output_buffer, 8191);

  /* Convert the file to standard form */
  while(parser->stop == 0
        && (bytes_read = read(input_file_fd, input_file_buffer, PDT_BLK_SIZE)) != 0) {
    parser->m->parse(parser, input_file_buffer, bytes_read);
  }
  parser->m->finish(parser);
  if(output_file->length != 0) {
    write_output_file();
  }

  /* Cleanup */
  close_input_file(input_file_fd);
  close_output_file(output_file_fd);
  parser->m->free(parser);
  record->m->free(record);
  format->m->free(format);
  output_buffer->m->free(output_buffer);

  return (valid) ? SHELL_TRUE : SHELL_FALSE;
}
int main(int argc, const char **argv) {
    try {
        JsonnetVm *vm = jsonnet_make();
        JsonnetConfig config;
        ArgStatus arg_status = process_args(argc, argv, &config, vm);
        if (arg_status != ARG_CONTINUE) {
            jsonnet_destroy(vm);
            return arg_status == ARG_SUCCESS ? EXIT_SUCCESS : EXIT_FAILURE;
        }

        // Evaluate input Jsonnet and handle any errors from Jsonnet VM.
        int error;
        char *output;
        switch (config.cmd) {
            case EVAL: {
                assert(config.inputFiles.size() == 1);

                // Read input file.
                std::string input;
                if (!read_input(&config, &config.inputFiles[0], &input)) {
                    jsonnet_destroy(vm);
                    return EXIT_FAILURE;
                }

                if (config.evalMulti) {
                    output = jsonnet_evaluate_snippet_multi(
                        vm, config.inputFiles[0].c_str(), input.c_str(), &error);
                } else if (config.evalStream) {
                    output = jsonnet_evaluate_snippet_stream(
                        vm, config.inputFiles[0].c_str(), input.c_str(), &error);
                } else {
                    output = jsonnet_evaluate_snippet(
                        vm, config.inputFiles[0].c_str(), input.c_str(), &error);
                }

                if (error) {
                    std::cerr << output;
                    jsonnet_realloc(vm, output, 0);
                    jsonnet_destroy(vm);
                    return EXIT_FAILURE;
                }

                // Write output JSON.
                if (config.evalMulti) {
                    if (!write_multi_output_files(
                            vm, output, config.evalMultiOutputDir, config.outputFile)) {
                        jsonnet_destroy(vm);
                        return EXIT_FAILURE;
                    }
                } else if (config.evalStream) {
                    if (!write_output_stream(vm, output, config.outputFile)) {
                        jsonnet_destroy(vm);
                        return EXIT_FAILURE;
                    }
                } else {
                    bool successful = write_output_file(output, config.outputFile);
                    jsonnet_realloc(vm, output, 0);
                    if (!successful) {
                        jsonnet_destroy(vm);
                        return EXIT_FAILURE;
                    }
                }
            } break;

            case FMT: {
                std::string output_file = config.outputFile;

                if (config.fmtInPlace || config.fmtTest) {
                    assert(config.inputFiles.size() >= 1);
                    for (std::string &inputFile : config.inputFiles) {
                        if (config.fmtInPlace) {
                            output_file = inputFile;

                            if (inputFile == "-") {
                                std::cerr << "ERROR: Cannot use --in-place with stdin"
                                          << std::endl;
                                jsonnet_destroy(vm);
                                return EXIT_FAILURE;
                            }
                            if (config.filenameIsCode) {
                                std::cerr << "ERROR: Cannot use --in-place with --exec"
                                          << std::endl;
                                jsonnet_destroy(vm);
                                return EXIT_FAILURE;
                            }
                        }

                        std::string input;
                        if (!read_input(&config, &inputFile, &input)) {
                            jsonnet_destroy(vm);
                            return EXIT_FAILURE;
                        }

                        output = jsonnet_fmt_snippet(vm, inputFile.c_str(),
                                                     input.c_str(), &error);

                        if (error) {
                            std::cerr << output;
                            jsonnet_realloc(vm, output, 0);
                            jsonnet_destroy(vm);
                            return EXIT_FAILURE;
                        }

                        if (config.fmtTest) {
                            // Check the output matches the input.
                            bool ok = output == input;
                            jsonnet_realloc(vm, output, 0);
                            if (!ok) {
                                jsonnet_destroy(vm);
                                return 2;
                            }
                        } else {
                            // Write output Jsonnet.
                            bool successful = write_output_file(output, output_file);
                            jsonnet_realloc(vm, output, 0);
                            if (!successful) {
                                jsonnet_destroy(vm);
                                return EXIT_FAILURE;
                            }
                        }
                    }
                } else {
                    assert(config.inputFiles.size() == 1);

                    // Read input file.
                    std::string input;
                    if (!read_input(&config, &config.inputFiles[0], &input)) {
                        jsonnet_destroy(vm);
                        return EXIT_FAILURE;
                    }

                    output = jsonnet_fmt_snippet(
                        vm, config.inputFiles[0].c_str(), input.c_str(), &error);

                    if (error) {
                        std::cerr << output;
                        jsonnet_realloc(vm, output, 0);
                        jsonnet_destroy(vm);
                        return EXIT_FAILURE;
                    }

                    // Write output Jsonnet.
                    bool successful = write_output_file(output, output_file);
                    jsonnet_realloc(vm, output, 0);
                    if (!successful) {
                        jsonnet_destroy(vm);
                        return EXIT_FAILURE;
                    }
                }
            } break;
        }

        jsonnet_destroy(vm);
        return EXIT_SUCCESS;

    } catch (const std::bad_alloc &) {
        // Avoid further allocation attempts
        fputs("Internal out-of-memory error (please report this)\n", stderr);
    } catch (const std::exception &e) {
        std::cerr << "Internal error (please report this): " << e.what() << std::endl;
    } catch (...) {
        std::cerr << "An unknown exception occurred (please report this)." << std::endl;
    }
    return EXIT_FAILURE;
}
int main(int argc, const char **argv) {
    try {
        JsonnetVm *vm = jsonnet_make();
        JsonnetConfig config;
        if (!process_args(argc, argv, &config, vm)) {
            return EXIT_FAILURE;
        }

        // Read input files.
        std::string input;
        if (!read_input(&config, &input)) {
            return EXIT_FAILURE;
        }

        // Evaluate input Jsonnet and handle any errors from Jsonnet VM.
        int error;
        char *output;
        if (config.multi()) {
            output = jsonnet_evaluate_snippet_multi(
                vm, config.input_file().c_str(), input.c_str(), &error);
        } else {
            output = jsonnet_evaluate_snippet(
                vm, config.input_file().c_str(), input.c_str(), &error);
        }

        if (error) {
            std::cerr << output;
            std::cerr.flush();
            jsonnet_realloc(vm, output, 0);
            jsonnet_destroy(vm);
            return EXIT_FAILURE;
        }

        // Write output JSON.
        if (config.multi()) {
            if (!write_multi_output_files(vm, output, config.output_dir())) {
                return EXIT_FAILURE;
            }
        } else {
            bool successful = write_output_file(output, config.output_file());
            jsonnet_realloc(vm, output, 0);
            if (!successful) {
                jsonnet_destroy(vm);
                return EXIT_FAILURE;
            }
        }

        jsonnet_destroy(vm);
        return EXIT_SUCCESS;

    } catch (const std::bad_alloc &) {
        // Avoid further allocation attempts
        fputs("Internal out-of-memory error (please report this)\n", stderr);
    } catch (const std::exception &e) {
        std::cerr << "Internal error (please report this): " << e.what() << std::endl;
    } catch (...) {
        std::cerr << "An unknown exception occurred (please report this)." << std::endl;
    }
    return EXIT_FAILURE;
}