Example #1
vector<vector<float> > Storage::LoadDataFloatCSVNextItems(int nrItems, bool close)
{
	vector<vector<float> > out;

	size_t bytes_read;
	char buf[1024];
	struct counts c = {vector<float>(0),0, 0};

	long oldRows = 0;

	while ((bytes_read=fread(buf, 1, 1, m_fp)) > 0) {//((bytes_read=fread(buf, 1, 1024, m_fp)) > 0) {
		if (csv_parse(&m_csvParser, buf, bytes_read, cbColumn, cbRow, &c) != bytes_read) {
			
			fprintf(stderr, "Error while parsing file: %s\n", csv_strerror(csv_error(&m_csvParser)));
		}
		
		if(c.rows>oldRows)
		{
			out.push_back(c.fData);
			c.fData.clear();
			oldRows = c.rows;
		}

		if(c.rows>=nrItems)
			break;
    }

	if(close == true)
		csv_fini(&m_csvParser, cbColumn, cbRow, &c);

	
	return out;
}
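The counts struct and the cbColumn/cbRow callbacks used above belong to the surrounding Storage class and are not shown. A minimal sketch of what they might look like, assuming counts just accumulates the current row's floats plus field/row tallies (names and layout are guesses, not the project's actual code):

#include <cstdlib>
#include <cstring>
#include <vector>
using std::vector;

struct counts {
	vector<float> fData; /* fields of the row currently being assembled */
	long fields;
	long rows;
};

/* Field callback: convert the field to float and append it to the current row. */
static void cbColumn(void *s, size_t len, void *data)
{
	struct counts *c = (struct counts *)data;
	/* the parser is initialized with CSV_STRICT, not CSV_APPEND_NULL, so the
	   field data is not NUL-terminated; copy into a bounded buffer first */
	char tmp[64];
	size_t n = len < sizeof(tmp) - 1 ? len : sizeof(tmp) - 1;
	memcpy(tmp, s, n);
	tmp[n] = '\0';
	c->fData.push_back((float)atof(tmp));
	c->fields++;
}

/* Row callback: just count completed rows; the caller moves fData into `out`. */
static void cbRow(int ch, void *data)
{
	(void)ch;
	((struct counts *)data)->rows++;
}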
Example #2
cxCSV *cxCSV::Create(const cxStr *data,cxUInt8 opt)
{
    cxCSV *rv = cxCSV::Create()->Init(opt);
    csv_parse(&rv->parser, data->Data(), data->Size()+1, colcbfunc, rowcbfunc, rv);
    csv_fini(&rv->parser, colcbfunc, rowcbfunc, rv);
    return rv;
}
Example #3
int loadSameNameUnoverlapped()
{
	FILE *fp;
	int len=0;
	struct csv_parser *p;
	char buf[1024];
	size_t bytes_read;
	const char *file_name="data/NewSameNameUnoverlapped.csv";
	//const char *file_name="data/SameNameUnoverlapped.csv";
	if((p=(struct csv_parser *)malloc(sizeof(struct csv_parser))) == 0) return -1;
	if ((fp = fopen(file_name,"r"))== NULL)
	{
		fprintf(stderr, "Failed to open %s\n",file_name);
		free(p);
		return -1;
	}
	
	csv_init(p, (unsigned char)0);
	while ((bytes_read=fread(buf, 1, 1024, fp)) > 0)
		if (csv_parse(p, buf, bytes_read, col, row,&len) != bytes_read)
		{
			fprintf(stderr, "Error while parsing file: %s\n", csv_strerror(csv_error(p)));
		}
	csv_fini(p, col, row, &len); /* flush any pending field/row before using the results */
	qsort(aids,len,sizeof(int),intcmp);
	aidlen = len;
	csv_free(p);
	free(p);
	fclose(fp);
	
	for(int i=0;i<100;i++) printf("%d | ",aids[i]);
	printf("\n%d",aidlen);
	return 1;
}
Example #4
/**
 * Main processing loop
 */
void process_csv(struct csv_context *ctx) {
    FILE *fp;
    char buf[READ_BUF_SIZE];
    size_t bytes_read;

    // Read from a file or STDIN
    if(!ctx->from_stdin) {
        // Attempt to open our file
        if(!(fp = fopen(ctx->in_file, "r"))) {
            fprintf(stderr, "Couldn't open input file '%s'\n", ctx->in_file);
            exit(EXIT_FAILURE);
        }
    } else {
        // Just read from STDIN
        fp = stdin;
    }

    // Process the file
    while((bytes_read = fread(buf, 1, sizeof(buf), fp)) > 0) {
        // Parse our CSV
        if(csv_parse(&ctx->parser, buf, bytes_read, cb_col, cb_row, (void*)ctx) != bytes_read) {
            fprintf(stderr, "Error while parsing file!\n");
            exit(EXIT_FAILURE);
        }
    }

    // Write any remaining rows to disk, as long as the buffer holds more than just the
    // header we've been keeping around (if we're injecting headers).
    if(CBUF_POS(ctx->csv_buf) > ctx->header_len) flush_file(ctx, 0);

    // Close our file
    fclose(fp);
}
Example #5
int SOMAexternal (SOMAsetup *ssetup, FitnessFunction ffunc, int nparams, Parameter *params, const char *file, SOMAreturn* sr) {
    Array2D array;
    char *buf;
    long len;
    struct csv_parser csvp;
    clock_t begin, end;
	
	array_init (&array);
	array_append_column(&array); //blank column for external fitness function
    array.currentcolumn++;

    //Parse CSV file
    if (csv_init (&csvp, CSV_APPEND_NULL)!=0)
        return ERR;

    if ((buf=read_file_into_buffer (file, &len))==NULL)
        return ERR_FILE;
    csv_parse (&csvp, buf, len, csvcallback1, csvcallback2e, (void*) &array);
    csv_fini (&csvp, csvcallback1, csvcallback2e, (void*) &array);
    csv_free (&csvp);

    //array_print (&array); //debug

    //soma_array=&array;
    //Initialize output struct
    SOMAreturn_init (sr, ssetup->Migrations, nparams);
    begin=clock();
    //SOMA call
    SOMA2 (ssetup, nparams, params, sr, &array, ffunc);
    end=clock();
    sr->time=(double) (end-begin)/CLOCKS_PER_SEC;
    array_delete (&array);

    return OK;
}
Example #6
int parseFile (char * inFilename, struct csv_parser * parser,
		struct parse_t * p_struct,
		void (item_cb)(void * item, size_t len, void * data),
		void (line_cb)(int ch,void * data)){

	int fdin;
	fdin = open(inFilename,O_RDONLY);
	if (fdin < 0){
		printf("Can't open file %s, exitting\n",inFilename);
		return 1;
	}

	struct stat statbuf;
	fstat (fdin,&statbuf);

	char * inbuf;
	inbuf = mmap(NULL,statbuf.st_size,PROT_READ,MAP_PRIVATE,fdin,0);
	if (inbuf == MAP_FAILED){
		printf("Can't mmap file %s, exiting\n",inFilename);
		close(fdin);
		return 1;
	}

	p_struct->count = 0;
	csv_parse(parser,inbuf,statbuf.st_size,item_cb,line_cb,p_struct);

	munmap(inbuf,statbuf.st_size);
	close(fdin);

	return 0;
}
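parseFile() leaves csv_fini to the caller, so the last record of a file without a trailing newline is still buffered when it returns. A hypothetical driver, sketched under the assumption that struct parse_t only exposes the count member seen above (callbacks and helper are illustrative, not part of the original project):

/* Count one field per invocation of the item callback. */
static void count_item(void *item, size_t len, void *data)
{
	(void)item; (void)len;
	((struct parse_t *)data)->count++;
}

/* Row callback: nothing to do for a simple field count. */
static void count_line(int ch, void *data)
{
	(void)ch; (void)data;
}

int countFields(const char *path)
{
	struct csv_parser parser;
	struct parse_t p;

	if (csv_init(&parser, CSV_APPEND_NULL) != 0)
		return -1;
	if (parseFile((char *)path, &parser, &p, count_item, count_line) != 0) {
		csv_free(&parser);
		return -1;
	}
	/* flush whatever the parser still buffers, then release it */
	csv_fini(&parser, count_item, count_line, &p);
	csv_free(&parser);
	return p.count;
}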
Example #7
// CSV to Qucs conversion.
int csv2qucs (struct actionset_t * action, char * infile, char * outfile) {
  int ret = 0;
  csv_init ();
  if ((csv_in = open_file (infile, "r")) == NULL) {
    ret = -1;
  } else if (csv_parse () != 0) {
    ret = -1;
  } else if (csv_check () != 0) {
    ret = -1;
  }
  csv_lex_destroy ();
  if (csv_in)
    fclose (csv_in);
  if (ret) {
    csv_destroy ();
    return -1;
  }

  if (!strcmp (action->out, "qucsdata")) {
    csv_result->setFile (outfile);
    qucsdata_producer (csv_result);
  }
  csv_destroy ();
  return 0;
}
Example #8
int SOMAascii (SOMAsetup *ssetup, const char *expr, int nparams, Parameter *params, const char *file, SOMAreturn* sr) {
    Array2D array;
	int i;  
    char *buf;
    long len;
    struct csv_parser csvp;
    clock_t begin, end;
	MuExpr me;

	array_init(&array);

    //Parse CSV file
    if (csv_init (&csvp, CSV_APPEND_NULL)!=0)
        return ERR;

    if ((buf=read_file_into_buffer (file, &len))==NULL)
        return ERR_FILE;
    csv_parse (&csvp, buf, len, csvcallback1, csvcallback2, (void*) &array);
    csv_fini (&csvp, csvcallback1, csvcallback2, (void*) &array);
    csv_free (&csvp);

    //array_print (&array); //debug

    muexpr_init (&me, expr);

    for (i=0; i<nparams; i++) 
        if (defpar (&me, params+i, i)!=OK) {
            array_delete (&array);
            muexpr_free (&me);
            return ERR_PARAMS;
        }
    findvars (&me, VAR_NAME_PREFIX);

    //CVS file and expression don't match
    if (me.nvars!=array.ncolumns-1) {
        array_delete (&array);
        muexpr_free (&me);
        return ERR_COLUMNS;
    }

    muexpr_eval (&me);
    if (mupError (me.hparser)) {
        array_delete (&array);
        muexpr_free (&me);
        return ERR_EXPR;
    }

    //Initialize output struct
    SOMAreturn_init (sr, ssetup->Migrations, nparams);
    begin=clock();
    //SOMA call
    SOMA (ssetup, nparams, params, sr, &array, &me);
    end=clock();
    sr->time=(double) (end-begin)/CLOCKS_PER_SEC;
    array_delete (&array);
    muexpr_free (&me);

    return OK;
}
Example #9
File: csvfix.c Project: Dexhub/Avro
int main (int argc, char *argv[]) {
  char buf[1024];
  size_t i;
  struct csv_parser p;
  FILE *infile, *outfile;
  csv_init(&p, 0);

  if (argc != 3) {
    fprintf(stderr, "Usage: csv_fix infile outfile\n");
    return EXIT_FAILURE;
  }

  if (!strcmp(argv[1], argv[2])) {
    fprintf(stderr, "Input file and output file must not be the same!\n");
    exit(EXIT_FAILURE);
  }

  infile = fopen(argv[1], "rb");
  if (infile == NULL) {
    fprintf(stderr, "Failed to open file %s: %s\n", argv[1], strerror(errno));
    exit(EXIT_FAILURE);
  }

  outfile = fopen(argv[2], "wb");
  if (outfile == NULL) {
    fprintf(stderr, "Failed to open file %s: %s\n", argv[2], strerror(errno));
    fclose(infile);
    exit(EXIT_FAILURE);
  }

  while ((i=fread(buf, 1, 1024, infile)) > 0) {
    if (csv_parse(&p, buf, i, cb1, cb2, outfile) != i) {
      fprintf(stderr, "Error parsing file: %s\n", csv_strerror(csv_error(&p)));
      fclose(infile);
      fclose(outfile);
      remove(argv[2]);
      exit(EXIT_FAILURE);
    }
  }

  csv_fini(&p, cb1, cb2, outfile);
  csv_free(&p);

  if (ferror(infile)) {
    fprintf(stderr, "Error reading from input file");
    fclose(infile);
    fclose(outfile);
    remove(argv[2]);
    exit(EXIT_FAILURE);
  }

  fclose(infile);
  fclose(outfile);
  return EXIT_SUCCESS;
}
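The cb1/cb2 callbacks are not shown in this example. A plausible sketch (an assumption, not necessarily what the project ships) simply re-emits every field with csv_fwrite, which handles quoting and escaping, separated by commas and terminated by a newline per row:

static int put_comma = 0; /* illustrative helper state, not from the original */

void cb1(void *s, size_t len, void *data)
{
	FILE *out = (FILE *)data;
	if (put_comma)
		fputc(',', out);
	csv_fwrite(out, s, len);   /* write the field, quoted/escaped as needed */
	put_comma = 1;
}

void cb2(int ch, void *data)
{
	(void)ch;
	fputc('\n', (FILE *)data);
	put_comma = 0;
}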
Example #10
int
bgpstream_csvfile_datasource_update_input_queue(bgpstream_csvfile_datasource_t* csvfile_ds,
                                                bgpstream_input_mgr_t *input_mgr) {
  bgpstream_debug("\t\tBSDS_CSVFILE: csvfile_ds update input queue start");
  
  io_t *file_io = NULL;
  char buffer[BUFFER_LEN];
  int read = 0;

  struct timeval tv;
  gettimeofday(&tv, NULL);
  
  /* we accept all timestamp earlier than now() - 1 second */
  csvfile_ds->max_accepted_ts = tv.tv_sec - 1;

  csvfile_ds->num_results = 0;
  csvfile_ds->max_ts_infile = 0;
  csvfile_ds->input_mgr = input_mgr;
  
  if((file_io = wandio_create(csvfile_ds->csvfile_file)) == NULL)
    {
      bgpstream_log_err("\t\tBSDS_CSVFILE: create csvfile_ds can't open file %s", csvfile_ds->csvfile_file);    
      return -1;
    }

    while((read = wandio_read(file_io, &buffer, BUFFER_LEN)) > 0)
    {
      if(csv_parse(&(csvfile_ds->parser), buffer, read,
		   parse_csvfile_field,
		   parse_csvfile_rowend,
		   csvfile_ds) != read)
	{
          bgpstream_log_err("\t\tBSDS_CSVFILE: CSV error %s", csv_strerror(csv_error(&(csvfile_ds->parser))));         
	  return -1;
	}
    }

  if(csv_fini(&(csvfile_ds->parser),
	      parse_csvfile_field,
	      parse_csvfile_rowend,
	      csvfile_ds) != 0)
    {
      bgpstream_log_err("\t\tBSDS_CSVFILE: CSV error %s", csv_strerror(csv_error(&(csvfile_ds->parser))));         
      return -1;
    }
  
  wandio_destroy(file_io);
  csvfile_ds->input_mgr = NULL;
  csvfile_ds->last_processed_ts = csvfile_ds->max_ts_infile;
  
  bgpstream_debug("\t\tBSDS_CSVFILE: csvfile_ds update input queue end");
  return csvfile_ds->num_results;
}
Example #11
void genericLineBasedParsing(
    std::istream &file,
    field_cb_t cb_per_field,
    line_cb_t cb_per_line,
    void *data,
    const csv::params &params) {
  struct csv_parser parser;

  if (!csv_init(&parser, 0)) {
    csv_set_opts(&parser, CSV_APPEND_NULL);
    csv_set_delim(&parser, params.getDelimiter());

    std::string line;
    int line_start = params.getLineStart();

    if (line_start != 1) {
      while (line_start > 1) {
        std::getline(file, line);
        --line_start;
      }
    }


    int lineCount = 0;
    while (std::getline(file, line)) {

      ++lineCount;
      line.append("\n");
      if (csv_parse(&parser,
                    line.c_str(),
                    line.size(),
                    cb_per_field,
                    cb_per_line,
                    data) != line.size()) {
        throw ParserError(csv_strerror(csv_error(&parser)));
      }

      if (params.getLineCount() != -1 && lineCount >= params.getLineCount())
        break;

      if (file.bad())
        break;
    }

    csv_fini(&parser,
             cb_per_field,
             cb_per_line,
             data);
  }
  csv_free(&parser);

}
Example #12
File: csv-c.c Project: Tux/CSV
int main (int argc, char* argv[]) {
    struct csv_parser parser = {0};
    csv_init (&parser, CSV_APPEND_NULL);
    char *buf = (char*)malloc (READ_SZ);
    size_t buflen = READ_SZ;
    int count = 0; 
    while ((buflen = read (0, buf, READ_SZ)) > 0) {
        csv_parse (&parser, buf, buflen, field_count, 0, &count);
        }
    csv_fini (&parser, field_count, 0, &count); /* flush the last field if the input lacks a trailing newline */
    printf ("%d\n", count);
    free (buf);
    csv_free (&parser);
    return EXIT_SUCCESS;
    }
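The field_count callback only has to bump the counter passed in as user data; a minimal sketch (the name matches the call above, the body is assumed):

/* Count one parsed field per invocation. */
static void field_count (void *s, size_t len, void *data) {
    (void)s; (void)len;
    (*(int *)data)++;
    }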
Example #13
File: KMeans.c Project: Adri96/aifh
CLUSTER_ITEM* KMeansLoadCSV(char *filename, int labelColumn, int startColumn, int featureCount) {
	FILE *fp;
	char buf[1024];
	size_t bytes_read;
	struct _KMeansStructCSV c;
	struct csv_parser p;
	CLUSTER_ITEM *result;

	/* Setup csvlib to read the CSV file */
	if (csv_init(&p, CSV_APPEND_NULL) != 0) exit(EXIT_FAILURE);
	fp = fopen(filename, "rb");
	if (!fp)
	{ 
		printf("Could not open: %s\n", filename);
		exit(EXIT_FAILURE); 
	}

	c.row = 0;
	c.col = 0;
	c.startCol = startColumn;
	c.featureCount = featureCount;
	c.labelCol = labelColumn;
	c.item = c.prevItem = c.firstItem = NULL;
	c.features = (double*)calloc(featureCount,sizeof(double));

	/* Loop over the contents.  It is important to note that we are not reading line by
	   line, at this level.  Rather, we are passing blocks off to csvlib.  Then csvlib
	   calls our two callbacks as needed. */

	while ((bytes_read=fread(buf, 1, 1024, fp)) > 0) {
		if (csv_parse(&p, buf, bytes_read, _KMeansCallbackColumn, _KMeansCallbackRow, &c) != bytes_read) {
			fprintf(stderr, "Error while parsing file: %s\n",
			csv_strerror(csv_error(&p)) );
			exit(EXIT_FAILURE);
		}
	}

	result = c.firstItem;

	/* Handle any final data.  May call the callbacks once more */
	csv_fini(&p, _KMeansCallbackColumn, _KMeansCallbackRow, &c);


	/* Cleanup */
	free(c.features);
	fclose(fp);
	csv_free(&p);

	return result;
}
Example #14
/* ==================================================================
 * Parser for comma-separated argument  list
 * ================================================================== */
void ParseVarList(int nLn, FILE *flp, char *dataName, char *leftPart, char *argString)	
{

  struct csv_parser p;
  unsigned char options = 0;
  LIST_DATA ldata;

  // fill in data for the callback
  memset(&ldata, '\x0', sizeof(LIST_DATA));
  ldata.nLn = nLn;
  ldata.flp = flp;
  ldata.cnt = 0;
  strcpy(ldata.dataName, dataName);
  strcpy(ldata.lp, leftPart);


  // Initialize csv parser
  if (csv_init(&p, options) != 0)
  {
  	fprintf(stderr, "Failed to initialize csv parser\n");
  	return;
  }
  
  // set white space, eol and delimiter
  csv_set_space_func(&p, is_space_list);
  csv_set_term_func(&p, is_term_list);
  
  csv_set_delim(&p, ',');
  
  unsigned int argLen = strlen(argString);

  fprintf(stderr, "ParseVarList: argString = %s argLen = %u\n", argString, argLen);

  memset(inputsLst, '\x0', sizeof(inputsLst));								
  InpCnt = 0;
  
  if (csv_parse(&p, argString, argLen, cbProcessListElement, NULL, &ldata) != argLen)
  {
  	fprintf(stderr, "ParseVarList: %s\n", csv_strerror(csv_error(&p)));
  	csv_free(&p);
  	return;
  }
  
  csv_fini(&p, cbProcessListElement, NULL, &ldata);
  csv_free(&p);
  
  return;
  
}
Example #15
/*
This example shows how to read a CSV file with libcsv.  It reads the iris data set;
the output is shown below.

Reading CSV file: ./datasets/iris.csv
Field: "sepal_length"
Field: "sepal_width"
Field: "petal_length"
Field: "petal_width"
Field: "class"
Row done
Field: "5.1"
Field: "3.5"
Field: "1.4"
Field: "0.2"
Field: "Iris-setosa"
Row done
Field: "4.9"
Field: "3.0"
Field: "1.4"
Field: "0.2"
Field: "Iris-setosa"
Row done
Field: "4.7"
Field: "3.2"
Field: "1.3"
Field: "0.2"
Field: "Iris-setosa"
...
Row done
Field: "6.5"
Field: "3.0"
Field: "5.2"
Field: "2.0"
Field: "Iris-virginica"
Row done
Field: "6.2"
Field: "3.4"
Field: "5.4"
Field: "2.3"
Field: "Iris-virginica"
Row done
Field: "5.9"
Field: "3.0"
Field: "5.1"
Field: "1.8"
Field: "Iris-virginica"
Row done
755 fields, 151 rows
*/
void ExampleReadCSV(int argIndex, int argc, char **argv) {
	char filename[FILENAME_MAX];
	FILE *fp;
	struct csv_parser p;
	char buf[1024];
	size_t bytes_read;
	struct counts c = {0, 0};
	
	if( argIndex>=argc ) {
		LocateFile("iris.csv",filename,FILENAME_MAX);
	} else {
		strncpy(filename,argv[argIndex],FILENAME_MAX);
	}
	
	printf("Reading CSV file: %s\n", filename);

	/* Setup csvlib to read the CSV file */
	if (csv_init(&p, CSV_APPEND_NULL) != 0) exit(EXIT_FAILURE);
	fp = fopen(filename, "rb");
	if (!fp)
	{ 
		printf("Could not open: %s\n", filename);
		exit(EXIT_FAILURE); 
	}

	/* Loop over the contents.  It is important to note that we are not reading line by
	   line, at this level.  Rather, we are passing blocks off to csvlib.  Then csvlib
	   calls our two callbacks as needed. */

	while ((bytes_read=fread(buf, 1, 1024, fp)) > 0)
		if (csv_parse(&p, buf, bytes_read, CallbackColumn, CallbackRow, &c) != bytes_read) {
			fprintf(stderr, "Error while parsing file: %s\n",
			csv_strerror(csv_error(&p)) );
			exit(EXIT_FAILURE);
		}

	/* Handle any final data.  May call the callbacks once more */
	csv_fini(&p, CallbackColumn, CallbackRow, &c);

	/* Print final stats on CSV file */
	printf("%lu fields, %lu rows\n", c.fields, c.rows);

	/* Cleanup */
	fclose(fp);
	csv_free(&p);
}
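CallbackColumn and CallbackRow are not reproduced here; given the output listed above and the counts struct initialized to {0, 0}, they presumably look roughly like this (a sketch, not the original source):

struct counts {
	unsigned long fields;
	unsigned long rows;
};

/* Print each field (NUL-terminated thanks to CSV_APPEND_NULL) and tally it. */
static void CallbackColumn(void *s, size_t len, void *data) {
	struct counts *c = (struct counts *)data;
	(void)len;
	printf("Field: \"%s\"\n", (char *)s);
	c->fields++;
}

/* Print the row separator and tally the row. */
static void CallbackRow(int ch, void *data) {
	struct counts *c = (struct counts *)data;
	(void)ch;
	printf("Row done\n");
	c->rows++;
}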
Example #16
int main(int argc, char* argv[]){
	if(argc<2){
		std::cerr << "Usage: " << argv[0] << " file.csv" << std::endl;
		return 1;
	}

	std::vector<std::vector<std::string> > csv_data;
	csv_parse(argv[1],&csv_data);

	for(size_t j = 0; j < csv_data.size(); j++){
		for(size_t i = 0; i < csv_data[j].size(); i++){
			std::cout << "\"" << csv_data[j][i] << "\" ";
		}
		std::cout << std::endl;
	}

	return 0;
}
Example #17
int main(int argc, char* argv[])
{
    if (argc < 2)
    {
        fprintf(stderr, "Usage: %s <training-csv-file>\n", argv[0]);
        return 1;
    }

    plotter = gnuplot_init();
    csv_file* csv = csv_parse(argv[1]);
    linear_regression* lr = linreg_run(csv);

    printf("h(x) = %f + %fx\n", lr->theta0, lr->theta1);
    plot_data_and_model(argv[1], lr);

    linreg_free(lr);
    csv_free(csv);
    gnuplot_close(plotter);
    return 0;
}
Example #18
int main (void) {
  struct csv_parser p;
  int i;
  char c;

  csv_init(&p, 0);

  while ((i=getc(stdin)) != EOF) {
    c = i;
    if (csv_parse(&p, &c, 1, cb1, cb2, NULL) != 1) {
      fprintf(stderr, "Error: %s\n", csv_strerror(csv_error(&p)));
      exit(EXIT_FAILURE);
    }
  }

  csv_fini(&p, cb1, cb2, NULL);
  csv_free(&p);

  return EXIT_SUCCESS;
}
Example #19
/* This static function reads a full dataset from the given CSV file
   and returns it.  On failure the function emits appropriate error
   messages and returns NULL. */
dataset * dataset::load_csv (const char * file) {
  FILE * f;
  if ((f = fopen (file, "r")) == NULL) {
    logprint (LOG_ERROR, "error loading `%s': %s\n", file, strerror (errno));
    return NULL;
  }
  csv_in = f;
  csv_restart (csv_in);
  if (csv_parse () != 0) {
    fclose (f);
    return NULL;
  }
  if (csv_check () != 0) {
    fclose (f);
    return NULL;
  }
  fclose (f);
  csv_lex_destroy ();
  csv_result->setFile (file);
  return csv_result;
}
Example #20
static int config_load(control_t *control)
{
	struct csv_parser p;
	FILE *fp;
	char buf[MAX_BUF];
	size_t bytes_read;
	
	assert(control);
	assert(control->configfile);
	assert(control->entries == NULL);
	assert(control->start == 0 && control->end == 0);

	// open the file.
	fp = fopen(control->configfile, "rb");
	if (fp) {

		if (csv_init(&p, 0) == 0) {
			while ((bytes_read=fread(buf, 1, MAX_BUF, fp)) > 0) {
				if (csv_parse(&p, buf, bytes_read, csv_data, csv_line, control) != bytes_read) {
					fprintf(stderr, "Error while parsing file: %s\n",
					csv_strerror(csv_error(&p)) );
					exit(EXIT_FAILURE);
				}
			}

			csv_fini(&p, csv_data, csv_line, control);
			csv_free(&p);
		}
	
		fclose(fp);
		return 0;
	}
	else {
		return -1;
	}
}
Example #21
readstat_error_t readstat_parse_csv(readstat_parser_t *parser, const char *path, const char *jsonpath, struct csv_metadata* md, void *user_ctx) {
    readstat_error_t retval = READSTAT_OK;
    readstat_io_t *io = parser->io;
    size_t file_size = 0;
    size_t bytes_read;
    struct csv_parser csvparser;
    struct csv_parser *p = &csvparser;
    char buf[BUFSIZ];
    size_t* column_width = md->column_width;
    md->pass = column_width ? 2 : 1;
    md->open_row = 0;
    md->columns = 0;
    md->_rows = md->rows;
    md->rows = 0;
    md->parser = parser;
    md->user_ctx = user_ctx;
    md->json_md = NULL;

    if ((md->json_md = get_json_metadata(jsonpath)) == NULL) {
        fprintf(stderr, "Could not get JSON metadata\n");
        retval = READSTAT_ERROR_PARSE;
        goto cleanup;
    }

    if (io->open(path, io->io_ctx) == -1) {
        retval = READSTAT_ERROR_OPEN;
        goto cleanup;
    }

    file_size = io->seek(0, READSTAT_SEEK_END, io->io_ctx);
    if (file_size == -1) {
        retval = READSTAT_ERROR_SEEK;
        goto cleanup;
    }

    if (io->seek(0, READSTAT_SEEK_SET, io->io_ctx) == -1) {
        retval = READSTAT_ERROR_SEEK;
        goto cleanup;
    }

    if (csv_init(p, CSV_APPEND_NULL) != 0)
    {
        retval = READSTAT_ERROR_OPEN;
        goto cleanup;
    }
    unsigned char sep = get_separator(md->json_md);
    csv_set_delim(p, sep);
    
    while ((bytes_read = io->read(buf, sizeof(buf), io->io_ctx)) > 0)
    {
        if (csv_parse(p, buf, bytes_read, csv_metadata_cell, csv_metadata_row, md) != bytes_read)
        {
            fprintf(stderr, "Error while parsing file: %s\n", csv_strerror(csv_error(p)));
            retval = READSTAT_ERROR_PARSE;
            goto cleanup;
        }
    }
    csv_fini(p, csv_metadata_cell, csv_metadata_row, md);
    if (!md->open_row) {
        md->rows--;
    }
    if (parser->info_handler && md->pass == 1) {
        parser->info_handler(md->rows, md->_columns, user_ctx);
    }

cleanup:
    if (md->variables) {
        free(md->variables);
        md->variables = NULL;
    }
    if (md->is_date) {
        free(md->is_date);
        md->is_date = NULL;
    }
    if (md->json_md) {
        free_json_metadata(md->json_md);
        md->json_md = NULL;
    }
    csv_free(p);
    io->close(io->io_ctx);
    return retval;
}
Example #22
int main (int argc, char** argv)
{
    paillier_pubkey_t* pkey;
    paillier_prvkey_t* skey;
    paillier_keygen(128,&pkey,&skey,&paillier_get_rand_devrandom);


    void *context = zmq_ctx_new ();


    struct opts options;
    parse_options(argc,argv, &options);

    if(options.size <= 0 || options.scale <= 0 || !options.fileset){
        fprintf(stderr,"Size and scale must be greater than 0 and file must be set\n");
        exit(EXIT_FAILURE);
    }
    struct classify_data data;
    data.pub = pkey;
    data.prv = skey;
    data.maxcol = options.size;
    data.scale_factor = options.scale;
    data.texts = (paillier_plaintext_t**)malloc(options.size*sizeof(paillier_plaintext_t*));
    data.col = 0;
    data.correct = 0;
    data.total = 0;
    init_rand(data.rand,&paillier_get_rand_devurandom,pkey->bits / 8 + 1);
    

    // Socket to talk to server
    gmp_printf("n: %Zd, lambda: %Zd\n",pkey->n,skey->lambda);
    void *requester = zmq_socket (context, ZMQ_REQ);
    zmq_connect (requester, "ipc:///tmp/karma");
    char* pubkeyhex = paillier_pubkey_to_hex(pkey);
    s_send(requester,pubkeyhex);
    char* recv = s_recv(requester);
    free(recv);
    free(pubkeyhex);

    data.socket = requester;

    char* file = options.file;
    FILE* fp;
    struct csv_parser p;
    char buf[1024];
    size_t bytes_read;
    if(csv_init(&p,0)) {
        fprintf(stderr, "Failed to initialize parser\n");
        exit(EXIT_FAILURE);
    }
    
    fp = fopen(file,"rb");
    if(!fp){
        fprintf(stderr,"Failed to open classify file %s\n",strerror(errno));
        exit(EXIT_FAILURE);
    }

    while ((bytes_read=fread(buf,1,1024,fp)) > 0){
        if(csv_parse(&p,buf,bytes_read,field_parsed,row_parsed,&data) != bytes_read){
            fprintf(stderr, "Failed to parse file: %s\n",csv_strerror(csv_error(&p)));
        }
    }
    csv_fini(&p,field_parsed,row_parsed,&data);
    //fini took care of freeing the plaintexts
    csv_free(&p);

    free(data.texts);
    gmp_randclear(data.rand);

    printf("Correct(%i)/Total(%i) = %f\n",data.correct,data.total,data.correct/(data.total+0.0));
    

    sleep (2);
    zmq_close (requester);
    zmq_ctx_destroy (context);
    return 0;
}
Example #23
void SettingsDialogImpl::OnOk(wxCommandEvent& event)
{
    this->m_sdbSizerBtns->GetAffirmativeButton()->Disable();
    bool can_scan = true;
    if( m_tPath->GetValue() == wxEmptyString )
    {
        int latmin = wxMin(m_spFromLat->GetValue(), m_spToLat->GetValue());
        int latmax = wxMax(m_spFromLat->GetValue(), m_spToLat->GetValue());
        int lonmin = wxMin(m_spFromLon->GetValue(), m_spToLon->GetValue());
        int lonmax = wxMax(m_spFromLon->GetValue(), m_spToLon->GetValue());
        //Check if we cross IDL and refuse to run...
        if( (lonmin < -90 && lonmax > 90) || (lonmin < 0 && lonmax > 0 && 180 + lonmin + lonmax < 180) )
        {
            wxMessageBox(_("Sorry, I'm stupid and can't cross the IDL, please divide your scan in two."));
            can_scan = false;
        }
//        this->Hide();
        if( can_scan && m_cb5000000->GetValue() )
            p_plugin->ScanArea( latmin, lonmin, latmax, lonmax, 5000000 );
        if( can_scan && m_cb1000000->GetValue() )
            p_plugin->ScanArea( latmin, lonmin, latmax, lonmax, 1000000 );
        if( can_scan && m_cb200000->GetValue() )
            p_plugin->ScanArea( latmin, lonmin, latmax, lonmax, 200000 );
        if( can_scan && m_cb20000->GetValue() )
            p_plugin->ScanArea( latmin, lonmin, latmax, lonmax, 20000 );
        this->Close();
    }
    else
    {
        if( wxFileExists(m_tPath->GetValue()) )
        {
            std::ifstream inFile( m_tPath->GetValue().mb_str() ); 
            int linecount = std::count(std::istreambuf_iterator<char>(inFile), std::istreambuf_iterator<char>(), '\n');
            m_prgdlg = new wxProgressDialog(_("Import progress..."),
                wxString::Format( _("Importing data from %s."), m_tPath->GetValue().c_str() ),
                linecount, this	);
            m_prgdlg->Show();
            
            FILE *fp;
            if ( NULL==(fp=fopen (m_tPath->GetValue().mb_str(),"r") ) )
            {
                fprintf (stderr, "Cannot open input file sales.csv\n");
                return;
            }
            switch( csv_parse (fp, ProcessCsvLine, this) )
            {
            case E_LINE_TOO_WIDE:
                //fprintf(stderr,"Error parsing csv: line too wide.\n");
                break;
            case E_QUOTED_STRING:
                //fprintf(stderr,"Error parsing csv: ill-formatted quoted string.\n");
                break;
            }
            
            fclose (fp);
            
            m_prgdlg->Close();
            delete m_prgdlg;
            m_iProcessed = 0;
            m_prgdlg = NULL;
        }
        else
        {
            wxMessageBox( wxString::Format( _("The files %s does not exist, nothing to import."), m_tPath->GetValue().c_str() ) );
        }
        this->Close();
    }
}
Example #24
void genericParse(
    /*std::istream &file,*/
    std::string filename,
    field_cb_t cb_per_field,
    line_cb_t cb_per_line,
    void *data,
    const csv::params &params
                  ) {
  // Open the file
  typedef std::unique_ptr<std::FILE, int (*)(std::FILE *)> unique_file_ptr;
  unique_file_ptr file(fopen(filename.c_str(), "rb"), fclose);
  if (!file) {
    throw ParserError(std::string("File Opening Failed") +  std::strerror(errno));
  }

  struct csv_parser parser;

  if (!csv_init(&parser, 0)) {
    csv_set_opts(&parser, CSV_APPEND_NULL);
    csv_set_delim(&parser, params.getDelimiter());

    int line_start = params.getLineStart();
    if (line_start > 1) {
      int c;
      do {
        c = fgetc(file.get());
        if ( c== '\n') --line_start;
      } while (c!= EOF  && line_start > 1);
    }

    // Read block size: 1 MB by default, overridable via HYRISE_LOAD_BLOCK_SIZE
    size_t block_size;
    if (getenv("HYRISE_LOAD_BLOCK_SIZE"))
      block_size = strtoul(getenv("HYRISE_LOAD_BLOCK_SIZE"), nullptr, 0);
    else
      block_size = 1024 * 1024;

    // Read from the buffer
    size_t readBytes = 0;
    char rdbuf[block_size];

    // Read the file until we cannot extract more bytes
    do {
      readBytes = fread(rdbuf, 1, block_size, file.get());
      if (csv_parse(&parser,
                    rdbuf,
                    readBytes,
                    cb_per_field,
                    cb_per_line,
                    data) != (size_t) readBytes) {
        throw ParserError(csv_strerror(csv_error(&parser)));
      }
    } while (readBytes == block_size);

    if (ferror(file.get())) {
      throw ParserError("Could not read file");
    }

    csv_fini(&parser,
             cb_per_field,
             cb_per_line,
             data);
  }
  csv_free(&parser);
}
Example #25
void UPCdata::readCSVfile(char* filename){
	/* The UPC dataset has an unorthodox format:
	 each row, starting from the second, contains 1020 data columns and 6 description columns.
	 The data columns correspond to 60 ORNs per 17 OR types.
	 The description columns denote compound, set (TrainingSet, TestSet, ValidationSet), conc1, conc2, conc3 and sample (minute).
	 Vector parameters are handed over C-style, by pointer (C++ references, to be exact).
	 */

	cout << "reading file " << filename << endl;

	// empty output vectors
	compounds.resize(0);
	samples.resize(0);
	
	/*
	std::ifstream fh(filename, ifstream::in);
	if (fh.is_open()){
		if(fh.good()){
			string line;
			getline(fh,line); // ignore first line
		}
	*/

	char buf[10000];
	FILE *fp = fopen( filename, "rb" );
	fgets(buf, 10000, fp); // ignore first line
	while(fgets(buf, 10000, fp)){
	//	while(fh.good() ){
			// initalizations for file reading
			string line(buf);
			Sample sample;	
			
			struct counts c = {vector<float>(0),0, 0};

			if (csv_init(&m_csvParser, CSV_STRICT) != 0){
				fprintf(stderr, "Failed to initialize csv parser\n");
				return;
				//exit(EXIT_FAILURE);
			}

			//getline(fh,line);
			int bytes_read=line.size();
			if (csv_parse(&m_csvParser, buf, bytes_read, cbColumn, cbRow, &c) != bytes_read){
				fprintf(stderr, "Error while parsing file: %s\n", csv_strerror(csv_error(&m_csvParser)));
			}			
			csv_fini(&m_csvParser, cbColumn, cbRow, &c);
			csv_free(&m_csvParser); /* parser is re-initialized for every line, so release it each time */

			if(c.fData.size()>0){
				for(int i=0;i<c.fData.size()-6;i++)
					sample.data.push_back(c.fData[i]);

				sample.samplenr=c.fData[c.fData.size()-1]; 
				sample.concentrations[0]=c.fData[c.fData.size()-4]; 
				sample.concentrations[1]=c.fData[c.fData.size()-3]; 
				sample.concentrations[2]=c.fData[c.fData.size()-2]; 

				// now get the compound and the set without using regular expressions
				// in order to avoid problems in code compatibility if neither boost nor tr1 are available
				string str=line;
				string setstr="";
				for(int i=0;i<5;i++){
					int found=str.find_last_of(",");
					str=str.substr(0,found);
					if(i==3){
						setstr=str.substr(str.find_last_of(",")+2,str.size()-1);
					}
				}						
				string compound=str.substr(str.find_last_of(",")+2,str.size()-1);						

				if(setstr.compare("TrainingSet\"")==0)
					sample.set=Sample::Training;
				else if(setstr.compare("TestSet\"")==0)			
					sample.set=Sample::Test;
				else if(setstr.compare("ValidationSet\"")==0)
					sample.set=Sample::Validation;
				else if(setstr.compare("InterimSet\"")==0)
					sample.set=Sample::Interim;

				if(sample.set==Sample::Undefined)
					fprintf(stderr, "Storage::ReadUPCfile(): Could not match set\n");
						
				for(int i=0;i<compounds.size();i++){
					if(compounds[i]==compound){
						sample.compoundnr=i;
						break;
					}
				}
				if(sample.compoundnr<0){
					compounds.push_back(compound);
					sample.compoundnr=compounds.size()-1;
				}
				samples.push_back(sample);			
			}
		}  // line treatment
    //fh.close();
	fclose(fp);

	//} else 
	//	cerr<<"Error: Could not open file. ("<<filename<<")\n";

	cout<<"finished reading " << filename << endl;
	return;
}
Example #26
vector<vector<float> > Storage::LoadDataFloatCSV(char* filename, int nrItems, bool keepOpen)
{
	vector<vector<float> > out;
	size_t bytes_read;
	char buf[1024];
	struct counts c = {vector<float>(0),0, 0};

	if (csv_init(&m_csvParser, CSV_STRICT) != 0) 
	{
		fprintf(stderr, "Failed to initialize csv parser\n");
		return out;
		//exit(EXIT_FAILURE);
	}

	//cout<<"Loading "<<filename<<"\n";
	m_fp = fopen(filename, "r");

	if(m_fp==NULL)
	{
		cerr<<"Error: Could not open file. ("<<filename<<")\n";
	}

	cout.flush();

	long oldRows = 0;

	while ((bytes_read=fread(buf, 1, 1, m_fp)) > 0) 
	{//((bytes_read=fread(buf, 1, 1024, m_fp)) > 0) {
		if (csv_parse(&m_csvParser, buf, bytes_read, cbColumn, cbRow, &c) != bytes_read) {
			fprintf(stderr, "Error while parsing file: %s\n", csv_strerror(csv_error(&m_csvParser)));
		}
		else{
//			cout<<".";cout.flush();
		}

		if(c.rows>oldRows){
			out.push_back(c.fData);
			c.fData.clear();
			oldRows = c.rows;
		}

		if(c.rows>=nrItems)
			break;
    }

	if(keepOpen == false)
	{
		csv_fini(&m_csvParser, cbColumn, cbRow, &c);
		fclose(m_fp);
	}

	if(m_mpiRank == 0)
	{
		if(out.size()>0)
			cout<<"Loaded "<<out.size()<<" nr items of dimension "<<out[0].size()<<".\n";
		else
			cout<<"Loaded no items.\n";
		cout.flush();
	}

	return out;
}
Example #27
int main(int argc, char *argv[])
{
	//check arguments
	if(argc < 2 || argc > 3)
	{
		printf("evoscan_logworks_conv <version %s>\n", version);
		printf("[email protected], @640774n6\n\n");
		printf("usage: evoscan_logworks_conv <input csv path> <output dif path (optional)>\n");
		return 1;
	}

	char *input_path = argv[1];
	char *generated_output_path = create_output_path(input_path);
	char *output_path = (argc == 3) ? argv[2] : generated_output_path;
	
	if(!strcmp(input_path, output_path))
	{
		printf("error: input and output path must be different\n");
		free(generated_output_path);
		return 1;
	}
	
	//open file pointers
	FILE *input_file = fopen(input_path, "rb");
	if(!input_file)
	{
		printf("error: failed to open input @ %s\n", input_path);
		free(generated_output_path);
		return 1;
	}
	
	FILE *output_file = fopen(output_path, "wb");
	if(!output_file)
	{
		printf("error: failed to open output @ %s\n", output_path);
		free(generated_output_path);
		fclose(input_file);
		return 1;
	}
	
	FILE *tmp_file = tmpfile();
	if(!tmp_file)
	{
		printf("error: failed to open tmp file\n");
		free(generated_output_path);
		fclose(input_file);
		fclose(output_file);
		return 1;
	}
	
	//initialize variables
	fields = NULL;
	field_count = 0;
	used_fields = NULL;
	used_fields_min = NULL;
	used_fields_max = NULL;
	used_field_count = 0;
	total_sample_time = 0.0;
	total_sample_count = 0;
	row_sample_count = 0;
	csv_col = 0;
	csv_row = 0;
	
	//create csv parser
	struct csv_parser parser;
	unsigned char options = (CSV_APPEND_NULL | CSV_EMPTY_IS_NULL);
	csv_init(&parser, options);
	
	//main parse loop
	size_t length = 0;
	char buffer[1024];
	while((length = fread(buffer, 1, 1024, input_file)) > 0)
	{
		//parse csv and handle with callbacks
		if(csv_parse(&parser, buffer, length, csv_process_col, csv_process_row, tmp_file) != length)
		{
			printf("error: failed to read from input @ %s\n", input_path);
			free(generated_output_path);
			fclose(input_file);
			fclose(output_file);
			fclose(tmp_file);
			csv_free(&parser);
			remove(output_path);
			return 1;
		}
	}
	
	//write output header
	fprintf(output_file, "TABLE\r\n0,1\r\n\"EXCEL\"\r\n");
	fprintf(output_file, "VECTORS\r\n0,%d\r\n\"\"\r\n", (total_sample_count + 13));
	fprintf(output_file, "TUPLES\r\n0,%d\r\n\"\"\r\n", (used_field_count + 1));
	fprintf(output_file, "DATA\r\n0,0\r\n\"\"\r\n");
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"Input Description\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "1,0\r\n\"\"\r\n"); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"Stochiometric:\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "1,0\r\n\"\"\r\n"); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"From Device:\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "1,0\r\n\"(EVOSCAN%d)\"\r\n", (i + 1)); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"Name:\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "1,0\r\n\"%s\"\r\n", used_fields[i]); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"Unit:\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "1,0\r\n\"%.3s\"\r\n", used_fields[i]); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"Range:\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "0,%d\r\nV\r\n", used_fields_min[i]); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"equiv(Sample):\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "0,0\r\nV\r\n"); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"to:\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "0,%d\r\nV\r\n", used_fields_max[i] + 1); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"equiv(Sample):\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "0,4096\r\nV\r\n"); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"Color:\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "0,%d\r\nV\r\n", color_for_index(i)); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"-End-\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "1,0\r\n\"\"\r\n"); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"Session 1\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "1,0\r\n\"\"\r\n"); }
	fprintf(output_file, "-1,0\r\nBOT\r\n");
	
	fprintf(output_file, "1,0\r\n\"Time(sec)\"\r\n");
	for(int i = 0; i < used_field_count; i++)
	{ fprintf(output_file, "1,0\r\n\"%s (%.3s)\"\r\n", used_fields[i], used_fields[i]); }
	
	//append tmp to the output
	fseek(tmp_file, 0, SEEK_SET);
	while((length = fread(buffer, 1, 1024, tmp_file)) > 0)
	{ fwrite(buffer, sizeof(char), length, output_file); }
	
	//write footer
	fprintf(output_file, "-1,0\r\nEOD\r\n");
	
	//free generated output path
	free(generated_output_path);
	
	//free fields
	for(int i = 0; i < field_count; i++)
	{ free(fields[i]); }
	free(fields);
	free(used_fields);
	free(used_fields_min);
	free(used_fields_max);
	
	//close file pointers
	fclose(input_file);
	fclose(output_file);
	fclose(tmp_file);
	
	//free parser
	csv_free(&parser);
	
	return 0;
}
Example #28
0
std::shared_ptr<storage::AbstractTable> RawTableLoader::load(std::shared_ptr<storage::AbstractTable> in,
        const storage::compound_metadata_list *ml,
        const Loader::params &args) {



    csv::params params;
    if (detectHeader(args.getBasePath() + _filename)) params.setLineStart(5);

    // Create the result table
    storage::metadata_vec_t v(in->columnCount());
    for(size_t i=0; i < in->columnCount(); ++i) {
        v[i] = in->metadataAt(i);
    }
    auto result = std::make_shared<storage::RawTable>(v);

    // CSV Parsing
    std::ifstream file(args.getBasePath() + _filename, std::ios::binary);
    if (!file || file.bad()) {
        throw csv::ParserError("CSV file '" + _filename + "' does not exist");
    }

    struct csv_parser parser;

    if (!csv_init(&parser, 0)) {
        csv_set_opts(&parser, CSV_APPEND_NULL);
        csv_set_delim(&parser, params.getDelimiter());

        // If there is a header in the file, we will ignore it
        std::string line;
        int line_start = params.getLineStart();

        if (line_start != 1) {
            while (line_start > 1) {
                std::getline(file, line);
                --line_start;
            }
        }

        // Prepare cb data handler
        struct raw_table_cb_data data(v);
        data.table = result;

        const size_t block_size = 16 * 1024;
        char rdbuf [block_size];

        while (file.read(rdbuf, block_size).good()) {
            auto extracted = file.gcount();
            if (extracted == 0)
                break;

            if (csv_parse(&parser,
                          rdbuf,
                          extracted,
                          (field_cb_t) raw_table_cb_per_field,
                          (line_cb_t) raw_table_cb_per_line,
                          (void*) &data) != (size_t) extracted) {
                throw csv::ParserError(csv_strerror(csv_error(&parser)));
            }
        }

        // Parse the rest
        if (csv_parse(&parser,
                      rdbuf,
                      file.gcount(),
                      (field_cb_t) raw_table_cb_per_field,
                      (line_cb_t) raw_table_cb_per_line,
                      (void*) &data) != (size_t) file.gcount()) {
            throw csv::ParserError(csv_strerror(csv_error(&parser)));
        }

        csv_fini(&parser,
                 (field_cb_t) raw_table_cb_per_field,
                 (line_cb_t) raw_table_cb_per_line,
                 (void*) &data);

    }
    csv_free(&parser);
    return result;
}
Example #29
File: rcsv.c Project: fiksu/rcsv
/* An rb_rescue()-compatible Ruby pseudo-method that handles the actual parsing */
VALUE rcsv_raw_parse(VALUE ensure_container) {
  /* Unpacking multiple variables from a single Ruby VALUE */
  VALUE options = rb_ary_entry(ensure_container, 0);
  VALUE csvio   = rb_ary_entry(ensure_container, 1);
  struct rcsv_metadata * meta = (struct rcsv_metadata *)NUM2LONG(rb_ary_entry(ensure_container, 2));
  struct csv_parser * cp = (struct csv_parser *)NUM2LONG(rb_ary_entry(ensure_container, 3));

  /* Helper temporary variables */
  VALUE option, csvstr, buffer_size;

  /* libcsv-related temporary variables */
  char * csv_string;
  size_t csv_string_len;
  int error;

  /* Generic iterator */
  size_t i = 0;

  /* IO buffer size can be controlled via an option */
  buffer_size = rb_hash_aref(options, ID2SYM(rb_intern("buffer_size")));

  /* By default, parse as Array of Arrays */
  option = rb_hash_aref(options, ID2SYM(rb_intern("row_as_hash")));
  if (option && (option != Qnil)) {
    meta->row_as_hash = true;
  }

  /* :col_sep sets the column separator, default is comma (,) */
  option = rb_hash_aref(options, ID2SYM(rb_intern("col_sep")));
  if (option != Qnil) {
    csv_set_delim(cp, (unsigned char)*StringValuePtr(option));
  }

  /* :quote_char sets the character used for quoting data; default is double-quote (") */
  option = rb_hash_aref(options, ID2SYM(rb_intern("quote_char")));
  if (option != Qnil) {
    csv_set_quote(cp, (unsigned char)*StringValuePtr(option));
  }

  /* Specify how many rows to skip from the beginning of CSV */
  option = rb_hash_aref(options, ID2SYM(rb_intern("offset_rows")));
  if (option != Qnil) {
    meta->offset_rows = (size_t)NUM2INT(option);
  }

  /* Specify the character encoding to use for the parsed output data */
  option = rb_hash_aref(options, ID2SYM(rb_intern("output_encoding")));
  if (option && (option != Qnil)) {
    meta->encoding_index = RB_ENC_FIND_INDEX(StringValueCStr(option));
  }

  /* :only_rows is a list of values where row is only parsed
     if its fields match those in the passed array.
     [nil, nil, ["ABC", nil, 1]] skips all rows where 3rd column isn't equal to "ABC", nil or 1 */
  option = rb_hash_aref(options, ID2SYM(rb_intern("only_rows")));
  if (option != Qnil) {
    meta->num_only_rows = (size_t)RARRAY_LEN(option);
    meta->only_rows = (VALUE *)malloc(meta->num_only_rows * sizeof(VALUE));

    for (i = 0; i < meta->num_only_rows; i++) {
      VALUE only_row = rb_ary_entry(option, i);
      meta->only_rows[i] = validate_filter_row("only_rows", only_row);
    }
  }

  /* :except_rows is a list of values where row is only parsed
     if its fields don't match those in the passed array.
     [nil, nil, ["ABC", nil, 1]] skips all rows where 3rd column is equal to "ABC", nil or 1 */
  option = rb_hash_aref(options, ID2SYM(rb_intern("except_rows")));
  if (option != Qnil) {
    meta->num_except_rows = (size_t)RARRAY_LEN(option);
    meta->except_rows = (VALUE *)malloc(meta->num_except_rows * sizeof(VALUE));

    for (i = 0; i < meta->num_except_rows; i++) {
      VALUE except_row = rb_ary_entry(option, i);
      meta->except_rows[i] = validate_filter_row("except_rows", except_row);
    }
  }

  /* :row_defaults is an array of default values that are assigned to fields containing empty strings
     according to matching field positions */
  option = rb_hash_aref(options, ID2SYM(rb_intern("row_defaults")));
  if (option != Qnil) {
    meta->num_row_defaults = RARRAY_LEN(option);
    meta->row_defaults = (VALUE*)malloc(meta->num_row_defaults * sizeof(VALUE*));

    for (i = 0; i < meta->num_row_defaults; i++) {
      VALUE row_default = rb_ary_entry(option, i);
      meta->row_defaults[i] = row_default;
    }
  }

  /* :row_conversions specifies Ruby types that CSV field values should be converted into.
     Each char of row_conversions string represents Ruby type for CSV field with matching position. */
  option = rb_hash_aref(options, ID2SYM(rb_intern("row_conversions")));
  if (option != Qnil) {
    meta->num_row_conversions = RSTRING_LEN(option);
    meta->row_conversions = StringValuePtr(option);
  }

 /* Column names should be declared explicitly when parsing fields as Hashes */
  if (meta->row_as_hash) { /* Only matters for hash results */
    option = rb_hash_aref(options, ID2SYM(rb_intern("column_names")));
    if (option == Qnil) {
      rb_raise(rcsv_parse_error, ":row_as_hash requires :column_names to be set.");
    } else {
      meta->last_entry = rb_hash_new();

      meta->num_columns = (size_t)RARRAY_LEN(option);
      meta->column_names = (VALUE*)malloc(meta->num_columns * sizeof(VALUE*));

      for (i = 0; i < meta->num_columns; i++) {
        meta->column_names[i] = rb_ary_entry(option, i);
      }
    }
  } else {
    meta->last_entry = rb_ary_new();
  }

  while(true) {
    csvstr = rb_funcall(csvio, rb_intern("read"), 1, buffer_size);
    if ((csvstr == Qnil) || (RSTRING_LEN(csvstr) == 0)) { break; }

    csv_string = StringValuePtr(csvstr);
    csv_string_len = strlen(csv_string);

    /* Actual parsing and error handling */
    if (csv_string_len != csv_parse(cp, csv_string, csv_string_len,
                                    &end_of_field_callback, &end_of_line_callback, meta)) {
      error = csv_error(cp);
      switch(error) {
        case CSV_EPARSE:
          rb_raise(rcsv_parse_error, "Error when parsing malformed data");
          break;
        case CSV_ENOMEM:
          rb_raise(rcsv_parse_error, "No memory");
          break;
        case CSV_ETOOBIG:
          rb_raise(rcsv_parse_error, "Field data is too large");
          break;
        case CSV_EINVALID:
          rb_raise(rcsv_parse_error, "%s", (const char *)csv_strerror(error));
        break;
        default:
          rb_raise(rcsv_parse_error, "Failed due to unknown reason");
      }
    }
  }

  /* Flushing libcsv's buffer */
  csv_fini(cp, &end_of_field_callback, &end_of_line_callback, meta);

  return Qnil;
}