/* Check whether the file's leading bytes match the previously-seen header.
 *
 * Reads up to MAX_HEADER_READ_BYTES from 'mrt_filename' into the shared
 * 'buffer' and compares against 'previous_header'.
 *
 * Returns 1 if the headers are identical, 0 if they differ (in which case
 * 'previous_header' is updated to the new header), or -1 on open/read
 * failure.
 */
static int same_header(char *mrt_filename, unsigned char *previous_header)
{
  off_t bytes_in;
  int cmp_result;
  io_t *fh = wandio_create(mrt_filename);

  if (fh == NULL) {
    bgpstream_log_err("\t\tBSDS_SINGLEFILE: can't open file!");
    return -1;
  }

  bytes_in = wandio_read(fh, (void *)&(buffer[0]), MAX_HEADER_READ_BYTES);
  if (bytes_in < 0) {
    bgpstream_log_err("\t\tBSDS_SINGLEFILE: can't read file!");
    wandio_destroy(fh);
    return -1;
  }

  cmp_result = memcmp(buffer, previous_header, sizeof(unsigned char) * bytes_in);
  wandio_destroy(fh);

  if (cmp_result == 0) {
    /* no difference: same header as last time */
    return 1;
  }

  /* header changed: remember it for the next comparison */
  memcpy(previous_header, buffer, sizeof(unsigned char) * bytes_in);
  return 0;
}
/* Start input on a pcap trace file: open the file (if not already open),
 * read and validate the pcap global header.
 *
 * Returns 0 on success, -1 on failure (with a libtrace error set).
 */
static int pcapfile_start_input(libtrace_t *libtrace)
{
	int err;

	/* Open the underlying file the first time we are started */
	if (!libtrace->io) {
		libtrace->io = trace_open_file(libtrace);
		DATA(libtrace)->started = false;
	}

	if (!DATA(libtrace)->started) {
		if (!libtrace->io) {
			trace_set_err(libtrace, TRACE_ERR_BAD_IO,
				"Trace cannot start IO in pcapfile_start_input()");
			return -1;
		}

		/* Read the pcap global file header */
		err = wandio_read(libtrace->io, &DATA(libtrace)->header,
				sizeof(DATA(libtrace)->header));
		DATA(libtrace)->started = true;

		/* BUGFIX: removed the old
		 *   if (!(sizeof(DATA(libtrace)->header) > 0))
		 * check -- sizeof of a struct is a positive compile-time
		 * constant, so that branch was dead code and its
		 * "Trace is missing header" error could never fire. */

		if (err < 1) {
			trace_set_err(libtrace, TRACE_ERR_INIT_FAILED,
				"Error while reading pcap file header\n");
			return -1;
		}

		if (err != (int)sizeof(DATA(libtrace)->header)) {
			trace_set_err(libtrace, TRACE_ERR_INIT_FAILED,
				"Incomplete pcap file header");
			return -1;
		}

		if (!header_is_magic(&(DATA(libtrace)->header))) {
			trace_set_err(libtrace, TRACE_ERR_INIT_FAILED,
				"Not a pcap tracefile (magic=%08x)\n",
				swapl(libtrace,
					DATA(libtrace)->header.magic_number));
			return -1; /* Not a pcap file */
		}

		/* NOTE(review): this only rejects a file when BOTH the major
		 * version != 2 AND the minor version != 4; confirm whether
		 * '||' was intended (left as-is to preserve behaviour). */
		if (swaps(libtrace, DATA(libtrace)->header.version_major) != 2
			&& swaps(libtrace,
				DATA(libtrace)->header.version_minor) != 4) {
			trace_set_err(libtrace, TRACE_ERR_INIT_FAILED,
				"Unknown pcap tracefile version %d.%d\n",
				swaps(libtrace,
					DATA(libtrace)->header.version_major),
				swaps(libtrace,
					DATA(libtrace)->header.version_minor));
			return -1;
		}
	}

	return 0;
}
/* Read up to 'len' decompressed bytes from an lzma/xz stream into 'buffer'.
 *
 * Returns the number of bytes produced, 0 at end of stream, or -1 on error
 * (errno set; EIO for decompression failures). When an error or EOF is hit
 * mid-read, any bytes already decompressed are returned first and the
 * condition is latched in DATA(io)->err for the next call. */
static int64_t lzma_read(io_t *io, void *buffer, int64_t len)
{
	/* Short-circuit if a previous call already latched EOF or an error */
	if (DATA(io)->err == ERR_EOF)
		return 0; /* EOF */
	if (DATA(io)->err == ERR_ERROR) {
		errno=EIO;
		return -1; /* ERROR! */
	}

	/* Point liblzma's output at the caller's buffer */
	DATA(io)->strm.avail_out = len;
	DATA(io)->strm.next_out = buffer;

	/* Keep going until the caller's buffer is full, EOF, or error */
	while (DATA(io)->err == ERR_OK && DATA(io)->strm.avail_out > 0) {
		/* Refill the compressed-input buffer from the parent reader */
		while (DATA(io)->strm.avail_in <= 0) {
			int bytes_read = wandio_read(DATA(io)->parent,
				(char*)DATA(io)->inbuff,
				sizeof(DATA(io)->inbuff));
			if (bytes_read == 0) { /* EOF */
				/* Nothing was decompressed this call: latch
				 * EOF and report it */
				if (DATA(io)->strm.avail_out == (uint32_t)len) {
					DATA(io)->err = ERR_EOF;
					return 0;
				}
				/* Return how much data we've managed to read
				 * so far. */
				return len-DATA(io)->strm.avail_out;
			}
			if (bytes_read < 0) { /* Error */
				/* errno should be set */
				DATA(io)->err = ERR_ERROR;
				/* Return how much data we managed to read ok */
				if (DATA(io)->strm.avail_out != (uint32_t)len) {
					return len-DATA(io)->strm.avail_out;
				}
				/* Now return error */
				return -1;
			}
			DATA(io)->strm.next_in = DATA(io)->inbuff;
			DATA(io)->strm.avail_in = bytes_read;
		}
		/* Decompress some data into the output buffer */
		lzma_ret err=lzma_code(&DATA(io)->strm, LZMA_RUN);
		switch(err) {
			case LZMA_OK:
				DATA(io)->err = ERR_OK;
				break;
			case LZMA_STREAM_END:
				/* Clean end of the compressed stream */
				DATA(io)->err = ERR_EOF;
				break;
			default:
				/* Any other lzma_ret is a hard failure */
				errno=EIO;
				DATA(io)->err = ERR_ERROR;
		}
	}
	/* Return the number of bytes decompressed */
	return len-DATA(io)->strm.avail_out;
}
/* Read up to 'len' raw bytes from a corsaro input file into 'buffer'.
 *
 * Only valid for ASCII/BINARY/UNKNOWN mode files -- raw reads from a
 * libtrace file are refused (assert). Returns the wandio_read result
 * (bytes read, 0 on EOF, negative on error).
 */
off_t corsaro_file_rread(corsaro_file_in_t *file, void *buffer, off_t len)
{
  off_t bytes_read;

  /* refuse to read from a libtrace file */
  assert(file->mode == CORSARO_FILE_MODE_ASCII ||
         file->mode == CORSARO_FILE_MODE_BINARY ||
         file->mode == CORSARO_FILE_MODE_UNKNOWN);
  assert(file->wand_io != NULL);

  /* delegate straight to the wandio layer */
  bytes_read = wandio_read(file->wand_io, buffer, len);
  return bytes_read;
}
/* Parse the CSV file and push new inputs onto the input manager's queue.
 *
 * Returns the number of results queued, or -1 on open/read/parse failure.
 *
 * BUGFIX: the csv_parse and csv_fini error paths previously returned
 * without destroying 'file_io' (handle leak) and left csvfile_ds->input_mgr
 * pointing at the caller's manager; a negative wandio_read result was also
 * silently treated as EOF. All error paths now funnel through one cleanup
 * label.
 */
int bgpstream_csvfile_datasource_update_input_queue(
  bgpstream_csvfile_datasource_t *csvfile_ds, bgpstream_input_mgr_t *input_mgr)
{
  bgpstream_debug("\t\tBSDS_CSVFILE: csvfile_ds update input queue start");
  io_t *file_io = NULL;
  char buffer[BUFFER_LEN];
  /* renamed from 'read' to avoid shadowing POSIX read(2) */
  int bytes_read = 0;
  struct timeval tv;
  gettimeofday(&tv, NULL);

  /* we accept all timestamp earlier than now() - 1 second */
  csvfile_ds->max_accepted_ts = tv.tv_sec - 1;
  csvfile_ds->num_results = 0;
  csvfile_ds->max_ts_infile = 0;
  csvfile_ds->input_mgr = input_mgr;

  if((file_io = wandio_create(csvfile_ds->csvfile_file)) == NULL) {
    bgpstream_log_err("\t\tBSDS_CSVFILE: create csvfile_ds can't open file %s",
                      csvfile_ds->csvfile_file);
    csvfile_ds->input_mgr = NULL;
    return -1;
  }

  while((bytes_read = wandio_read(file_io, &buffer, BUFFER_LEN)) > 0) {
    if(csv_parse(&(csvfile_ds->parser), buffer, bytes_read,
                 parse_csvfile_field, parse_csvfile_rowend,
                 csvfile_ds) != bytes_read) {
      bgpstream_log_err("\t\tBSDS_CSVFILE: CSV error %s",
                        csv_strerror(csv_error(&(csvfile_ds->parser))));
      goto err;
    }
  }
  /* a negative return from wandio_read is an I/O error, not EOF */
  if(bytes_read < 0) {
    bgpstream_log_err("\t\tBSDS_CSVFILE: error reading file %s",
                      csvfile_ds->csvfile_file);
    goto err;
  }

  if(csv_fini(&(csvfile_ds->parser), parse_csvfile_field,
              parse_csvfile_rowend, csvfile_ds) != 0) {
    bgpstream_log_err("\t\tBSDS_CSVFILE: CSV error %s",
                      csv_strerror(csv_error(&(csvfile_ds->parser))));
    goto err;
  }

  wandio_destroy(file_io);
  csvfile_ds->input_mgr = NULL;
  csvfile_ds->last_processed_ts = csvfile_ds->max_ts_infile;

  bgpstream_debug("\t\tBSDS_CSVFILE: csvfile_ds update input queue end");
  return csvfile_ds->num_results;

err:
  wandio_destroy(file_io);
  csvfile_ds->input_mgr = NULL;
  return -1;
}
/* Read up to 'len' decompressed bytes from a bzip2 stream into 'buffer'.
 *
 * Returns the number of bytes produced, 0 at end of stream, or -1 on error
 * (errno set; EIO for decompression failures).
 *
 * BUGFIX: when the parent reader hit EOF with nothing decompressed, the
 * error state was never latched to ERR_EOF, so every subsequent call would
 * re-attempt a read from the (already exhausted) parent. Latch ERR_EOF in
 * that case, consistent with lzma_read.
 */
static off_t bz_read(io_t *io, void *buffer, off_t len)
{
	/* Short-circuit if a previous call already latched EOF or an error */
	if (DATA(io)->err == ERR_EOF)
		return 0; /* EOF */
	if (DATA(io)->err == ERR_ERROR) {
		errno=EIO;
		return -1; /* ERROR! */
	}

	/* Point libbz2's output at the caller's buffer */
	DATA(io)->strm.avail_out = len;
	DATA(io)->strm.next_out = buffer;

	while (DATA(io)->err == ERR_OK && DATA(io)->strm.avail_out > 0) {
		/* Refill the compressed-input buffer from the parent reader */
		while (DATA(io)->strm.avail_in <= 0) {
			int bytes_read = wandio_read(DATA(io)->parent,
				DATA(io)->inbuff,
				sizeof(DATA(io)->inbuff));
			if (bytes_read == 0) { /* EOF */
				if (DATA(io)->strm.avail_out == (uint32_t)len) {
					/* Nothing decompressed this call:
					 * latch EOF and report it */
					DATA(io)->err = ERR_EOF;
					return 0;
				}
				/* Return the data decompressed so far */
				return len-DATA(io)->strm.avail_out;
			}
			if (bytes_read < 0) { /* Error */
				/* Errno should already be set */
				DATA(io)->err = ERR_ERROR;
				/* Return how much data we managed to read ok */
				if (DATA(io)->strm.avail_out != (uint32_t)len) {
					return len-DATA(io)->strm.avail_out;
				}
				/* Now return error */
				return -1;
			}
			DATA(io)->strm.next_in = DATA(io)->inbuff;
			DATA(io)->strm.avail_in = bytes_read;
		}
		/* Decompress some data into the output buffer */
		int err=BZ2_bzDecompress(&DATA(io)->strm);
		switch(err) {
			case BZ_OK:
				DATA(io)->err = ERR_OK;
				break;
			case BZ_STREAM_END:
				/* Clean end of the compressed stream */
				DATA(io)->err = ERR_EOF;
				break;
			default:
				/* Any other return code is a hard failure */
				errno=EIO;
				DATA(io)->err = ERR_ERROR;
		}
	}
	/* Return the number of bytes decompressed */
	return len-DATA(io)->strm.avail_out;
}
/* Copy up to 'len' bytes into 'buffer' WITHOUT advancing the read offset.
 *
 * If the managed buffer does not already hold enough unread data, it is
 * grown (in PEEK_SIZE-rounded increments) and refilled from the child
 * reader. Returns the number of bytes peeked -- which may be less than
 * 'len' if the child hit EOF -- or a negative value on error (propagated
 * from alignedrealloc or the child reader). */
static int64_t peek_peek(io_t *io, void *buffer, int64_t len)
{
	int64_t ret = 0;
	int res = 0;

	/* Is there enough data in the buffer to serve this request? */
	if (DATA(io)->length - DATA(io)->offset < len) {
		/* No, we need to extend the buffer. */
		int64_t read_amount = len -
			(DATA(io)->length - DATA(io)->offset);
		/* Round the read_amount up to the nearest MB */
		read_amount += PEEK_SIZE -
			((DATA(io)->length + read_amount) % PEEK_SIZE);
		DATA(io)->buffer = alignedrealloc(DATA(io)->buffer,
			DATA(io)->length,
			DATA(io)->length + read_amount, &res);

		/* alignedrealloc failed; 'res' carries the error code */
		if (DATA(io)->buffer == NULL) {
			return res;
		}

		/* Use the child reader to read more data into our managed
		 * buffer */
		read_amount = wandio_read(DATA(io)->child,
			DATA(io)->buffer + DATA(io)->length,
			read_amount);

		/* Pass errors up */
		if (read_amount <0) {
			return read_amount;
		}

		/* May be a short read at EOF -- that is fine; we serve
		 * whatever we have below */
		DATA(io)->length += read_amount;
	}

	/* Right, now return data from the buffer (that now should be large
	 * enough, but might not be if we hit EOF) */
	ret = MIN(len, DATA(io)->length - DATA(io)->offset);
	memcpy(buffer, DATA(io)->buffer + DATA(io)->offset, ret);
	return ret;
}
/* Read one packet of 'len' bytes from a corsaro input file into 'packet'.
 *
 * BINARY mode reads raw bytes and wraps them in a constructed libtrace
 * packet; TRACE mode delegates to trace_read_packet(). ASCII/UNKNOWN
 * modes are invalid for packet reads.
 *
 * Returns the number of bytes read, or -1 on failure.
 *
 * BUGFIX: the "should never get here" path used assert(1), which is a
 * no-op and could never fire; it is now assert(0) so debug builds trap
 * the design-flaw case as the comment intends.
 */
off_t corsaro_file_rread_packet(corsaro_file_in_t *file,
                                libtrace_packet_t *packet, uint16_t len)
{
  uint8_t *pktbuf;

  switch(file->mode)
    {
    case CORSARO_FILE_MODE_BINARY:
      if((pktbuf = malloc(len)) == NULL)
        {
          fprintf(stderr, "could not malloc the packet buffer\n");
          return -1;
        }
      if(wandio_read(file->wand_io, pktbuf, len) != len)
        {
          fprintf(stderr, "could not read packet into buffer\n");
          free(pktbuf);
          return -1;
        }
      /* NOTE(review): if trace_construct_packet copies the buffer into
       * the packet's own storage, pktbuf leaks here -- confirm against
       * the libtrace docs before adding a free() */
      trace_construct_packet(packet, TRACE_TYPE_ETH, pktbuf, len);
      return len;
      break;

    case CORSARO_FILE_MODE_TRACE:
      return trace_read_packet(file->trace_io, packet);
      break;

    case CORSARO_FILE_MODE_ASCII:
    case CORSARO_FILE_MODE_UNKNOWN:
      /* refuse to read a packet from an ascii file */
      /* this is a design flaw in the code if we get here */
      assert(0);
      return -1;
    }

  return -1;
}
/* The reading thread: producer side of the threaded reader.
 *
 * Continually fills the circular set of buffers from the parent reader
 * ('userdata' is the io_t handle) and signals the consumer when data is
 * ready. Shared state (per-buffer 'state' flags and the 'closing' flag)
 * is protected by DATA(state)->mutex; the space_avail / data_ready
 * condition variables pair with the consumer thread. Exits on EOF, read
 * error, or when the consumer sets 'closing'. */
static void *thread_producer(void* userdata)
{
	io_t *state = (io_t*) userdata;
	int buffer=0; /* index of the buffer we will fill next */
	bool running = true;
#ifdef PR_SET_NAME
	/* Tag the thread's name with "[ior]" so it can be identified in
	 * ps/top output (Linux only) */
	char namebuf[17];
	if (prctl(PR_GET_NAME, namebuf, 0,0,0) == 0) {
		namebuf[16] = '\0'; /* Make sure it's NUL terminated */
		/* If the filename is too long, overwrite the last few bytes */
		if (strlen(namebuf)>9) {
			strcpy(namebuf+10,"[ior]");
		}
		else {
			strncat(namebuf," [ior]",16);
		}
		prctl(PR_SET_NAME, namebuf, 0,0,0);
	}
#endif
	pthread_mutex_lock(&DATA(state)->mutex);
	do {
		/* If all the buffers are full, we need to wait for one to
		 * become free otherwise we have nowhere to write to! */
		while (DATA(state)->buffer[buffer].state == FULL) {
			if (DATA(state)->closing)
				break;
			pthread_cond_wait(&DATA(state)->space_avail,
				&DATA(state)->mutex);
		}

		/* Don't bother reading any more data if we are shutting up
		 * shop */
		if (DATA(state)->closing) {
			break;
		}

		/* Drop the lock across the (potentially slow) read: the
		 * buffer we own is not FULL, so the consumer won't touch
		 * it in the meantime */
		pthread_mutex_unlock(&DATA(state)->mutex);

		/* Get the parent reader to fill the buffer */
		DATA(state)->buffer[buffer].len=wandio_read(
			DATA(state)->io,
			DATA(state)->buffer[buffer].buffer,
			sizeof(DATA(state)->buffer[buffer].buffer));

		pthread_mutex_lock(&DATA(state)->mutex);

		/* Publish the buffer to the consumer */
		DATA(state)->buffer[buffer].state = FULL;

		/* If we've not reached the end of the file keep going */
		running = (DATA(state)->buffer[buffer].len > 0 );

		/* Signal that there is data available for the main thread */
		pthread_cond_signal(&DATA(state)->data_ready);

		/* Move on to the next buffer */
		buffer=(buffer+1) % max_buffers;
	} while(running);

	/* If we reach here, it's all over so start tidying up */
	wandio_destroy(DATA(state)->io);

	/* Wake the consumer so it can observe the final (EOF/error)
	 * buffer and finish */
	pthread_cond_signal(&DATA(state)->data_ready);
	pthread_mutex_unlock(&DATA(state)->mutex);

	return NULL;
}
/* Slurp a JSON document from 'jsonfile', parse it with jsmn, and hand the
 * token stream to process_json().
 *
 * Returns the result of process_json(), or ERR_FATAL on read / allocation
 * / parse failure. 'js' and 'tok' are heap buffers owned (and freed) here.
 *
 * BUGFIXES:
 *  - 'js = realloc(js, ...)' and 'tok = realloc(tok, ...)' leaked the
 *    original buffer when realloc failed; now realloc into a temporary.
 *  - strncpy() was used to copy raw bytes (it stops at an embedded NUL);
 *    replaced with memcpy().
 *  - the byte reserved for a NUL terminator (the '+ 1') was never
 *    written; 'js' is now kept NUL-terminated.
 */
static int read_json(bgpstream_broker_datasource_t *broker_ds,
                     bgpstream_input_mgr_t *input_mgr, io_t *jsonfile)
{
  jsmn_parser p;
  jsmntok_t *tok = NULL;
  size_t tokcount = 128;
  int ret;
  char *js = NULL;
  size_t jslen = 0;
  void *tmp = NULL;
#define BUFSIZE 1024
  char buf[BUFSIZE];

  // prepare parser
  jsmn_init(&p);

  // allocate some tokens to start
  if ((tok = malloc(sizeof(jsmntok_t) * tokcount)) == NULL) {
    fprintf(stderr, "ERROR: Could not malloc initial tokens\n");
    goto err;
  }

  // slurp the whole file into a buffer
  while (1) {
    /* do a read */
    ret = wandio_read(jsonfile, buf, BUFSIZE);
    if (ret < 0) {
      fprintf(stderr, "ERROR: Reading from broker failed\n");
      goto err;
    }
    if (ret == 0) {
      // we're done
      break;
    }
    /* grow via a temporary so js is not leaked if realloc fails */
    if ((tmp = realloc(js, jslen + ret + 1)) == NULL) {
      fprintf(stderr, "ERROR: Could not realloc json string\n");
      goto err;
    }
    js = tmp;
    /* raw byte copy -- the chunk may legitimately contain any bytes */
    memcpy(js + jslen, buf, ret);
    jslen += ret;
    js[jslen] = '\0'; /* keep the buffer NUL-terminated */
  }

again:
  if ((ret = jsmn_parse(&p, js, jslen, tok, tokcount)) < 0) {
    if (ret == JSMN_ERROR_NOMEM) {
      // not enough tokens allocated: double and retry
      tokcount *= 2;
      if ((tmp = realloc(tok, sizeof(jsmntok_t) * tokcount)) == NULL) {
        fprintf(stderr, "ERROR: Could not realloc tokens\n");
        goto err;
      }
      tok = tmp;
      goto again;
    }
    if (ret == JSMN_ERROR_INVAL) {
      fprintf(stderr, "ERROR: Invalid character in JSON string\n");
      goto err;
    }
    fprintf(stderr, "ERROR: JSON parser returned %d\n", ret);
    goto err;
  }

  ret = process_json(broker_ds, input_mgr, js, tok, p.toknext);

  free(js);
  free(tok);
  if (ret == ERR_FATAL) {
    fprintf(stderr, "ERROR: Received fatal error from process_json\n");
  }
  return ret;

err:
  free(js);
  free(tok);
  fprintf(stderr, "%s: Returning fatal error code\n", __func__);
  return ERR_FATAL;
}