/*
 * Send opbuf to the librdkafka queue, rotating over all brokers.
 * On failure, retry once per second; after five consecutive failures
 * log the error, dump the unsent message and any queued data to local
 * files, and exit(7). Also samples the librdkafka queue size on every
 * iteration (written to g_monitor_qusizelogpath when non-empty).
 *
 * rks        array of rkcount kafka handles
 * tag        rd_kafka_produce() op flags (e.g. RD_KAFKA_OP_F_FREE)
 * opbuf/len  message payload and its length
 */
void producer(rd_kafka_t **rks, char *topic, int partitions, int tag,
	      char *opbuf, int len, int rkcount)
{
	int failnum = 0;
	int s = 1;

	while (s) {
		/* BUG FIX: forward the caller-supplied 'tag' instead of
		 * hard-coding RD_KAFKA_OP_F_FREE (the parameter was silently
		 * ignored). All current callers pass RD_KAFKA_OP_F_FREE, so
		 * their behavior is unchanged. */
		s = rotate_send_toqueue(rks, topic, partitions, tag,
					opbuf, len, rkcount);
		check_queuedata_size(rks, rkcount, g_monitor_qusizelogpath);
		if (s == 1) {
			sleep(1);
			if (++failnum == 5) {
				char timebuf[50] = { 0 };
				strcpy(timebuf, getcurrenttime());
				/* strip the trailing '\n' of the timestamp */
				timebuf[strlen(timebuf) - 1] = '\0';
				fprintf(stderr, "%s all broker down \n", timebuf);
				char buf[] = "all broker down";
				save_error(g_logsavelocal_tag, LOG_INFO, buf);
				save_snddata_tofile(opbuf);
				save_queuedata_tofile(rks, rkcount);
				exit(7);
			}
		}
	}
}
/*
 * Enqueue opbuf into librdkafka, starting at a random broker and
 * rotating through every broker on failure.
 * Returns 0 on success, 1 if all rkcount brokers failed.
 */
int rotate_send_toqueue(rd_kafka_t **rks, char *topic, int partitions, int tag,
			char *opbuf, int len, int rkcount)
{
	int i = 0;
	int partition = 0;
	int rk = 0;
	int ret = 0;
	static int seeded = 0;

	/* BUG FIX: seed the PRNG once per process. The original called
	 * srand(time(NULL)) on every invocation, so all messages sent
	 * within the same second picked the same broker/partition
	 * sequence. */
	if (!seeded) {
		srand(time(NULL));
		seeded = 1;
	}

	rk = rand() % rkcount;
	for (; i < rkcount; ++i, ++rk) {
		rk %= rkcount;
		partition = rand() % partitions;
		ret = rd_kafka_produce(rks[rk], topic, partition, tag, opbuf, len);
		if (ret == 0) {
			return 0;
		}
		/* Log the failed payload and try the next broker. */
		size_t bufsize = strlen(opbuf) + 128;
		char *buf = calloc(1, bufsize);
		if (buf != NULL) {	/* don't crash on OOM while error-logging */
			snprintf(buf, bufsize, "sendkafka[%d]: failed: %s\n",
				 getpid(), opbuf);
			save_error(g_logsavelocal_tag, LOG_INFO, buf);
			free(buf);
			buf = NULL;
		}
	}
	return 1;
}
/*
 * Save opbuf to the local queue-data file on fatal exit so the
 * message can be replayed on next startup. The path comes from user
 * configuration, default under /var/log/sendkafka.
 * Truncates (recreates) any existing file first.
 */
void save_snddata_tofile(char *opbuf)
{
	/* BUG FIX: the original guard was 'if (opbuf == NULL || strlen(opbuf))
	 * return;' which returned exactly when there WAS data to save and
	 * only fell through for empty strings. Skip only NULL/empty input. */
	if (opbuf == NULL || strlen(opbuf) == 0)
		return;

	if (access(g_queue_data_filepath, F_OK) == 0) {
		unlink(g_queue_data_filepath);
	}
	int fd = open(g_queue_data_filepath, O_WRONLY | O_APPEND | O_CREAT, 0666);
	if (fd == -1) {
		char buf[1200] = { 0 };
		/* snprintf: the path may be up to 1024 bytes, the original
		 * sprintf into a 100-byte buffer could overflow */
		snprintf(buf, sizeof(buf), "%d line open %s file fail...",
			 __LINE__ - 4, g_queue_data_filepath);
		perror(buf);
		save_error(g_logsavelocal_tag, LOG_CRIT, buf);
		exit(6);
	}
	write(fd, opbuf, strlen(opbuf));
	close(fd);
}
/*
 * Drain every broker's librdkafka output queue and append the raw
 * payloads to the local queue-data file, so unsent messages survive a
 * restart. The path comes from user configuration, default under
 * /var/log/sendkafka.
 *
 * NOTE(review): this reads librdkafka-internal fields (rk_op,
 * rko_payload) directly, so it is tied to the old librdkafka it was
 * written against; rko is never released here (callers exit shortly
 * after) — confirm against the bundled librdkafka version.
 */
void save_queuedata_tofile(rd_kafka_t **rks, int rkcount)
{
	int fd = open(g_queue_data_filepath, O_WRONLY | O_APPEND | O_CREAT, 0666);
	if (fd == -1) {
		char buf[1200] = { 0 };
		/* snprintf: the path may be up to 1024 bytes, the original
		 * sprintf into a 100-byte buffer could overflow */
		snprintf(buf, sizeof(buf), "%d line open %s file fail...",
			 __LINE__ - 4, g_queue_data_filepath);
		perror(buf);
		save_error(g_logsavelocal_tag, LOG_CRIT, buf);
		exit(5);
	}
	rd_kafka_op_t *rko = NULL;
	int i = 0;
	for (i = 0; i < rkcount; i++) {
		while (rd_kafka_outq_len(rks[i]) > 0) {
			rko = rd_kafka_q_read(&(rks[i]->rk_op), RD_POLL_INFINITE);
			write(fd, rko->rko_payload, rko->rko_len);
		}
	}
	close(fd);
}
/* Read one packet and interpret its leading status byte.
 * Returns 1 for an OK packet (0x00); records the server error and
 * returns 0 for an error packet (0xFF). Any other leading byte is
 * accepted when allow_others is set, otherwise reported as an
 * invalid packet. */
static int myp_ok( MYSQL *m, int allow_others ) {
	MYSQL_PACKET *p = &m->packet;
	int status;
	if( !myp_read_packet(m,p) ) {
		error(m,"Failed to read packet",NULL);
		return 0;
	}
	status = myp_read_byte(p);
	switch( status ) {
	case 0x00:
		return 1;
	case 0xFF:
		/* server-reported error: stash it for mysql_error() */
		save_error(m,p);
		return 0;
	default:
		if( allow_others )
			return 1;
		error(m,"Invalid packet error",NULL);
		return 0;
	}
}
// Execute this queued I/O request and signal completion.
// Logs the request parameters (buffer, file, allocator id, offset,
// byte count and direction), delegates the actual transfer to
// file_->serve(), captures any IoError message via save_error() for
// later retrieval instead of propagating it, and finally notifies
// waiters through completed(). check_nref() is invoked before and
// after — presumably a reference-count sanity check; confirm against
// the base class.
void ServingRequest::serve()
{
    check_nref();
    LOG << "serving_request::serve(): " << buffer_ << " @ ["
        << file_ << "|" << file_->get_allocator_id() << "]0x"
        << std::hex << std::setfill('0') << std::setw(8)
        << offset_ << "/0x" << bytes_
        << ((type_ == Request::READ) ? " READ" : " WRITE");
    try {
        file_->serve(buffer_, offset_, bytes_, type_);
    } catch (const IoError& ex) {
        // swallow the exception: the error text is stored on the
        // request and reported to the waiter, not thrown here
        save_error(ex.safe_message());
    }
    check_nref(true);
    completed(false);
}
/*
 * Periodically sample every broker's librdkafka output queue length
 * and append one line per broker to queuesize_path (rotating the log
 * first). A sample is taken at most once per g_monitor_period seconds;
 * 'lasttime' guards against writing twice in the same second.
 */
void check_queuedata_size(rd_kafka_t **rks, int num, char *queuesize_path)
{
	static time_t lasttime = 0;
	time_t curenttime = getcurrents();

	if ((curenttime % g_monitor_period) == 0 && curenttime != lasttime) {
		char buf[1200] = { 0 };	/* was 128: timestamp + broker name could overflow */
		int i = 0;
		rotate_logs(queuesize_path);
		int fd = open(queuesize_path, O_WRONLY | O_APPEND | O_CREAT, 0666);
		if (fd == -1) {
			/* renamed from 'buf' — the original shadowed the outer
			 * buffer; also bounded with snprintf and reported via
			 * perror for consistency with the sibling functions */
			char errbuf[1200] = { 0 };
			snprintf(errbuf, sizeof(errbuf), "%d line open %s fail...",
				 __LINE__ - 4, queuesize_path);
			perror(errbuf);
			save_error(g_logsavelocal_tag, LOG_CRIT, errbuf);
			exit(3);
		}
		char timebuf[50] = { 0 };
		strcpy(timebuf, getcurrenttime());
		/* strip the trailing '\n' of the timestamp */
		timebuf[strlen(timebuf) - 1] = '\0';
		for (; i < num; ++i) {
			snprintf(buf, sizeof(buf), "%s|%s| queue size= %d\n",
				 timebuf,
				 rks[i] ? rks[i]->rk_broker.name : "",
				 rd_kafka_outq_len(rks[i]));
			write(fd, buf, strlen(buf));
			memset(buf, '\0', sizeof(buf));
		}
		close(fd);
		lasttime = curenttime;
	}
}
/* Complete value processing.
 *
 * Turns the accumulated raw key/value lines (or, in merge mode, the
 * pending merge value) into a value object and stores it in the
 * current section, honoring the configured duplicate-key collision
 * mode (error / preserve / allow / overwrite / detect).
 * Returns EOK on success or an error code; EEXIST-class duplicate
 * conditions may additionally be recorded in po->el / po->merge_error
 * rather than aborting, depending on the mode.
 */
static int complete_value_processing(struct parser_obj *po)
{
    int error = EOK;
    int error2 = EOK;
    struct value_obj *vo = NULL;
    struct value_obj *vo_old = NULL;
    unsigned insertmode;
    uint32_t mergemode;
    int suppress = 0;        /* preserve mode: swallow EEXIST on insert */
    int doinsert = 0;
    struct collection_item *item = NULL;
    struct collection_item *section = NULL;
    int merging = 0;

    TRACE_FLOW_ENTRY();

    /* Pick the target section: merge target, a freshly created default
     * section, or the currently open one. */
    if (po->merge_sec) {
        TRACE_INFO_STRING("Processing value in merge mode", "");
        section = po->merge_sec;
        merging = 1;
    }
    else if(!(po->sec)) {
        TRACE_INFO_STRING("Creating default section", "");
        /* If there is not open section create a default one */
        error = col_create_collection(&po->sec,
                                      INI_DEFAULT_SECTION,
                                      COL_CLASS_INI_SECTION);
        if (error) {
            TRACE_ERROR_NUMBER("Failed to create default section", error);
            return error;
        }
        section = po->sec;
    }
    else {
        TRACE_INFO_STRING("Processing value in normal mode", "");
        section = po->sec;
    }

    if (merging) {
        TRACE_INFO_STRING("Using merge key:", po->merge_key);
        vo = po->merge_vo;
        /* We are adding to the merge section so use MV2S flags.
         * But flags are done in such a way that deviding MV2S by MV1S mask
         * will translate MV2S flags into MV1S so we can use
         * MV1S constants. */
        TRACE_INFO_NUMBER("Collisions flags:", po->collision_flags);
        mergemode = (po->collision_flags & INI_MV2S_MASK) / INI_MV1S_MASK;
    }
    else {
        /* Construct value object from what we have */
        error = value_create_from_refarray(po->raw_lines,
                                           po->raw_lengths,
                                           po->keylinenum,
                                           INI_VALUE_READ,
                                           po->key_len,
                                           po->boundary,
                                           po->ic,
                                           &vo);
        if (error) {
            TRACE_ERROR_NUMBER("Failed to create value object", error);
            return error;
        }
        /* Forget about the arrays. They are now owned by the value object */
        po->ic = NULL;
        po->raw_lines = NULL;
        po->raw_lengths = NULL;
        mergemode = po->collision_flags & INI_MV1S_MASK;
    }

    /* Map the collision mode onto an insert strategy. OVERWRITE and
     * DETECT fall through with doinsert == 0 and are handled by the
     * duplicate search below. */
    switch (mergemode) {
    case INI_MV1S_ERROR:

        insertmode = COL_INSERT_DUPERROR;
        doinsert = 1;
        break;

    case INI_MV1S_PRESERVE:

        insertmode = COL_INSERT_DUPERROR;
        doinsert = 1;
        suppress = 1;
        break;

    case INI_MV1S_ALLOW:

        insertmode = COL_INSERT_NOCHECK;
        doinsert = 1;
        break;

    case INI_MV1S_OVERWRITE: /* Special handling */
    case INI_MV1S_DETECT:
    default:
        break;
    }

    /* Do not insert but search for dups first */
    if (!doinsert) {

        TRACE_INFO_STRING("Overwrite mode. Looking for:",
                          (char *)(merging ? po->merge_key : po->key));

        error = col_get_item(section,
                             merging ? po->merge_key : po->key,
                             COL_TYPE_BINARY,
                             COL_TRAVERSE_DEFAULT,
                             &item);
        if (error) {
            TRACE_ERROR_NUMBER("Failed searching for dup", error);
            value_destroy(vo);
            return error;
        }

        /* Check if there is a dup */
        if (item) {
            /* Check if we are in the detect mode */
            if (mergemode == INI_MV1S_DETECT) {
                po->merge_error = EEXIST;
                /* There is a dup - inform user about it and continue */
                error = save_error(po->el,
                                   merging ? po->seclinenum : po->keylinenum,
                                   merging ? ERR_DUPKEYSEC : ERR_DUPKEY,
                                   ERROR_TXT);
                if (error) {
                    TRACE_ERROR_NUMBER("Failed to save error", error);
                    value_destroy(vo);
                    return error;
                }
                /* detect mode still stores the duplicate */
                doinsert = 1;
                insertmode = COL_INSERT_NOCHECK;
            }
            else {
                /* Dup exists - update it */
                vo_old = *((struct value_obj **)(col_get_item_data(item)));
                error = col_modify_binary_item(item,
                                               NULL,
                                               &vo,
                                               sizeof(struct value_obj *));
                if (error) {
                    TRACE_ERROR_NUMBER("Failed updating the value", error);
                    value_destroy(vo);
                    return error;
                }
                /* If we failed to update it is better to leak then crash,
                 * so destroy original value only on the successful update. */
                value_destroy(vo_old);
            }
        }
        else {
            /* No dup found so we can insert with no check */
            doinsert = 1;
            insertmode = COL_INSERT_NOCHECK;
        }
    }

    if (doinsert) {
        /* Add value to collection */
        error = col_insert_binary_property(section,
                                           NULL,
                                           COL_DSP_END,
                                           NULL,
                                           0,
                                           insertmode,
                                           merging ? po->merge_key : po->key,
                                           &vo,
                                           sizeof(struct value_obj *));
        if (error) {
            value_destroy(vo);

            if ((suppress) && (error == EEXIST)) {
                /* preserve mode: existing value wins, not an error */
                TRACE_INFO_STRING("Preseved exisitng value",
                                  (char *)(merging ? po->merge_key : po->key));
            }
            else {
                /* Check if this is a critical error or not */
                if ((mergemode == INI_MV1S_ERROR) && (error == EEXIST)) {
                    TRACE_ERROR_NUMBER("Failed to add value object "
                                       "to the section", error);
                    error2 = save_error(po->el,
                                        merging ? po->seclinenum : po->keylinenum,
                                        merging ? ERR_DUPKEYSEC : ERR_DUPKEY,
                                        ERROR_TXT);
                    if (error2) {
                        TRACE_ERROR_NUMBER("Failed to save error", error2);
                        return error2;
                    }
                    return error;
                }
                else {
                    TRACE_ERROR_NUMBER("Failed to add value object"
                                       " to the section", error);
                    return error;
                }
            }
        }
    }

    if (!merging) {
        /* the key buffer is only owned by the parser outside merge mode */
        free(po->key);
        po->key = NULL;
        po->key_len = 0;
    }

    TRACE_FLOW_EXIT();
    return EOK;
}
/* Save the currently open section into the configuration.
 * (The original header comment — "read next line from the file" —
 * described a different function.)
 *
 * If a section with the same name already exists, resolve the
 * collision according to the INI_MS_* mode in po->collision_flags:
 * error out, preserve the old one, overwrite it, detect-and-merge,
 * or merge. Otherwise the section is embedded into po->top.
 * Returns EOK, EEXIST (error mode) or another error code.
 */
static int parser_save_section(struct parser_obj *po)
{
    int error = EOK;
    uint32_t mergemode;
    int merge = 0;

    TRACE_FLOW_ENTRY();

    if (po->sec) {

        TRACE_INFO_STRING("Section exists.", "");

        /* First detect if we have collision */
        error = check_section_collision(po);
        if (error) {
            TRACE_ERROR_NUMBER("Failed to check for collision", error);
            return error;
        }

        if (po->merge_sec) {

            TRACE_INFO_STRING("Merge collision detected", "");

            mergemode = po->collision_flags & INI_MS_MASK;

            switch (mergemode) {
            case INI_MS_ERROR:
                /* Report error and return */
                TRACE_INFO_STRING("Reporting error", "duplicate section");
                error = save_error(po->el,
                                   po->seclinenum,
                                   ERR_DUPSECTION,
                                   ERROR_TXT);
                if (error) {
                    TRACE_ERROR_NUMBER("Failed to "
                                       "save error",
                                        error);
                    return error;
                }
                /* Return error */
                TRACE_FLOW_RETURN(EEXIST);
                return EEXIST;

            case INI_MS_PRESERVE:
                /* Delete new section */
                TRACE_INFO_STRING("Preserve mode", "");
                col_destroy_collection_with_cb(
                                        po->sec,
                                        ini_cleanup_cb,
                                        NULL);
                po->sec = NULL;
                break;

            case INI_MS_OVERWRITE:
                /* Empty existing section */
                TRACE_INFO_STRING("Ovewrite mode", "");
                error = empty_section(po->merge_sec);
                if (error) {
                    TRACE_ERROR_NUMBER("Failed to "
                                       "empty section",
                                        error);
                    return error;
                }
                merge = 1;
                break;

            case INI_MS_DETECT:
                /* Detect mode: record the duplicate but merge anyway */
                TRACE_INFO_STRING("Detect mode", "");
                po->merge_error = EEXIST;
                error = save_error(po->el,
                                   po->seclinenum,
                                   ERR_DUPSECTION,
                                   ERROR_TXT);
                if (error) {
                    TRACE_ERROR_NUMBER("Failed to "
                                       "save error",
                                        error);
                    return error;
                }
                merge = 1;
                break;

            case INI_MS_MERGE:
                /* Merge */
            default:
                TRACE_INFO_STRING("Merge mode", "");
                merge = 1;
                break;
            }

            if (merge) {
                error = merge_section(po);
                if (error) {
                    TRACE_ERROR_NUMBER("Failed to merge section", error);
                    return error;
                }
            }

            po->merge_sec = NULL;
        }
        else {
            /* Add section to configuration */
            TRACE_INFO_STRING("Now adding collection", "");
            error = col_add_collection_to_collection(po->top,
                                                     NULL, NULL,
                                                     po->sec,
                                                     COL_ADD_MODE_EMBED);
            if (error) {
                TRACE_ERROR_NUMBER("Failed to embed section", error);
                return error;
            }

            po->sec = NULL;
        }
    }

    TRACE_FLOW_EXIT();
    return EOK;
}
/* Error and warning processing.
 *
 * Record po->last_error in the error list, then decide whether to
 * keep parsing (PARSE_READ) or stop (PARSE_DONE) based on the
 * configured error level, and set po->ret accordingly (EILSEQ for
 * warnings, EIO for errors; an error upgrades a prior warning code).
 * When stopping, the open section is saved; when continuing, the
 * offending input line is released. The chosen action is queued for
 * the parser's state machine. Returns EOK or an error code.
 */
static int parser_error(struct parser_obj *po)
{
    int error = EOK;
    uint32_t action;
    const char *err_str;

    TRACE_FLOW_ENTRY();

    if (po->last_error & INI_WARNING)
        err_str = WARNING_TXT;
    else
        err_str = ERROR_TXT;
    error = save_error(po->el,
                       po->linenum,
                       po->last_error & ~INI_WARNING,
                       err_str);
    if (error) {
        TRACE_ERROR_NUMBER("Failed to add error to error list",
                            error);
        return error;
    }

    if (po->last_error == ERR_BADCOMMENT) {
        /* Avoid endless loop */
        action = PARSE_DONE;
        po->ret = EIO;
    }
    else if (po->error_level == INI_STOP_ON_ANY) {
        action = PARSE_DONE;
        if (po->last_error & INI_WARNING)
            po->ret = EILSEQ;
        else
            po->ret = EIO;
    }
    else if (po->error_level == INI_STOP_ON_NONE) {
        if (po->last_error != ERR_READ) {
            action = PARSE_READ;
            if (po->ret == 0) {
                if (po->last_error & INI_WARNING)
                    po->ret = EILSEQ;
                else
                    po->ret = EIO;
            }
            /* It it was warning but now if it is an error
             * bump to return code to indicate error. */
            else if((po->ret == EILSEQ) &&
                    (!(po->last_error & INI_WARNING)))
                po->ret = EIO;
        }
        else {
            /* Avoid endless loop */
            action = PARSE_DONE;
            po->ret = EIO;
        }
    }
    else {
        /* Stop on error */
        if (po->last_error & INI_WARNING) {
            action = PARSE_READ;
            po->ret = EILSEQ;
        }
        else {
            action = PARSE_DONE;
            po->ret = EIO;
        }
    }

    /* Prepare for reading */
    if (action == PARSE_READ) {
        if (po->last_read) {
            free(po->last_read);
            po->last_read = NULL;
            po->last_read_len = 0;
        }
    }
    else {
        /* If we are done save the section */
        error = parser_save_section(po);
        if (error) {
            TRACE_ERROR_NUMBER("Failed to save section", error);
            /* If merging sections should produce error and we got error
             * or if we merge sections but dup values produce error and
             * we got error then it is not a fatal error so we need to handle
             * it nicely and suppress it here. We already in the procees
             * of handling another error and merge error does not matter here.
             * We check for reverse condition and return error,
             * otherwise fall through. */
            if (!((((po->collision_flags & INI_MS_MASK) == INI_MS_ERROR) &&
                   (error == EEXIST)) ||
                  (((po->collision_flags & INI_MS_MASK) == INI_MS_MERGE) &&
                   ((po->collision_flags & INI_MV2S_MASK) == INI_MV2S_ERROR) &&
                   (error == EEXIST)))) {
                return error;
            }
        }
    }

    /* Move to the next action */
    error = col_enqueue_unsigned_property(po->queue,
                                          PARSE_ACTION,
                                          action);
    if (error) {
        TRACE_ERROR_NUMBER("Failed to schedule an action", error);
        return error;
    }

    TRACE_FLOW_EXIT();
    return EOK;
}
/* Read an entire result set from the wire into r.
 *
 * Protocol sequence: field count, one packet per field definition
 * (4.1 and pre-4.1 layouts differ), an EOF packet, then one packet
 * per row until a closing EOF. Row packets are kept as-is (current->raw
 * adopts p->buf) and column pointers index into them; each column is
 * NUL-terminated in place by zeroing the byte that follows it.
 * Returns 1 on success, 0 on any read/protocol error (an 0xFF row
 * packet stores the server error via save_error()).
 *
 * NOTE(review): malloc results are not checked here — presumably the
 * surrounding library accepts aborting on OOM; confirm. */
static int do_store( MYSQL *m, MYSQL_RES *r ) {
	int i;
	MYSQL_PACKET *p = &m->packet;
	p->pos = 0;
	r->nfields = myp_read_bin(p);
	if( p->error )
		return 0;
	r->fields = (MYSQL_FIELD*)malloc(sizeof(MYSQL_FIELD) * r->nfields);
	memset(r->fields,0,sizeof(MYSQL_FIELD) * r->nfields);
	/* field definition packets */
	for(i=0;i<r->nfields;i++) {
		if( !myp_read_packet(m,p) )
			return 0;
		{
			MYSQL_FIELD *f = r->fields + i;
			/* catalog/db/org_* only exist in the 4.1 protocol */
			f->catalog = m->is41 ? myp_read_bin_str(p) : NULL;
			f->db = m->is41 ? myp_read_bin_str(p) : NULL;
			f->table = myp_read_bin_str(p);
			f->org_table = m->is41 ? myp_read_bin_str(p) : NULL;
			f->name = myp_read_bin_str(p);
			f->org_name = m->is41 ? myp_read_bin_str(p) : NULL;
			if( m->is41 ) myp_read_byte(p);
			f->charset = m->is41 ? myp_read_ui16(p) : 0x08;
			f->length = m->is41 ? myp_read_int(p) : myp_read_bin(p);
			f->type = (FIELD_TYPE)(m->is41 ? myp_read_byte(p) : myp_read_bin(p));
			f->flags = m->is41 ? myp_read_ui16(p) : myp_read_bin(p);
			f->decimals = myp_read_byte(p);
			if( m->is41 ) myp_read_byte(p); // should be 0
			if( m->is41 ) myp_read_byte(p); // should be 0
			if( p->error )
				return 0;
		}
	}
	// first EOF packet
	if( !myp_read_packet(m,p) )
		return 0;
	if( myp_read_byte(p) != 0xFE || p->size >= 9 )
		return 0;
	// reset packet buffer (to prevent to store large buffer in row data)
	free(p->buf);
	p->buf = NULL;
	p->mem = 0;
	// datas
	while( 1 ) {
		if( !myp_read_packet(m,p) )
			return 0;
		// EOF : end of datas
		if( (unsigned char)p->buf[0] == 0xFE && p->size < 9 )
			break;
		// ERROR ?
		if( (unsigned char)p->buf[0] == 0xFF ) {
			save_error(m,p);
			return 0;
		}
		// allocate one more row (capacity doubles: 1, 2, 4, ...)
		if( r->row_count == r->memory_rows ) {
			MYSQL_ROW_DATA *rows;
			r->memory_rows = r->memory_rows ? (r->memory_rows << 1) : 1;
			rows = (MYSQL_ROW_DATA*)malloc(r->memory_rows * sizeof(MYSQL_ROW_DATA));
			memcpy(rows,r->rows,r->row_count * sizeof(MYSQL_ROW_DATA));
			free(r->rows);
			r->rows = rows;
		}
		// read row fields
		{
			MYSQL_ROW_DATA *current = r->rows + r->row_count++;
			int prev = 0;
			current->raw = p->buf;
			current->lengths = (unsigned long*)malloc(sizeof(unsigned long) * r->nfields);
			current->datas = (char**)malloc(sizeof(char*) * r->nfields);
			for(i=0;i<r->nfields;i++) {
				int l = myp_read_bin(p);
				/* terminate the previous column in place */
				if( !p->error )
					p->buf[prev] = 0;
				if( l == -1 ) {
					/* length -1 encodes SQL NULL */
					current->lengths[i] = 0;
					current->datas[i] = NULL;
				} else {
					current->lengths[i] = l;
					current->datas[i] = p->buf + p->pos;
					p->pos += l;
				}
				prev = p->pos;
			}
			if( !p->error )
				p->buf[prev] = 0;
		}
		// the packet buffer as been stored, don't reuse it
		p->buf = NULL;
		p->mem = 0;
		if( p->error )
			return 0;
	}
	return 1;
}
/* Establish a TCP connection to a MySQL server and authenticate.
 *
 * Resolves 'host', connects on 'port', parses the server handshake
 * packet (protocol version, server version/flags/charset, scramble),
 * then sends a client-auth packet in either the 4.1 or the pre-4.1
 * layout depending on the server's FL_PROTOCOL_41 flag. An EOF reply
 * triggers the old (3.23) password scheme retry via the
 * send_cnx_packet label. Unix sockets are not supported.
 * Returns m on success, NULL on failure (error recorded on m).
 */
MYSQL *mysql_real_connect( MYSQL *m, const char *host, const char *user, const char *pass, void *unused, int port, const char *socket, int options ) {
	PHOST h;
	char scramble_buf[21];
	MYSQL_PACKET *p = &m->packet;
	int pcount = 1;
	if( socket && *socket ) {
		error(m,"Unix Socket connections are not supported",NULL);
		return NULL;
	}
	h = phost_resolve(host);
	if( h == UNRESOLVED_HOST ) {
		error(m,"Failed to resolve host '%s'",host);
		return NULL;
	}
	m->s = psock_create();
	if( m->s == INVALID_SOCKET ) {
		error(m,"Failed to create socket",NULL);
		return NULL;
	}
	psock_set_fastsend(m->s,1);
	psock_set_timeout(m->s,50); // 50 seconds
	if( psock_connect(m->s,h,port) != PS_OK ) {
		myp_close(m);
		error(m,"Failed to connect on host '%s'",host);
		return NULL;
	}
	if( !myp_read_packet(m,p) ) {
		myp_close(m);
		error(m,"Failed to read handshake packet",NULL);
		return NULL;
	}
	// process handshake packet
	{
		char filler[13];
		unsigned int len;
		m->infos.proto_version = myp_read_byte(p);
		// this seems like an error packet
		if( m->infos.proto_version == 0xFF ) {
			myp_close(m);
			save_error(m,p);
			return NULL;
		}
		m->infos.server_version = strdup(myp_read_string(p));
		m->infos.thread_id = myp_read_int(p);
		myp_read(p,scramble_buf,8);
		myp_read_byte(p); // should be 0
		m->infos.server_flags = myp_read_ui16(p);
		m->infos.server_charset = myp_read_byte(p);
		m->infos.server_status = myp_read_ui16(p);
		// upper half of the capability flags (4.1+)
		m->infos.server_flags |= myp_read_ui16(p) << 16;
		len = myp_read_byte(p);
		myp_read(p,filler,10);
		// try to disable 41
		m->is41 = (m->infos.server_flags & FL_PROTOCOL_41) != 0;
		// remaining 13 scramble bytes only exist in the 4.1 handshake
		if( !p->error && m->is41 )
			myp_read(p,scramble_buf + 8,13);
		if( p->pos != p->size )
			myp_read_string(p); // 5.5+
		if( p->error ) {
			myp_close(m);
			error(m,"Failed to decode server handshake",NULL);
			return NULL;
		}
		// fill answer packet
		{
			unsigned int flags = m->infos.server_flags;
			int max_packet_size = 0x01000000;
			SHA1_DIGEST hpass;
			char filler[23];
			flags &= (FL_PROTOCOL_41 | FL_TRANSACTIONS | FL_SECURE_CONNECTION);
			myp_begin_packet(p,128);
			if( m->is41 ) {
				myp_write_int(p,flags);
				myp_write_int(p,max_packet_size);
				myp_write_byte(p,m->infos.server_charset);
				memset(filler,0,23);
				myp_write(p,filler,23);
				myp_write_string(p,user);
				if( *pass ) {
					// SHA1-based auth (4.1 secure scheme)
					myp_encrypt_password(pass,scramble_buf,hpass);
					myp_write_bin(p,SHA1_SIZE);
					myp_write(p,hpass,SHA1_SIZE);
					myp_write_byte(p,0);
				} else
					myp_write_bin(p,0);
			} else {
				myp_write_ui16(p,flags);
				// max_packet_size
				myp_write_byte(p,0xFF);
				myp_write_byte(p,0xFF);
				myp_write_byte(p,0xFF);
				myp_write_string(p,user);
				if( *pass ) {
					// old 3.23 scramble scheme
					char hpass[SEED_LENGTH_323 + 1];
					myp_encrypt_pass_323(pass,scramble_buf,hpass);
					hpass[SEED_LENGTH_323] = 0;
					myp_write(p,hpass,SEED_LENGTH_323 + 1);
				} else
					myp_write_bin(p,0);
			}
		}
	}
	// send connection packet
send_cnx_packet:
	if( !myp_send_packet(m,p,&pcount) ) {
		myp_close(m);
		error(m,"Failed to send connection packet",NULL);
		return NULL;
	}
	// read answer packet
	if( !myp_read_packet(m,p) ) {
		myp_close(m);
		error(m,"Failed to read packet",NULL);
		return NULL;
	}
	// increase packet counter (because we read one packet)
	pcount++;
	// process answer
	{
		int code = myp_read_byte(p);
		switch( code ) {
		case 0: // OK packet
			break;
		case 0xFF: // ERROR
			myp_close(m);
			save_error(m,p);
			return NULL;
		case 0xFE: // EOF
			// we are asked to send old password authentification
			if( p->size == 1 ) {
				char hpass[SEED_LENGTH_323 + 1];
				myp_encrypt_pass_323(pass,scramble_buf,hpass);
				hpass[SEED_LENGTH_323] = 0;
				myp_begin_packet(p,0);
				myp_write(p,hpass,SEED_LENGTH_323 + 1);
				goto send_cnx_packet;
			}
			// fallthrough
		default:
			myp_close(m);
			error(m,"Invalid packet error",NULL);
			return NULL;
		}
	}
	// we are connected, setup a longer timeout
	psock_set_timeout(m->s,18000);
	return m;
}
int main(int argc, char *argv[],char *envp[]) { rd_kafka_t *rks[1024] = { 0 }; int rkcount = 0; char value[1024] = { 0 }; char brokers[1024] = "localhost:9092"; char *broker = NULL; char topic[1024] = "topic"; int sendcnt = 0; int partitions = 4; int opt; int len = 0; char *opbuf = NULL; char config_file[1024] = ""; char path[PATH_MAX] = {0}; char processname[1024] = {0}; get_executable_path(path,processname,sizeof(processname)); snprintf(config_file, sizeof(config_file), "/etc/sendkafka/%s.conf", processname); if (read_config("brokers", value, sizeof(value), config_file) > 0) { strcpy(brokers, value); } if (read_config("topic", value, sizeof(value),config_file) > 0) { strcpy(topic, value); } if (read_config ("partitions", value, sizeof(value), config_file) > 0) { partitions = atoi(value); if (partitions <= 0 || partitions > 256) { partitions = 4; } } if (read_config ("data_path", value, sizeof(value), config_file) > 0) { strcpy(g_queue_data_filepath, value); } if (read_config ("error_path", value, sizeof(value), config_file) > 0) { strcpy(g_error_logpath, value); } if (read_config ("logsavelocal_tag", value, sizeof(value), config_file) > 0) { g_logsavelocal_tag = atoi(value); } if (read_config ("lognum_max", value, sizeof(value), config_file) > 0) { g_logfilenum_max = atoi(value); } if (read_config ("monitor_period", value, sizeof(value), config_file) > 0) { g_monitor_period = atoi(value); } if (read_config ("logsize_max", value, sizeof(value), config_file) > 0) { g_logfilesize_max = atoi(value); } if (read_config ("queue_sizepath", value, sizeof(value), config_file) > 0) { strcpy(g_monitor_qusizelogpath, value); } while ((opt = getopt(argc, argv, "hb:c:d:p:t:o:m:n:l:x:")) != -1) { switch (opt) { case 'b': strncpy(brokers, optarg, sizeof(brokers)); brokers[sizeof(brokers) - 1] = '\0'; break; case 'c': if (read_config("brokers", value, sizeof(value), optarg) > 0) { strcpy(brokers, value); } if (read_config("topic", value, sizeof(value), optarg) > 0) { strcpy(topic, 
value); } if (read_config ("partitions", value, sizeof(value), optarg) > 0) { partitions = atoi(value); if (partitions <= 0 || partitions > 256) { partitions = 4; } } if (read_config ("data_path", value, sizeof(value), optarg) > 0) { strcpy(g_queue_data_filepath, value); } if (read_config ("queue_sizepath", value, sizeof(value), optarg) > 0) { strcpy(g_monitor_qusizelogpath, value); } if (read_config ("error_path", value, sizeof(value), optarg) > 0) { strcpy(g_error_logpath, value); } if (read_config ("savelocal_tag", value, sizeof(value), optarg) > 0) { g_logsavelocal_tag = atoi(value); } if (read_config ("monitor_period", value, sizeof(value), optarg) > 0) { g_monitor_period = atoi(value); } if (read_config ("lognum_max", value, sizeof(value), optarg) > 0) { g_logfilenum_max = atoi(value); } if (read_config ("logsize_max", value, sizeof(value), optarg) > 0) { g_logfilesize_max = atoi(value); } break; case 'o': if (NULL != optarg) { g_logsavelocal_tag = atoi(optarg); } break; case 't': if (NULL != optarg) { strncpy(topic, optarg, sizeof(topic)); topic[sizeof(topic) - 1] = '\0'; } break; case 'p': if (NULL != optarg) { partitions = atoi(optarg); if (partitions <= 0 || partitions > 256) { partitions = 4; } } break; case 'm': if (NULL != optarg) { g_logfilesize_max = atoi(optarg); } break; case 'l': if (NULL != optarg) { strcpy(g_error_logpath, optarg); } break; case 'd': if (NULL != optarg) { strcpy(g_queue_data_filepath, optarg); } break; case 'x': if (NULL != optarg) { strcpy(g_monitor_qusizelogpath, optarg); } break; case 'n': if (NULL != optarg) { g_logfilenum_max = atoi(optarg); } break; case 'r': if (NULL != optarg) { g_monitor_period = atoi(optarg); } break; case 'h': default: usage(argv[0]); break; } } if(g_logsavelocal_tag == 0){ rd_kafka_set_logger(save_liberr_tolocal); } else{ rd_kafka_set_logger(rd_kafka_log_syslog); } signal(SIGINT, stop); signal(SIGTERM, stop); // see: https://github.com/edenhill/librdkafka/issues/2 signal(SIGPIPE, SIG_IGN); 
signal(SIGHUP, stop); /* Producer */ char buf[4096]; //int sendcnt = 0; int i = 0; /* Create Kafka handle */ for (broker = strtok(brokers, ","), rkcount = 0; broker && rkcount < sizeof(rks); broker = strtok(NULL, ","), ++rkcount) { rks[rkcount] = rd_kafka_new(RD_KAFKA_PRODUCER, broker, NULL); if (!rks[rkcount]) { for (i = 0; i < rkcount; i++) { rd_kafka_destroy(rks[i]); rks[i] = NULL; } strcpy(buf, getcurrenttime()); buf[strlen(buf) - 1] = '\0'; strcpy(buf, "kafka_new producer is fail..."); perror(buf); strcpy(buf, "kafka_new producer is fail..."); save_error(g_logsavelocal_tag, LOG_CRIT, buf); exit(9); } } FILE *fp = NULL; opbuf = NULL; if (access(g_queue_data_filepath, F_OK) == 0) { fp = fopen(g_queue_data_filepath, "r"); if (fp == NULL) { char buf[100] = { 0 }; sprintf(buf, "%d line open %s file fail...", __LINE__ - 4,g_queue_data_filepath); perror(buf); save_error(g_logsavelocal_tag, LOG_CRIT, buf); exit(8); } while (fgets(buf, sizeof(buf), fp)) { ++sendcnt; opbuf = strdup(buf); len = strlen(opbuf); producer(rks, topic, partitions, RD_KAFKA_OP_F_FREE, opbuf, len, rkcount); } if (get_file_size(g_queue_data_filepath) > 0) { unlink(g_queue_data_filepath); } } if(NULL!=fp) { fclose(fp); } char *eptr = NULL; while (g_run_tag) { eptr = fgets(buf, sizeof(buf), stdin); if (EINTR == errno || NULL == eptr) { g_run_tag = 0; break; } ++sendcnt; opbuf = strdup(buf); len = strlen(opbuf); producer(rks, topic, partitions, RD_KAFKA_OP_F_FREE, opbuf, len, rkcount); if ((sendcnt % 100000) == 0) { char timebuf[50] = { 0 }; strcpy(timebuf, getcurrenttime()); timebuf[strlen(timebuf) - 1] = '\0'; fprintf(stderr, "%s sendkafka[%d]: Sent %i messages to topic %s\n", timebuf, getpid(), sendcnt, topic); char *buf = calloc(1, strlen(topic) + 128); sprintf(buf, "sendkafka[%d]: Sent %i messages to topic %s\n", getpid(), sendcnt, topic); save_error(g_logsavelocal_tag, LOG_INFO, buf); free(buf); buf = NULL; } } printf("sendcnt num %d\n", sendcnt); save_queuedata_tofile(rks, rkcount); /* 
Destroy the handle */ for (i = 0; i < rkcount; i++) { rd_kafka_destroy(rks[i]); } return 0; }