gboolean msg_limit_internal_message(void) { MsgContext *context; if (!evt_context) return FALSE; context = msg_get_context(); if (context->recurse_count > MAX_RECURSIONS) { if (!context->recurse_warning) { msg_event_send( msg_event_create(EVT_PRI_WARNING, "syslog-ng internal() messages are looping back, preventing loop by suppressing further messages", evt_tag_int("recurse_count", context->recurse_count), NULL)); context->recurse_warning = TRUE; } return FALSE; } return TRUE; }
/* OpenSSL verify-callback helper: log one step of certificate chain
 * validation.
 *
 * "ok" is the preverify result for the certificate currently examined in
 * "ctx".  On success a debug line with subject/issuer is produced; on
 * failure the OpenSSL error string and the chain depth are logged as an
 * error.
 */
void
tls_log_certificate_validation_progress(int ok, X509_STORE_CTX *ctx)
{
  X509 *current_cert = X509_STORE_CTX_get_current_cert(ctx);
  GString *subject = g_string_sized_new(128);
  GString *issuer = g_string_sized_new(128);

  tls_x509_format_dn(X509_get_subject_name(current_cert), subject);
  tls_x509_format_dn(X509_get_issuer_name(current_cert), issuer);

  if (!ok)
    {
      gint errnum = X509_STORE_CTX_get_error(ctx);
      gint errdepth = X509_STORE_CTX_get_error_depth(ctx);

      msg_error("Certificate validation failed",
                evt_tag_str("subject", subject->str),
                evt_tag_str("issuer", issuer->str),
                evt_tag_str("error", X509_verify_cert_error_string(errnum)),
                evt_tag_int("depth", errdepth),
                NULL);
    }
  else
    {
      msg_debug("Certificate validation progress",
                evt_tag_str("subject", subject->str),
                evt_tag_str("issuer", issuer->str),
                NULL);
    }

  g_string_free(subject, TRUE);
  g_string_free(issuer, TRUE);
}
int main(void) { EVTCONTEXT *ctx; EVTREC *e; char *es; ctx = evt_ctx_init("evtfmt", LOG_AUTH); e = evt_rec_init(ctx, LOG_INFO, "Test message with an embedded ';' in it. It also contains an <XML> like tag."); evt_rec_add_tags(e, evt_tag_str("test:tag", "'value'"), evt_tag_str("test:tag2", "\n\n\n\n"), evt_tag_int("test:fd", fileno(stderr)), evt_tag_errno("test:error", EAGAIN), evt_tag_printf("test:printf", "%d %d", 5, 6), NULL); es = evt_format(e); printf("%s\n", es); free(es); evt_log(e); return 0; }
/* Preserve (or close) the listening fd of a SOCK_STREAM source across a
 * configuration reload.
 *
 * When keep-alive-across-reloads is disabled the fd is simply closed;
 * otherwise it is stashed in the persistent configuration so the new
 * configuration instance can adopt it.  Non-stream sockets need no
 * handling here.
 */
static void
afsocket_sd_save_listener(AFSocketSourceDriver *self)
{
  GlobalConfig *cfg = log_pipe_get_config(&self->super.super.super);

  if (self->transport_mapper->sock_type != SOCK_STREAM)
    return;

  afsocket_sd_stop_watches(self);
  if (self->connections_kept_alive_accross_reloads)
    {
      /* NOTE: the fd is incremented by one when added to persistent config
       * as persist config cannot store NULL */
      cfg_persist_config_add(cfg,
                             afsocket_sd_format_persist_name(self, TRUE),
                             GUINT_TO_POINTER(self->fd + 1),
                             afsocket_sd_close_fd, FALSE);
    }
  else
    {
      msg_verbose("Closing listener fd", evt_tag_int("fd", self->fd), NULL);
      close(self->fd);
    }
}
/* Restore the in-memory state of a disk-queue from its on-disk header.
 *
 * Validates the file magic and the read/write head pointers, then (for
 * non-reliable queues) loads the serialized qout/qbacklog/qoverflow queues
 * back into the supplied GQueues, truncates the tail of the file that is
 * no longer needed and records the resulting file size.
 *
 * Returns TRUE when the state was loaded; FALSE when the header is
 * corrupt.  NOTE(review): on a failed _load_queue() this returns
 * !read_only, i.e. TRUE for writable queues — presumably to keep a
 * writable queue usable despite partial corruption; confirm before
 * changing.
 */
static gboolean
_load_state(QDisk *self, GQueue *qout, GQueue *qbacklog, GQueue *qoverflow)
{
  gint64 qout_ofs;
  gint qout_count, qout_len;
  gint64 qbacklog_ofs;
  gint qbacklog_count, qbacklog_len;
  gint64 qoverflow_ofs;
  gint qoverflow_count, qoverflow_len;
  gint64 end_ofs;

  /* reject files that were not written by this driver (magic mismatch) */
  if (memcmp(self->hdr->magic, self->file_id, 4) != 0)
    {
      msg_error("Error reading disk-queue file header",
                evt_tag_str("filename", self->filename));
      return FALSE;
    }

  /* snapshot the per-queue counters and offsets from the mapped header */
  qout_count = self->hdr->qout_count;
  qout_len = self->hdr->qout_len;
  qout_ofs = self->hdr->qout_ofs;
  qbacklog_count = self->hdr->qbacklog_count;
  qbacklog_len = self->hdr->qbacklog_len;
  qbacklog_ofs = self->hdr->qbacklog_ofs;
  qoverflow_count = self->hdr->qoverflow_count;
  qoverflow_len = self->hdr->qoverflow_len;
  qoverflow_ofs = self->hdr->qoverflow_ofs;

  self->read_qout_ofs = qout_ofs;

  /* sanity-check the head pointers: both must point past the reserved
   * header area, and equal heads are only legal for an empty queue */
  if ((self->hdr->read_head < QDISK_RESERVED_SPACE) ||
      (self->hdr->write_head < QDISK_RESERVED_SPACE) ||
      (self->hdr->read_head == self->hdr->write_head && self->hdr->length != 0))
    {
      msg_error("Inconsistent header data in disk-queue file, ignoring",
                evt_tag_str("filename", self->filename),
                evt_tag_int("read_head", self->hdr->read_head),
                evt_tag_int("write_head", self->hdr->write_head),
                evt_tag_int("qdisk_length", self->hdr->length));
      return FALSE;
    }

  if (!self->options->reliable)
    {
      /* each serialized queue region must start after the data area
       * (ofs > 0) and before the write head to be considered valid */
      if (!(qout_ofs > 0 && qout_ofs < self->hdr->write_head))
        {
          if (!_load_queue(self, qout, qout_ofs, qout_len, qout_count))
            return !self->options->read_only;
        }
      else
        {
          msg_error("Inconsistent header data in disk-queue file, ignoring qout",
                    evt_tag_str("filename", self->filename),
                    evt_tag_int("qout_ofs", qout_ofs),
                    evt_tag_int("qdisk_length", self->hdr->length));
        }

      if (!(qbacklog_ofs > 0 && qbacklog_ofs < self->hdr->write_head))
        {
          if (!_load_queue(self, qbacklog, qbacklog_ofs, qbacklog_len, qbacklog_count))
            return !self->options->read_only;
        }
      else
        {
          msg_error("Inconsistent header data in disk-queue file, ignoring qbacklog",
                    evt_tag_str("filename", self->filename),
                    evt_tag_int("qbacklog_ofs", qbacklog_ofs),
                    evt_tag_int("qdisk_length", self->hdr->length));
        }

      if (!(qoverflow_ofs > 0 && qoverflow_ofs < self->hdr->write_head))
        {
          if (!_load_queue(self, qoverflow, qoverflow_ofs, qoverflow_len, qoverflow_count))
            return !self->options->read_only;
        }
      else
        {
          msg_error("Inconsistent header data in disk-queue file, ignoring qoverflow",
                    evt_tag_str("filename", self->filename),
                    evt_tag_int("qoverflow_ofs", qoverflow_ofs),
                    evt_tag_int("qdisk_length", self->hdr->length));
        }
    }

  if (!self->options->read_only)
    {
      /* drop the serialized queue regions from the end of the file: cut at
       * the lowest of the three region offsets that are present */
      end_ofs = qout_ofs;
      if (qbacklog_ofs && qbacklog_ofs < end_ofs)
        end_ofs = qbacklog_ofs;
      if (qoverflow_ofs && qoverflow_ofs < end_ofs)
        end_ofs = qoverflow_ofs;
      if (end_ofs > QDISK_RESERVED_SPACE)
        _truncate_file(self, end_ofs);
    }

  if (!self->options->reliable)
    {
      self->file_size = qout_ofs;
      msg_info("Disk-buffer state loaded",
               evt_tag_str("filename", self->filename),
               evt_tag_int("qout_length", qout_count),
               evt_tag_int("qbacklog_length", qbacklog_count),
               evt_tag_int("qoverflow_length", qoverflow_count),
               evt_tag_int("qdisk_length", self->hdr->length));
    }
  else
    {
      /* reliable queues keep everything in the file: size it via fstat */
      struct stat st;
      fstat(self->fd, &st);
      self->file_size = st.st_size;
      msg_info("Reliable disk-buffer state loaded",
               evt_tag_str("filename", self->filename),
               evt_tag_int("queue_length", self->hdr->length),
               evt_tag_int("size", self->hdr->write_head - self->hdr->read_head));
    }
  return TRUE;
}
/* Initialize a Solaris STREAMS log device source driver.
 *
 * Opens the configured device file, arms it for console-log delivery via
 * the I_CONSLOG STREAMS ioctl, and builds a LogReader over a
 * datagram-oriented protocol on that fd.  Door creation (if configured)
 * is deferred to a post-daemonize hook because it spawns threads.
 *
 * Returns TRUE on success; on any failure the fd is closed and FALSE is
 * returned.
 */
static gboolean
afstreams_sd_init(LogPipe *s)
{
  AFStreamsSourceDriver *self = (AFStreamsSourceDriver *) s;
  GlobalConfig *cfg = log_pipe_get_config(s);
  gint fd;

  if (!log_src_driver_init_method(s))
    return FALSE;

  log_reader_options_init(&self->reader_options, cfg, self->super.super.group);

  fd = open(self->dev_filename->str, O_RDONLY | O_NOCTTY | O_NONBLOCK);
  if (fd != -1)
    {
      struct strioctl ioc;

      g_fd_set_cloexec(fd, TRUE);
      memset(&ioc, 0, sizeof(ioc));
      /* request that console log messages be routed to this stream */
      ioc.ic_cmd = I_CONSLOG;
      if (ioctl(fd, I_STR, &ioc) < 0)
        {
          msg_error("Error in ioctl(I_STR, I_CONSLOG)",
                    evt_tag_str(EVT_TAG_FILENAME, self->dev_filename->str),
                    evt_tag_errno(EVT_TAG_OSERROR, errno));
          close(fd);
          return FALSE;
        }
      g_fd_set_nonblock(fd, TRUE);

      self->reader = log_reader_new(cfg);
      log_reader_reopen(self->reader,
                        log_proto_dgram_server_new(log_transport_streams_new(fd),
                                                   &self->reader_options.proto_options.super),
                        poll_fd_events_new(fd));
      log_reader_set_options(self->reader, s, &self->reader_options,
                             STATS_LEVEL1, SCS_SUN_STREAMS,
                             self->super.super.id, self->dev_filename->str);
      log_pipe_append((LogPipe *) self->reader, s);

      if (self->door_filename)
        {
          /* door creation is deferred, because it creates threads which is
           * not inherited through forks, and syslog-ng forks during
           * startup, but _after_ the configuration was initialized */
          register_application_hook(AH_POST_DAEMONIZED, afstreams_init_door, self);
        }
      if (!log_pipe_init((LogPipe *) self->reader))
        {
          msg_error("Error initializing log_reader, closing fd",
                    evt_tag_int("fd", fd));
          log_pipe_unref((LogPipe *) self->reader);
          self->reader = NULL;
          close(fd);
          return FALSE;
        }
    }
  else
    {
      msg_error("Error opening syslog device",
                evt_tag_str(EVT_TAG_FILENAME, self->dev_filename->str),
                evt_tag_errno(EVT_TAG_OSERROR, errno));
      return FALSE;
    }
  return TRUE;
}
/* Translate an amqp_rpc_reply_t into a success/failure verdict.
 *
 * "context" serves as the human-readable message of any error report.
 * Every non-normal reply logs the failure, suspends the driver (so it is
 * retried after time_reopen) and yields FALSE; only AMQP_RESPONSE_NORMAL
 * yields TRUE.
 */
static gboolean
afamqp_is_ok(AMQPDestDriver *self, gchar *context, amqp_rpc_reply_t ret)
{
  switch (ret.reply_type)
    {
    case AMQP_RESPONSE_NORMAL:
      break;

    case AMQP_RESPONSE_NONE:
      msg_error(context,
                evt_tag_str("driver", self->super.super.super.id),
                evt_tag_str("error", "missing RPC reply type"),
                evt_tag_int("time_reopen", self->super.time_reopen),
                NULL);
      log_threaded_dest_driver_suspend(&self->super);
      return FALSE;

    case AMQP_RESPONSE_LIBRARY_EXCEPTION:
      {
        /* amqp_error_string() returns an allocated string we must free */
        gchar *errstr = amqp_error_string(ret.library_error);
        msg_error(context,
                  evt_tag_str("driver", self->super.super.super.id),
                  evt_tag_str("error", errstr),
                  evt_tag_int("time_reopen", self->super.time_reopen),
                  NULL);
        g_free (errstr);
        log_threaded_dest_driver_suspend(&self->super);
        return FALSE;
      }

    case AMQP_RESPONSE_SERVER_EXCEPTION:
      /* the broker closed either the whole connection or just the channel;
       * decode the close method for its code/text */
      switch (ret.reply.id)
        {
        case AMQP_CONNECTION_CLOSE_METHOD:
          {
            amqp_connection_close_t *m = (amqp_connection_close_t *) ret.reply.decoded;
            /* NOTE(review): reply_text is an amqp_bytes_t which is not
             * guaranteed to be NUL-terminated; passing .bytes to
             * evt_tag_str() assumes it is — verify against librabbitmq */
            msg_error(context,
                      evt_tag_str("driver", self->super.super.super.id),
                      evt_tag_str("error", "server connection error"),
                      evt_tag_int("code", m->reply_code),
                      evt_tag_str("text", m->reply_text.bytes),
                      evt_tag_int("time_reopen", self->super.time_reopen),
                      NULL);
            log_threaded_dest_driver_suspend(&self->super);
            return FALSE;
          }
        case AMQP_CHANNEL_CLOSE_METHOD:
          {
            amqp_channel_close_t *m = (amqp_channel_close_t *) ret.reply.decoded;
            msg_error(context,
                      evt_tag_str("driver", self->super.super.super.id),
                      evt_tag_str("error", "server channel error"),
                      evt_tag_int("code", m->reply_code),
                      evt_tag_str("text", m->reply_text.bytes),
                      evt_tag_int("time_reopen", self->super.time_reopen),
                      NULL);
            log_threaded_dest_driver_suspend(&self->super);
            return FALSE;
          }
        default:
          msg_error(context,
                    evt_tag_str("driver", self->super.super.super.id),
                    evt_tag_str("error", "unknown server error"),
                    evt_tag_printf("method id", "0x%08X", ret.reply.id),
                    evt_tag_int("time_reopen", self->super.time_reopen),
                    NULL);
          log_threaded_dest_driver_suspend(&self->super);
          return FALSE;
        }
      /* not reachable: every branch of the inner switch returns */
      return FALSE;
    }
  return TRUE;
}
/* Advance the lexer to the next pending include (file or buffer) at the
 * current include depth.
 *
 * Tears down the buffer/FILE of the include just finished; if the current
 * include level is exhausted (buffer consumed, or no more files in a
 * directory include), pops the level and resumes the enclosing buffer.
 * Otherwise it opens the next file (or scans the stored buffer) and
 * switches the flex scanner to it.
 *
 * Returns TRUE on success, FALSE when at depth 0 or when an include file
 * cannot be opened.
 */
gboolean
cfg_lexer_start_next_include(CfgLexer *self)
{
  CfgIncludeLevel *level = &self->include_stack[self->include_depth];
  gchar *filename;
  gboolean buffer_processed = FALSE;

  if (self->include_depth == 0)
    {
      return FALSE;
    }

  if (level->yybuf)
    {
      msg_debug("Finishing include",
                evt_tag_str((level->include_type == CFGI_FILE ? "filename" : "content"), level->name),
                evt_tag_int("depth", self->include_depth));
      buffer_processed = TRUE;
    }

  /* reset the include state, should also handle initial invocations, in which case everything is NULL */
  if (level->yybuf)
    {
      _cfg_lexer__delete_buffer(level->yybuf, self->state);
      level->yybuf = NULL;
    }

  if (level->include_type == CFGI_FILE)
    {
      if (level->file.include_file)
        {
          fclose(level->file.include_file);
          level->file.include_file = NULL;
        }
    }

  if ((level->include_type == CFGI_BUFFER && buffer_processed) ||
      (level->include_type == CFGI_FILE && !level->file.files))
    {
      /* we finished with an include statement that included a series of
       * files (e.g. directory include). */

      /* NOTE: this couple of lines should become just a call to
       * cfg_lexer_clear_include_level(), however this entire function is
       * playing nasty tricks with the data members within the
       * CfgIncludeLevel, which I can't decipher right now, so I am leaving
       * this as is.  Memory management in the lexer is clearly messed
       * up.
       */
      g_free(level->name);
      if (level->include_type == CFGI_BUFFER)
        {
          g_free(level->buffer.content);
          g_free(level->buffer.original_content);
        }
      memset(level, 0, sizeof(*level));

      /* pop back to the enclosing include level and resume its buffer */
      self->include_depth--;
      _cfg_lexer__switch_to_buffer(self->include_stack[self->include_depth].yybuf, self->state);

      return TRUE;
    }

  /* now populate "level" with the new include information */
  if (level->include_type == CFGI_BUFFER)
    {
      level->yybuf = _cfg_lexer__scan_buffer(level->buffer.content, level->buffer.content_length, self->state);
    }
  else if (level->include_type == CFGI_FILE)
    {
      FILE *include_file;

      /* take the next filename off the pending list; ownership of the
       * string transfers to level->name below */
      filename = (gchar *) level->file.files->data;
      level->file.files = g_slist_delete_link(level->file.files, level->file.files);

      include_file = fopen(filename, "r");
      if (!include_file)
        {
          msg_error("Error opening include file",
                    evt_tag_str("filename", filename),
                    evt_tag_int("depth", self->include_depth));
          g_free(filename);
          return FALSE;
        }
      msg_debug("Starting to read include file",
                evt_tag_str("filename", filename),
                evt_tag_int("depth", self->include_depth));
      g_free(level->name);
      level->name = filename;

      level->file.include_file = include_file;
      level->yybuf = _cfg_lexer__create_buffer(level->file.include_file, YY_BUF_SIZE, self->state);
    }
  else
    {
      g_assert_not_reached();
    }

  /* reset the source location tracking for the fresh buffer */
  level->lloc.first_line = level->lloc.last_line = 1;
  level->lloc.first_column = level->lloc.last_column = 1;
  level->lloc.level = level;

  _cfg_lexer__switch_to_buffer(level->yybuf, self->state);
  return TRUE;
}
/**
 * Parse the zoneinfo file structure (see tzfile.h) into a ZoneInfo
 *
 * "input" is advanced past the consumed data; "is64bitData" selects the
 * 64-bit second data block of version 2/3 files; "*version" is set from
 * the version byte.  Returns a newly allocated ZoneInfo, or NULL on a
 * malformed file (the temporary arrays are freed on all paths via the
 * shared "error" cleanup label, which success also falls through to).
 */
static ZoneInfo*
zone_info_parser(unsigned char **input, gboolean is64bitData, gint *version)
{
  gint32 i = 0;
  unsigned char *buf = NULL;
  ZoneInfo *info = NULL;
  gint64 *transition_times = NULL;
  guint8 *transition_types = NULL;
  gint32 *gmt_offsets = NULL;
  gint64 isgmtcnt, isdstcnt, leapcnt, timecnt, typecnt, charcnt;
  gboolean insertInitial = FALSE;

  /* the file must start with the TZif magic */
  buf = *input;
  *input += 4;
  if (strncmp((gchar*)buf, TZ_MAGIC, 4) != 0)
    {
      msg_error("Error while processing the time zone file",
                evt_tag_str("message", TZ_MAGIC" signature is missing"),
                NULL);
      goto error;
    }

  /* read the version byte */
  buf = *input;
  *input += 1;

  /*
   * if '\0', we have just one copy of data,
   * if '2', there is additional 64 bit version at the end.
   */
  if (buf[0] != 0 && buf[0] != '2' && buf[0] != '3')
    {
      msg_error("Error in the time zone file",
                evt_tag_str("message", "Bad Olson version info"),
                NULL);
      goto error;
    }
  else
    {
      if (buf[0] != 0)
        *version = buf[0] - '0';
      else
        *version = 0;
    }

  /* Read reserved bytes */
  *input += 15;

  /* Read array sizes */
  isgmtcnt = readcoded32(input, 0, G_MAXINT64);
  isdstcnt = readcoded32(input, 0, G_MAXINT64);
  leapcnt = readcoded32(input, 0, G_MAXINT64);
  timecnt = readcoded32(input, 0, G_MAXINT64);
  typecnt = readcoded32(input, 0, G_MAXINT64);
  charcnt = readcoded32(input, 0, G_MAXINT64);

  /*
   * Confirm sizes that we assume to be equal.  These assumptions
   * are drawn from a reading of the zic source (2003a), so they
   * should hold unless the zic source changes.
   */
  if (isgmtcnt != typecnt || isdstcnt != typecnt)
    {
      msg_warning("Error in the time zone file",
                  evt_tag_str("message", "Count mismatch between tzh_ttisgmtcnt, tzh_ttisdstcnt, tth_typecnt"),
                  NULL);
    }

  /*
   * Used temporarily to store transition times and types.  We need
   * to do this because the times and types are stored in two
   * separate arrays.
   */
  transition_times = g_new0(gint64, timecnt);
  transition_types = g_new0(guint8, timecnt);
  gmt_offsets = g_new0(gint32, typecnt);

  /* Read transition times */
  for (i = 0; i < timecnt; ++i)
    {
      if (is64bitData)
        {
          transition_times[i] = readcoded64(input, G_MININT64, G_MAXINT64);
        }
      else
        {
          transition_times[i] = readcoded32(input, G_MININT64, G_MAXINT64);
        }
    }

  /* Read transition types: each entry indexes into the type table */
  for (i = 0; i < timecnt; ++i)
    {
      guint8 t = (guint8)readchar(input);
      if (t >= typecnt)
        {
          msg_warning("Error in the time zone file",
                      evt_tag_str("message", "Illegal type number"),
                      evt_tag_printf("val", "%ld", (long) t),
                      evt_tag_printf("expected", "[0, %" G_GINT64_FORMAT "]", typecnt-1),
                      NULL);
          goto error;
        }
      transition_types[i] = t;
    }

  /* Read types (except for the isstd and isgmt flags, which come later (why??)) */
  for (i = 0; i<typecnt; ++i)
    {
      /* version 3 files allow offsets beyond +-24h, up to +-167h */
      gint offs = 24;

      if (*version == 3)
        offs = 167;

      gmt_offsets[i] = readcoded32(input, G_MININT64, G_MAXINT64);
      if (gmt_offsets[i] > offs * 60 * 60 || gmt_offsets[i] < -1 * offs * 60 * 60)
        {
          msg_warning("Error in the time zone file",
                      evt_tag_str("message", "Illegal gmtoffset number"),
                      evt_tag_int("val", gmt_offsets[i]),
                      evt_tag_printf("expected", "[%d, %d]", -1 * offs * 60 * 60, offs * 60 * 60),
                      NULL);
          goto error;
        }
      /* ignore isdst flag */
      readbool(input);
      /* ignore abbr index */
      readchar(input);
    }

  /* allocate a new ZoneInfo structure */
  if (typecnt > 0 && timecnt == 0)
    {
      /* only one type info is in the time zone file so add it with 1901 */
      info = zone_info_new(1);
      info->transitions[0].time = LOWEST_TIME32;
      info->transitions[0].gmtoffset = gmt_offsets[0];
    }
  else
    {
      info = zone_info_new(timecnt);
    }

  /* Build transitions vector out of corresponding times and types.
   */
  insertInitial = FALSE;
  if (is64bitData)
    {
      if (timecnt > 0)
        {
          gint32 minidx = -1;
          gint32 last_transition_index = 0;

          /* skip transitions below the 32bit minimum but remember the
           * latest one of them so its type can seed the vector */
          for (i = 0; i < timecnt; ++i)
            {
              if (transition_times[i] < LOWEST_TIME32)
                {
                  if (minidx == -1 || transition_times[i] > transition_times[minidx])
                    {
                      /* Preserve the latest transition before the 32bit minimum time */
                      minidx = i;
                    }
                }
              else
                {
                  info->transitions[last_transition_index].time = transition_times[i];
                  info->transitions[last_transition_index].gmtoffset = gmt_offsets[transition_types[i]];
                  last_transition_index++;
                }
            }

          if (minidx != -1)
            {
              /*
               * If there are any transitions before the 32bit minimum time,
               * put the type information with the 32bit minimum time
               */
              memmove(&info->transitions[1], &info->transitions[0], sizeof(Transition) * (timecnt-1));
              info->transitions[0].time = LOWEST_TIME32;
              info->transitions[0].gmtoffset = gmt_offsets[transition_types[minidx]];
              info->timecnt -= minidx;
            }
          else
            {
              /* Otherwise, we need insert the initial type later */
              insertInitial = TRUE;
            }
        }
    }
  else
    {
      /* 32bit data: transitions map over verbatim */
      for (i = 0; i < timecnt; ++i)
        {
          info->transitions[i].time = transition_times[i];
          info->transitions[i].gmtoffset = gmt_offsets[transition_types[i]];
        }
    }

  if (insertInitial)
    {
      g_assert(timecnt > 0);
      g_assert(typecnt > 0);

      /* reallocate the transitions vector to be able to store a new entry */
      info->timecnt ++;
      timecnt ++;
      info->transitions = g_renew(Transition, info->transitions, timecnt);

      /* Add the initial type associated with the lowest int32 time */
      memmove(&info->transitions[1], &info->transitions[0], sizeof(Transition) * (timecnt-1));
      info->transitions[0].time = LOWEST_TIME32;
      info->transitions[0].gmtoffset = gmt_offsets[0];
    }

  /* ignore the abbreviation string */
  if (charcnt)
    *input += charcnt;

  /* ignore leap second info, if any */
  for (i=0; i<leapcnt; ++i)
    {
      if(is64bitData)
        readcoded64(input, G_MININT64, G_MAXINT64);/* leap second transition time */
      else
        readcoded32(input, G_MININT64, G_MAXINT64);/* leap second transition time */
      readcoded32(input, G_MININT64, G_MAXINT64);/* total correction after above */
    }

  /* http://osdir.com/ml/time.tz/2006-02/msg00041.html */
  /* We dont nead this flags to compute the wall time of the timezone*/

  /* Ignore isstd flags */
  for (i=0; i<typecnt; i++)
    readbool(input);

  /* Ignore isgmt flags */
  for (i=0; i<typecnt; i++)
    readbool(input);

error:
  /* shared cleanup: also reached on success (falls through from above) */
  g_free(transition_times);
  g_free(transition_types);
  g_free(gmt_offsets);
  return info;
}
/* follow timer callback. Check if the file has new content, or deleted or
 * moved. Ran every follow_freq seconds.
 *
 * Compares the reader's current position against the file size to detect
 * new data, EOF or truncation, and compares inodes against the followed
 * path to detect rotation.  NC_FILE_MOVED/NC_FILE_EOF notifications may
 * free "self", so the function must return immediately after sending
 * them.
 */
static void
log_reader_io_follow_file(gpointer s)
{
  LogReader *self = (LogReader *) s;
  struct stat st, followed_st;
  off_t pos = -1;
  gint fd = log_proto_get_fd(self->proto);

  msg_trace("Checking if the followed file has new lines",
            evt_tag_str("follow_filename", self->follow_filename),
            NULL);
  if (fd >= 0)
    {
      pos = lseek(fd, 0, SEEK_CUR);
      if (pos == (off_t) -1)
        {
          msg_error("Error invoking seek on followed file",
                    evt_tag_errno("error", errno),
                    NULL);
          goto reschedule;
        }

      if (fstat(fd, &st) < 0)
        {
          if (errno == ESTALE)
            {
              /* NFS: the file behind our fd disappeared, treat as moved */
              msg_trace("log_reader_fd_check file moved ESTALE",
                        evt_tag_str("follow_filename", self->follow_filename),
                        NULL);
              log_pipe_notify(self->control, &self->super.super, NC_FILE_MOVED, self);
              return;
            }
          else
            {
              msg_error("Error invoking fstat() on followed file",
                        evt_tag_errno("error", errno),
                        NULL);
              goto reschedule;
            }
        }

      msg_trace("log_reader_fd_check",
                evt_tag_int("pos", pos),
                evt_tag_int("size", st.st_size),
                NULL);

      if (pos < st.st_size)
        {
          /* we have data to read */
          log_reader_io_process_input(s);
          return;
        }
      else if (pos == st.st_size)
        {
          /* we are at EOF */
          log_pipe_notify(self->control, &self->super.super, NC_FILE_EOF, self);
        }
      else if (pos > st.st_size)
        {
          /* the last known position is larger than the current size of the file.
             it got truncated. Restart from the beginning.
           */
          log_pipe_notify(self->control, &self->super.super, NC_FILE_MOVED, self);

          /* we may be freed by the time the notification above returns */
          return;
        }
    }

  if (self->follow_filename)
    {
      if (stat(self->follow_filename, &followed_st) != -1)
        {
          /* NOTE: st is only valid when fd >= 0; the short-circuit on
           * (fd < 0) guards the st.st_ino access */
          if (fd < 0 || (st.st_ino != followed_st.st_ino && followed_st.st_size > 0))
            {
              msg_trace("log_reader_fd_check file moved eof",
                        evt_tag_int("pos", pos),
                        evt_tag_int("size", followed_st.st_size),
                        evt_tag_str("follow_filename", self->follow_filename),
                        NULL);
              /* file was moved and we are at EOF, follow the new file */
              log_pipe_notify(self->control, &self->super.super, NC_FILE_MOVED, self);

              /* we may be freed by the time the notification above returns */
              return;
            }
        }
      else
        {
          msg_verbose("Follow mode file still does not exist",
                      evt_tag_str("filename", self->follow_filename),
                      NULL);
        }
    }
reschedule:
  log_reader_update_watches(self);
}
/* Establish (or re-validate) the connection to the AMQP broker.
 *
 * With "reconnect" set, an existing healthy connection is reused;
 * otherwise the old connection is torn down first.  The sequence is:
 * allocate connection -> open TCP socket (1s timeout) -> SASL PLAIN
 * login -> open channel 1 -> optionally declare the exchange.
 * Partial failures unwind through the goto labels so each acquired
 * resource is released.  Returns TRUE when fully connected.
 */
static gboolean
afamqp_dd_connect(AMQPDestDriver *self, gboolean reconnect)
{
  int sockfd_ret;
  amqp_rpc_reply_t ret;

  if (reconnect && self->conn)
    {
      ret = amqp_get_rpc_reply(self->conn);
      if (ret.reply_type == AMQP_RESPONSE_NORMAL)
        {
          /* existing connection is still healthy, keep it */
          return TRUE;
        }
      else
        {
          _amqp_connection_disconnect(self);
        }
    }

  self->conn = amqp_new_connection();

  if (self->conn == NULL)
    {
      msg_error("Error allocating AMQP connection.", NULL);
      goto exception_amqp_dd_connect_failed_init;
    }

  self->sockfd = amqp_tcp_socket_new(self->conn);
  struct timeval delay;
  delay.tv_sec = 1;
  delay.tv_usec = 0;
  sockfd_ret = amqp_socket_open_noblock(self->sockfd, self->host, self->port, &delay);

  if (sockfd_ret != AMQP_STATUS_OK)
    {
      /* NOTE(review): amqp status codes are negative; the sign flip before
       * amqp_error_string2() looks inverted — confirm against librabbitmq */
      msg_error("Error connecting to AMQP server",
                evt_tag_str("driver", self->super.super.super.id),
                evt_tag_str("error", amqp_error_string2(-sockfd_ret)),
                evt_tag_int("time_reopen", self->super.time_reopen),
                NULL);
      goto exception_amqp_dd_connect_failed_init;
    }

  ret = amqp_login(self->conn, self->vhost, 0, 131072, 0,
                   AMQP_SASL_METHOD_PLAIN, self->user, self->password);
  if (!afamqp_is_ok(self, "Error during AMQP login", ret))
    {
      goto exception_amqp_dd_connect_failed_init;
    }

  amqp_channel_open(self->conn, 1);
  ret = amqp_get_rpc_reply(self->conn);
  if (!afamqp_is_ok(self, "Error during AMQP channel open", ret))
    {
      goto exception_amqp_dd_connect_failed_channel;
    }

  if (self->declare)
    {
      amqp_exchange_declare(self->conn, 1, amqp_cstring_bytes(self->exchange),
                            amqp_cstring_bytes(self->exchange_type), 0, 0, 0, 0,
                            amqp_empty_table);
      ret = amqp_get_rpc_reply(self->conn);
      if (!afamqp_is_ok(self, "Error during AMQP exchange declaration", ret))
        {
          goto exception_amqp_dd_connect_failed_exchange;
        }
    }

  msg_debug ("Connecting to AMQP succeeded",
             evt_tag_str("driver", self->super.super.super.id),
             NULL);

  return TRUE;

  /* Exceptions */
exception_amqp_dd_connect_failed_exchange:
  amqp_channel_close(self->conn, 1, AMQP_REPLY_SUCCESS);

exception_amqp_dd_connect_failed_channel:
  amqp_connection_close(self->conn, AMQP_REPLY_SUCCESS);

exception_amqp_dd_connect_failed_init:
  _amqp_connection_deinit(self);
  return FALSE;
}
/* Dequeue one message and hand it to the configured Perl queue function.
 *
 * The message's name-value pairs are collected into a Perl hash (via
 * value_pairs_foreach) and passed by reference to the function named in
 * self->queue_func_name.  The function's single scalar return value
 * decides success.  On success the message is acked as stored; on any
 * failure (Perl exception, wrong return count, falsy return or a
 * value-pairs error with on-error=drop-message) it is acked as dropped.
 *
 * Returns FALSE on failure so the caller can apply its retry policy;
 * TRUE also when the queue was empty.
 */
static gboolean
perl_worker_eval(LogThrDestDriver *d)
{
  PerlDestDriver *self = (PerlDestDriver *)d;
  gboolean success, vp_ok;
  LogMessage *msg;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  PerlInterpreter *my_perl = self->perl;  /* required by the Perl API macros below */
  int count;
  HV *kvmap;
  gpointer args[3];
  dSP;

  success = log_queue_pop_head(self->super.queue, &msg, &path_options, FALSE, FALSE);
  if (!success)
    return TRUE;

  msg_set_context(msg);

  /* standard perlcall prologue: save temps and mark the argument stack */
  ENTER;
  SAVETMPS;

  PUSHMARK(SP);

  kvmap = newHV();

  args[0] = self->perl;
  args[1] = kvmap;
  args[2] = self;

  /* fill kvmap with the message's name-value pairs */
  vp_ok = value_pairs_foreach(self->vp, perl_worker_vp_add_one,
                              msg, self->seq_num, LTZ_SEND,
                              &self->template_options,
                              args);
  if (!vp_ok && (self->template_options.on_error & ON_ERROR_DROP_MESSAGE))
    goto exit;

  XPUSHs(sv_2mortal(newRV_noinc((SV *)kvmap)));

  PUTBACK;

  count = call_pv(self->queue_func_name, G_EVAL | G_SCALAR);

  SPAGAIN;

  msg_set_context(NULL);

  if (SvTRUE(ERRSV))
    {
      msg_error("Error while calling a Perl function",
                evt_tag_str("driver", self->super.super.super.id),
                evt_tag_str("script", self->filename),
                evt_tag_str("function", self->queue_func_name),
                evt_tag_str("error-message", SvPV_nolen(ERRSV)),
                NULL);
      /* discard the undef pushed by the G_EVAL failure */
      (void) POPs;
      success = FALSE;
    }

  if (count != 1)
    {
      msg_error("Too many values returned by a Perl function",
                evt_tag_str("driver", self->super.super.super.id),
                evt_tag_str("script", self->filename),
                evt_tag_str("function", self->queue_func_name),
                evt_tag_int("returned-values", count),
                evt_tag_int("expected-values", 1),
                NULL);
      success = FALSE;
    }
  else
    {
      int r = POPi;

      success = (r != 0);
    }

exit:
  PUTBACK;
  FREETMPS;
  LEAVE;

  if (success && vp_ok)
    {
      stats_counter_inc(self->super.stored_messages);
      step_sequence_number(&self->seq_num);
      log_msg_ack(msg, &path_options);
      log_msg_unref(msg);
    }
  else
    {
      stats_counter_inc(self->super.dropped_messages);
      step_sequence_number(&self->seq_num);
      log_msg_ack(msg, &path_options);
      log_msg_unref(msg);
    }

  return success;
}
/**
 * afsql_dd_insert_db:
 *
 * This function is running in the database thread
 *
 * Pops one message off the queue, validates/creates the target table,
 * builds and runs the INSERT, and manages explicit transactions when
 * flush_lines batching is active.
 *
 * Returns: FALSE to indicate that the connection should be closed and
 * this destination suspended for time_reopen() time.
 **/
static gboolean
afsql_dd_insert_db(AFSqlDestDriver *self)
{
  GString *table, *query_string;
  LogMessage *msg;
  gboolean success;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;

  afsql_dd_connect(self);

  g_mutex_lock(self->db_thread_mutex);

  /* FIXME: this is a workaround because of the non-proper locking semantics
   * of the LogQueue.  It might happen that the _queue() method sees 0
   * elements in the queue, while the thread is still busy processing the
   * previous message.  In that case arming the parallel push callback is
   * not needed and will cause assertions to fail.  This is ugly and should
   * be fixed by properly defining the "blocking" semantics of the LogQueue
   * object w/o having to rely on user-code messing with parallel push
   * callbacks. */
  log_queue_reset_parallel_push(self->queue);
  success = log_queue_pop_head(self->queue, &msg, &path_options,
                               (self->flags & AFSQL_DDF_EXPLICIT_COMMITS), FALSE);
  g_mutex_unlock(self->db_thread_mutex);
  if (!success)
    return TRUE;

  msg_set_context(msg);

  table = afsql_dd_validate_table(self, msg);
  if (!table)
    {
      /* If validate table is FALSE then close the connection and wait time_reopen time (next call) */
      msg_error("Error checking table, disconnecting from database, trying again shortly",
                evt_tag_int("time_reopen", self->time_reopen),
                NULL);
      msg_set_context(NULL);
      /* BUGFIX: table is NULL on this path; the previous
       * g_string_free(table, TRUE) call here passed NULL to GLib, which
       * is invalid (g_return_if_fail warning). */
      return afsql_dd_insert_fail_handler(self, msg, &path_options);
    }

  query_string = afsql_dd_construct_query(self, table, msg);

  if (self->flush_lines_queued == 0 && !afsql_dd_begin_txn(self))
    {
      /* BUGFIX: free the GStrings that used to leak on this early return */
      g_string_free(table, TRUE);
      g_string_free(query_string, TRUE);
      return FALSE;
    }

  success = afsql_dd_run_query(self, query_string->str, FALSE, NULL);
  if (success && self->flush_lines_queued != -1)
    {
      /* Count the number of elements (queued messages) in explicit-commit
       * mode; commit once the batch reaches flush_lines */
      self->flush_lines_queued++;

      if (self->flush_lines && self->flush_lines_queued == self->flush_lines
          && !afsql_dd_commit_txn(self, TRUE))
        {
          /* BUGFIX: free the GStrings that used to leak on this early return */
          g_string_free(table, TRUE);
          g_string_free(query_string, TRUE);
          return FALSE;
        }
    }

  g_string_free(table, TRUE);
  g_string_free(query_string, TRUE);

  msg_set_context(NULL);

  if (!success)
    return afsql_dd_insert_fail_handler(self, msg, &path_options);

  /* we only ACK if each INSERT is a separate transaction */
  if ((self->flags & AFSQL_DDF_EXPLICIT_COMMITS) == 0)
    log_msg_ack(msg, &path_options);
  log_msg_unref(msg);
  step_sequence_number(&self->seq_num);
  self->failed_message_counter = 0;

  return TRUE;
}
/* Initialize a file source driver.
 *
 * Validates the multi-line options, opens the source file (or defers the
 * open when follow-mode is on and the file does not exist yet), and
 * constructs the poll events, protocol and LogReader on top of the fd.
 *
 * Returns TRUE on success; when the file cannot be opened and follow-mode
 * is off, the driver's "optional" flag decides the result.
 */
static gboolean
affile_sd_init(LogPipe *s)
{
  AFFileSourceDriver *self = (AFFileSourceDriver *) s;
  GlobalConfig *cfg = log_pipe_get_config(s);
  gint fd;
  gboolean file_opened, open_deferred = FALSE;

  if (!log_src_driver_init_method(s))
    return FALSE;

  log_reader_options_init(&self->reader_options, cfg, self->super.super.group);

  /* prefix/garbage regexps only make sense with the regexp based modes */
  if ((self->multi_line_mode != MLM_PREFIX_GARBAGE && self->multi_line_mode != MLM_PREFIX_SUFFIX) &&
      (self->multi_line_prefix || self->multi_line_garbage))
    {
      msg_error("multi-line-prefix() and/or multi-line-garbage() specified but multi-line-mode() is not regexp based (prefix-garbage or prefix-suffix), please set multi-line-mode() properly",
                NULL);
      return FALSE;
    }

  file_opened = affile_sd_open_file(self, self->filename->str, &fd);
  if (!file_opened && self->follow_freq > 0)
    {
      msg_info("Follow-mode file source not found, deferring open",
               evt_tag_str("filename", self->filename->str),
               NULL);
      open_deferred = TRUE;
      fd = -1;
    }

  if (file_opened || open_deferred)
    {
      LogProtoServer *proto;
      PollEvents *poll_events;

      poll_events = affile_sd_construct_poll_events(self, fd);
      if (!poll_events)
        {
          /* BUGFIX: fd is -1 on the deferred-open path; don't close(-1) */
          if (fd >= 0)
            close(fd);
          return FALSE;
        }
      proto = affile_sd_construct_proto(self, fd);

      self->reader = log_reader_new(self->super.super.super.cfg);

      log_reader_reopen(self->reader, proto, poll_events);

      log_reader_set_options(self->reader,
                             s,
                             &self->reader_options,
                             STATS_LEVEL1,
                             SCS_FILE,
                             self->super.super.id,
                             self->filename->str);

      /* NOTE: if the file could not be opened, we ignore the last
       * remembered file position, if the file is created in the future
       * we're going to read from the start.
       */
      log_pipe_append((LogPipe *) self->reader, s);

      if (!log_pipe_init((LogPipe *) self->reader))
        {
          msg_error("Error initializing log_reader, closing fd",
                    evt_tag_int("fd", fd),
                    NULL);
          log_pipe_unref((LogPipe *) self->reader);
          self->reader = NULL;
          /* BUGFIX: fd may be -1 when the open was deferred; don't close(-1) */
          if (fd >= 0)
            close(fd);
          return FALSE;
        }
      affile_sd_recover_state(s, cfg, proto);
    }
  else
    {
      msg_error("Error opening file for reading",
                evt_tag_str("filename", self->filename->str),
                evt_tag_errno(EVT_TAG_OSERROR, errno),
                NULL);
      return self->super.super.optional;
    }
  return TRUE;
}
/* Open (and possibly create) a log file or named pipe with the requested
 * ownership and permissions.
 *
 * Rejects paths containing "..", optionally creates the containing
 * directory, and temporarily raises capabilities when "privileged" is
 * set.  For pipes, a missing file is created with mkfifo().  On success
 * *fd holds the opened descriptor (close-on-exec set) and TRUE is
 * returned.
 *
 * NOTE(review): the fchown()/fchmod() return values are ignored, so
 * ownership/mode changes may fail silently — presumably acceptable for
 * best-effort adjustment, but confirm.
 */
static gboolean
affile_open_file(gchar *name, gint flags,
                 gint uid, gint gid, gint mode,
                 gint dir_uid, gint dir_gid, gint dir_mode,
                 gboolean create_dirs, gboolean privileged, gboolean is_pipe, gint *fd)
{
  cap_t saved_caps;
  struct stat st;

  /* refuse any path that climbs out of its directory */
  if (strstr(name, "../") || strstr(name, "/.."))
    {
      msg_error("Spurious path, logfile not created",
                evt_tag_str("path", name),
                NULL);
      return FALSE;
    }

  if (create_dirs && !create_containing_directory(name, dir_uid, dir_gid, dir_mode))
    return FALSE;

  /* raise capabilities only for the duration of the open */
  saved_caps = g_process_cap_save();
  if (privileged)
    {
      g_process_cap_modify(CAP_DAC_READ_SEARCH, TRUE);
      g_process_cap_modify(CAP_SYS_ADMIN, TRUE);
    }

  *fd = -1;
  /* warn when the driver (file vs pipe) doesn't match the on-disk type */
  if (stat(name, &st) >= 0)
    {
      if (is_pipe && !S_ISFIFO(st.st_mode))
        {
          msg_warning("WARNING: you are using the pipe driver, underlying file is not a FIFO, it should be used by file()",
                      evt_tag_str("filename", name),
                      NULL);
        }
      else if (!is_pipe && S_ISFIFO(st.st_mode))
        {
          msg_warning("WARNING: you are using the file driver, underlying file is a FIFO, it should be used by pipe()",
                      evt_tag_str("filename", name),
                      NULL);
        }
    }
  *fd = open(name, flags, mode);
  if (is_pipe && *fd < 0 && errno == ENOENT)
    {
      /* the pipe does not exist yet: create it, then retry the open */
      if (mkfifo(name, 0666) >= 0)
        *fd = open(name, flags, 0666);
    }

  if (*fd != -1)
    {
      g_fd_set_cloexec(*fd, TRUE);

      g_process_cap_modify(CAP_CHOWN, TRUE);
      g_process_cap_modify(CAP_FOWNER, TRUE);
      if (uid >= 0)
        fchown(*fd, (uid_t) uid, -1);
      if (gid >= 0)
        fchown(*fd, -1, (gid_t) gid);
      if (mode >= 0)
        fchmod(*fd, (mode_t) mode);
    }

  g_process_cap_restore(saved_caps);
  msg_trace("affile_open_file",
            evt_tag_str("path", name),
            evt_tag_int("fd",*fd),
            NULL);
  return *fd != -1;
}
/*
 * log_proto_buffered_server_fetch_into_buffer:
 *
 * Read the next chunk of raw data from the transport into the internal
 * buffer.  When character-set conversion is configured, the data is
 * first read into a temporary on-stack buffer (prefixed with any
 * leftover bytes of an incomplete multi-byte sequence from the previous
 * read) and then converted into the internal buffer.
 *
 * Returns: G_IO_STATUS_NORMAL on success, G_IO_STATUS_AGAIN when no
 * more data is available (EAGAIN), G_IO_STATUS_EOF at end-of-file and
 * G_IO_STATUS_ERROR on read or conversion errors.
 */
static GIOStatus
log_proto_buffered_server_fetch_into_buffer(LogProtoBufferedServer *self)
{
  guchar *raw_buffer = NULL;
  gint avail;
  gint rc;
  LogProtoBufferedServerState *state = log_proto_buffered_server_get_state(self);
  GIOStatus result = G_IO_STATUS_NORMAL;

  if (G_UNLIKELY(!self->buffer))
    log_proto_buffered_server_allocate_buffer(self, state);

  if (self->convert == (GIConv) -1)
    {
      /* no conversion, we read directly into our buffer */
      raw_buffer = self->buffer + state->pending_buffer_end;
      avail = state->buffer_size - state->pending_buffer_end;
    }
  else
    {
      /* if conversion is needed, we first read into an on-stack
       * buffer, and then convert it into our internal buffer */
      raw_buffer = g_alloca(self->super.options->init_buffer_size + state->raw_buffer_leftover_size);
      memcpy(raw_buffer, state->raw_buffer_leftover, state->raw_buffer_leftover_size);
      avail = self->super.options->init_buffer_size;
    }

  /* no room left in the buffer, nothing to read */
  if (avail == 0)
    goto exit;

  rc = log_proto_buffered_server_read_data(self, raw_buffer + state->raw_buffer_leftover_size, avail);
  if (rc < 0)
    {
      if (errno == EAGAIN)
        {
          /* ok we don't have any more data to read, return to main poll loop */
          result = G_IO_STATUS_AGAIN;
        }
      else
        {
          /* an error occurred while reading */
          msg_error("I/O error occurred while reading",
                    evt_tag_int(EVT_TAG_FD, self->super.transport->fd),
                    evt_tag_errno(EVT_TAG_OSERROR, errno));
          result = G_IO_STATUS_ERROR;
        }
    }
  else if (rc == 0)
    {
      /* EOF read */
      msg_verbose("EOF occurred while reading",
                  evt_tag_int(EVT_TAG_FD, self->super.transport->fd));
      if (state->raw_buffer_leftover_size > 0)
        {
          /* a partial multi-byte sequence was carried over from the
           * previous read; it can never be completed now, so drop it */
          msg_error("EOF read on a channel with leftovers from previous character conversion, dropping input");
          state->pending_buffer_pos = state->pending_buffer_end = 0;
        }
      result = G_IO_STATUS_EOF;
    }
  else
    {
      state->pending_raw_buffer_size += rc;
      /* account for the leftover bytes prepended before the read */
      rc += state->raw_buffer_leftover_size;
      state->raw_buffer_leftover_size = 0;

      if (self->convert == (GIConv) -1)
        {
          state->pending_buffer_end += rc;
        }
      else if (!log_proto_buffered_server_convert_from_raw(self, raw_buffer, rc))
        {
          result = G_IO_STATUS_ERROR;
        }
    }
 exit:
  log_proto_buffered_server_put_state(self);
  return result;
}
/*
 * log_proto_buffered_server_convert_from_raw:
 *
 * Convert @raw_buffer_len bytes of @raw_buffer from the configured
 * source encoding into self->buffer using g_iconv().  The internal
 * buffer is grown (doubled, capped at max_buffer_size) on E2BIG, and
 * an incomplete trailing multi-byte sequence (EINVAL on stream
 * transports) is stashed in state->raw_buffer_leftover until the next
 * chunk arrives.
 *
 * Returns: TRUE on success, FALSE on conversion errors.
 */
static gboolean
log_proto_buffered_server_convert_from_raw(LogProtoBufferedServer *self, const guchar *raw_buffer, gsize raw_buffer_len)
{
  /* some data was read */
  gsize avail_in = raw_buffer_len;
  gsize avail_out;
  gchar *out;
  gint ret = -1;
  gboolean success = FALSE;
  LogProtoBufferedServerState *state = log_proto_buffered_server_get_state(self);

  do
    {
      /* (re)compute the output window: g_iconv advances out/avail_out,
       * and the buffer may have been reallocated by the E2BIG branch */
      avail_out = state->buffer_size - state->pending_buffer_end;
      out = (gchar *) self->buffer + state->pending_buffer_end;

      ret = g_iconv(self->convert, (gchar **) &raw_buffer, &avail_in, (gchar **) &out, &avail_out);
      if (ret == (gsize) -1)
        {
          switch (errno)
            {
            case EINVAL:
              if (self->stream_based)
                {
                  /* Incomplete text, do not report an error, rather try to read again */
                  state->pending_buffer_end = state->buffer_size - avail_out;

                  if (avail_in > 0)
                    {
                      if (avail_in > sizeof(state->raw_buffer_leftover))
                        {
                          msg_error("Invalid byte sequence, the remaining raw buffer is larger than the supported leftover size",
                                    evt_tag_str("encoding", self->super.options->encoding),
                                    evt_tag_int("avail_in", avail_in),
                                    evt_tag_int("leftover_size", sizeof(state->raw_buffer_leftover)));
                          goto error;
                        }
                      /* save the unconverted tail for the next read */
                      memcpy(state->raw_buffer_leftover, raw_buffer, avail_in);
                      state->raw_buffer_leftover_size = avail_in;
                      state->raw_buffer_size -= avail_in;
                      msg_trace("Leftover characters remained after conversion, delaying message until another chunk arrives",
                                evt_tag_str("encoding", self->super.options->encoding),
                                evt_tag_int("avail_in", avail_in));
                      goto success;
                    }
                }
              else
                {
                  /* datagram/framed transport: a partial character can
                   * never be completed by a later chunk */
                  msg_error("Byte sequence too short, cannot convert an individual frame in its entirety",
                            evt_tag_str("encoding", self->super.options->encoding),
                            evt_tag_int("avail_in", avail_in));
                  goto error;
                }
              break;
            case E2BIG:
              state->pending_buffer_end = state->buffer_size - avail_out;
              /* extend the buffer */
              if (state->buffer_size < self->super.options->max_buffer_size)
                {
                  state->buffer_size *= 2;
                  if (state->buffer_size > self->super.options->max_buffer_size)
                    state->buffer_size = self->super.options->max_buffer_size;

                  self->buffer = g_realloc(self->buffer, state->buffer_size);

                  /* recalculate the out pointer, and add what we have now */
                  ret = -1;
                }
              else
                {
                  msg_error("Incoming byte stream requires a too large conversion buffer, probably invalid character sequence",
                            evt_tag_str("encoding", self->super.options->encoding),
                            evt_tag_printf("buffer", "%.*s", (gint) state->pending_buffer_end, self->buffer));
                  goto error;
                }
              break;
            case EILSEQ:
            default:
              msg_notice("Invalid byte sequence or other error while converting input, skipping character",
                         evt_tag_str("encoding", self->super.options->encoding),
                         evt_tag_printf("char", "0x%02x", *(guchar *) raw_buffer));
              goto error;
            }
        }
      else
        {
          state->pending_buffer_end = state->buffer_size - avail_out;
        }
    }
  while (avail_in > 0);

 success:
  success = TRUE;
 error:
  log_proto_buffered_server_put_state(self);
  return success;
}
/*
 * afamqp_worker_publish:
 *
 * Render the routing key, message body and header table for @msg and
 * publish it on the already established AMQP connection.
 *
 * Returns: TRUE on success, FALSE on a network error (the caller is
 * expected to suspend and reconnect).
 */
static gboolean
afamqp_worker_publish(AMQPDestDriver *self, LogMessage *msg)
{
  gint num_fields = 0;
  gint rc;
  gboolean result = TRUE;
  amqp_table_t header_table;
  amqp_basic_properties_t props;
  SBGString *routing_key = sb_gstring_acquire();
  SBGString *body = sb_gstring_acquire();
  amqp_bytes_t body_bytes = amqp_cstring_bytes("");
  gpointer user_data[] = { &self->entries, &num_fields, &self->max_entries };

  /* collect the configured name-value pairs into self->entries */
  value_pairs_foreach(self->vp, afamqp_vp_foreach, msg,
                      self->super.seq_num, LTZ_SEND,
                      &self->template_options, user_data);

  header_table.num_entries = num_fields;
  header_table.entries = self->entries;

  props._flags = AMQP_BASIC_CONTENT_TYPE_FLAG
    | AMQP_BASIC_DELIVERY_MODE_FLAG
    | AMQP_BASIC_HEADERS_FLAG;
  props.content_type = amqp_cstring_bytes("text/plain");
  props.delivery_mode = self->persistent;
  props.headers = header_table;

  log_template_format(self->routing_key_template, msg, NULL, LTZ_LOCAL,
                      self->super.seq_num, NULL, sb_gstring_string(routing_key));
  if (self->body_template)
    {
      log_template_format(self->body_template, msg, NULL, LTZ_LOCAL,
                          self->super.seq_num, NULL, sb_gstring_string(body));
      body_bytes = amqp_cstring_bytes(sb_gstring_string(body)->str);
    }

  rc = amqp_basic_publish(self->conn, 1,
                          amqp_cstring_bytes(self->exchange),
                          amqp_cstring_bytes(sb_gstring_string(routing_key)->str),
                          0, 0, &props, body_bytes);

  sb_gstring_release(routing_key);
  sb_gstring_release(body);

  if (rc < 0)
    {
      msg_error("Network error while inserting into AMQP server",
                evt_tag_str("driver", self->super.super.super.id),
                evt_tag_str("error", amqp_error_string2(-rc)),
                evt_tag_int("time_reopen", self->super.time_reopen),
                NULL);
      result = FALSE;
    }

  /* release the header entries produced by afamqp_vp_foreach */
  for (num_fields--; num_fields >= 0; num_fields--)
    {
      amqp_bytes_free(self->entries[num_fields].key);
      amqp_bytes_free(self->entries[num_fields].value.value.bytes);
    }

  return result;
}
/*
 * afmongodb_worker_insert:
 *
 * Pop one message off the queue and upsert it into MongoDB as a "$set"
 * update keyed by a freshly generated ObjectId.
 *
 * Returns: TRUE when there was nothing to do or the insert succeeded,
 * FALSE on network error (the message is pushed back onto the queue
 * for a later retry).
 */
static gboolean
afmongodb_worker_insert (MongoDBDestDriver *self)
{
  gboolean ok;
  guint8 *oid;
  LogMessage *msg;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;

  afmongodb_dd_connect(self, TRUE);

  /* grab the next message while holding the queue lock */
  g_mutex_lock(self->queue_mutex);
  log_queue_reset_parallel_push(self->queue);
  ok = log_queue_pop_head(self->queue, &msg, &path_options, FALSE, FALSE);
  g_mutex_unlock(self->queue_mutex);
  if (!ok)
    return TRUE;

  msg_set_context(msg);

  bson_reset (self->bson_sel);
  bson_reset (self->bson_upd);
  bson_reset (self->bson_set);

  /* selector document: a fresh ObjectId derived from the message timestamp */
  oid = mongo_util_oid_new_with_time (self->last_msg_stamp, self->seq_num);
  bson_append_oid (self->bson_sel, "_id", oid);
  g_free (oid);
  bson_finish (self->bson_sel);

  /* "$set" payload built from the configured value pairs */
  value_pairs_foreach (self->vp, afmongodb_vp_foreach, msg, self->seq_num, self->bson_set);
  bson_finish (self->bson_set);

  bson_append_document (self->bson_upd, "$set", self->bson_set);
  bson_finish (self->bson_upd);

  if (!mongo_sync_cmd_update (self->conn, self->ns, MONGO_WIRE_FLAG_UPDATE_UPSERT,
                              self->bson_sel, self->bson_upd))
    {
      msg_error ("Network error while inserting into MongoDB",
                 evt_tag_int("time_reopen", self->time_reopen),
                 NULL);
      ok = FALSE;
    }

  msg_set_context(NULL);

  if (!ok)
    {
      /* failed: put the message back so it can be retried */
      g_mutex_lock(self->queue_mutex);
      log_queue_push_head(self->queue, msg, &path_options);
      g_mutex_unlock(self->queue_mutex);
    }
  else
    {
      stats_counter_inc(self->stored_messages);
      step_sequence_number(&self->seq_num);
      log_msg_ack(msg, &path_options);
      log_msg_unref(msg);
    }

  return ok;
}
/*
 * r_insert_node:
 *
 * Insert @key with @value into the radix tree rooted at @root.  A '@'
 * in the key introduces a parser node ("@PARSER@"); "@@" is the escape
 * for a literal '@'.  Existing nodes are split at the longest common
 * prefix as needed.  @value_func renders @value for diagnostics only.
 */
void
r_insert_node(RNode *root, guint8 *key, gpointer value, RNodeGetValueFunc value_func)
{
  RNode *node;
  gint keylen = strlen(key);
  gint nodelen = root->keylen;
  gint i = 0;

  if (key[0] == '@')
    {
      guint8 *end;

      if (keylen >= 2 && key[1] == '@')
        {
          /* we found an escape, so check if we already have a child with '@', or add a child like that */
          node = r_find_child_by_first_character(root, key[1]);

          if (!node)
            {
              /* no child so we create one
               * if we are at the end of the key then use value, otherwise it is just a gap node */
              node = r_new_node("@", (keylen == 2 ? value : NULL));
              r_add_child(root, node);
            }
          else if (keylen == 2)
            {
              /* if we are at the end of the key set the value if it does not already exist,
               * otherwise it is a duplicate node */
              if (!node->value)
                node->value = value;
              else
                msg_error("Duplicate key in parser radix tree",
                          evt_tag_str("key", "@"),
                          evt_tag_str("value", value_func ? value_func(value) : "unknown"),
                          NULL);
            }

          /* go down building the tree if there is key left */
          if (keylen > 2)
            r_insert_node(node, key + 2, value, value_func);
        }
      else if ((keylen >= 2) && (end = strchr((const gchar *)key + 1, '@')) != NULL)
        {
          /* we are a parser node: the text between the two '@'s names the parser */
          *end = '\0';
          RParserNode *parser_node = r_new_pnode(key + 1);

          if (parser_node)
            {
              node = r_find_pchild(root, parser_node);

              if (!node)
                {
                  node = r_new_node(NULL, NULL);
                  node->parser = parser_node;
                  r_add_pchild(root, node);
                }
              else
                {
                  /* an equivalent parser child already exists, drop the new one */
                  r_free_pnode_only(parser_node);
                }

              if ((end - key) < (keylen - 1))
                {
                  /* the key is not over so go on building the tree */
                  r_insert_node(node, end + 1, value, value_func);
                }
              else
                {
                  /* the key is over so set value if it is not yet set */
                  if (!node->value)
                    {
                      node->value = value;
                    }
                  else
                    {
                      /* FIXME: print parser type in string format */
                      msg_error("Duplicate parser node in radix tree",
                                evt_tag_int("type", node->parser->type),
                                evt_tag_str("name", log_msg_get_value_name(node->parser->handle, NULL)),
                                evt_tag_str("value", value_func ? value_func(value) : "unknown"),
                                NULL);
                    }
                }
            }
        }
      else
        msg_error("Key contains '@' without escaping",
                  evt_tag_str("key", key),
                  evt_tag_str("value", value_func ? value_func(value) : "unknown"),
                  NULL);
    }
  else
    {
      /* we are not starting with @ sign or we are not interested in @ at all */

      while (i < keylen && i < nodelen)
        {
          /* check if key is the same, or if it is a parser */
          if ((key[i] != root->key[i]) || (key[i] == '@'))
            break;
          i++;
        }

      if (nodelen == 0 || i == 0 || (i < keylen && i >= nodelen))
        {
          /* either at the root or we need to go down the tree on the right child */
          node = r_find_child_by_first_character(root, key[i]);

          if (node)
            {
              /* @ is always a single node, and we also have an @ so insert us under root */
              if (key[i] == '@')
                r_insert_node(root, key + i, value, value_func);
              else
                r_insert_node(node, key + i, value, value_func);
            }
          else
            {
              r_add_child_check(root, key + i, value, value_func);
            }
        }
      else if (i == keylen && i == nodelen)
        {
          /* exact match */
          if (!root->value)
            root->value = value;
          else
            msg_error("Duplicate key in radix tree",
                      evt_tag_str("key", key),
                      evt_tag_str("value", value_func ? value_func(value) : "unknown"),
                      NULL);
        }
      else if (i > 0 && i < nodelen)
        {
          RNode *old_tree;
          guint8 *new_key;

          /* we need to split the current node: the common prefix stays
           * in root, the tail moves into a new child carrying root's
           * old value and children */
          old_tree = r_new_node(root->key + i, root->value);

          if (root->num_children)
            {
              old_tree->children = root->children;
              old_tree->num_children = root->num_children;
              root->children = NULL;
              root->num_children = 0;
            }

          if (root->num_pchildren)
            {
              old_tree->pchildren = root->pchildren;
              old_tree->num_pchildren = root->num_pchildren;
              root->pchildren = NULL;
              root->num_pchildren = 0;
            }

          root->value = NULL;
          new_key = g_strndup(root->key, i);
          g_free(root->key);
          root->key = new_key;
          root->keylen = i;

          r_add_child(root, old_tree);

          if (i < keylen)
            {
              /* we add a new sub tree */
              r_add_child_check(root, key + i, value, value_func);
            }
          else
            {
              /* the split is us */
              root->value = value;
            }
        }
      else
        {
          /* simply a new child */
          r_add_child_check(root, key + i, value, value_func);
        }
    }
}
/*
 * r_find_node_dbg:
 *
 * Look up @key in the radix tree rooted at @root, trying literal
 * children first and parser children only when no literal match is
 * found.  Parser match positions are recorded in @matches (offsets are
 * rewritten to be relative to @whole_key).  The debug variant also
 * records each visited node into @dbg_list.
 *
 * NOTE(review): the matching "#if"/"#else" for the "#endif" below (the
 * non-debug r_find_node signature) is above this excerpt.
 *
 * Returns: the matching node with a value, or NULL.
 */
RNode *
r_find_node_dbg(RNode *root, guint8 *whole_key, guint8 *key, gint keylen, GArray *matches, GArray *dbg_list)
#endif
{
  RNode *node, *ret;
  gint nodelen = root->keylen;
  gint j, m;
  register gint i;
#ifdef RADIX_DBG
  gint dbg_entries;
#endif

  /* compute i = length of the common prefix of key and root->key */
  if (nodelen < 1)
    i = 0;
  else if (nodelen == 1)
    i = 1;
  else
    {
      m = MIN(keylen, nodelen);

      /* this is a prefix match algorithm, we are interested how long
       * the common part between key and root->key is. Currently this
       * uses a byte-by-byte comparison, using a 64/32/16 bit units
       * would be better.
       *
       * The code below to perform aligned comparison is commented out
       * as it does not seem to matter and it is way more complex than
       * the simple algorithm below. */
#if 0
      if (2 <= m && (((unsigned long)key % 2) == 0))
        {
          gushort *keylong = key;
          gushort *rootlong = root->key;
          i = 0;
          while ((i + 2) <= m)
            {
              if (*keylong != *rootlong)
                break;
              i += 2;
              keylong = key + i;
              rootlong = root->key + i;
            }
          /*printf("RESULT %d\n", i); */
        }
      else
#endif
        i = 1;
      while (i < m)
        {
          if (key[i] != root->key[i])
            break;
          i++;
        }
    }
#ifdef RADIX_DBG
  r_add_debug_info(dbg_list, root, NULL, i, 0, 0);
  dbg_entries = dbg_list->len;
#endif
  msg_trace("Looking up node in the radix tree",
            evt_tag_int("i", i),
            evt_tag_int("nodelen", nodelen),
            evt_tag_int("keylen", keylen),
            evt_tag_str("root_key", root->key),
            evt_tag_str("key", key),
            NULL);

  if (i == keylen && (i == nodelen || nodelen == -1))
    {
      /* the whole key was consumed on this node */
      if (root->value)
        return root;
    }
  else if ((nodelen < 1) || (i < keylen && i >= nodelen))
    {
      /* key extends beyond this node: descend into a literal child first */
      ret = NULL;
      node = r_find_child(root, key[i]);

      if (node)
        {
#ifndef RADIX_DBG
          ret = r_find_node(node, whole_key, key + i, keylen - i, matches);
#else
          ret = r_find_node_dbg(node, whole_key, key + i, keylen - i, matches, dbg_list);
#endif
        }

      /* we only search if there is no match */
      if (!ret)
        {
          gint len;
          RParserNode *parser_node;
          gint match_ofs = 0;
          RParserMatch *match = NULL;

          if (matches)
            {
              /* reserve a slot for the match this level may produce */
              match_ofs = matches->len;

              g_array_set_size(matches, match_ofs + 1);
            }
          for (j = 0; j < root->num_pchildren; j++)
            {
              parser_node = root->pchildren[j]->parser;

              if (matches)
                {
                  match = &g_array_index(matches, RParserMatch, match_ofs);
                  memset(match, 0, sizeof(*match));
                }
#ifdef RADIX_DBG
              r_truncate_debug_info(dbg_list, dbg_entries);
#endif

              if (((parser_node->first <= key[i]) &&
                   (key[i] <= parser_node->last)) &&
                  (parser_node->parse(key + i, &len, parser_node->param, parser_node->state, match)))
                {

                  /* FIXME: we don't try to find the longest match in case
                   * the radix tree is split on a parser node. The correct
                   * approach would be to try all parsers and select the
                   * best match, however it is quite expensive and difficult
                   * to implement and we don't really expect this to be a
                   * realistic case. A log message is printed if such a
                   * collision occurs, so there's a slight chance we'll
                   * recognize if this happens in real life. */

#ifndef RADIX_DBG
                  ret = r_find_node(root->pchildren[j], whole_key, key + i + len, keylen - (i + len), matches);
#else
                  r_add_debug_info(dbg_list, root, parser_node, len, ((gint16) match->ofs) + (key + i) - whole_key, ((gint16) match->len) + len);
                  ret = r_find_node_dbg(root->pchildren[j], whole_key, key + i + len, keylen - (i + len), matches, dbg_list);
#endif
                  if (matches)
                    {
                      match = &g_array_index(matches, RParserMatch, match_ofs);

                      if (ret)
                        {
                          if (!(match->match))
                            {
                              /* NOTE: we allow the parser to return relative
                               * offset & length to the field parsed, this way
                               * quote characters can still be returned as
                               * REF_MATCH and we only need to duplicate the
                               * result if the string is indeed modified */
                              match->type = parser_node->type;
                              match->ofs = match->ofs + (key + i) - whole_key;
                              match->len = (gint16) match->len + len;
                              match->handle = parser_node->handle;
                            }
                          break;
                        }
                      else
                        {
                          if (match->match)
                            {
                              /* free the stored match, if this was a dead-end */
                              g_free(match->match);
                              match->match = NULL;
                            }
                        }
                    }
                }
            }
          if (!ret && matches)
            {
              /* the values in the matches array has already been freed if we come here */
              g_array_set_size(matches, match_ofs);
            }
        }
      if (ret)
        return ret;
      else if (root->value)
        return root;
    }
  return NULL;
}
/**
 * afsql_dd_insert_db:
 *
 * This function is running in the database thread.  It pops one message
 * off the queue and inserts it into the configured table, opening a
 * transaction and committing it according to the flush_lines settings.
 *
 * Returns: FALSE to indicate that the connection should be closed and
 * this destination suspended for time_reopen() time.
 **/
static gboolean
afsql_dd_insert_db(AFSqlDestDriver *self)
{
  GString *table = NULL;
  GString *insert_command = NULL;
  LogMessage *msg;
  gboolean success;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;

  if (!afsql_dd_ensure_initialized_connection(self))
    return FALSE;

  /* connection established, try to insert a message */
  success = log_queue_pop_head(self->queue, &msg, &path_options, FALSE, self->flags & AFSQL_DDF_EXPLICIT_COMMITS);
  if (!success)
    return TRUE;

  msg_set_context(msg);

  table = afsql_dd_ensure_accessible_database_table(self, msg);
  if (!table)
    {
      success = FALSE;
      goto out;
    }

  if (afsql_dd_should_start_new_transaction(self) && !afsql_dd_begin_txn(self))
    {
      success = FALSE;
      goto out;
    }

  insert_command = afsql_dd_build_insert_command(self, msg, table);
  success = afsql_dd_run_query(self, insert_command->str, FALSE, NULL);

  if (success && self->flush_lines_queued != -1)
    {
      self->flush_lines_queued++;

      if (afsql_dd_should_commit_transaction(self) && !afsql_dd_commit_txn(self))
        {
          /* Assuming that in case of error, the queue is rewound by afsql_dd_commit_txn() */

          /* BUGFIX: the original early return freed insert_command but
           * leaked `table`; free both before bailing out */
          g_string_free(table, TRUE);
          g_string_free(insert_command, TRUE);
          msg_set_context(NULL);
          return FALSE;
        }
    }

 out:
  if (table != NULL)
    g_string_free(table, TRUE);
  if (insert_command != NULL)
    g_string_free(insert_command, TRUE);
  msg_set_context(NULL);

  if (success)
    {
      /* message successfully inserted */
      log_msg_ack(msg, &path_options);
      log_msg_unref(msg);
      step_sequence_number(&self->seq_num);
      self->failed_message_counter = 0;
    }
  else
    {
      if (self->failed_message_counter < self->num_retries - 1)
        {
          if (!afsql_dd_handle_insert_row_error_depending_on_connection_availability(self, msg, &path_options))
            return FALSE;
          self->failed_message_counter++;
        }
      else
        {
          /* retry limit reached: drop the message but keep the connection */
          msg_error("Multiple failures while inserting this record into the database, message dropped",
                    evt_tag_int("attempts", self->num_retries),
                    NULL);
          stats_counter_inc(self->dropped_messages);
          log_msg_drop(msg, &path_options);
          self->failed_message_counter = 0;
          success = TRUE;
        }
    }
  return success;
}
/*
 * cfg_lexer_include_file_simple:
 *
 * Push a new include level for @filename.  If @filename is a directory,
 * every regular file in it whose name matches [-._a-zA-Z0-9]+ (and does
 * not start with '.') is queued, sorted by name; otherwise the single
 * file itself is queued.  Processing then starts via
 * cfg_lexer_start_next_include().
 *
 * NOTE(review): on the drop_level error path include_depth is not
 * decremented here — presumably the caller/unwinding handles it; verify
 * against cfg_lexer_start_next_include().
 *
 * Returns: TRUE on success (or an empty include directory), FALSE on
 * error or when @filename does not exist.
 */
static gboolean
cfg_lexer_include_file_simple(CfgLexer *self, const gchar *filename)
{
  CfgIncludeLevel *level;
  struct stat st;

  if (stat(filename, &st) < 0)
    {
      return FALSE;
    }

  self->include_depth++;
  level = &self->include_stack[self->include_depth];
  level->include_type = CFGI_FILE;
  if (S_ISDIR(st.st_mode))
    {
      GDir *dir;
      GError *error = NULL;
      const gchar *entry;

      dir = g_dir_open(filename, 0, &error);
      if (!dir)
        {
          msg_error("Error opening directory for reading",
                    evt_tag_str("filename", filename),
                    evt_tag_str("error", error->message));
          /* BUGFIX: the GError returned by g_dir_open() was leaked */
          g_error_free(error);
          goto drop_level;
        }
      while ((entry = g_dir_read_name(dir)))
        {
          const gchar *p;

          /* skip hidden/backup files */
          if (entry[0] == '.')
            {
              msg_debug("Skipping include file, it cannot begin with .",
                        evt_tag_str("filename", entry));
              continue;
            }
          for (p = entry; *p; p++)
            {
              if (!((*p >= 'a' && *p <= 'z') ||
                    (*p >= 'A' && *p <= 'Z') ||
                    (*p >= '0' && *p <= '9') ||
                    (*p == '_') || (*p == '-') || (*p == '.')))
                {
                  msg_debug("Skipping include file, does not match pattern [\\-_a-zA-Z0-9]+",
                            evt_tag_str("filename", entry));
                  p = NULL;
                  break;
                }
            }
          if (p)
            {
              gchar *full_filename = g_build_filename(filename, entry, NULL);
              if (stat(full_filename, &st) < 0 || S_ISDIR(st.st_mode))
                {
                  msg_debug("Skipping include file as it is a directory",
                            evt_tag_str("filename", entry));
                  g_free(full_filename);
                  continue;
                }
              /* ownership of full_filename passes to the list */
              level->file.files = g_slist_insert_sorted(level->file.files, full_filename, (GCompareFunc) strcmp);
              msg_debug("Adding include file",
                        evt_tag_str("filename", entry),
                        evt_tag_int("depth", self->include_depth));
            }
        }
      g_dir_close(dir);
      if (!level->file.files)
        {
          /* no include files in the specified directory */
          msg_debug("No files in this include directory",
                    evt_tag_str("dir", filename));
          self->include_depth--;
          return TRUE;
        }
    }
  else
    {
      g_assert(level->file.files == NULL);
      level->file.files = g_slist_prepend(level->file.files, g_strdup(filename));
    }
  return cfg_lexer_start_next_include(self);

 drop_level:
  g_slist_foreach(level->file.files, (GFunc) g_free, NULL);
  g_slist_free(level->file.files);
  level->file.files = NULL;
  return FALSE;
}
/*
 * log_proto_file_writer_flush:
 *
 * This function flushes the file output buffer.
 *
 * It is called either from log_proto_file_writer_post (normal mode: the
 * buffer is full) or from log_proto_flush (forced flush: flush time,
 * exit, etc).  A short write leaves the unwritten tail in
 * self->partial, which is retried on the next invocation.
 *
 * NOTE(review): writes go to self->fd but the error message below tags
 * self->super.transport->fd — presumably these refer to the same
 * descriptor; verify.
 */
static LogProtoStatus
log_proto_file_writer_flush(LogProtoClient *s)
{
  LogProtoFileWriter *self = (LogProtoFileWriter *)s;
  gint rc, i, i0, sum, ofs, pos;

  if (self->partial)
    {
      /* there is still some data from the previous file writing process */
      gint len = self->partial_len - self->partial_pos;

      rc = write(self->fd, self->partial + self->partial_pos, len);
      if (rc > 0 && self->fsync)
        fsync(self->fd);
      if (rc < 0)
        {
          goto write_error;
        }
      else if (rc != len)
        {
          /* still a short write: remember how far we got and retry later */
          self->partial_pos += rc;
          return LPS_SUCCESS;
        }
      else
        {
          g_free(self->partial);
          self->partial = NULL;
        }
    }

  /* we might be called from log_writer_deinit() without having a buffer at all */
  if (self->buf_count == 0)
    return LPS_SUCCESS;

  rc = writev(self->fd, self->buffer, self->buf_count);
  if (rc > 0 && self->fsync)
    fsync(self->fd);

  if (rc < 0)
    {
      goto write_error;
    }
  else if (rc != self->sum_len)
    {
      /* partial success: not everything has been written out */
      /* look for the first chunk that has been cut */
      sum = self->buffer[0].iov_len; /* sum is the cumulated length of the already processed items */
      i = 0;
      while (rc > sum)
        sum += self->buffer[++i].iov_len;
      self->partial_len = sum - rc; /* this is the length of the first non-written chunk */
      i0 = i;
      ++i;
      /* add the lengths of the following messages */
      while (i < self->buf_count)
        self->partial_len += self->buffer[i++].iov_len;
      /* allocate and copy the remaining data */
      self->partial = (guchar *)g_malloc(self->partial_len);
      ofs = sum - rc; /* the length of the remaining (not processed) chunk in the first message */
      pos = self->buffer[i0].iov_len - ofs;
      memcpy(self->partial, self->buffer[i0].iov_base + pos, ofs);
      i = i0 + 1;
      while (i < self->buf_count)
        {
          memcpy(self->partial + ofs, self->buffer[i].iov_base, self->buffer[i].iov_len);
          ofs += self->buffer[i].iov_len;
          ++i;
        }
      self->partial_pos = 0;
    }

  /* free the previous message strings (the remaining part has been copied to the partial buffer) */
  for (i = 0; i < self->buf_count; ++i)
    g_free(self->buffer[i].iov_base);
  self->buf_count = 0;
  self->sum_len = 0;

  return LPS_SUCCESS;

write_error:
  /* EINTR/EAGAIN are transient: report success and let the caller retry */
  if (errno != EINTR && errno != EAGAIN)
    {
      msg_error("I/O error occurred while writing",
                evt_tag_int("fd", self->super.transport->fd),
                evt_tag_errno(EVT_TAG_OSERROR, errno),
                NULL);
      return LPS_ERROR;
    }

  return LPS_SUCCESS;
}
/* * * @batch_items: the number of items processed in a batch (e.g. the number of items the consumer is preferred to process at a single invocation) * @partial_batch: true is returned if some elements (but less than batch_items) are already buffered * @timeout: the number of milliseconds that the consumer needs to wait before we can possibly proceed */ gboolean log_queue_check_items(LogQueue *self, gint *timeout, LogQueuePushNotifyFunc parallel_push_notify, gpointer user_data, GDestroyNotify user_data_destroy) { gint64 num_elements; g_static_mutex_lock(&self->lock); /* drop reference to the previous callback/userdata */ if (self->parallel_push_data && self->parallel_push_data_destroy) self->parallel_push_data_destroy(self->parallel_push_data); num_elements = log_queue_get_length(self); if (num_elements == 0) { self->parallel_push_notify = parallel_push_notify; self->parallel_push_data = user_data; self->parallel_push_data_destroy = user_data_destroy; g_static_mutex_unlock(&self->lock); return FALSE; } /* consume the user_data reference as we won't use the callback */ if (user_data && user_data_destroy) user_data_destroy(user_data); self->parallel_push_notify = NULL; self->parallel_push_data = NULL; g_static_mutex_unlock(&self->lock); /* recalculate buckets, throttle is only running in the output thread, no need to lock it. */ if (self->throttle > 0) { gint64 diff; gint new_buckets; GTimeVal now; g_get_current_time(&now); /* throttling is enabled, calculate new buckets */ if (self->last_throttle_check.tv_sec != 0) { diff = g_time_val_diff(&now, &self->last_throttle_check); } else { diff = 0; self->last_throttle_check = now; } new_buckets = (self->throttle * diff) / G_USEC_PER_SEC; if (new_buckets) { /* if new_buckets is zero, we don't save the current time as * last_throttle_check. The reason is that new_buckets could be * rounded to zero when only a minimal interval passes between * poll iterations. 
*/ self->throttle_buckets = MIN(self->throttle, self->throttle_buckets + new_buckets); self->last_throttle_check = now; } if (num_elements && self->throttle_buckets == 0) { if (timeout) { /* we are unable to send because of throttling, make sure that we * wake up when the rate limits lets us send at least 1 message */ *timeout = (1000 / self->throttle) + 1; msg_debug("Throttling output", evt_tag_int("wait", *timeout)); } return FALSE; } } return TRUE; }
/*
 * log_proto_buffered_server_apply_state:
 *
 * Restore a previously persisted read position (@handle) for the file
 * behind the transport fd.  The stored inode/size are validated against
 * the current file; on a mismatch, or if re-reading/re-converting the
 * saved buffer contents fails, all state is reset and reading restarts
 * from the beginning of the file.
 */
static void
log_proto_buffered_server_apply_state(LogProtoBufferedServer *self, PersistEntryHandle handle, const gchar *persist_name)
{
  struct stat st;
  gint64 ofs = 0;
  LogProtoBufferedServerState *state;
  gint fd;

  fd = self->super.transport->fd;
  self->persist_handle = handle;

  if (fstat(fd, &st) < 0)
    return;

  state = log_proto_buffered_server_get_state(self);

  if (!self->buffer)
    {
      self->buffer = g_malloc(state->buffer_size);
    }
  state->pending_buffer_end = 0;

  /* only resume if the stored state still describes this very file */
  if (state->file_inode &&
      state->file_inode == st.st_ino &&
      state->file_size <= st.st_size &&
      state->raw_stream_pos <= st.st_size)
    {
      ofs = state->raw_stream_pos;

      lseek(fd, ofs, SEEK_SET);
    }
  else
    {
      if (state->file_inode)
        {
          /* the stored state does not match the current file */
          msg_notice("The current log file has a mismatching size/inode information, restarting from the beginning",
                     evt_tag_str("state", persist_name),
                     evt_tag_int("stored_inode", state->file_inode),
                     evt_tag_int("cur_file_inode", st.st_ino),
                     evt_tag_int("stored_size", state->file_size),
                     evt_tag_int("cur_file_size", st.st_size),
                     evt_tag_int("raw_stream_pos", state->raw_stream_pos));
        }
      goto error;
    }
  if (state->raw_buffer_size)
    {
      gssize rc;
      guchar *raw_buffer;

      if (!self->super.options->encoding)
        {
          /* no conversion, we read directly into our buffer */
          if (state->raw_buffer_size > state->buffer_size)
            {
              msg_notice("Invalid LogProtoBufferedServerState.raw_buffer_size, larger than buffer_size and no encoding is set, restarting from the beginning",
                         evt_tag_str("state", persist_name),
                         evt_tag_int("raw_buffer_size", state->raw_buffer_size),
                         evt_tag_int("buffer_size", state->buffer_size),
                         evt_tag_int("init_buffer_size", self->super.options->init_buffer_size));
              goto error;
            }
          raw_buffer = self->buffer;
        }
      else
        {
          if (state->raw_buffer_size > self->super.options->max_buffer_size)
            {
              msg_notice("Invalid LogProtoBufferedServerState.raw_buffer_size, larger than max_buffer_size, restarting from the beginning",
                         evt_tag_str("state", persist_name),
                         evt_tag_int("raw_buffer_size", state->raw_buffer_size),
                         evt_tag_int("init_buffer_size", self->super.options->init_buffer_size),
                         evt_tag_int("max_buffer_size", self->super.options->max_buffer_size));
              goto error;
            }
          raw_buffer = g_alloca(state->raw_buffer_size);
        }

      /* re-read the chunk that was buffered when the state was saved */
      rc = log_transport_read(self->super.transport, raw_buffer, state->raw_buffer_size, NULL);
      if (rc != state->raw_buffer_size)
        {
          msg_notice("Error re-reading buffer contents of the file to be continued, restarting from the beginning",
                     evt_tag_str("state", persist_name));
          goto error;
        }

      state->pending_buffer_end = 0;
      if (self->super.options->encoding)
        {
          if (!log_proto_buffered_server_convert_from_raw(self, raw_buffer, rc))
            {
              msg_notice("Error re-converting buffer contents of the file to be continued, restarting from the beginning",
                         evt_tag_str("state", persist_name));
              goto error;
            }
        }
      else
        {
          state->pending_buffer_end += rc;
        }

      if (state->buffer_pos > state->pending_buffer_end)
        {
          msg_notice("Converted buffer contents is smaller than the current buffer position, starting from the beginning of the buffer, some lines may be duplicated",
                     evt_tag_str("state", persist_name));
          state->buffer_pos = state->pending_buffer_pos = 0;
        }
    }
  else
    {
      /* although we do have buffer position information, but the
       * complete contents of the buffer is already processed, instead
       * of reading and then dropping it, position the file after the
       * indicated block */
      state->raw_stream_pos += state->raw_buffer_size;
      ofs = state->raw_stream_pos;
      state->raw_buffer_size = 0;
      state->buffer_pos = state->pending_buffer_end = 0;

      lseek(fd, state->raw_stream_pos, SEEK_SET);
    }
  goto exit;

 error:
  /* reset every position field and restart from the beginning of the file */
  ofs = 0;
  state->buffer_pos = 0;
  state->pending_buffer_end = 0;
  state->__deprecated_buffer_cached_eol = 0;
  state->raw_stream_pos = 0;
  state->raw_buffer_size = 0;
  state->raw_buffer_leftover_size = 0;
  lseek(fd, 0, SEEK_SET);

 exit:
  state->file_inode = st.st_ino;
  state->file_size = st.st_size;
  state->raw_stream_pos = ofs;
  state->pending_buffer_pos = state->buffer_pos;
  state->pending_raw_stream_pos = state->raw_stream_pos;
  state->pending_raw_buffer_size = state->raw_buffer_size;
  state->__deprecated_buffer_cached_eol = 0;

  state = NULL;
  log_proto_buffered_server_put_state(self);
}
/*
 * qdisk_pop_head:
 *
 * Read the next record from the on-disk queue into @record and advance
 * the read head.  Records are stored as a big-endian guint32 length
 * followed by the payload; the read head wraps back to
 * QDISK_RESERVED_SPACE when it reaches the end of the written region.
 * When the queue becomes empty (non-reliable mode) the backing file is
 * truncated.
 *
 * Returns: TRUE if a record was read, FALSE when the queue is empty or
 * on read errors.
 */
gboolean
qdisk_pop_head(QDisk *self, GString *record)
{
  if (self->hdr->read_head != self->hdr->write_head)
    {
      guint32 n;
      gssize res;

      res = pread(self->fd, (gchar *) &n, sizeof(n), self->hdr->read_head);
      if (res == 0)
        {
          /* hmm, we are either at EOF or at hdr->qout_ofs, we need to wrap */
          self->hdr->read_head = QDISK_RESERVED_SPACE;
          res = pread(self->fd, (gchar *) &n, sizeof(n), self->hdr->read_head);
        }
      if (res != sizeof(n))
        {
          msg_error("Error reading disk-queue file",
                    evt_tag_str("error", res < 0 ? g_strerror(errno) : "short read"),
                    evt_tag_str("filename", self->filename));
          return FALSE;
        }

      n = GUINT32_FROM_BE(n);
      /* sanity-check the record length before trusting it */
      if (n > 10 * 1024 * 1024)
        {
          msg_warning("Disk-queue file contains possibly invalid record-length",
                      evt_tag_int("rec_length", n),
                      evt_tag_str("filename", self->filename));
          return FALSE;
        }
      else if (n == 0)
        {
          msg_error("Disk-queue file contains empty record",
                    evt_tag_int("rec_length", n),
                    evt_tag_str("filename", self->filename));
          return FALSE;
        }

      g_string_set_size(record, n);
      res = pread(self->fd, record->str, n, self->hdr->read_head + sizeof(n));
      if (res != n)
        {
          msg_error("Error reading disk-queue file",
                    evt_tag_str("filename", self->filename),
                    evt_tag_str("error", res < 0 ? g_strerror(errno) : "short read"),
                    evt_tag_int("read_length", n));
          return FALSE;
        }

      /* step past the length prefix and the payload */
      self->hdr->read_head = self->hdr->read_head + record->len + sizeof(n);

      if (self->hdr->read_head > self->hdr->write_head)
        {
          self->hdr->read_head = _correct_position_if_eof(self, &self->hdr->read_head);
        }

      self->hdr->length--;

      if (!self->options->reliable)
        {
          /* non-reliable mode keeps no backlog: it follows the read head */
          self->hdr->backlog_head = self->hdr->read_head;
        }

      if (self->hdr->length == 0 && !self->options->reliable)
        {
          msg_debug("Queue file became empty, truncating file",
                    evt_tag_str("filename", self->filename));
          self->hdr->read_head = QDISK_RESERVED_SPACE;
          self->hdr->write_head = QDISK_RESERVED_SPACE;
          if (!self->options->reliable)
            {
              self->hdr->backlog_head = self->hdr->read_head;
            }
          self->hdr->length = 0;
          _truncate_file(self, self->hdr->write_head);
        }

      return TRUE;
    }

  return FALSE;
}
static gboolean log_proto_buffered_server_convert_state(LogProtoBufferedServer *self, guint8 persist_version, gpointer old_state, gsize old_state_size, LogProtoBufferedServerState *state) { if (persist_version <= 2) { state->header.version = 0; state->file_inode = 0; state->raw_stream_pos = strtoll((gchar *) old_state, NULL, 10); state->file_size = 0; return TRUE; } else if (persist_version == 3) { SerializeArchive *archive; guint32 read_length; gint64 cur_size; gint64 cur_inode; gint64 cur_pos; guint16 version; gchar *buffer; gsize buffer_len; cur_inode = -1; cur_pos = 0; cur_size = 0; archive = serialize_buffer_archive_new(old_state, old_state_size); /* NOTE: the v23 conversion code adds an extra length field which we * need to read out. */ g_assert(serialize_read_uint32(archive, &read_length) && read_length == old_state_size - sizeof(read_length)); /* original v3 format starts here */ if (!serialize_read_uint16(archive, &version) || version != 0) { msg_error("Internal error restoring log reader state, stored data has incorrect version", evt_tag_int("version", version)); goto error_converting_v3; } if (!serialize_read_uint64(archive, (guint64 *) &cur_pos) || !serialize_read_uint64(archive, (guint64 *) &cur_inode) || !serialize_read_uint64(archive, (guint64 *) &cur_size)) { msg_error("Internal error restoring information about the current file position, restarting from the beginning"); goto error_converting_v3; } if (!serialize_read_uint16(archive, &version) || version != 0) { msg_error("Internal error, protocol state has incorrect version", evt_tag_int("version", version)); goto error_converting_v3; } if (!serialize_read_cstring(archive, &buffer, &buffer_len)) { msg_error("Internal error, error reading buffer contents", evt_tag_int("version", version)); goto error_converting_v3; } if (!self->buffer || state->buffer_size < buffer_len) { gsize buffer_size = MAX(self->super.options->init_buffer_size, buffer_len); self->buffer = g_realloc(self->buffer, buffer_size); 
} serialize_archive_free(archive); memcpy(self->buffer, buffer, buffer_len); state->buffer_pos = 0; state->pending_buffer_end = buffer_len; g_free(buffer); state->header.version = 0; state->file_inode = cur_inode; state->raw_stream_pos = cur_pos; state->file_size = cur_size; return TRUE; error_converting_v3: serialize_archive_free(archive); } return FALSE; }
/* Tear down a partially initialized QDisk after a failed qdisk_start().
 * In read-only mode self->hdr is a private g_malloc()d copy (the mmap was
 * already released); otherwise it is the live mmap()ed view of the file --
 * release it accordingly.  munmap() on heap memory would be undefined
 * behavior, which the original error paths risked in read-only mode. */
static void
_qdisk_start_rollback(QDisk *self)
{
  if (self->hdr)
    {
      if (self->options->read_only)
        g_free(self->hdr);
      else
        munmap((void *) self->hdr, sizeof(QDiskFileHeader));
      self->hdr = NULL;
    }
  if (self->fd >= 0)
    {
      close(self->fd);
      self->fd = -1;
    }
}

/* Open (or create) the disk-queue file @filename and restore the
 * in-memory queues from it.
 *
 * @filename: path of the queue file, or NULL to create a new one under
 *            the configured directory
 * @qout/@qbacklog/@qoverflow: in-memory queues restored from / saved to
 *            the file header area
 *
 * Returns TRUE on success.  On failure every resource acquired so far
 * (fd, header mapping/copy) is released.  Assumes @self is zero
 * initialized (self->fd, self->hdr, self->filename).
 */
gboolean
qdisk_start(QDisk *self, const gchar *filename, GQueue *qout, GQueue *qbacklog, GQueue *qoverflow)
{
  gboolean new_file = FALSE;
  gpointer p = NULL;
  int openflags = 0;

  /*
   * If qdisk_start is called for already initialized qdisk file
   * it can cause message loosing.
   * We need this assert to detect programming error as soon as possible.
   */
  g_assert(!qdisk_initialized(self));

  if (self->options->disk_buf_size <= 0)
    return TRUE;

  /* read-only mode makes no sense without an existing file to read */
  if (self->options->read_only && !filename)
    return FALSE;

  if (!filename)
    {
      new_file = TRUE;
      /* NOTE: this'd be a security problem if we were not in our private directory. But we are. */
      filename = _next_filename(self);
    }
  else
    {
      struct stat st;

      if (stat(filename, &st) == -1)
        new_file = TRUE;
    }

  self->filename = g_strdup(filename);
  /* assumes self is zero initialized */
  openflags = self->options->read_only
    ? (O_RDONLY | O_LARGEFILE)
    : (O_RDWR | O_LARGEFILE | (new_file ? O_CREAT : 0));
  self->fd = open(filename, openflags, 0600);
  if (self->fd < 0)
    {
      msg_error("Error opening disk-queue file",
                evt_tag_str("filename", self->filename),
                evt_tag_errno("error", errno));
      return FALSE;
    }

  /* map only the fixed-size header; record data is accessed via pread/pwrite */
  p = mmap(0, sizeof(QDiskFileHeader),
           self->options->read_only ? (PROT_READ) : (PROT_READ | PROT_WRITE),
           MAP_SHARED, self->fd, 0);
  if (p == MAP_FAILED)
    {
      msg_error("Error returned by mmap",
                evt_tag_errno("errno", errno),
                evt_tag_str("filename", self->filename));
      /* FIX: the original returned here without closing self->fd, leaking
       * the descriptor on every failed mmap */
      _qdisk_start_rollback(self);
      return FALSE;
    }
  else
    {
      madvise(p, sizeof(QDiskFileHeader), MADV_RANDOM);
    }

  if (self->options->read_only)
    {
      /* read-only: keep a private heap copy of the header and drop the mapping */
      self->hdr = g_malloc(sizeof(QDiskFileHeader));
      memcpy(self->hdr, p, sizeof(QDiskFileHeader));
      munmap(p, sizeof(QDiskFileHeader));
      p = NULL;
    }
  else
    {
      self->hdr = p;
    }

  /* initialize new file */
  if (new_file)
    {
      QDiskFileHeader tmp;

      /* reserve the header area with zeroes before filling in the fields */
      memset(&tmp, 0, sizeof(tmp));
      if (!pwrite_strict(self->fd, &tmp, sizeof(tmp), 0))
        {
          msg_error("Error occured while initalizing the new queue file",
                    evt_tag_str("filename", self->filename),
                    evt_tag_errno("error", errno));
          _qdisk_start_rollback(self);
          return FALSE;
        }
      self->hdr->version = 1;
      self->hdr->big_endian = (G_BYTE_ORDER == G_BIG_ENDIAN);

      self->hdr->read_head = QDISK_RESERVED_SPACE;
      self->hdr->write_head = QDISK_RESERVED_SPACE;
      self->hdr->backlog_head = self->hdr->read_head;
      self->hdr->length = 0;

      if (!qdisk_save_state(self, qout, qbacklog, qoverflow))
        {
          _qdisk_start_rollback(self);
          return FALSE;
        }
    }
  else
    {
      struct stat st;

      if (fstat(self->fd, &st) != 0 || st.st_size == 0)
        {
          msg_error("Error loading disk-queue file",
                    evt_tag_str("filename", self->filename),
                    evt_tag_errno("fstat error", errno),
                    evt_tag_int("size", st.st_size));
          /* FIX: the original always munmap()ed self->hdr here, even in
           * read-only mode where hdr is a g_malloc()d copy */
          _qdisk_start_rollback(self);
          return FALSE;
        }
      if (self->hdr->version == 0)
        {
          /* upgrade a version-0 header in place; v0 files were always big-endian */
          self->hdr->big_endian = TRUE;
          self->hdr->version = 1;
          self->hdr->backlog_head = self->hdr->read_head;
          self->hdr->backlog_len = 0;
        }
      if ((self->hdr->big_endian && G_BYTE_ORDER == G_LITTLE_ENDIAN) ||
          (!self->hdr->big_endian && G_BYTE_ORDER == G_BIG_ENDIAN))
        {
          /* the file was written on a host with the opposite byte order:
           * swap every header field once, then mark it native */
          self->hdr->read_head = GUINT64_SWAP_LE_BE(self->hdr->read_head);
          self->hdr->write_head = GUINT64_SWAP_LE_BE(self->hdr->write_head);
          self->hdr->length = GUINT64_SWAP_LE_BE(self->hdr->length);
          self->hdr->qout_ofs = GUINT64_SWAP_LE_BE(self->hdr->qout_ofs);
          self->hdr->qout_len = GUINT32_SWAP_LE_BE(self->hdr->qout_len);
          self->hdr->qout_count = GUINT32_SWAP_LE_BE(self->hdr->qout_count);
          self->hdr->qbacklog_ofs = GUINT64_SWAP_LE_BE(self->hdr->qbacklog_ofs);
          self->hdr->qbacklog_len = GUINT32_SWAP_LE_BE(self->hdr->qbacklog_len);
          self->hdr->qbacklog_count = GUINT32_SWAP_LE_BE(self->hdr->qbacklog_count);
          self->hdr->qoverflow_ofs = GUINT64_SWAP_LE_BE(self->hdr->qoverflow_ofs);
          self->hdr->qoverflow_len = GUINT32_SWAP_LE_BE(self->hdr->qoverflow_len);
          self->hdr->qoverflow_count = GUINT32_SWAP_LE_BE(self->hdr->qoverflow_count);
          self->hdr->backlog_head = GUINT64_SWAP_LE_BE(self->hdr->backlog_head);
          self->hdr->backlog_len = GUINT64_SWAP_LE_BE(self->hdr->backlog_len);
          self->hdr->big_endian = (G_BYTE_ORDER == G_BIG_ENDIAN);
        }

      if (!_load_state(self, qout, qbacklog, qoverflow))
        {
          _qdisk_start_rollback(self);
          return FALSE;
        }
    }
  return TRUE;
}
/* Restore the buffered protocol's position-tracking state from
 * @persist_state under @persist_name.
 *
 * Handles four cases:
 *   - no previous entry: allocate a fresh v4 state;
 *   - persist_version < 4: allocate a fresh v4 state and convert the old
 *     blob into it;
 *   - persist_version == 4: use the entry in place, byte-swapping it
 *     once if it was written on a host with the opposite byte order;
 *   - persist_version > 4: data is newer than this build understands.
 *
 * Returns TRUE when a usable state was applied; FALSE on error, in which
 * case a zeroed state is installed so reading restarts from the
 * beginning of the file.
 */
gboolean
log_proto_buffered_server_restart_with_state(LogProtoServer *s, PersistState *persist_state, const gchar *persist_name)
{
  LogProtoBufferedServer *self = (LogProtoBufferedServer *) s;
  guint8 persist_version;
  PersistEntryHandle old_state_handle;
  gpointer old_state;
  gsize old_state_size;
  PersistEntryHandle new_state_handle = 0;
  gpointer new_state = NULL;
  gboolean success;

  self->pos_tracking = TRUE;
  self->persist_state = persist_state;
  old_state_handle = persist_state_lookup_entry(persist_state, persist_name, &old_state_size, &persist_version);
  if (!old_state_handle)
    {
      /* no previous state: start from a freshly allocated entry */
      new_state_handle = log_proto_buffered_server_alloc_state(self, persist_state, persist_name);
      if (!new_state_handle)
        goto fallback_non_persistent;
      log_proto_buffered_server_apply_state(self, new_state_handle, persist_name);
      return TRUE;
    }
  if (persist_version < 4)
    {
      new_state_handle = log_proto_buffered_server_alloc_state(self, persist_state, persist_name);
      if (!new_state_handle)
        goto fallback_non_persistent;

      old_state = persist_state_map_entry(persist_state, old_state_handle);
      new_state = persist_state_map_entry(persist_state, new_state_handle);
      success = log_proto_buffered_server_convert_state(self, persist_version, old_state, old_state_size, new_state);
      persist_state_unmap_entry(persist_state, old_state_handle);
      persist_state_unmap_entry(persist_state, new_state_handle);

      /* we're using the newly allocated state structure regardless if
       * conversion succeeded. If the conversion went wrong then
       * new_state is still in its freshly initialized form since the
       * convert function will not touch the state in the error
       * branches.
       */
      log_proto_buffered_server_apply_state(self, new_state_handle, persist_name);
      return success;
    }
  else if (persist_version == 4)
    {
      LogProtoBufferedServerState *state;

      old_state = persist_state_map_entry(persist_state, old_state_handle);
      state = old_state;
      if ((state->header.big_endian && G_BYTE_ORDER == G_LITTLE_ENDIAN) ||
          (!state->header.big_endian && G_BYTE_ORDER == G_BIG_ENDIAN))
        {
          /* byte order conversion in order to avoid the hassle with
             scattered byte order conversions in the code */
          state->header.big_endian = !state->header.big_endian;
          state->buffer_pos = GUINT32_SWAP_LE_BE(state->buffer_pos);
          state->pending_buffer_pos = GUINT32_SWAP_LE_BE(state->pending_buffer_pos);
          state->pending_buffer_end = GUINT32_SWAP_LE_BE(state->pending_buffer_end);
          state->buffer_size = GUINT32_SWAP_LE_BE(state->buffer_size);
          state->raw_stream_pos = GUINT64_SWAP_LE_BE(state->raw_stream_pos);
          state->raw_buffer_size = GUINT32_SWAP_LE_BE(state->raw_buffer_size);
          state->pending_raw_stream_pos = GUINT64_SWAP_LE_BE(state->pending_raw_stream_pos);
          state->pending_raw_buffer_size = GUINT32_SWAP_LE_BE(state->pending_raw_buffer_size);
          state->file_size = GUINT64_SWAP_LE_BE(state->file_size);
          state->file_inode = GUINT64_SWAP_LE_BE(state->file_inode);
        }

      if (state->header.version > 0)
        {
          msg_error("Internal error restoring log reader state, stored data is too new",
                    evt_tag_int("version", state->header.version));
          /* FIX: the original jumped to error with old_state_handle still
           * mapped, leaking the mapping on this path */
          persist_state_unmap_entry(persist_state, old_state_handle);
          goto error;
        }
      persist_state_unmap_entry(persist_state, old_state_handle);
      log_proto_buffered_server_apply_state(self, old_state_handle, persist_name);
      return TRUE;
    }
  else
    {
      msg_error("Internal error restoring log reader state, stored data is too new",
                evt_tag_int("version", persist_version));
      goto error;
    }
  return TRUE; /* not reached: every branch above returns or jumps to error */

fallback_non_persistent:
  /* persistent allocation failed: fall back to plain heap state that is
   * lost across restarts (self->state1 takes ownership below) */
  new_state = g_new0(LogProtoBufferedServerState, 1);
error:
  if (!new_state)
    {
      /* error happened when we still had a persistent entry available:
       * allocate (or re-try) one to hold the reset state */
      new_state_handle = log_proto_buffered_server_alloc_state(self, persist_state, persist_name);
      if (!new_state_handle)
        goto fallback_non_persistent;
      new_state = persist_state_map_entry(persist_state, new_state_handle);
    }
  if (new_state)
    {
      LogProtoBufferedServerState *state = new_state;

      /* error happened, restart the file from the beginning */
      state->raw_stream_pos = 0;
      state->file_inode = 0;
      state->file_size = 0;
      if (new_state_handle)
        log_proto_buffered_server_apply_state(self, new_state_handle, persist_name);
      else
        self->state1 = new_state;
    }
  if (new_state_handle)
    {
      persist_state_unmap_entry(persist_state, new_state_handle);
    }
  return FALSE;
}