/**
 * Read the last committed offset from the offset file.
 * Returns the offset, or RD_KAFKA_OFFSET_INVALID on error or empty file.
 */
static int64_t rd_kafka_offset_file_read (rd_kafka_toppar_t *rktp) {
        char buf[22];
        char *end;
        int64_t offset;
        size_t r;

        if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
                rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
                                RD_KAFKA_RESP_ERR__FS,
                                "%s [%"PRId32"]: "
                                "Seek (for read) failed on offset file %s: %s",
                                rktp->rktp_rkt->rkt_topic->str,
                                rktp->rktp_partition,
                                rktp->rktp_offset_path,
                                rd_strerror(errno));
                rd_kafka_offset_file_close(rktp);
                return RD_KAFKA_OFFSET_INVALID;
        }

        r = fread(buf, 1, sizeof(buf) - 1, rktp->rktp_offset_fp);
        if (r == 0) {
                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                             "%s [%"PRId32"]: offset file (%s) is empty",
                             rktp->rktp_rkt->rkt_topic->str,
                             rktp->rktp_partition,
                             rktp->rktp_offset_path);
                return RD_KAFKA_OFFSET_INVALID;
        }

        buf[r] = '\0';

        offset = strtoull(buf, &end, 10);
        if (buf == end) {
                rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
                                RD_KAFKA_RESP_ERR__FS,
                                "%s [%"PRId32"]: "
                                "Unable to parse offset in %s",
                                rktp->rktp_rkt->rkt_topic->str,
                                rktp->rktp_partition,
                                rktp->rktp_offset_path);
                return RD_KAFKA_OFFSET_INVALID;
        }

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: Read offset %"PRId64" from offset "
                     "file (%s)",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     offset, rktp->rktp_offset_path);

        return offset;
}
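/*
 * A minimal standalone sketch (not librdkafka code) of the on-disk format
 * rd_kafka_offset_file_read() expects: a single ASCII decimal offset,
 * optionally followed by a newline, e.g. "1234\n". The helper name is
 * illustrative only.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Parse the contents of an offset file buffer; returns -1 if unparsable. */
static int64_t example_parse_offset_buf (const char *filebuf) {
        char buf[22];                     /* fits a 64-bit decimal + '\n' */
        char *end;
        int64_t offset;

        strncpy(buf, filebuf, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        offset = strtoull(buf, &end, 10);
        if (buf == end)
                return -1;                /* nothing numeric to parse */

        return offset;                    /* "1234\n" -> 1234 */
}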
/**
 * Take action when the offset for a toppar becomes unusable.
 *
 * NOTE: toppar_lock(rktp) must be held
 */
void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset,
                            rd_kafka_resp_err_t err, const char *reason) {
        int64_t offset = RD_KAFKA_OFFSET_ERROR;
        rd_kafka_op_t *rko;
        int64_t offset_reset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;

        if (offset_reset == RD_KAFKA_OFFSET_END ||
            offset_reset == RD_KAFKA_OFFSET_BEGINNING ||
            offset_reset <= RD_KAFKA_OFFSET_TAIL_BASE) {
                offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;
                rktp->rktp_query_offset = offset;
                rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY;

        } else if (offset_reset == RD_KAFKA_OFFSET_ERROR) {
                rko = rd_kafka_op_new(RD_KAFKA_OP_ERR);

                rko->rko_err                 = err;
                rko->rko_rkmessage.offset    = err_offset;
                rko->rko_rkmessage.rkt       = rktp->rktp_rkt;
                rko->rko_rkmessage.partition = rktp->rktp_partition;
                rko->rko_payload             = strdup(reason);
                rko->rko_len                 = strlen(rko->rko_payload);
                rko->rko_flags              |= RD_KAFKA_OP_F_FREE;
                rd_kafka_topic_keep(rko->rko_rkmessage.rkt);

                rd_kafka_q_enq(&rktp->rktp_fetchq, rko);
                rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE;
        }

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: offset reset (at offset %"PRId64") "
                     "to %"PRId64": %s: %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     err_offset, offset, reason, rd_kafka_err2str(err));
}
/**
 * Commit the stored offset for this partition to the broker.
 * Returns RD_KAFKA_RESP_ERR__IN_PROGRESS since the commit is asynchronous.
 */
static rd_kafka_resp_err_t
rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp) {
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_topic_partition_t *rktpar;

        rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_cgrp != NULL);
        rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
                        rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE);

        rktp->rktp_committing_offset = rktp->rktp_stored_offset;

        offsets = rd_kafka_topic_partition_list_new(1);
        rktpar = rd_kafka_topic_partition_list_add(
                offsets, rktp->rktp_rkt->rkt_topic->str,
                rktp->rktp_partition);
        rktpar->offset = rktp->rktp_committing_offset;

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSETCMT",
                     "%.*s [%"PRId32"]: committing offset %"PRId64,
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, rktp->rktp_committing_offset);

        rd_kafka_commit0(rktp->rktp_rkt->rkt_rk, offsets, rktp,
                         RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
                         rd_kafka_offset_broker_commit_cb, NULL);

        rd_kafka_topic_partition_list_destroy(offsets);

        return RD_KAFKA_RESP_ERR__IN_PROGRESS;
}
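/*
 * A hedged sketch of what the equivalent commit looks like from the
 * application side, using librdkafka's public API. Topic, partition and
 * offset values are placeholders, and error handling is minimal. By
 * convention the committed offset is the offset of the next message to
 * consume.
 */
#include <librdkafka/rdkafka.h>

static rd_kafka_resp_err_t example_commit_one (rd_kafka_t *rk,
                                               const char *topic,
                                               int32_t partition,
                                               int64_t next_offset) {
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_resp_err_t err;

        offsets = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(offsets, topic, partition)->offset =
                next_offset;

        /* async=0: block until the OffsetCommit response is received. */
        err = rd_kafka_commit(rk, offsets, 0/*sync*/);

        rd_kafka_topic_partition_list_destroy(offsets);
        return err;
}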
/**
 * Commit offset to backing store.
 * This might be an async operation.
 *
 * Locality: toppar handler thread
 */
rd_kafka_resp_err_t rd_kafka_offset_commit (rd_kafka_toppar_t *rktp) {
        if (1)  // FIXME
        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: commit: "
                     "stored offset %"PRId64" > committed offset %"PRId64"?",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     rktp->rktp_stored_offset, rktp->rktp_committed_offset);

        /* Already committed */
        if (rktp->rktp_stored_offset <= rktp->rktp_committed_offset)
                return RD_KAFKA_RESP_ERR_NO_ERROR;

        /* Already committing (for async ops) */
        if (rktp->rktp_stored_offset <= rktp->rktp_committing_offset)
                return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;

        switch (rktp->rktp_rkt->rkt_conf.offset_store_method)
        {
        case RD_KAFKA_OFFSET_METHOD_FILE:
                return rd_kafka_offset_file_commit(rktp);
        case RD_KAFKA_OFFSET_METHOD_BROKER:
                return rd_kafka_offset_broker_commit(rktp);
        default:
                /* UNREACHABLE */
                return RD_KAFKA_RESP_ERR__INVALID_ARG;
        }
}
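/*
 * A hedged sketch of the application-side configuration that selects which
 * backing store rd_kafka_offset_commit() dispatches to, and of storing an
 * offset for the next commit. Property names are standard librdkafka topic
 * configuration properties; error handling is omitted for brevity.
 */
#include <librdkafka/rdkafka.h>

static void example_store_offset (rd_kafka_topic_t *rkt,
                                  const rd_kafka_message_t *rkmessage) {
        /* Mark this message as consumed; it is picked up by the next
         * (automatic or manual) offset commit. */
        rd_kafka_offset_store(rkt, rkmessage->partition, rkmessage->offset);
}

static rd_kafka_topic_conf_t *example_topic_conf (void) {
        char errstr[512];
        rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();

        /* "file" keeps offsets in local .offset files,
         * "broker" commits them to the Kafka cluster. */
        rd_kafka_topic_conf_set(tconf, "offset.store.method", "broker",
                                errstr, sizeof(errstr));
        rd_kafka_topic_conf_set(tconf, "auto.commit.interval.ms", "5000",
                                errstr, sizeof(errstr));
        return tconf;
}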
/**
 * Terminate the toppar's offset store; this is the finalizing step after
 * offset_store_stop().
 *
 * Locks: rd_kafka_toppar_lock() MUST be held.
 */
void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp,
                                 rd_kafka_resp_err_t err) {
        rd_kafka_resp_err_t err2;

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "STORETERM",
                     "%s [%"PRId32"]: offset store terminating",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);

        rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING;

        rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
                            &rktp->rktp_offset_commit_tmr, 1/*lock*/);

        switch (rktp->rktp_rkt->rkt_conf.offset_store_method)
        {
        case RD_KAFKA_OFFSET_METHOD_FILE:
                err2 = rd_kafka_offset_file_term(rktp);
                break;
        case RD_KAFKA_OFFSET_METHOD_BROKER:
                err2 = rd_kafka_offset_broker_term(rktp);
                break;
        case RD_KAFKA_OFFSET_METHOD_NONE:
                err2 = RD_KAFKA_RESP_ERR_NO_ERROR;
                break;
        }

        /* Prioritize the input error (probably from commit), fall
         * back on the termination error. */
        if (!err)
                err = err2;

        rd_kafka_toppar_fetch_stopped(rktp, err);
}
/**
 * Take action when the offset for a toppar becomes unusable.
 *
 * Locality: toppar handler thread
 * Locks: toppar_lock() MUST be held
 */
void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset,
                            rd_kafka_resp_err_t err, const char *reason) {
        int64_t offset = RD_KAFKA_OFFSET_INVALID;
        rd_kafka_op_t *rko;

        /* Enqueue op for toppar handler thread if we're on the wrong
         * thread. */
        if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
                rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_CALLBACK);
                rko->rko_op_cb   = rd_kafka_offset_reset_op_cb;
                rko->rko_rktp    = rd_kafka_toppar_keep(rktp);
                rko->rko_err     = err;
                rko->rko_offset  = err_offset;
                rko->rko_flags  |= RD_KAFKA_OP_F_FREE;
                rko->rko_payload = rd_strdup(reason);
                rko->rko_len     = strlen(reason);
                rd_kafka_q_enq(&rktp->rktp_ops, rko);
                return;
        }

        if (err_offset == RD_KAFKA_OFFSET_INVALID || err)
                offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;
        else
                offset = err_offset;

        if (offset == RD_KAFKA_OFFSET_INVALID) {
                /* Error, auto.offset.reset tells us to error out. */
                rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR);

                rko->rko_err                 = err;
                rko->rko_rkmessage.offset    = err_offset;
                rko->rko_rkmessage.partition = rktp->rktp_partition;
                rko->rko_payload             = rd_strdup(reason);
                rko->rko_len                 = strlen(rko->rko_payload);
                rko->rko_flags              |= RD_KAFKA_OP_F_FREE;
                rko->rko_rktp                = rd_kafka_toppar_keep(rktp);

                rd_kafka_q_enq(&rktp->rktp_fetchq, rko);
                rd_kafka_toppar_set_fetch_state(
                        rktp, RD_KAFKA_TOPPAR_FETCH_NONE);

        } else {
                /* Query logical offset */
                rktp->rktp_query_offset = offset;
                rd_kafka_toppar_set_fetch_state(
                        rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
        }

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: offset reset (at offset %s) "
                     "to %s: %s: %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     rd_kafka_offset2str(err_offset),
                     rd_kafka_offset2str(offset),
                     reason, rd_kafka_err2str(err));

        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
                rd_kafka_toppar_offset_request(rktp,
                                               rktp->rktp_query_offset, 0);
}
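/*
 * A hedged sketch of the application-side knob that drives this reset
 * logic: the topic configuration property "auto.offset.reset". Valid
 * values include "smallest"/"earliest", "largest"/"latest" and "error".
 */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_topic_conf_t *example_reset_conf (void) {
        char errstr[512];
        rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();

        /* On an out-of-range or missing offset, restart from the
         * beginning of the partition instead of raising an error. */
        if (rd_kafka_topic_conf_set(tconf, "auto.offset.reset", "earliest",
                                    errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
                fprintf(stderr, "%s\n", errstr);

        return tconf;
}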
/**
 * Called when a broker commit is done.
 *
 * Locality: toppar handler thread
 * Locks: none
 */
static void
rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk,
                                  rd_kafka_resp_err_t err,
                                  rd_kafka_topic_partition_list_t *offsets,
                                  void *opaque) {
        shptr_rd_kafka_toppar_t *s_rktp;
        rd_kafka_toppar_t *rktp;

        if (!(s_rktp = rd_kafka_topic_partition_list_get_toppar(rk,
                                                                offsets, 0))) {
                rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT",
                             "No local partition found for %s [%"PRId32"] "
                             "while parsing OffsetCommit response "
                             "(offset %"PRId64", error \"%s\")",
                             offsets->elems[0].topic,
                             offsets->elems[0].partition,
                             offsets->elems[0].offset,
                             rd_kafka_err2str(offsets->elems[0].err));
                return;
        }

        rktp = rd_kafka_toppar_s2i(s_rktp);

        if (!err)
                err = offsets->elems[0].err;

        rd_kafka_toppar_offset_commit_result(rktp, err, offsets);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: offset %"PRId64" committed: %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     offsets->elems[0].offset, rd_kafka_err2str(err));

        rktp->rktp_committing_offset = 0;

        rd_kafka_toppar_lock(rktp);
        if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING)
                rd_kafka_offset_store_term(rktp, err);
        rd_kafka_toppar_unlock(rktp);

        rd_kafka_toppar_destroy(s_rktp);
}
/**
 * Offset query timer callback: (re)issue the offset request for the
 * currently wanted (logical) offset.
 */
void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg) {
        rd_kafka_toppar_t *rktp = arg;
        rd_kafka_toppar_lock(rktp);
        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "Topic %s [%"PRId32"]: timed offset query for %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     rd_kafka_offset2str(rktp->rktp_query_offset));
        rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0);
        rd_kafka_toppar_unlock(rktp);
}
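/*
 * A hedged sketch of the logical offsets that can end up in
 * rktp_query_offset: the public simple-consumer API lets the application
 * start from BEGINNING, END, STORED or a relative tail offset, all of
 * which must first be resolved to an absolute offset via an offset query.
 * Partition value and message count are placeholders.
 */
#include <librdkafka/rdkafka.h>

static void example_start_from_tail (rd_kafka_topic_t *rkt,
                                     int32_t partition) {
        /* Start 1000 messages from the end of the partition; the actual
         * absolute offset is resolved by the offset query machinery. */
        rd_kafka_consume_start(rkt, partition, RD_KAFKA_OFFSET_TAIL(1000));
}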
/**
 * Prepare a toppar for using an offset file.
 *
 * NOTE: toppar_lock(rktp) must be held.
 */
static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) {
        struct stat st;
        char spath[4096];
        const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path;
        int64_t offset = -1;

        if (stat(path, &st) == 0 && S_ISDIR(st.st_mode)) {
                snprintf(spath, sizeof(spath), "%s%s%s-%"PRId32".offset",
                         path, path[strlen(path)-1] == '/' ? "" : "/",
                         rktp->rktp_rkt->rkt_topic->str,
                         rktp->rktp_partition);
                path = spath;
        }

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"] using offset file %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     path);

        rktp->rktp_offset_path = strdup(path);

        rd_kafka_timer_start(rktp->rktp_rkt->rkt_rk,
                             &rktp->rktp_offset_commit_tmr,
                             rktp->rktp_rkt->rkt_conf.
                             auto_commit_interval_ms * 1000,
                             rd_kafka_offset_file_commit_tmr_cb, rktp);

        if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0)
                rd_kafka_timer_start(rktp->rktp_rkt->rkt_rk,
                                     &rktp->rktp_offset_sync_tmr,
                                     rktp->rktp_rkt->rkt_conf.
                                     offset_store_sync_interval_ms * 1000,
                                     rd_kafka_offset_file_sync_tmr_cb, rktp);

        if (rd_kafka_offset_file_open(rktp) != -1) {
                /* Read offset from offset file. */
                offset = rd_kafka_offset_file_read(rktp);
        }

        if (offset != -1) {
                /* Start fetching from offset */
                rktp->rktp_commited_offset = offset;
                rktp->rktp_next_offset     = offset;
                rktp->rktp_fetch_state     = RD_KAFKA_TOPPAR_FETCH_ACTIVE;
        } else {
                /* Offset was not usable: perform offset reset logic */
                rktp->rktp_commited_offset = 0;
                rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_ERROR,
                                      RD_KAFKA_RESP_ERR__FS,
                                      "non-readable offset file");
        }
}
/**
 * Offset file sync timer callback
 */
static void rd_kafka_offset_file_sync_tmr_cb (rd_kafka_t *rk, void *arg) {
        rd_kafka_toppar_t *rktp = arg;

        rd_kafka_toppar_lock(rktp);

        if (rktp->rktp_offset_fd != -1) {
                rd_kafka_dbg(rk, TOPIC, "SYNC",
                             "%s [%"PRId32"]: offset file sync",
                             rktp->rktp_rkt->rkt_topic->str,
                             rktp->rktp_partition);
                fsync(rktp->rktp_offset_fd);
        }

        rd_kafka_toppar_unlock(rktp);
}
/**
 * Offset file commit timer callback.
 */
static void rd_kafka_offset_file_commit_tmr_cb (rd_kafka_t *rk, void *arg) {
        rd_kafka_toppar_t *rktp = arg;

        rd_kafka_toppar_lock(rktp);

        rd_kafka_dbg(rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: periodic commit: "
                     "stored offset %"PRId64" > committed offset %"PRId64" ?",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     rktp->rktp_stored_offset, rktp->rktp_commited_offset);

        if (rktp->rktp_stored_offset > rktp->rktp_commited_offset)
                rd_kafka_offset_file_commit(rktp, rktp->rktp_stored_offset);

        rd_kafka_toppar_unlock(rktp);
}
/**
 * Sync/flush offset file.
 */
static int rd_kafka_offset_file_sync (rd_kafka_toppar_t *rktp) {
        if (!rktp->rktp_offset_fp)
                return 0;

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "SYNC",
                     "%s [%"PRId32"]: offset file sync",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);

#ifndef _MSC_VER
        (void)fflush(rktp->rktp_offset_fp);
        (void)fsync(fileno(rktp->rktp_offset_fp)); // FIXME
#else
        // FIXME
        // FlushFileBuffers(_get_osfhandle(fileno(rktp->rktp_offset_fp)));
#endif
        return 0;
}
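/*
 * A standalone sketch (not librdkafka code) of what the Windows branch left
 * as FIXME above might look like: flush the stdio buffers, then ask the OS
 * to flush its own buffers to disk. The Win32 calls and the cast of
 * _get_osfhandle()'s result to HANDLE are assumptions about a typical MSVC
 * build, not the library's actual implementation.
 */
#include <stdio.h>
#ifdef _MSC_VER
#include <io.h>
#include <windows.h>
#else
#include <unistd.h>
#endif

static int example_file_sync (FILE *fp) {
        if (fflush(fp) != 0)
                return -1;
#ifdef _MSC_VER
        if (!FlushFileBuffers((HANDLE)_get_osfhandle(_fileno(fp))))
                return -1;
#else
        if (fsync(fileno(fp)) == -1)
                return -1;
#endif
        return 0;
}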
/**
 * Stop toppar's offset store, committing the final offsets, etc.
 *
 * Returns RD_KAFKA_RESP_ERR_NO_ERROR on success,
 * RD_KAFKA_RESP_ERR__IN_PROGRESS if stopping triggered an
 * async operation (e.g., broker offset commit), or
 * any other error in case of immediate failure.
 *
 * The offset layer will call rd_kafka_offset_store_term() when
 * offset management has been fully stopped for this partition.
 *
 * Locks: rd_kafka_toppar_lock() MUST be held.
 */
rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) {
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;

        if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE))
                goto done;

        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING;

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: stopping offset store "
                     "(stored offset %"PRId64
                     ", committed offset %"PRId64", EOF offset %"PRId64")",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     rktp->rktp_stored_offset, rktp->rktp_committed_offset,
                     rktp->rktp_offsets_fin.eof_offset);

        /* Store end offset for empty partitions */
        if (((!rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
              rktp->rktp_rkt->rkt_rk->rk_conf.enable_auto_commit) ||
             rktp->rktp_rkt->rkt_conf.auto_commit) &&
            rktp->rktp_stored_offset == -1 &&
            rktp->rktp_offsets_fin.eof_offset > 0)
                rd_kafka_offset_store0(rktp,
                                       rktp->rktp_offsets_fin.eof_offset,
                                       0/*no lock*/);

        /* Commit offset to backing store.
         * This might be an async operation. */
        if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
            rktp->rktp_stored_offset > rktp->rktp_committed_offset)
                err = rd_kafka_offset_commit(rktp);

        /* If stop is in progress (async commit), return now. */
        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
                return err;

done:
        /* Stop is done */
        rd_kafka_offset_store_term(rktp, err);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
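/*
 * A hedged sketch of the simple-consumer shutdown sequence on the
 * application side that leads to the offset store being stopped: stop
 * consumption of each partition, then destroy the topic and handle.
 * Partition value is a placeholder.
 */
#include <librdkafka/rdkafka.h>

static void example_shutdown (rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                              int32_t partition) {
        /* Stops the fetcher for this partition; final offsets are
         * committed by the offset store as part of the stop. */
        rd_kafka_consume_stop(rkt, partition);

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);
}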
/**
 * Initialize toppar's offset store.
 *
 * Locality: toppar handler thread
 */
void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp) {
        static const char *store_names[] = { "none", "file", "broker" };

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: using offset store method: %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     store_names[rktp->rktp_rkt->rkt_conf.
                                 offset_store_method]);

        /* The committed offset is unknown at this point. */
        rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID;

        /* Set up the commit interval (for simple consumer). */
        if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
            rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms > 0)
                rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
                                     &rktp->rktp_offset_commit_tmr,
                                     rktp->rktp_rkt->rkt_conf.
                                     auto_commit_interval_ms * 1000ll,
                                     rd_kafka_offset_auto_commit_tmr_cb,
                                     rktp);

        switch (rktp->rktp_rkt->rkt_conf.offset_store_method)
        {
        case RD_KAFKA_OFFSET_METHOD_FILE:
                rd_kafka_offset_file_init(rktp);
                break;
        case RD_KAFKA_OFFSET_METHOD_BROKER:
                rd_kafka_offset_broker_init(rktp);
                break;
        case RD_KAFKA_OFFSET_METHOD_NONE:
                break;
        default:
                /* NOTREACHED */
                return;
        }

        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE;
}
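/*
 * A hedged sketch of the client configuration that feeds the auto-commit
 * machinery initialized above; property names are standard librdkafka
 * configuration properties and the group id is a placeholder.
 */
#include <librdkafka/rdkafka.h>

static rd_kafka_conf_t *example_autocommit_conf (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        rd_kafka_conf_set(conf, "group.id", "example-group",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "enable.auto.commit", "true",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "auto.commit.interval.ms", "5000",
                          errstr, sizeof(errstr));
        return conf;
}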
/**
 * OpenSSL password query callback
 *
 * Locality: application thread
 */
static int rd_kafka_transport_ssl_passwd_cb (char *buf, int size, int rwflag,
                                              void *userdata) {
        rd_kafka_t *rk = userdata;
        int pwlen;

        rd_kafka_dbg(rk, SECURITY, "SSLPASSWD",
                     "Private key file \"%s\" requires password",
                     rk->rk_conf.ssl.key_location);

        if (!rk->rk_conf.ssl.key_password) {
                rd_kafka_log(rk, LOG_WARNING, "SSLPASSWD",
                             "Private key file \"%s\" requires password but "
                             "no password configured (ssl.key.password)",
                             rk->rk_conf.ssl.key_location);
                return -1;
        }

        pwlen = (int) strlen(rk->rk_conf.ssl.key_password);
        memcpy(buf, rk->rk_conf.ssl.key_password, RD_MIN(pwlen, size));

        return pwlen;
}
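/*
 * A hedged sketch of the configuration that makes this callback return a
 * password instead of -1; the key path and password are placeholders and
 * error handling is omitted.
 */
#include <librdkafka/rdkafka.h>

static void example_ssl_key_conf (rd_kafka_conf_t *conf) {
        char errstr[512];

        rd_kafka_conf_set(conf, "ssl.key.location",
                          "/etc/ssl/private/client.key",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "ssl.key.password", "secret",
                          errstr, sizeof(errstr));
}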
/**
 * Initialize toppar's offset store.
 *
 * NOTE: toppar_lock(rktp) must be held.
 */
void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp) {
        static const char *store_names[] = { "file", "broker" };

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: using offset store method: %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     store_names[rktp->rktp_rkt->rkt_conf.
                                 offset_store_method]);

        switch (rktp->rktp_rkt->rkt_conf.offset_store_method)
        {
        case RD_KAFKA_OFFSET_METHOD_FILE:
                rd_kafka_offset_file_init(rktp);
                break;
        case RD_KAFKA_OFFSET_METHOD_BROKER:
                rd_kafka_offset_broker_init(rktp);
                break;
        default:
                /* NOTREACHED */
                return;
        }

        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE;
}
/**
 * Prepare a toppar for using an offset file.
 *
 * NOTE: toppar_lock(rktp) must be held.
 */
static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) {
        struct stat st;
        char spath[4096];
        const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path;
        int64_t offset = -1;

        if (stat(path, &st) == 0 && S_ISDIR(st.st_mode)) {
                char tmpfile[1024];
                char escfile[4096];

                /* Include group.id in filename if configured. */
                if (!RD_KAFKAP_STR_IS_NULL(rktp->rktp_rkt->
                                           rkt_conf.group_id))
                        snprintf(tmpfile, sizeof(tmpfile),
                                 "%s-%"PRId32"-%.*s.offset",
                                 rktp->rktp_rkt->rkt_topic->str,
                                 rktp->rktp_partition,
                                 RD_KAFKAP_STR_PR(rktp->rktp_rkt->
                                                  rkt_conf.group_id));
                else
                        snprintf(tmpfile, sizeof(tmpfile),
                                 "%s-%"PRId32".offset",
                                 rktp->rktp_rkt->rkt_topic->str,
                                 rktp->rktp_partition);

                /* Escape filename to make it safe. */
                mk_esc_filename(tmpfile, escfile, sizeof(escfile));

                snprintf(spath, sizeof(spath), "%s%s%s",
                         path, path[strlen(path)-1] == '/' ? "" : "/",
                         escfile);

                path = spath;
        }

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"] using offset file %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     path);

        rktp->rktp_offset_path = strdup(path);

        rd_kafka_timer_start(rktp->rktp_rkt->rkt_rk,
                             &rktp->rktp_offset_commit_tmr,
                             rktp->rktp_rkt->rkt_conf.
                             auto_commit_interval_ms * 1000,
                             rd_kafka_offset_file_commit_tmr_cb, rktp);

        if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0)
                rd_kafka_timer_start(rktp->rktp_rkt->rkt_rk,
                                     &rktp->rktp_offset_sync_tmr,
                                     rktp->rktp_rkt->rkt_conf.
                                     offset_store_sync_interval_ms * 1000,
                                     rd_kafka_offset_file_sync_tmr_cb, rktp);

        if (rd_kafka_offset_file_open(rktp) != -1) {
                /* Read offset from offset file. */
                offset = rd_kafka_offset_file_read(rktp);
        }

        if (offset != -1) {
                /* Start fetching from offset */
                rktp->rktp_commited_offset = offset;
                rktp->rktp_next_offset     = offset;
                rktp->rktp_fetch_state     = RD_KAFKA_TOPPAR_FETCH_ACTIVE;
        } else {
                /* Offset was not usable: perform offset reset logic */
                rktp->rktp_commited_offset = 0;
                rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_ERROR,
                                      RD_KAFKA_RESP_ERR__FS,
                                      "non-readable offset file");
        }
}
/**
 * Prepare a toppar for using an offset file.
 *
 * Locality: rdkafka main thread
 * Locks: toppar_lock(rktp) must be held
 */
static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) {
        char spath[4096];
        const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path;
        int64_t offset = RD_KAFKA_OFFSET_INVALID;

        if (rd_kafka_path_is_dir(path)) {
                char tmpfile[1024];
                char escfile[4096];

                /* Include group.id in filename if configured. */
                if (!RD_KAFKAP_STR_IS_NULL(rktp->rktp_rkt->rkt_rk->
                                           rk_conf.group_id))
                        rd_snprintf(tmpfile, sizeof(tmpfile),
                                    "%s-%"PRId32"-%.*s.offset",
                                    rktp->rktp_rkt->rkt_topic->str,
                                    rktp->rktp_partition,
                                    RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_rk->
                                                     rk_conf.group_id));
                else
                        rd_snprintf(tmpfile, sizeof(tmpfile),
                                    "%s-%"PRId32".offset",
                                    rktp->rktp_rkt->rkt_topic->str,
                                    rktp->rktp_partition);

                /* Escape filename to make it safe. */
                mk_esc_filename(tmpfile, escfile, sizeof(escfile));

                rd_snprintf(spath, sizeof(spath), "%s%s%s",
                            path, path[strlen(path)-1] == '/' ? "" : "/",
                            escfile);

                path = spath;
        }

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%"PRId32"]: using offset file %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     path);

        rktp->rktp_offset_path = rd_strdup(path);

        /* Set up the offset file sync interval. */
        if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0)
                rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
                                     &rktp->rktp_offset_sync_tmr,
                                     rktp->rktp_rkt->rkt_conf.
                                     offset_store_sync_interval_ms * 1000ll,
                                     rd_kafka_offset_sync_tmr_cb, rktp);

        if (rd_kafka_offset_file_open(rktp) != -1) {
                /* Read offset from offset file. */
                offset = rd_kafka_offset_file_read(rktp);
        }

        if (offset != RD_KAFKA_OFFSET_INVALID) {
                /* Start fetching from offset */
                rktp->rktp_stored_offset    = offset;
                rktp->rktp_committed_offset = offset;
                rd_kafka_toppar_next_offset_handle(rktp, offset);
        } else {
                /* Offset was not usable: perform offset reset logic */
                rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID;
                rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_INVALID,
                                      RD_KAFKA_RESP_ERR__FS,
                                      "non-readable offset file");
        }
}
/**
 * Once-per-rd_kafka_t-handle initialization of OpenSSL
 *
 * Locality: application thread
 *
 * NOTE: rd_kafka_wrlock() MUST be held
 */
int rd_kafka_transport_ssl_ctx_init (rd_kafka_t *rk,
                                     char *errstr, size_t errstr_size) {
        int r;
        SSL_CTX *ctx;

        call_once(&rd_kafka_ssl_init_once, rd_kafka_transport_ssl_init);

        ctx = SSL_CTX_new(SSLv23_client_method());
        if (!ctx)
                goto fail;

        /* Key file password callback */
        SSL_CTX_set_default_passwd_cb(ctx, rd_kafka_transport_ssl_passwd_cb);
        SSL_CTX_set_default_passwd_cb_userdata(ctx, rk);

        /* Ciphers */
        if (rk->rk_conf.ssl.cipher_suites) {
                rd_kafka_dbg(rk, SECURITY, "SSL",
                             "Setting cipher list: %s",
                             rk->rk_conf.ssl.cipher_suites);
                if (!SSL_CTX_set_cipher_list(ctx,
                                             rk->rk_conf.ssl.cipher_suites)) {
                        rd_snprintf(errstr, errstr_size,
                                    "No recognized ciphers");
                        goto fail;
                }
        }

        if (rk->rk_conf.ssl.ca_location) {
                /* CA certificate location, either file or directory. */
                int is_dir = rd_kafka_path_is_dir(rk->rk_conf.ssl.ca_location);

                rd_kafka_dbg(rk, SECURITY, "SSL",
                             "Loading CA certificate(s) from %s %s",
                             is_dir ? "directory" : "file",
                             rk->rk_conf.ssl.ca_location);

                r = SSL_CTX_load_verify_locations(ctx,
                                                  !is_dir ?
                                                  rk->rk_conf.ssl.ca_location :
                                                  NULL,
                                                  is_dir ?
                                                  rk->rk_conf.ssl.ca_location :
                                                  NULL);
                if (r != 1)
                        goto fail;
        }

        if (rk->rk_conf.ssl.cert_location) {
                rd_kafka_dbg(rk, SECURITY, "SSL",
                             "Loading certificate from file %s",
                             rk->rk_conf.ssl.cert_location);

                r = SSL_CTX_use_certificate_chain_file(ctx,
                                                       rk->rk_conf.ssl.
                                                       cert_location);
                if (r != 1)
                        goto fail;
        }

        if (rk->rk_conf.ssl.key_location) {
                rd_kafka_dbg(rk, SECURITY, "SSL",
                             "Loading private key file from %s",
                             rk->rk_conf.ssl.key_location);

                r = SSL_CTX_use_PrivateKey_file(ctx,
                                                rk->rk_conf.ssl.key_location,
                                                SSL_FILETYPE_PEM);
                if (r != 1)
                        goto fail;
        }

        SSL_CTX_set_mode(ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);

        rk->rk_conf.ssl.ctx = ctx;
        return 0;

 fail:
        rd_kafka_ssl_error(rk, NULL, errstr, errstr_size);
        SSL_CTX_free(ctx);
        return -1;
}
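/*
 * A hedged sketch of a client configuration that exercises the code paths
 * above (ciphers, CA, certificate and private key); all file paths are
 * placeholders and error handling is omitted for brevity.
 */
#include <librdkafka/rdkafka.h>

static rd_kafka_t *example_ssl_client (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        rd_kafka_conf_set(conf, "security.protocol", "ssl",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "ssl.ca.location", "/etc/ssl/certs/ca.pem",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "ssl.certificate.location",
                          "/etc/ssl/certs/client.pem",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "ssl.key.location",
                          "/etc/ssl/private/client.key",
                          errstr, sizeof(errstr));

        /* rd_kafka_new() takes ownership of conf on success. */
        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
}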
/**
 * Write offset to offset file.
 *
 * NOTE: rktp lock is not required.
 * Locality: rdkafka main thread
 */
static int rd_kafka_offset_file_commit (rd_kafka_toppar_t *rktp,
                                        int64_t offset) {
        rd_kafka_topic_t *rkt = rktp->rktp_rkt;
        int attempt;

        for (attempt = 0 ; attempt < 2 ; attempt++) {
                char buf[22];
                int len;

                if (rktp->rktp_offset_fd == -1)
                        if (rd_kafka_offset_file_open(rktp) == -1)
                                continue;

                if (lseek(rktp->rktp_offset_fd, 0, SEEK_SET) == -1) {
                        rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
                                        RD_KAFKA_RESP_ERR__FS,
                                        "%s [%"PRId32"]: "
                                        "Seek failed on offset file %s: %s",
                                        rktp->rktp_rkt->rkt_topic->str,
                                        rktp->rktp_partition,
                                        rktp->rktp_offset_path,
                                        strerror(errno));
                        rd_kafka_offset_file_close(rktp);
                        continue;
                }

                len = snprintf(buf, sizeof(buf), "%"PRId64"\n", offset);

                if (write(rktp->rktp_offset_fd, buf, len) == -1) {
                        rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
                                        RD_KAFKA_RESP_ERR__FS,
                                        "%s [%"PRId32"]: "
                                        "Failed to write offset %"PRId64" to "
                                        "offset file %s (fd %i): %s",
                                        rktp->rktp_rkt->rkt_topic->str,
                                        rktp->rktp_partition,
                                        offset,
                                        rktp->rktp_offset_path,
                                        rktp->rktp_offset_fd,
                                        strerror(errno));
                        rd_kafka_offset_file_close(rktp);
                        continue;
                }

                if (ftruncate(rktp->rktp_offset_fd, len) == -1)
                        ; /* Ignore truncate failures */

                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                             "%s [%"PRId32"]: wrote offset %"PRId64" to "
                             "file %s",
                             rktp->rktp_rkt->rkt_topic->str,
                             rktp->rktp_partition, offset,
                             rktp->rktp_offset_path);

                rktp->rktp_commited_offset = offset;

                /* If sync interval is set to immediate we sync right away. */
                if (rkt->rkt_conf.offset_store_sync_interval_ms == 0)
                        fsync(rktp->rktp_offset_fd);

                return 0;
        }

        return -1;
}
/**
 * Write offset to offset file.
 *
 * Locality: toppar's broker thread
 */
static rd_kafka_resp_err_t
rd_kafka_offset_file_commit (rd_kafka_toppar_t *rktp) {
        rd_kafka_itopic_t *rkt = rktp->rktp_rkt;
        int attempt;
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
        int64_t offset = rktp->rktp_stored_offset;

        for (attempt = 0 ; attempt < 2 ; attempt++) {
                char buf[22];
                int len;

                if (!rktp->rktp_offset_fp)
                        if (rd_kafka_offset_file_open(rktp) == -1)
                                continue;

                if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
                        rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
                                        RD_KAFKA_RESP_ERR__FS,
                                        "%s [%"PRId32"]: "
                                        "Seek failed on offset file %s: %s",
                                        rktp->rktp_rkt->rkt_topic->str,
                                        rktp->rktp_partition,
                                        rktp->rktp_offset_path,
                                        rd_strerror(errno));
                        err = RD_KAFKA_RESP_ERR__FS;
                        rd_kafka_offset_file_close(rktp);
                        continue;
                }

                len = rd_snprintf(buf, sizeof(buf), "%"PRId64"\n", offset);

                if (fwrite(buf, 1, len, rktp->rktp_offset_fp) < 1) {
                        rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
                                        RD_KAFKA_RESP_ERR__FS,
                                        "%s [%"PRId32"]: "
                                        "Failed to write offset %"PRId64" to "
                                        "offset file %s: %s",
                                        rktp->rktp_rkt->rkt_topic->str,
                                        rktp->rktp_partition,
                                        offset,
                                        rktp->rktp_offset_path,
                                        rd_strerror(errno));
                        err = RD_KAFKA_RESP_ERR__FS;
                        rd_kafka_offset_file_close(rktp);
                        continue;
                }

                /* Need to flush before truncate to preserve write ordering */
                (void)fflush(rktp->rktp_offset_fp);

                /* Truncate file */
#ifdef _MSC_VER
                if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) == -1)
                        ; /* Ignore truncate failures */
#else
                if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1)
                        ; /* Ignore truncate failures */
#endif

                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                             "%s [%"PRId32"]: wrote offset %"PRId64" to "
                             "file %s",
                             rktp->rktp_rkt->rkt_topic->str,
                             rktp->rktp_partition, offset,
                             rktp->rktp_offset_path);

                rktp->rktp_committed_offset = offset;

                /* If sync interval is set to immediate we sync right away. */
                if (rkt->rkt_conf.offset_store_sync_interval_ms == 0)
                        rd_kafka_offset_file_sync(rktp);

                return RD_KAFKA_RESP_ERR_NO_ERROR;
        }

        return err;
}