Code example #1
File: 0035-api_version.c Project: BDeus/librdkafka
int main_0035_api_version (int argc, char **argv) {
	rd_kafka_t *rk;
	rd_kafka_conf_t *conf;
	const struct rd_kafka_metadata *metadata;
	rd_kafka_resp_err_t err;
	test_timing_t t_meta;

	test_conf_init(&conf, NULL, 30);
	test_conf_set(conf, "socket.timeout.ms", "12000");
	rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

	TEST_SAY("Querying for metadata\n");
	TIMING_START(&t_meta, "metadata()");
	err = rd_kafka_metadata(rk, 0, NULL, &metadata, 10*1000);
	TIMING_STOP(&t_meta);
	if (err)
		TEST_FAIL("metadata() failed: %s",
			  rd_kafka_err2str(err));

	if (TIMING_DURATION(&t_meta) / 1000 > 11*1000)
		TEST_FAIL("metadata() took too long: %.3fms",
			  (float)TIMING_DURATION(&t_meta) / 1000.0f);

	rd_kafka_metadata_destroy(metadata);

	TEST_SAY("Metadata succeeded\n");

	rd_kafka_destroy(rk);

	return 0;
}
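
The test above relies on the librdkafka test harness (test_conf_init, test_create_handle, TIMING_*). For reference, a minimal standalone sketch of the same metadata query is shown below; the "localhost:9092" broker address and the printed summary are placeholders, not taken from the test.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Minimal sketch: query cluster metadata and print the broker count. */
static int query_metadata_example (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        const struct rd_kafka_metadata *md;
        rd_kafka_resp_err_t err;

        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                rd_kafka_conf_destroy(conf);
                return -1;
        }

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                rd_kafka_conf_destroy(conf); /* conf is still ours on failure */
                return -1;
        }

        /* all_topics=0, only_rkt=NULL: metadata for locally known topics only */
        err = rd_kafka_metadata(rk, 0, NULL, &md, 10*1000);
        if (!err) {
                printf("%d broker(s) in cluster\n", md->broker_cnt);
                rd_kafka_metadata_destroy(md);
        }

        rd_kafka_destroy(rk);
        return err ? -1 : 0;
}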
Code example #2
static void rebalance_cb (rd_kafka_t *rk,
			  rd_kafka_resp_err_t err,
			  rd_kafka_topic_partition_list_t *partitions,
			  void *opaque) {
        char *memberid = rd_kafka_memberid(rk);

	TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
		 rd_kafka_name(rk), memberid, rd_kafka_err2str(err));

        if (memberid)
                free(memberid);

	test_print_partition_list(partitions);

	switch (err)
	{
	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
		assign_cnt++;
		rd_kafka_assign(rk, partitions);
		break;

	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
		if (assign_cnt == 0)
			TEST_FAIL("asymetric rebalance_cb\n");
		assign_cnt--;
		rd_kafka_assign(rk, NULL);
		break;

	default:
		TEST_FAIL("rebalance failed: %s\n",
			  rd_kafka_err2str(err));
		break;
	}
}
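
The excerpt above shows only the callback body. A sketch of how such a callback is typically wired into a high-level consumer follows; the topic name is a placeholder and conf is assumed to already carry group.id and the broker list. rebalance_cb refers to the function defined above.

static rd_kafka_t *create_subscribed_consumer (rd_kafka_conf_t *conf) {
        char errstr[512];
        rd_kafka_t *rk;
        rd_kafka_topic_partition_list_t *topics;

        rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
        if (!rk)
                return NULL;

        /* Serve rebalance events and messages from rd_kafka_consumer_poll() */
        rd_kafka_poll_set_consumer(rk);

        topics = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, "test_topic",
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_subscribe(rk, topics);
        rd_kafka_topic_partition_list_destroy(topics);

        return rk;
}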
Code example #3
File: test-1.c Project: antoniocorreia/cprojects
rd_kafka_topic_t *test_create_producer_topic (rd_kafka_t *rk,
	const char *topic, ...) {
	rd_kafka_topic_t *rkt;
	rd_kafka_topic_conf_t *topic_conf;
	char errstr[512];
	va_list ap;
	const char *name, *val;

	test_conf_init(NULL, &topic_conf, 20);

	va_start(ap, topic);
	while ((name = va_arg(ap, const char *)) &&
	       (val = va_arg(ap, const char *))) {
		if (rd_kafka_topic_conf_set(topic_conf, name, val,
			errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
			TEST_FAIL("Conf failed: %s\n", errstr);
	}
	va_end(ap);

	/* Make sure all replicas are in-sync after producing
	 * so that the consume test won't fail. */
        rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
                                errstr, sizeof(errstr));


	rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
                          rd_kafka_err2str(rd_kafka_errno2err(errno)));

	return rkt;

}
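
A hypothetical call to the helper above: the trailing name/value pairs are read until the NULL sentinel, so the argument list must end with NULL (the property value shown is illustrative).

rd_kafka_topic_t *rkt = test_create_producer_topic(rk, "my_topic",
                                                   "message.timeout.ms", "10000",
                                                   NULL);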
Code example #4
File: test-1.c Project: antoniocorreia/cprojects
/**
 * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up.
 */
void test_wait_exit (int timeout) {
	int r;
        time_t start = time(NULL);

	while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) {
		TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r);
		rd_sleep(1);
	}

	TEST_SAY("%i thread(s) in use by librdkafka\n", r);

	if (r > 0) {
		TEST_FAIL("%i thread(s) still active in librdkafka", r);
	}

        timeout -= (int)(time(NULL) - start);
        if (timeout > 0) {
		TEST_SAY("Waiting %d seconds for all librdkafka memory "
			 "to be released\n", timeout);
                if (rd_kafka_wait_destroyed(timeout * 1000) == -1)
			TEST_FAIL("Not all internal librdkafka "
				  "objects destroyed\n");
	}

}
Code example #5
File: ViewerConfig.cpp Project: Aliceljm1/TightVNC-1
bool ViewerConfig::loadFromStorage(SettingsManager *storage)
{
  bool loadAllOk = true;

  TEST_FAIL(storage->getInt(_T("LogLevel"), &m_logLevel), loadAllOk);
  setLogLevel(m_logLevel);
  TEST_FAIL(storage->getInt(_T("ListenPort"), &m_listenPort), loadAllOk);
  TEST_FAIL(storage->getInt(_T("HistoryLimit"), &m_historyLimit), loadAllOk);
  setHistoryLimit(m_historyLimit);
  //
  // FIXME: The registry entry is named "NoToolbar", yet the code treats it as
  // a "show toolbar" flag: if the flag is set, the toolbar becomes visible.
  //

  TEST_FAIL(storage->getBoolean(_T("NoToolbar"), &m_showToolbar), loadAllOk);

  if (storage->getBoolean(_T("SkipFullScreenPrompt"), &m_promptOnFullscreen)) {
    m_promptOnFullscreen = !m_promptOnFullscreen;
  } else {
    loadAllOk = false;
  }

  return loadAllOk;
}
Code example #6
File: 0004-conf.c Project: simon-rock/librdkafka
/**
 * @brief When rd_kafka_new() succeeds it takes ownership of the config object,
 *        but when it fails the config object remains in the application's custody.
 *        This test makes sure that's the case (preferably run with valgrind).
 */
static void do_test_kafka_new_failures (void) {
        rd_kafka_conf_t *conf;
        rd_kafka_t *rk;
        char errstr[512];

        conf = rd_kafka_conf_new();

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new() failed: %s", errstr);
        rd_kafka_destroy(rk);

        /* Set an erroneous configuration value that is not checked
         * by conf_set() but by rd_kafka_new() */
        conf = rd_kafka_conf_new();
        if (rd_kafka_conf_set(conf, "partition.assignment.strategy",
                              "range,thiswillfail", errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
                TEST_FAIL("%s", errstr);

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        TEST_ASSERT(!rk, "kafka_new() should have failed");

        /* config object should still belong to us,
         * correct the erroneous config and try again. */
        if (rd_kafka_conf_set(conf, "partition.assignment.strategy", NULL,
                              errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
                TEST_FAIL("%s", errstr);

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        TEST_ASSERT(rk, "kafka_new() failed: %s", errstr);
        rd_kafka_destroy(rk);
}
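
The ownership rule the test exercises, sketched as it might appear in application code (function name and error handling are illustrative):

static rd_kafka_t *create_producer_or_cleanup (rd_kafka_conf_t *conf) {
        char errstr[512];
        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                      errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "rd_kafka_new() failed: %s\n", errstr);
                rd_kafka_conf_destroy(conf);  /* on failure conf is still ours */
                return NULL;
        }
        /* on success rk owns conf; the caller must not touch conf again */
        return rk;
}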
Code example #7
static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
			  rd_kafka_topic_partition_list_t *parts, void *opaque) {
	int i;

	TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err));
	test_print_partition_list(parts);

	if (parts->cnt < partitions)
		TEST_FAIL("rebalance_cb: Expected %d partitions, not %d",
			  partitions, parts->cnt);

	switch (err)
	{
	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
		for (i = 0 ; i < parts->cnt ; i++) {
			if (i < partitions)
				parts->elems[i].offset = msgcnt / 2;
			else
				parts->elems[i].offset = RD_KAFKA_OFFSET_END;
		}
		TEST_SAY("Use these offsets:\n");
		test_print_partition_list(parts);
		test_consumer_assign("HL.REBALANCE", rk, parts);
		break;

	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
		test_consumer_unassign("HL.REBALANCE", rk);
		break;

	default:
		TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
	}
}
Code example #8
File: 0039-event.c Project: Whissi/librdkafka
/**
 * Handle delivery reports
 */
static void handle_drs (rd_kafka_event_t *rkev) {
	const rd_kafka_message_t *rkmessage;

	while ((rkmessage = rd_kafka_event_message_next(rkev))) {
		int msgid = *(int *)rkmessage->_private;

		free(rkmessage->_private);

		TEST_SAYL(3,"Got rkmessage %s [%"PRId32"] @ %"PRId64": %s\n",
			  rd_kafka_topic_name(rkmessage->rkt),
			  rkmessage->partition, rkmessage->offset,
			  rd_kafka_err2str(rkmessage->err));
			 

		if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
			TEST_FAIL("Message delivery failed: %s\n",
				  rd_kafka_err2str(rkmessage->err));

		if (msgid != msgid_next) {
			fails++;
			TEST_FAIL("Delivered msg %i, expected %i\n",
				  msgid, msgid_next);
			return;
		}

		msgid_next = msgid+1;
	}
}
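
handle_drs() expects delivery-report events; a sketch of the surrounding poll loop (not part of the excerpt) is shown below, assuming RD_KAFKA_EVENT_DR was enabled on the conf with rd_kafka_conf_set_events().

static void poll_dr_events (rd_kafka_t *rk, int timeout_ms) {
        rd_kafka_queue_t *evq = rd_kafka_queue_get_main(rk);
        rd_kafka_event_t *rkev;

        /* Drain events currently available, dispatching delivery reports */
        while ((rkev = rd_kafka_queue_poll(evq, timeout_ms))) {
                if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_DR)
                        handle_drs(rkev);
                rd_kafka_event_destroy(rkev);
        }

        rd_kafka_queue_destroy(evq);
}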
Code example #9
File: 0013-null-msgs.c Project: Kitter/librdkafka
static void consume_messages (uint64_t testid, const char *topic,
			      int32_t partition, int msg_base, int batch_cnt,
			      int msgcnt) {
	rd_kafka_t *rk;
	rd_kafka_topic_t *rkt;
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *topic_conf;
	int i;

	test_conf_init(&conf, &topic_conf, 20);

	/* Create kafka instance */
	rk = test_create_handle(RD_KAFKA_CONSUMER, conf);

	rkt = rd_kafka_topic_new(rk, topic, topic_conf);
	if (!rkt)
		TEST_FAIL("Failed to create topic: %s\n",
                          rd_kafka_err2str(rd_kafka_last_error()));

	TEST_SAY("Consuming %i messages from partition %i\n",
		 batch_cnt, partition);

	/* Consume messages */
	if (rd_kafka_consume_start(rkt, partition,
			     RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
		TEST_FAIL("consume_start(%i, -%i) failed: %s",
			  (int)partition, batch_cnt,
			  rd_kafka_err2str(rd_kafka_last_error()));

	for (i = 0 ; i < batch_cnt ; i++) {
		rd_kafka_message_t *rkmessage;

		rkmessage = rd_kafka_consume(rkt, partition, tmout_multip(5000));
		if (!rkmessage)
			TEST_FAIL("Failed to consume message %i/%i from "
				  "partition %i: %s",
				  i, batch_cnt, (int)partition,
				  rd_kafka_err2str(rd_kafka_last_error()));
		if (rkmessage->err)
			TEST_FAIL("Consume message %i/%i from partition %i "
				  "has error: %s",
				  i, batch_cnt, (int)partition,
				  rd_kafka_err2str(rkmessage->err));

		verify_consumed_msg(testid, partition, msg_base+i, rkmessage);

		rd_kafka_message_destroy(rkmessage);
	}

	rd_kafka_consume_stop(rkt, partition);

	/* Destroy topic */
	rd_kafka_topic_destroy(rkt);

	/* Destroy rdkafka instance */
	TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
	rd_kafka_destroy(rk);
}
Code example #10
File: 0013-null-msgs.c Project: Kitter/librdkafka
static void verify_consumed_msg0 (const char *func, int line,
				  uint64_t testid, int32_t partition,
				  int msgnum,
				  rd_kafka_message_t *rkmessage) {
	uint64_t in_testid;
	int in_part;
	int in_msgnum;
	char buf[128];

        if (rkmessage->len != 0)
                TEST_FAIL("Incoming message not NULL: %i bytes",
                          (int)rkmessage->len);

	if (rkmessage->key_len +1 >= sizeof(buf))
		TEST_FAIL("Incoming message key too large (%i): "
			  "not sourced by this test",
			  (int)rkmessage->key_len);

	rd_snprintf(buf, sizeof(buf), "%.*s",
		 (int)rkmessage->key_len, (char *)rkmessage->key);

	if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i",
		   &in_testid, &in_part, &in_msgnum) != 3)
		TEST_FAIL("Incorrect key format: %s", buf);

	if (testid != in_testid ||
	    (partition != -1 && partition != in_part) ||
	    (msgnum != -1 && msgnum != in_msgnum) ||
	    (in_msgnum < 0 || in_msgnum > cons_msgs_size))
		goto fail_match;

	if (test_level > 2) {
		TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), "
			 "msg %i/%i did "
			 ", key's: \"%s\"\n",
			 func, line,
			 testid, (int)partition, (int)rkmessage->partition,
			 msgnum, cons_msgs_size, buf);
	}

	if (cons_msgs_cnt == cons_msgs_size) {
		TEST_SAY("Too many messages in cons_msgs (%i) while reading "
			 "message key \"%s\"\n",
			 cons_msgs_cnt, buf);
		verify_consumed_msg_check();
		TEST_FAIL("See above error(s)");
	}

	cons_msgs[cons_msgs_cnt++] = in_msgnum;

	return;

 fail_match:
	TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did "
		  "not match message's key: \"%s\"\n",
		  func, line,
		  testid, (int)partition, msgnum, cons_msgs_size, buf);
}
Code example #11
static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t *partitions,
                         void *opaque) {
        int i;
        char *memberid = rd_kafka_memberid(rk);

        TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
                 rd_kafka_name(rk), memberid, rd_kafka_err2str(err));

        if (memberid)
                free(memberid);

        test_print_partition_list(partitions);

        switch (err) {
        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
                assign_cnt++;

                rd_kafka_assign(rk, partitions);
                mtx_lock(&lock);
                consumers_running = 1;
                mtx_unlock(&lock);

                for (i = 0; i < partitions->cnt && i < MAX_THRD_CNT; ++i) {
                        rd_kafka_topic_partition_t part = partitions->elems[i];
                        rd_kafka_queue_t *rkqu;
                        /* This queue is destroyed in partition_consume(). */
                        rkqu = rd_kafka_queue_get_partition(rk, part.topic,
                                                            part.partition);

                        rd_kafka_queue_forward(rkqu, NULL);
                        tids[part.partition] = spawn_thread(rkqu,
                                                            part.partition);
                }

                rebalanced = 1;

                break;

        case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
                if (assign_cnt == 0)
                        TEST_FAIL("asymetric rebalance_cb");
                assign_cnt--;
                rd_kafka_assign(rk, NULL);
                mtx_lock(&lock);
                consumers_running = 0;
                mtx_unlock(&lock);

                break;

        default:
                TEST_FAIL("rebalance failed: %s", rd_kafka_err2str(err));
                break;
        }
}
Code example #12
File: ViewerConfig.cpp Project: Aliceljm1/TightVNC-1
bool ViewerConfig::saveToStorage(SettingsManager *storage) const
{
  bool saveAllOk = true;

  TEST_FAIL(storage->setInt(_T("LogLevel"),  m_logLevel), saveAllOk);
  TEST_FAIL(storage->setInt(_T("ListenPort"), m_listenPort), saveAllOk);
  TEST_FAIL(storage->setInt(_T("HistoryLimit"), m_historyLimit), saveAllOk);
  TEST_FAIL(storage->setBoolean(_T("NoToolbar"), m_showToolbar), saveAllOk);
  TEST_FAIL(storage->setBoolean(_T("SkipFullScreenPrompt"), !m_promptOnFullscreen), saveAllOk);

  return saveAllOk;
}
Code example #13
static int partition_consume (void *args) {
        part_consume_info_t *info = (part_consume_info_t *)args;
        rd_kafka_queue_t *rkqu = info->rkqu;
        int partition = info->partition;
        int64_t ts_start = test_clock();
        int max_time = (test_session_timeout_ms + 3000) * 1000;
        int running = 1;

        free(args); /* Free the parameter struct dynamically allocated for us */

        while (ts_start + max_time > test_clock() && running &&
               is_consuming()) {
                rd_kafka_message_t *rkmsg;

                rkmsg = rd_kafka_consume_queue(rkqu, 500);

                if (!rkmsg)
                        continue;
                else if (rkmsg->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
                        running = 0;
                else if (rkmsg->err) {
                        mtx_lock(&lock);
                        TEST_FAIL("Message error "
                                  "(at offset %" PRId64 " after "
                                  "%d/%d messages and %dms): %s",
                                  rkmsg->offset, consumed_msg_cnt, exp_msg_cnt,
                                  (int)(test_clock() - ts_start) / 1000,
                                  rd_kafka_message_errstr(rkmsg));
                        mtx_unlock(&lock);
                } else {
                        if (rkmsg->partition != partition) {
                                mtx_lock(&lock);
                                TEST_FAIL("Message consumed has partition %d "
                                          "but we expected partition %d.",
                                          rkmsg->partition, partition);
                                mtx_unlock(&lock);
                        }
                }
                rd_kafka_message_destroy(rkmsg);

                mtx_lock(&lock);
                if (running && ++consumed_msg_cnt >= exp_msg_cnt) {
                        TEST_SAY("All messages consumed\n");
                        running = 0;
                }
                mtx_unlock(&lock);
        }

        rd_kafka_queue_destroy(rkqu);

        return thrd_success;
}
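
The parameter struct and the thread-spawning helper are not part of the excerpt. A plausible reconstruction, based only on how partition_consume() uses its argument (type and field names are assumptions, not the project's actual code), might be:

/* Assumed shape of the parameter struct freed by partition_consume() */
typedef struct part_consume_info_s {
        rd_kafka_queue_t *rkqu;
        int partition;
} part_consume_info_t;

static thrd_t spawn_thread (rd_kafka_queue_t *rkqu, int partition) {
        thrd_t thr;
        part_consume_info_t *info = malloc(sizeof(*info));

        info->rkqu = rkqu;
        info->partition = partition;

        /* partition_consume() takes ownership of info and frees it */
        if (thrd_create(&thr, partition_consume, info) != thrd_success)
                TEST_FAIL("Failed to create consumer thread");
        return thr;
}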
Code example #14
File: 0025-timers.c Project: sfdazsdf/librdkafka
/**
 * Enable statistics with a set interval, make sure the stats callbacks are
 * called within reasonable intervals.
 */
static void do_test_stats_timer (void) {
    rd_kafka_t *rk;
    rd_kafka_conf_t *conf;
    const int exp_calls = 10;
    char errstr[512];
    struct state state;
    test_timing_t t_new;

    memset(&state, 0, sizeof(state));

    state.interval = 600*1000;

    test_conf_init(&conf, NULL, 200);

    test_conf_set(conf, "statistics.interval.ms", "600");
    rd_kafka_conf_set_stats_cb(conf, stats_cb);
    rd_kafka_conf_set_opaque(conf, &state);


    TIMING_START(&t_new, "rd_kafka_new()");
    rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
    TIMING_STOP(&t_new);
    if (!rk)
        TEST_FAIL("Failed to create instance: %s\n", errstr);

    TEST_SAY("Starting wait loop for %d expected stats_cb calls "
             "with an interval of %dms\n",
             exp_calls, state.interval/1000);


    while (state.calls < exp_calls) {
        test_timing_t t_poll;
        TIMING_START(&t_poll, "rd_kafka_poll()");
        rd_kafka_poll(rk, 100);
        TIMING_STOP(&t_poll);

        if (TIMING_DURATION(&t_poll) > 150*1000)
            TEST_WARN("rd_kafka_poll(rk,100) "
                      "took more than 50%% extra\n");
    }

    rd_kafka_destroy(rk);

    if (state.calls > exp_calls)
        TEST_SAY("Got more calls than expected: %d > %d\n",
                 state.calls, exp_calls);

    if (state.fails)
        TEST_FAIL("%d/%d intervals failed\n", state.fails, state.calls);
    else
        TEST_SAY("All %d intervals okay\n", state.calls);
}
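
The stats callback itself is not shown. A sketch compatible with the test above follows; the ts_last field and the 25% jitter tolerance are assumptions, and test_clock() is the microsecond clock from the librdkafka test harness.

/* Assumed shape of the state struct referenced by the test */
struct state {
        int interval;     /* expected interval between callbacks, in microseconds */
        int64_t ts_last;  /* timestamp of the previous callback */
        int calls;
        int fails;
};

static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
        struct state *state = opaque;
        int64_t now = test_clock();

        if (state->ts_last) {
                int64_t delta = now - state->ts_last;
                /* allow 25% jitter around the configured interval */
                if (delta < state->interval * 3 / 4 ||
                    delta > state->interval * 5 / 4)
                        state->fails++;
        }
        state->ts_last = now;
        state->calls++;

        return 0;   /* 0: librdkafka keeps ownership of the json buffer */
}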
Code example #15
File: write.c Project: lebauce/tsumufs
int test_multiple_writes(void)
{
    const char *output = "Zorba!\n";
    int maxcount = 5;
    int fd = open(g_testfilepath, O_CREAT|O_RDWR, 0644);
    int i = 0;
    int total = 0;
    int result = 0;
    int old_errno = errno;

    TEST_START();

    if (fd < 0) {
        TEST_FAIL();
        TEST_COMPLETE_FAIL("Unable to create file %s\n"
                           "Errno %d: %s\n",
                           g_testfilepath, old_errno, strerror(old_errno));
    }
    TEST_OK();

    for (i=0; i<maxcount; i++) {
        total = 0;
        result = 0;

        while (total < strlen(output)) {
            /* Write only the remaining bytes, not the full string again */
            result = write(fd, output + total, strlen(output) - total);
            old_errno = errno;

            if (result < 0) {
                close(fd);

                TEST_FAIL();
                TEST_COMPLETE_FAIL("Unable to write to file %s\n"
                                   "Errno %d: %s\n",
                                   g_testfilepath, old_errno,
                                   strerror(old_errno));
            }

            total += result;
        }
    }
    TEST_OK();

    if (close(fd) < 0) {
        TEST_FAIL();
        TEST_COMPLETE_FAIL("Unable to close %s\nErrno %d: %s\n",
                           g_testfilepath, errno, strerror(errno));
    }
    TEST_OK();

    TEST_COMPLETE_OK();
}
Code example #16
/**
 * Wait for REBALANCE ASSIGN event and perform assignment
 *
 * Va-args are \p topic_cnt tuples of the expected assignment:
 *   { const char *topic, int partition_cnt }
 */
static void await_assignment (const char *pfx, rd_kafka_t *rk,
			      rd_kafka_queue_t *queue,
			      int topic_cnt, ...) {
	rd_kafka_event_t *rkev;
	rd_kafka_topic_partition_list_t *tps;
	int i;
	va_list ap;
	int fails = 0;
	int exp_part_cnt = 0;

	TEST_SAY("%s: waiting for assignment\n", pfx);
	rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
	if (!rkev)
		TEST_FAIL("timed out waiting for assignment");
	TEST_ASSERT(rd_kafka_event_error(rkev) ==
		    RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
		    "expected ASSIGN, got %s",
		    rd_kafka_err2str(rd_kafka_event_error(rkev)));
	tps = rd_kafka_event_topic_partition_list(rkev);

	TEST_SAY("%s: assignment:\n", pfx);
	test_print_partition_list(tps);

	va_start(ap, topic_cnt);
	for (i = 0 ; i < topic_cnt ; i++) {
		const char *topic = va_arg(ap, const char *);
		int partition_cnt = va_arg(ap, int);
		int p;
		TEST_SAY("%s: expecting %s with %d partitions\n",
			 pfx, topic, partition_cnt);
		for (p = 0 ; p < partition_cnt ; p++) {
			if (!rd_kafka_topic_partition_list_find(tps, topic, p)) {
				TEST_FAIL_LATER("%s: expected partition %s [%d] "
						"not found in assginment",
						pfx, topic, p);
				fails++;
			}
		}
		exp_part_cnt += partition_cnt;
	}
	va_end(ap);

	TEST_ASSERT(exp_part_cnt == tps->cnt,
		    "expected assignment of %d partitions, got %d",
		    exp_part_cnt, tps->cnt);

	if (fails > 0)
		TEST_FAIL("%s: assignment mismatch: see above", pfx);

	rd_kafka_assign(rk, tps);
	rd_kafka_event_destroy(rkev);
}
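
A hypothetical invocation of the helper above: wait for the ASSIGN event and verify that two topics with the given partition counts were assigned (names and counts are placeholders).

await_assignment("initial assignment", rk, queue, 2,
                 "topic_a", 4,
                 "topic_b", 2);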
Code example #17
/**
 * Delivery reported callback.
 * Called for each message once to signal its delivery status.
 */
static void dr_cb (rd_kafka_t *rk, void *payload, size_t len,
		   rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) {

	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		TEST_FAIL("Message delivery failed: %s\n",
			  rd_kafka_err2str(err));

	if (prod_msg_remains == 0)
		TEST_FAIL("Too many messages delivered (prod_msg_remains %i)",
			  prod_msg_remains);

	prod_msg_remains--;
}
Code example #18
File: write.c Project: lebauce/tsumufs
int test_single_write(void)
{
    const char *output = "Zorba!\n";
    int fd = open(g_testfilepath, O_CREAT|O_RDWR, 0644);
    int total = 0;
    int result = 0;
    int old_errno = errno;

    TEST_START();

    if (fd < 0) {
        TEST_FAIL();
        TEST_COMPLETE_FAIL("Unable to open %s in %s\n"
                           "Errno %d: %s\n",
                           g_testfilepath, __func__,
                           old_errno, strerror(old_errno));
    }
    TEST_OK();

    while (total < strlen(output)) {
        /* Write only the remaining bytes, not the full string again */
        result = write(fd, output + total, strlen(output) - total);

        if (result < 0) {
            old_errno = errno;
            TEST_FAIL();
            TEST_COMPLETE_FAIL("Unable to write to %s in %s\n"
                               "Errno %d: %s\n",
                               g_testfilepath, __func__,
                               old_errno, strerror(old_errno));

            // Don't care about this output -- we're going to die soon, anyway.
            close(fd);
        }

        total += result;
    }
    TEST_OK();

    if (close(fd) < 0) {
        old_errno = errno;

        TEST_FAIL();
        TEST_COMPLETE_FAIL("Unable to close %s in %s\n"
                           "Errno %d: %s\n",
                           g_testfilepath, __func__,
                           old_errno, strerror(old_errno));
    }
    TEST_OK();

    TEST_COMPLETE_OK();
}
Code example #19
File: test-1.c Project: antoniocorreia/cprojects
void test_dr_cb (rd_kafka_t *rk, void *payload, size_t len,
                 rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) {
	int *remainsp = msg_opaque;

	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		TEST_FAIL("Message delivery failed: %s\n",
			  rd_kafka_err2str(err));

	if (*remainsp == 0)
		TEST_FAIL("Too many messages delivered (remains %i)",
			  *remainsp);

	(*remainsp)--;
}
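
test_dr_cb() is the legacy delivery-report callback; a sketch of how it is typically hooked up follows. It assumes rd_kafka_conf_set_dr_cb(conf, test_dr_cb) was called before creating rk, and that the msg_opaque passed to rd_kafka_produce() points at a "remaining messages" counter. The payload and function name are illustrative.

static void produce_one_with_dr (rd_kafka_t *rk, rd_kafka_topic_t *rkt) {
        /* In real code 'remains' would outlive this call; it is local here
         * only to keep the sketch short. */
        int remains = 0;
        char payload[] = "hello";

        remains++;
        if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                             payload, sizeof(payload) - 1, NULL, 0,
                             &remains) == -1)
                TEST_FAIL("produce failed: %s",
                          rd_kafka_err2str(rd_kafka_last_error()));

        /* Serve delivery reports until the message is accounted for */
        while (remains > 0)
                rd_kafka_poll(rk, 100);
}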
Code example #20
File: 0011-produce_batch.c Project: k29/librdkafka
/**
 * Delivery reported callback.
 * Called for each message once to signal its delivery status.
 */
static void dr_partitioner_cb (rd_kafka_t *rk, void *payload, size_t len,
                               rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) {
    int msgid = *(int *)msg_opaque;

    free(msg_opaque);

    if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
        TEST_FAIL("Message delivery failed: %s\n",
                  rd_kafka_err2str(err));

    if (msg_remains <= 0)
        TEST_FAIL("Too many message dr_cb callback calls "
                  "(at msgid #%i)\n", msgid);
    msg_remains--;
}
Code example #21
char *test_HandlerParser_execute() 
{
    TEST("5a9a6354-fc33-4468-8ccd-5d736737dad7 2:12, The body", 1, "T1");
    TEST("5a9a6354-fc33-4468-8ccd-5d736737dad7 11:0 1 2 3 4 5, The body", 6, "T2");
    TEST("5a9a6354-fc33-4468-8ccd-5d736737dad7 5:12 34, Another body.", 2, "T3");
    TEST("5a9a6354fc3344688ccd5d736737dad7 5:12 34, ", 2, "EMPTY");

    TEST_FAIL("this.is.wrong 5:12 34, ", 2, "BAD UUID");
    TEST_FAIL("5a9a6354fc3344688ccd5d736737dad7 10:12 34, ", 2, "TOO LONG NETSTRING");
    TEST_FAIL("5a9a6354fc3344688ccd5d736737dad7 3:12 34, ", 2, "TOO SHORT NETSTRING");
    TEST_FAIL("5a9a6354fc3344688ccd5d736737dad7 5:12 34,", 2, "NO TRAILING SPACE");
    TEST_FAIL(" 5:12 34,", 2, "NO UUID");

    return NULL;
}
Code example #22
File: test-1.c Project: antoniocorreia/cprojects
static int run_test (const char *testname,
                     int (*test_main) (int, char **),
                     int argc, char **argv) {
        int r;

        if (tests_run_in_parallel) {
#ifdef _MSC_VER
                TEST_FAIL("Parallel runs not supported on this platform, yet\n");
#else
                pthread_t thr;
                struct run_args *run_args = calloc(1, sizeof(*run_args));
                run_args->testname = testname;
                run_args->test_main = test_main;
                run_args->argc = argc;
                run_args->argv = argv;

                pthread_mutex_lock(&test_lock);
                tests_running_cnt++;
                pthread_mutex_unlock(&test_lock);

                r = pthread_create(&thr, NULL, run_test_from_thread, run_args);
                if (r != 0) {
                        pthread_mutex_lock(&test_lock);
                        tests_running_cnt--;
                        pthread_mutex_unlock(&test_lock);

                        TEST_FAIL("Failed to start thread for test %s: %s\n",
                                  testname, strerror(r));
                }
#endif
        } else {
		struct run_args run_args = { .testname = testname,
					     .test_main = test_main,
					     .argc = argc,
					     .argv = argv };
		
		tests_running_cnt++;
		r =  run_test0(&run_args);
		tests_running_cnt--;
		
        /* Wait for everything to be cleaned up since broker
         * destroys are handled in its own thread. */
        test_wait_exit(5);

		test_curr = NULL;
        }
        return r;
}
Code example #23
File: MockIO.c Project: DeadlyEmbrace68/tddec-code
ioData IO_Read(ioAddress offset)
{
  Mock.IO_Read_CallCount++;
  if (Mock.IO_Read_CallCount > Mock.IO_Read_CallsExpected)
  {
    TEST_FAIL("Function 'IO_Read' called more times than expected");
  }

  if (Mock.IO_Read_Expected_offset != Mock.IO_Read_Expected_offset_Tail)
  {
    ioAddress* p_expected = Mock.IO_Read_Expected_offset;
    Mock.IO_Read_Expected_offset++;
    TEST_ASSERT_EQUAL_MEMORY_MESSAGE(
        (void*)p_expected, (void*)&(offset), sizeof(ioAddress),
        "Function 'IO_Read' called with unexpected value for argument 'offset'.");
  }

  if (Mock.IO_Read_Return != Mock.IO_Read_Return_Tail)
  {
    ioData toReturn = *Mock.IO_Read_Return;
    Mock.IO_Read_Return++;
    return toReturn;
  }
  else
  {
    return *(Mock.IO_Read_Return_Tail - 1);
  }
}
Code example #24
static void do_nonexist_commit (void) {
	rd_kafka_t *rk;
	char group_id[64];
	rd_kafka_conf_t *conf;
	rd_kafka_topic_conf_t *tconf;
	rd_kafka_topic_partition_list_t *offsets;
	const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1);
	rd_kafka_resp_err_t err;

	test_conf_init(&conf, &tconf, 20);
	test_str_id_generate(group_id, sizeof(group_id));

	TEST_SAY(_C_MAG "[ do_nonexist_commit group.id %s ]\n", group_id);

	rk = test_create_consumer(group_id, NULL, conf, tconf, NULL);

	TEST_SAY("Try nonexist commit\n");
	offsets = rd_kafka_topic_partition_list_new(2);
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123;
	rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456;

	err = rd_kafka_commit_queue(rk, offsets, NULL,
				    nonexist_offset_commit_cb, NULL);
	TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err));
	if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
		TEST_FAIL("commit() should succeed, not: %s",
			  rd_kafka_err2str(err));

	rd_kafka_topic_partition_list_destroy(offsets);

	test_consumer_close(rk);

	rd_kafka_destroy(rk);
}
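
The commit callback passed to rd_kafka_commit_queue() is not in the excerpt. Its signature is fixed by the API; the body below is an assumed logging-only sketch, not the test's actual callback.

static void nonexist_offset_commit_cb (rd_kafka_t *rk,
                                       rd_kafka_resp_err_t err,
                                       rd_kafka_topic_partition_list_t *offsets,
                                       void *opaque) {
        int i;

        TEST_SAY("offset commit result: %s\n", rd_kafka_err2str(err));

        /* Report the per-partition outcome of the commit */
        for (i = 0 ; i < offsets->cnt ; i++)
                TEST_SAY("  %s [%"PRId32"] @ %"PRId64": %s\n",
                         offsets->elems[i].topic,
                         offsets->elems[i].partition,
                         offsets->elems[i].offset,
                         rd_kafka_err2str(offsets->elems[i].err));
}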
Code example #25
int main()
{
    print_assert_exception([]{ TEST_FAIL("Good"); });

    TEST_ASSERT_TRUE(true);
    TEST_ASSERT_TRUE_MSG(true, "Bad");
    
    std::string a("Hello world");
    std::string b("Hello, world");

    print_assert_exception([&]{ TEST_ASSERT_TRUE(a == b); });
    print_assert_exception([&]{ TEST_ASSERT_TRUE_MSG(a == b, "Good"); });

    TEST_ASSERT_FALSE(false);
    TEST_ASSERT_FALSE_MSG(false, "Bad");
    
    print_assert_exception([&]{ TEST_ASSERT_FALSE(a != b); });
    print_assert_exception([&]{ TEST_ASSERT_FALSE_MSG(a != b, "Good"); });

    TEST_ASSERT_EQUALS(std::string, a, "Hello world");
    TEST_ASSERT_EQUALS_MSG(std::string, a, "Hello world", "Bad");

    print_assert_exception([&]{ TEST_ASSERT_EQUALS(std::string, a, b); });
    print_assert_exception([&]{ TEST_ASSERT_EQUALS_MSG(std::string, a, b, "Good"); });

    return EXIT_SUCCESS;
}
Code example #26
static void verify_consumed_msg_check0 (const char *func, int line) {
	int i;
	int fails = 0;

	if (cons_msgs_cnt < cons_msgs_size) {
		TEST_SAY("Missing %i messages in consumer\n",
			 cons_msgs_size - cons_msgs_cnt);
		fails++;
	}

	qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);

	for (i = 0 ; i < cons_msgs_size ; i++) {
		if (cons_msgs[i] != i) {
			TEST_SAY("Consumed message #%i is wrong, "
				 "expected #%i\n",
				 cons_msgs[i], i);
			fails++;
		}
	}

	if (fails)
		TEST_FAIL("See above error(s)");

	verify_consumed_msg_reset(0);
}
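
The qsort() comparator is not shown; the assumed int_cmp is the usual ascending integer comparison.

static int int_cmp (const void *a, const void *b) {
        return *(const int *)a - *(const int *)b;
}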
Code example #27
File: test.c Project: lindsay-show/librdkafka
void test_conf_set (rd_kafka_conf_t *conf, const char *name, const char *val) {
        char errstr[512];
        if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
                TEST_FAIL("Failed to set config \"%s\"=\"%s\": %s\n",
                          name, val, errstr);
}
Code example #28
File: MockIO.c Project: DeadlyEmbrace68/tddec-code
void IO_Write(ioAddress offset, ioData data)
{

  Mock.IO_Write_CallCount++;
  if (Mock.IO_Write_CallCount > Mock.IO_Write_CallsExpected)
  {
    TEST_FAIL("Function 'IO_Write' called more times than expected");
  }

  if (Mock.IO_Write_Expected_offset != Mock.IO_Write_Expected_offset_Tail)
  {
    ioAddress* p_expected = Mock.IO_Write_Expected_offset;
    Mock.IO_Write_Expected_offset++;
    TEST_ASSERT_EQUAL_MEMORY_MESSAGE((void*)p_expected, (void*)&(offset), sizeof(ioAddress), "Function 'IO_Write' called with unexpected value for argument 'offset'.");

  }

  if (Mock.IO_Write_Expected_data != Mock.IO_Write_Expected_data_Tail)
  {
    ioData* p_expected = Mock.IO_Write_Expected_data;
    Mock.IO_Write_Expected_data++;
    TEST_ASSERT_EQUAL_MEMORY_MESSAGE((void*)p_expected, (void*)&(data), sizeof(ioData), "Function 'IO_Write' called with unexpected value for argument 'data'.");

  }
}
Code example #29
static void
baddie(void *a) {
  int *addr = (int *) 0xdeadbea7;
  TEST_DIAG("%d", *addr);
  TEST_FAIL("Baddie wasn't killed");
  while(1);
}
Code example #30
File: test_7.3.10.c Project: h-s-c/libKD
KDint KD_APIENTRY kdMain(KDint argc, const KDchar *const *argv)
{
    test_once_count = kdAtomicIntCreateVEN(0);
    KDThread *threads[THREAD_COUNT] = {KD_NULL};
    for(KDint i = 0; i < THREAD_COUNT; i++)
    {
        threads[i] = kdThreadCreate(KD_NULL, test_func, KD_NULL);
        if(threads[i] == KD_NULL)
        {
            if(kdGetError() == KD_ENOSYS)
            {
                return 0;
            }
            TEST_FAIL();
        }
    }
    for(KDint k = 0; k < THREAD_COUNT; k++)
    {
        kdThreadJoin(threads[k], KD_NULL);
    }

    TEST_EQ(kdAtomicIntLoadVEN(test_once_count), 1);

    kdAtomicIntFreeVEN(test_once_count);
    return 0;
}