X-Git-Url: https://git.octo.it/?p=collectd.git;a=blobdiff_plain;f=src%2Fwrite_kafka.c;h=2baaf0e5f1b371dddea530e0acf7073211dad665;hp=dba09c03de4ea049437a0f3de33cd5a141c8ffc5;hb=1159cb5d383c55a80a0db100b8f7aadcf44740a5;hpb=79963d13c1884d1d92667cc502ad20758b084a12

diff --git a/src/write_kafka.c b/src/write_kafka.c
index dba09c03..2baaf0e5 100644
--- a/src/write_kafka.c
+++ b/src/write_kafka.c
@@ -31,6 +31,7 @@
 #include "utils_cmd_putval.h"
 #include "utils_format_graphite.h"
 #include "utils_format_json.h"
+#include "utils_random.h"
 
 #include 
 #include 
@@ -76,6 +77,14 @@ static void kafka_log(const rd_kafka_t *rkt, int level, const char *fac,
 }
 #endif
 
+static rd_kafka_resp_err_t kafka_error() {
+#if RD_KAFKA_VERSION >= 0x000b00ff
+  return rd_kafka_last_error();
+#else
+  return rd_kafka_errno2err(errno);
+#endif
+}
+
 static uint32_t kafka_hash(const char *keydata, size_t keylen) {
   uint32_t hash = 5381;
   for (; keylen > 0; keylen--)
@@ -88,7 +97,7 @@ static uint32_t kafka_hash(const char *keydata, size_t keylen) {
 #define KAFKA_RANDOM_KEY_BUFFER                                               \
   (char[KAFKA_RANDOM_KEY_SIZE]) { "" }
 static char *kafka_random_key(char buffer[static KAFKA_RANDOM_KEY_SIZE]) {
-  ssnprintf(buffer, KAFKA_RANDOM_KEY_SIZE, "%08lX", (unsigned long)mrand48());
+  snprintf(buffer, KAFKA_RANDOM_KEY_SIZE, "%08" PRIX32, cdrand_u());
   return buffer;
 }
 
@@ -112,12 +121,12 @@ static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
   rd_kafka_topic_conf_t *topic_conf;
 
   if (ctx->kafka != NULL && ctx->topic != NULL)
-    return (0);
+    return 0;
 
   if (ctx->kafka == NULL) {
     if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
       ERROR("write_kafka plugin: cannot duplicate kafka config");
-      return (1);
+      return 1;
     }
 
     if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errbuf,
@@ -146,7 +155,7 @@ static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
     if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
                                          topic_conf)) == NULL) {
       ERROR("write_kafka plugin: cannot create topic : %s\n",
-            rd_kafka_err2str(rd_kafka_errno2err(errno)));
+            rd_kafka_err2str(kafka_error()));
       return errno;
     }
 
@@ -157,7 +166,7 @@ static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
                  rd_kafka_topic_name(ctx->topic));
   }
 
-  return (0);
+  return 0;
 
 } /* }}} int kafka_handle */
 
@@ -185,9 +194,10 @@ static int kafka_write(const data_set_t *ds, /* {{{ */
 
   switch (ctx->format) {
   case KAFKA_FORMAT_COMMAND:
-    status = create_putval(buffer, sizeof(buffer), ds, vl);
+    status = cmd_create_putval(buffer, sizeof(buffer), ds, vl);
     if (status != 0) {
-      ERROR("write_kafka plugin: create_putval failed with status %i.", status);
+      ERROR("write_kafka plugin: cmd_create_putval failed with status %i.",
+            status);
       return status;
     }
     blen = strlen(buffer);
@@ -369,6 +379,10 @@ static void kafka_config_topic(rd_kafka_conf_t *conf,
       status = cf_util_get_flag(child, &tctx->graphite_flags,
                                 GRAPHITE_ALWAYS_APPEND_DS);
 
+    } else if (strcasecmp("GraphitePreserveSeparator", child->key) == 0) {
+      status = cf_util_get_flag(child, &tctx->graphite_flags,
+                                GRAPHITE_PRESERVE_SEPARATOR);
+
     } else if (strcasecmp("GraphitePrefix", child->key) == 0) {
       status = cf_util_get_string(child, &tctx->prefix);
     } else if (strcasecmp("GraphitePostfix", child->key) == 0) {
@@ -392,12 +406,14 @@ static void kafka_config_topic(rd_kafka_conf_t *conf,
     rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
     rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);
 
-    ssnprintf(callback_name, sizeof(callback_name), "write_kafka/%s",
-              tctx->topic_name);
-
-    user_data_t ud = {.data = tctx, .free_func = kafka_topic_context_free};
+    snprintf(callback_name, sizeof(callback_name), "write_kafka/%s",
+             tctx->topic_name);
 
-    status = plugin_register_write(callback_name, kafka_write, &ud);
+    status = plugin_register_write(
+        callback_name, kafka_write,
+        &(user_data_t){
+            .data = tctx, .free_func = kafka_topic_context_free,
+        });
     if (status != 0) {
       WARNING("write_kafka plugin: plugin_register_write (\"%s\") "
               "failed with status %i.",
@@ -473,7 +489,7 @@ static int kafka_config(oconfig_item_t *ci) /* {{{ */
   }
   if (conf != NULL)
     rd_kafka_conf_destroy(conf);
-  return (0);
+  return 0;
 errout:
   if (conf != NULL)
     rd_kafka_conf_destroy(conf);
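
Note: the hunk at line 379 adds a GraphitePreserveSeparator topic option, parsed with cf_util_get_flag() like the other Graphite* flags, so it should act as a per-topic boolean in collectd.conf. A minimal sketch of how such a block might look, assuming the usual write_kafka layout; the broker address and topic name are placeholders, not taken from this commit:

  <Plugin write_kafka>
    Property "metadata.broker.list" "localhost:9092"
    <Topic "collectd_metrics">
      Format Graphite
      GraphitePreserveSeparator true
    </Topic>
  </Plugin>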