X-Git-Url: https://git.octo.it/?a=blobdiff_plain;f=src%2Fwrite_kafka.c;h=6b5bc398dcb8cbf3e2c75353e96faf1f926db230;hb=71bbf854d3e6f8c6d6c3582527263bb01a3a7e04;hp=97db42657055999fa114cd1c70ca9f882ba4325c;hpb=e560f215eebf564b4b5f5005059bf3ad56f3cf29;p=collectd.git

diff --git a/src/write_kafka.c b/src/write_kafka.c
index 97db4265..6b5bc398 100644
--- a/src/write_kafka.c
+++ b/src/write_kafka.c
@@ -1,19 +1,24 @@
 /**
  * collectd - src/write_kafka.c
- *
  * Copyright (C) 2014 Pierre-Yves Ritschard
  *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER
- * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
- * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  *
  * Authors:
  *   Pierre-Yves Ritschard
@@ -23,38 +28,39 @@
 #include "plugin.h"
 #include "common.h"
 #include "configfile.h"
-#include "utils_cache.h"
 #include "utils_cmd_putval.h"
 #include "utils_format_graphite.h"
 #include "utils_format_json.h"
-#include "utils_crc32.h"
-#include
+#include
 #include
-#include
-#include
+#include
 
 struct kafka_topic_context {
+#define KAFKA_FORMAT_JSON      0
 #define KAFKA_FORMAT_COMMAND   1
 #define KAFKA_FORMAT_GRAPHITE  2
-#define KAFKA_FORMAT_JSON      3
-    u_int8_t                 format;
+    uint8_t                  format;
     unsigned int             graphite_flags;
     _Bool                    store_rates;
     rd_kafka_topic_conf_t   *conf;
     rd_kafka_topic_t        *topic;
+    rd_kafka_conf_t         *kafka_conf;
     rd_kafka_t              *kafka;
-    int                      has_key;
-    u_int32_t                key;
+    char                    *key;
     char                    *prefix;
    char                     *postfix;
     char                     escape_char;
     char                    *topic_name;
+    pthread_mutex_t          lock;
 };
 
+static int kafka_handle(struct kafka_topic_context *);
 static int kafka_write(const data_set_t *, const value_list_t *, user_data_t *);
 static int32_t kafka_partition(const rd_kafka_topic_t *, const void *, size_t,
                                int32_t, void *, void *);
+
+#if defined HAVE_LIBRDKAFKA_LOGGER || defined HAVE_LIBRDKAFKA_LOG_CB
 static void kafka_log(const rd_kafka_t *, int, const char *, const char *);
 
 static void kafka_log(const rd_kafka_t *rkt, int level,
@@ -62,31 +68,106 @@ static void kafka_log(const rd_kafka_t *rkt, int level,
 {
     plugin_log(level, "%s", msg);
 }
+#endif
+
+static uint32_t kafka_hash(const char *keydata, size_t keylen)
+{
+    uint32_t hash = 5381;
+    for (; keylen > 0; keylen--)
+        hash = ((hash << 5) + hash) + keydata[keylen - 1];
+    return hash;
+}
 
 static int32_t kafka_partition(const rd_kafka_topic_t *rkt,
                                const void *keydata, size_t keylen,
                                int32_t partition_cnt, void *p, void *m)
 {
-    u_int32_t key = *((u_int32_t *)keydata );
+    uint32_t key = kafka_hash(keydata, keylen);
+    uint32_t target = key % partition_cnt;
+    int32_t  i = partition_cnt;
 
-    return key % partition_cnt;
+    while (--i > 0 && !rd_kafka_topic_partition_available(rkt, target)) {
+        target = (target + 1) % partition_cnt;
+    }
+    return target;
 }
 
+static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
+{
+    char                         errbuf[1024];
+    rd_kafka_conf_t             *conf;
+    rd_kafka_topic_conf_t       *topic_conf;
+
+    if (ctx->kafka != NULL && ctx->topic != NULL)
+        return(0);
+
+    if (ctx->kafka == NULL) {
+        if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
+            ERROR("write_kafka plugin: cannot duplicate kafka config");
+            return(1);
+        }
+
+        if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+                                       errbuf, sizeof(errbuf))) == NULL) {
+            ERROR("write_kafka plugin: cannot create kafka handle.");
+            return 1;
+        }
+
+        rd_kafka_conf_destroy(ctx->kafka_conf);
+        ctx->kafka_conf = NULL;
+
+        INFO ("write_kafka plugin: created KAFKA handle : %s", rd_kafka_name(ctx->kafka));
+
+#if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB)
+        rd_kafka_set_logger(ctx->kafka, kafka_log);
+#endif
+    }
+
+    if (ctx->topic == NULL ) {
+        if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
+            ERROR("write_kafka plugin: cannot duplicate kafka topic config");
+            return 1;
+        }
+
+        if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
+                                             topic_conf)) == NULL) {
+            ERROR("write_kafka plugin: cannot create topic : %s\n",
+                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
+            return errno;
+        }
+
+        rd_kafka_topic_conf_destroy(ctx->conf);
+        ctx->conf = NULL;
+
+        INFO ("write_kafka plugin: handle created for topic : %s", rd_kafka_topic_name(ctx->topic));
+    }
+
+    return(0);
+
+} /* }}} int kafka_handle */
+
 static int kafka_write(const data_set_t *ds, /* {{{ */
-        const value_list_t *vl,
-        user_data_t *ud)
+                       const value_list_t *vl,
+                       user_data_t *ud)
 {
-    int status = 0;
-    u_int32_t key;
-    char buffer[8192];
-    size_t bfree = sizeof(buffer);
-    size_t bfill = 0;
-    size_t blen = 0;
-    struct kafka_topic_context *ctx = ud->data;
+    int status = 0;
+    void *key;
+    size_t keylen = 0;
+    char buffer[8192];
+    size_t bfree = sizeof(buffer);
+    size_t bfill = 0;
+    size_t blen = 0;
+    struct kafka_topic_context *ctx = ud->data;
 
     if ((ds == NULL) || (vl == NULL) || (ctx == NULL))
         return EINVAL;
 
+    pthread_mutex_lock (&ctx->lock);
+    status = kafka_handle(ctx);
+    pthread_mutex_unlock (&ctx->lock);
+    if( status != 0 )
+        return status;
+
     bzero(buffer, sizeof(buffer));
 
     switch (ctx->format) {
@@ -100,7 +181,6 @@ static int kafka_write(const data_set_t *ds, /* {{{ */
         blen = strlen(buffer);
         break;
     case KAFKA_FORMAT_JSON:
-
         format_json_initialize(buffer, &bfill, &bfree);
         format_json_value_list(buffer, &bfill, &bfree, ds, vl,
                                ctx->store_rates);
@@ -123,27 +203,25 @@ static int kafka_write(const data_set_t *ds, /* {{{ */
         return -1;
     }
 
-    /*
-     * We partition our stream by metric name
-     */
-    if (ctx->has_key)
-        key = ctx->key;
+    key = ctx->key;
+    if (key != NULL)
+        keylen = strlen (key);
     else
-        key = rand();
+        keylen = 0;
 
     rd_kafka_produce(ctx->topic, RD_KAFKA_PARTITION_UA,
                      RD_KAFKA_MSG_F_COPY, buffer, blen,
-                     &key, sizeof(key), NULL);
+                     key, keylen, NULL);
 
-    return status;
+    return status;
 } /* }}} int kafka_write */
 
 static void kafka_topic_context_free(void *p) /* {{{ */
 {
-    struct kafka_topic_context *ctx = p;
+    struct kafka_topic_context *ctx = p;
 
-    if (ctx == NULL)
-        return;
+    if (ctx == NULL)
+        return;
 
     if (ctx->topic_name != NULL)
         sfree(ctx->topic_name);
@@ -151,6 +229,10 @@ static void kafka_topic_context_free(void *p) /* {{{ */
         rd_kafka_topic_destroy(ctx->topic);
     if (ctx->conf != NULL)
         rd_kafka_topic_conf_destroy(ctx->conf);
+    if (ctx->kafka_conf != NULL)
+        rd_kafka_conf_destroy(ctx->kafka_conf);
+    if (ctx->kafka != NULL)
+        rd_kafka_destroy(ctx->kafka);
 
     sfree(ctx);
 } /* }}} void kafka_topic_context_free */
@@ -160,33 +242,36 @@ static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{
     int                          status;
     int                          i;
     struct kafka_topic_context  *tctx;
-    char                        *key;
+    char                        *key = NULL;
     char                        *val;
     char                         callback_name[DATA_MAX_NAME_LEN];
     char                         errbuf[1024];
     user_data_t                  ud;
-    oconfig_item_t              *child;
+    oconfig_item_t              *child;
     rd_kafka_conf_res_t          ret;
 
-    if ((tctx = calloc(1, sizeof (*tctx))) == NULL) {
-        ERROR ("write_kafka plugin: calloc failed.");
+    if ((tctx = calloc(1, sizeof (*tctx))) == NULL) {
+        ERROR ("write_kafka plugin: calloc failed.");
         return;
-    }
+    }
 
     tctx->escape_char = '.';
     tctx->store_rates = 1;
+    tctx->format = KAFKA_FORMAT_JSON;
+    tctx->key = NULL;
 
-    rd_kafka_conf_set_log_cb(conf, kafka_log);
-    if ((tctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
-                                    errbuf, sizeof(errbuf))) == NULL) {
+    if ((tctx->kafka_conf = rd_kafka_conf_dup(conf)) == NULL) {
         sfree(tctx);
-        ERROR("write_kafka plugin: cannot create kafka handle.");
+        ERROR("write_kafka plugin: cannot allocate memory for kafka config");
        return;
     }
-    conf = NULL;
+
+#ifdef HAVE_LIBRDKAFKA_LOG_CB
+    rd_kafka_conf_set_log_cb(tctx->kafka_conf, kafka_log);
+#endif
 
     if ((tctx->conf = rd_kafka_topic_conf_new()) == NULL) {
-        rd_kafka_destroy(tctx->kafka);
+        rd_kafka_conf_destroy(tctx->kafka_conf);
         sfree(tctx);
         ERROR ("write_kafka plugin: cannot create topic configuration.");
         return;
@@ -207,48 +292,37 @@ static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{
         goto errout;
     }
 
-    for (i = 0; i < ci->children_num; i++) {
-        /*
-         * The code here could be simplified but makes room
-         * for easy adding of new options later on.
-         */
-        child = &ci->children[i];
-        status = 0;
-
-        if (strcasecmp ("Property", child->key) == 0) {
-            if (child->values_num != 2) {
-                WARNING("kafka properties need both a key and a value.");
+    for (i = 0; i < ci->children_num; i++) {
+        /*
+         * The code here could be simplified but makes room
+         * for easy adding of new options later on.
+         */
+        child = &ci->children[i];
+        status = 0;
+
+        if (strcasecmp ("Property", child->key) == 0) {
+            if (child->values_num != 2) {
+                WARNING("kafka properties need both a key and a value.");
                 goto errout;
-            }
-            if (child->values[0].type != OCONFIG_TYPE_STRING ||
-                child->values[1].type != OCONFIG_TYPE_STRING) {
-                WARNING("kafka properties needs string arguments.");
+            }
+            if (child->values[0].type != OCONFIG_TYPE_STRING ||
+                child->values[1].type != OCONFIG_TYPE_STRING) {
+                WARNING("kafka properties needs string arguments.");
                 goto errout;
-            }
+            }
             key = child->values[0].value.string;
-            val = child->values[0].value.string;
+            val = child->values[1].value.string;
 
             ret = rd_kafka_topic_conf_set(tctx->conf,key, val, errbuf, sizeof(errbuf));
             if (ret != RD_KAFKA_CONF_OK) {
-                WARNING("cannot set kafka topic property %s to %s: %s.",
+                WARNING("cannot set kafka topic property %s to %s: %s.",
                         key, val, errbuf);
                 goto errout;
-            }
-
-        } else if (strcasecmp ("Key", child->key) == 0) {
-            char *tmp_buf = NULL;
-            status = cf_util_get_string(child, &tmp_buf);
-            if (status != 0) {
-                WARNING("write_kafka plugin: invalid key supplied");
-                break;
-            }
-
-            if (strcasecmp(tmp_buf, "Random") != 0) {
-                tctx->has_key = 1;
-                tctx->key = crc32_buffer((u_char *)tmp_buf, strlen(tmp_buf));
             }
-            sfree(tmp_buf);
+        } else if (strcasecmp ("Key", child->key) == 0) {
+            cf_util_get_string (child, &tctx->key);
+            assert (tctx->key != NULL);
         } else if (strcasecmp ("Format", child->key) == 0) {
             status = cf_util_get_string(child, &key);
             if (status != 0)
@@ -257,7 +331,6 @@ static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{
             assert(key != NULL);
 
             if (strcasecmp(key, "Command") == 0) {
-
                 tctx->format = KAFKA_FORMAT_COMMAND;
 
             } else if (strcasecmp(key, "Graphite") == 0) {
@@ -270,6 +343,7 @@ static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{
                 WARNING ("write_kafka plugin: Invalid format string: %s", key);
             }
+            sfree(key);
 
         } else if (strcasecmp ("StoreRates", child->key) == 0) {
@@ -308,47 +382,38 @@ static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{
     rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
     rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);
 
-    if ((tctx->topic = rd_kafka_topic_new(tctx->kafka, tctx->topic_name,
-                                          tctx->conf)) == NULL) {
-        ERROR("write_kafka plugin: cannot create topic.");
-        goto errout;
-    }
-    tctx->conf = NULL;
-
     ssnprintf(callback_name, sizeof(callback_name), "write_kafka/%s", tctx->topic_name);
 
     ud.data = tctx;
     ud.free_func = kafka_topic_context_free;
 
-    status = plugin_register_write (callback_name, kafka_write, &ud);
-    if (status != 0) {
-        WARNING ("write_kafka plugin: plugin_register_write (\"%s\") "
-                 "failed with status %i.",
-                 callback_name, status);
+    status = plugin_register_write (callback_name, kafka_write, &ud);
+    if (status != 0) {
+        WARNING ("write_kafka plugin: plugin_register_write (\"%s\") "
+                 "failed with status %i.",
+                 callback_name, status);
         goto errout;
     }
+
+    pthread_mutex_init (&tctx->lock, /* attr = */ NULL);
+
     return;
 
 errout:
-    if (conf != NULL)
-        rd_kafka_conf_destroy(conf);
-    if (tctx->kafka != NULL)
-        rd_kafka_destroy(tctx->kafka);
-    if (tctx->topic != NULL)
-        rd_kafka_topic_destroy(tctx->topic);
     if (tctx->topic_name != NULL)
         free(tctx->topic_name);
     if (tctx->conf != NULL)
         rd_kafka_topic_conf_destroy(tctx->conf);
+    if (tctx->kafka_conf != NULL)
+        rd_kafka_conf_destroy(tctx->kafka_conf);
 
     sfree(tctx);
 } /* }}} int kafka_config_topic */
 
 static int kafka_config(oconfig_item_t *ci) /* {{{ */
 {
-    int                  i;
-    oconfig_item_t      *child;
+    int                  i;
+    oconfig_item_t      *child;
     rd_kafka_conf_t     *conf;
-    rd_kafka_conf_t     *cloned;
     rd_kafka_conf_res_t  ret;
     char                 errbuf[1024];
@@ -356,54 +421,52 @@ static int kafka_config(oconfig_item_t *ci) /* {{{ */
         WARNING("cannot allocate kafka configuration.");
         return -1;
     }
+    for (i = 0; i < ci->children_num; i++) {
+        child = &ci->children[i];
 
-    for (i = 0; i < ci->children_num; i++) {
-        child = &ci->children[i];
+        if (strcasecmp("Topic", child->key) == 0) {
+            kafka_config_topic (conf, child);
+        } else if (strcasecmp(child->key, "Property") == 0) {
+            char *key = NULL;
+            char *val = NULL;
 
-        if (strcasecmp("Topic", child->key) == 0) {
-            if ((cloned = rd_kafka_conf_dup(conf)) == NULL) {
-                WARNING("write_kafka plugin: cannot allocate memory for kafka config");
+            if (child->values_num != 2) {
+                WARNING("kafka properties need both a key and a value.");
                 goto errout;
             }
-            kafka_config_topic (cloned, child);
-        } else if (strcasecmp(child->key, "Property") == 0) {
-            char *key = NULL;
-            char *val = NULL;
-
-            if (child->values_num != 2) {
-                WARNING("kafka properties need both a key and a value.");
-                goto errout;
-            }
-            if (child->values[0].type != OCONFIG_TYPE_STRING ||
-                child->values[1].type != OCONFIG_TYPE_STRING) {
-                WARNING("kafka properties needs string arguments.");
+            if (child->values[0].type != OCONFIG_TYPE_STRING ||
+                child->values[1].type != OCONFIG_TYPE_STRING) {
+                WARNING("kafka properties needs string arguments.");
                 goto errout;
-            }
-            if ((key = strdup(child->values[0].value.string)) == NULL) {
-                WARNING("cannot allocate memory for attribute key.");
+            }
+            if ((key = strdup(child->values[0].value.string)) == NULL) {
+                WARNING("cannot allocate memory for attribute key.");
                 goto errout;
-            }
-            if ((val = strdup(child->values[1].value.string)) == NULL) {
-                WARNING("cannot allocate memory for attribute value.");
+            }
+            if ((val = strdup(child->values[1].value.string)) == NULL) {
+                WARNING("cannot allocate memory for attribute value.");
+                sfree(key);
                 goto errout;
-            }
+            }
             ret = rd_kafka_conf_set(conf, key, val, errbuf, sizeof(errbuf));
             if (ret != RD_KAFKA_CONF_OK) {
                 WARNING("cannot set kafka property %s to %s: %s",
                         key, val, errbuf);
+                sfree(key);
+                sfree(val);
                 goto errout;
             }
-            sfree(key);
-            sfree(val);
-        } else {
-            WARNING ("write_kafka plugin: Ignoring unknown "
-                     "configuration option \"%s\" at top level.",
-                     child->key);
-        }
-    }
+            sfree(key);
+            sfree(val);
+        } else {
+            WARNING ("write_kafka plugin: Ignoring unknown "
+                     "configuration option \"%s\" at top level.",
+                     child->key);
+        }
+    }
     if (conf != NULL)
         rd_kafka_conf_destroy(conf);
-    return (0);
+    return (0);
 errout:
     if (conf != NULL)
         rd_kafka_conf_destroy(conf);
@@ -412,7 +475,5 @@ static int kafka_config(oconfig_item_t *ci) /* {{{ */
 
 void module_register(void)
 {
-    plugin_register_complex_config ("write_kafka", kafka_config);
+    plugin_register_complex_config ("write_kafka", kafka_config);
 }
-
-/* vim: set sw=8 sts=8 ts=8 noet : */
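
For orientation only: the options this patch parses (top-level Property and Topic blocks with Property, Key, Format and StoreRates) correspond to a collectd.conf section shaped roughly as below. This is a hand-written sketch; the broker address, topic name and key are placeholder values and are not taken from the patch.

    <Plugin write_kafka>
      # librdkafka property applied to the producer handle
      Property "metadata.broker.list" "localhost:9092"
      <Topic "collectd-metrics">
        Format JSON
        Key "example-key"
        StoreRates true
      </Topic>
    </Plugin>

With a string Key configured, the new kafka_hash()/kafka_partition() pair in this patch hashes that key to choose an available partition, replacing the old crc32-based integer key.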