src/*.5
src/.pod2man.tmp.*
src/libcollectdclient/collectd/lcc_features.h
-src/utils_vl_lookup_test
# patch stuff
*.rej
# tests stuff
src/tests/.deps/
src/tests/mock/.deps/
+src/tests/.dirstamp
+src/tests/mock/.dirstamp
# new daemon repo
src/daemon/.deps/
Dan Berrange <berrange at redhat.com>
- uuid plugin.
+Dan Ryder <daryder at cisco.com>
+ - ceph plugin.
+
David Bacher <drbacher at gmail.com>
- serial plugin.
Fabien Wernli <cpan at faxm0dem.org>
- Solaris improvements in the memory and interfaces plugin.
+Fabrice A. Marie <fabrice at kibinlabs.com>
+ - write_sensu plugin.
+
Flavio Stanchina <flavio at stanchina.net>
- mbmon plugin.
+2015-02-26, Version 5.4.2
+ * Build system: Numerous fixes. Thanks to Bjørn Nordbø, Jim Radford,
+ KOMEDA Shinji, Lauri Tirkkonen, Manuel Luis Sanmartin Rozada, Marc
+ Fournier, Rainer Müller, Yoga Ramalingam and Yves Mettier. #326,
+ #373, #653, #828
+ * collectd: A use-after-free has been fixed in the "parse_value()"
+ function. Thanks to Matthias Urlichs.
+ * collectd: Fix carriage return sign in types_list. Thanks to Marc
+ Fournier and @NsLib.
+ * collectd: Fix programming error in src/configfile.c. Thanks to
+ Wilfried Goesgens.
+ * collectd: An off-by-one error has been fixed in the
+ "strstripnewline()" function. Patch by Florian Forster.
+ * collectd: Use the complain mechanism to report filter chain write
+ failures. Thanks to Sebastian Harl.
+ * collectd: Spelling and grammar of error messages have been fixed.
+ Thanks to Katelyn Perry and Tim Laszlo.
+ * collectdctl: Fixed buffering issues which caused trouble on AIX and
+ Solaris. Thanks to Yoga Ramalingam.
+ * Documentation: Details and example about multi-instance filterchain
+ targets have been added. Thanks to Marc Fournier.
+ * Documentation: The "CollectStatistics" option of the rrdcached
+ plugin has been documented. Thanks to Micha Krause. #907
+ * Documentation: The write_redis plugin has been documented. Thanks to
+ Marc Fournier.
+ * Documentation: The "GraphiteSeparateInstances" and
+ "GraphiteAlwaysAppendDS" options of the amqp plugin have been
+ documented. Thanks to Marc Fournier.
+ * Documentation: Documentation of the "AutoLoadPlugin" option has been
+ improved. Thanks to Florian Forster. #715
+ * aggregation: "utils_vl_lookup": A race when creating user objects
+ has been fixed. Thanks to Sebastian Harl. #535
+ * cpu: Temperature code for Mac OS X has been removed.
+ Thanks to Florian Forster and Marc Fournier. #22
+ * cURL, cURL-JSON, cURL-XML and Write HTTP plugins: Call
+ "curl_global_init()" in the plugins' "init()" callback. Thanks to
+ Jeremy Katz.
+ * cURL and memcachec plugins: Fix calculation of gauge, average,
+ minimum and maximum. Previously, they were calculated from the start
+ of the daemon, which is not the documented behavior. Thanks to
+ Florian Forster. #663
+ * curl-json: A bug, which triggered when two URLs with a long common
+ prefix were configured, was fixed. Thanks to Marc Fournier. #582
+ * dbi: Compatibility with new versions of libdbi has been restored.
+ Thanks to Florian Forster. #950
+ * Exec, UnixSock plugins: Fix parsing of the "time" option of the
+ "PUTNOTIF" command. Thanks to Adrian Miron. #477
+ * ipmi: A conflict with the java plugin over the "SIGUSR2" signal has
+ been fixed. Thanks to Vincent Bernat. #114
+ * java: Conversion from Java's time representation to collectd's
+ representation has been fixed. Thanks to Manuel Luis Sanmartín
+ Rozada.
+ * java: Make sure "cjni_thread_detach()" is called on all paths.
+ Thanks to Florian Forster.
+ * LogFile and SysLog plugins: Avoid total silence in case of a
+ misconfiguration. Thanks to Marc Fournier and Wilfried Goesgens.
+ * network: Support for recent versions of gcrypt has been added.
+ Thanks to Vincent Bernat. #632
+ * network: Robustness of the client connecting behavior has been
+ improved. Thanks to Florian Forster. #627
+ * python: Don't create empty "meta_data_t" objects. Thanks to Florian
+ Forster. #716
+ * python: Fix Py list length check in "cpy_build_meta()". Thanks to
+ Yoga Ramalingam.
+ * python: The "interval" member was fixed to export seconds as a
+ double. Thanks to Justin Burnham.
+ * RRDtool and RRDCacheD plugins: A memory leak when creating RRD files
+ has been fixed. Thanks to Yves Mettier. #661
+ * snmp: Fix a memory leak. Thanks to Marc Fournier and Pierre-Yves
+ Ritschard. #610, #804
+ * statsd: Support for samplerates in timer metrics was added. Thanks
+ to John Leach. #461
+ * swap: Fix behavior under OpenVZ by making "cached" optional. Thanks
+ to Florian Forster. #733
+ * threshold: Population of the "time" field in notifications has been
+ added. Thanks to Manuel Luis Sanmartín Rozada.
+ * libvirt: Only gather stats for running domains. Thanks to Ruben
+ Kerkhof.
+ * lvm: An issue with Volume Groups (VGs) without Logical Volumes (LVs)
+ has been fixed. Thanks to Jan Kundrát.
+ * write_graphite: Escape characters not supported by Graphite. Thanks
+ to Pierre-Yves Ritschard and Marc Fournier.
+ * write_http: Make callback names context-dependent. Thanks to Marc
+ Fournier. #821
+ * write_redis: A formatting bug, which resulted in totally unusable
+ numbers being transmitted to Redis, was fixed. Thanks to Marc
+ Fournier.
+ * write_riemann: Receive acknowledge message when using TCP. Thanks to
+ John-John Tedro.
+
2014-01-26, Version 5.4.1
* amqp plugin: Add support for RabbitMQ 0.4.x to avoid compiler
warnings. Thanks to Sebastian Harl for implementing this.
* zfs_arc plugin: Support for FreeBSD has been added. Thanks to Xin Li
for his patch.
+2015-02-26, Version 5.3.2
+ * Build system: Numerous fixes. Thanks to Bjørn Nordbø, Jim Radford,
+ KOMEDA Shinji, Lauri Tirkkonen, Manuel Luis Sanmartin Rozada, Marc
+ Fournier, Rainer Müller, Yoga Ramalingam and Yves Mettier. #326,
+ #373, #653, #828
+ * collectd: A use-after-free has been fixed in the "parse_value()"
+ function. Thanks to Matthias Urlichs.
+ * collectd: Fix carriage return sign in types_list. Thanks to Marc
+ Fournier and @NsLib.
+ * collectd: Fix programming error in src/configfile.c. Thanks to
+ Wilfried Goesgens.
+ * collectd: An off-by-one error has been fixed in the
+ "strstripnewline()" function. Patch by Florian Forster.
+ * collectd: Use the complain mechanism to report filter chain write
+ failures. Thanks to Sebastian Harl.
+ * collectd: Spelling and grammar of error messages have been fixed.
+ Thanks to Katelyn Perry and Tim Laszlo.
+ * collectdctl: Fixed buffering issues which caused trouble on AIX and
+ Solaris. Thanks to Yoga Ramalingam.
+ * Documentation: Details and example about multi-instance filterchain
+ targets have been added. Thanks to Marc Fournier.
+ * Documentation: The "CollectStatistics" option of the rrdcached
+ plugin has been documented. Thanks to Micha Krause. #907
+ * Documentation: The write_redis plugin has been documented. Thanks to
+ Marc Fournier.
+ * Documentation: The synopsis of the threshold plugin has been fixed.
+ Thanks to Fabien Wernli.
+ * Documentation: The "GraphiteSeparateInstances" and
+ "GraphiteAlwaysAppendDS" options of the amqp plugin have been
+ documented. Thanks to Marc Fournier.
+ * aggregation: "utils_vl_lookup": A race when creating user objects
+ has been fixed. Thanks to Sebastian Harl. #535
+ * cpu: Temperature code for Mac OS X has been removed.
+ Thanks to Florian Forster and Marc Fournier. #22
+ * csv: A regression which would lead to the "DataDir" option to be
+ ignored has been fixed. Thanks to Manuel Luis Sanmartin Rozada.
+ * curl, curl-json, curl-xml and write_http plugins: Call
+ "curl_global_init()" in the plugins' "init()" callback. Thanks to
+ Jeremy Katz.
+ * curl and memcachec plugins: Fix calculation of gauge, average,
+ minimum and maximum. Previously, they were calculated from the start
+ of the daemon, which is not the documented behavior. Thanks to
+ Florian Forster. #663
+ * dbi plugin: Compatibility with new versions of libdbi has been
+ restored. Thanks to Florian Forster. #950
+ * exec, unixsock plugins: Fix parsing of the "time" option of the
+ "PUTNOTIF" command. Thanks to Adrian Miron. #477
+ * java: Conversion from Java's time representation to collectd's
+ representation has been fixed. Thanks to Manuel Luis Sanmartín
+ Rozada.
+ * ipmi: A conflict with the java plugin over the "SIGUSR2" signal has
+ been fixed. Thanks to Vincent Bernat. #114
+ * java: Make sure "cjni_thread_detach()" is called on all paths.
+ Thanks to Florian Forster.
+ * logfile and syslog plugins: Avoid total silence in case of a
+ misconfiguration. Thanks to Marc Fournier and Wilfried Goesgens.
+ * memcached: Connecting to a UNIX socket has been fixed. Thanks to Jim
+ Radford.
+ * network: Support for recent versions of gcrypt has been added.
+ Thanks to Vincent Bernat. #632
+ * network: Robustness of the client connecting behavior has been
+ improved. Thanks to Florian Forster. #627
+ * python: Don't create empty "meta_data_t" objects. Thanks to Florian
+ Forster. #716
+ * python: Fix Py list length check in "cpy_build_meta()". Thanks to
+ Yoga Ramalingam.
+ * python: The "interval" member was fixed to export seconds as a
+ double. Thanks to Justin Burnham.
+ * replace and set targets: Fix error message. Thanks to Marc Fournier.
+ #448
+ * rrdtool and rrdcached plugins: Honor the "DataDir" config option;
+ this fixes a regression. Thanks to Florian Forster. #380
+ * rrdtool and rrdcached plugins: A memory leak when creating RRD files
+ has been fixed. Thanks to Yves Mettier. #661
+ * snmp: Fix a memory leak. Thanks to Marc Fournier and Pierre-Yves
+ Ritschard. #610, #804
+ * swap: Fix behavior under OpenVZ by making "cached" optional. Thanks
+ to Florian Forster. #733
+ * threshold: Population of the "time" field in notifications has been
+ added. Thanks to Manuel Luis Sanmartín Rozada.
+ * libvirt: Only gather stats for running domains. Thanks to Ruben
+ Kerkhof.
+ * write_graphite: Escape characters not supported by Graphite. Thanks
+ to Pierre-Yves Ritschard and Marc Fournier.
+ * write_http: Make callback names context-dependent. Thanks to Marc
+ Fournier. #821
+ * write_riemann: Receive acknowledge message when using TCP. Thanks to
+ John-John Tedro.
+
2013-07-13, Version 5.3.1
* Documentation: Various fixes.
* Configuration: Fix error handling: Errors in included files were
Statistics about Ascent, a free server for the game `World of Warcraft'.
- barometer
- Using digital barometer sensor MPL115A2 or MPL3115 from Freescale
- provides absolute barometric pressure, air pressure reduced to sea level
- and temperature.
+ Reads absolute barometric pressure, air pressure reduced to sea level and
+ temperature. Supported sensors are MPL115A2 and MPL3115 from Freescale
+ and BMP085 from Bosch.
- battery
Batterycharge, -current and voltage of ACPI and PMU based laptop
Name server and resolver statistics from the `statistics-channel'
interface of BIND 9.5, 9,6 and later.
+ - ceph
+ Statistics from the Ceph distributed storage system.
+
- cgroups
CPU accounting information for process groups under Linux.
Interface traffic: Number of octets, packets and errors for each
interface.
- - iptables
- Iptables' counters: Number of bytes that were matched by a certain
- iptables rule.
+ - ipc
+ IPC counters: semaphores used, number of allocated segments in shared
+ memory and more.
- ipmi
IPMI (Intelligent Platform Management Interface) sensors information.
+ - iptables
+ Iptables' counters: Number of bytes that were matched by a certain
+ iptables rule.
+
- ipvs
IPVS connection statistics (number of connections, octets and packets
for each service and destination).
- ntpd
NTP daemon statistics: Local clock drift, offset to peers, etc.
+ - numa
+ Information about Non-Uniform Memory Access (NUMA).
+
- nut
Network UPS tools: UPS current, voltage, power, charge, utilisation,
temperature, etc. See upsd(8).
- - numa
- Information about Non-Uniform Memory Access (NUMA).
-
- olsrd
Queries routing information from the “Optimized Link State Routing”
daemon.
can be configured to avoid logging send errors (especially useful when
using UDP).
- - write_tsdb
- Sends data OpenTSDB, a scalable no master, no shared state time series
- database.
-
- write_http
Sends the values collected by collectd to a web-server using HTTP POST
requests. The transmitted data is either in a form understood by the
- write_riemann
Sends data to Riemann, a stream processing and monitoring system.
+ - write_sensu
+ Sends data to Sensu, a stream processing and monitoring system, via the
+ Sensu client local TCP socket.
+
+ - write_tsdb
+ Sends data to OpenTSDB, a scalable no master, no shared state time
+ series database.
+
* Logging is, as everything in collectd, provided by plugins. The following
plugins keep up informed about what's going on:
<http://www.xmms.org/>
* libyajl (optional)
- Parse JSON data. This is needed for the `curl_json' and `log_logstash'
- plugins.
+ Parse JSON data. This is needed for the `ceph', `curl_json' and
+ `log_logstash' plugins.
<http://github.com/lloyd/yajl>
* libvarnish (optional)
&& rm -f src/*.o \
&& rm -f src/*.la \
&& rm -f src/*.lo \
-&& rm -f src/collectd \
&& rm -f src/collectd.1 \
&& rm -f src/collectd.conf \
&& rm -f src/collectdctl \
&& rm -f src/*.pb-c.c \
&& rm -f src/*.pb-c.h \
&& rm -f src/Makefile.in \
+&& rm -f src/test-suite.log \
+&& rm -f src/test_common* \
+&& rm -f src/test_utils* \
+&& rm -f -r src/tests/.deps \
+&& rm -f -r src/tests/mock/.deps \
+&& rm -f src/tests/*.o \
+&& rm -f src/tests/mock/*.o \
+&& rm -f -r src/daemon/.deps \
+&& rm -f -r src/daemon/.libs \
+&& rm -f src/daemon/*.o \
+&& rm -f src/daemon/*.la \
+&& rm -f src/daemon/*.lo \
+&& rm -f src/daemon/collectd \
+&& rm -f src/daemon/Makefile.in \
+&& rm -f src/daemon/Makefile \
&& rm -f src/liboconfig/*.o \
&& rm -f src/liboconfig/*.la \
&& rm -f src/liboconfig/*.lo \
struct mntent *me;
fh = setmntent ("/etc/mtab", "r");
me = getmntent (fh);
+return(me->mnt_passno);
]]]
)],
[c_cv_have_one_getmntent="yes"],
int status;
fh = fopen ("/etc/mnttab", "r");
status = getmntent (fh, &mt);
+ return(status);
]]]
)],
[c_cv_have_two_getmntent="yes"],
[have_curlopt_username="yes"],
[have_curlopt_username="no"],
[[#include <curl/curl.h>]])
+ AC_CHECK_DECL(CURLOPT_TIMEOUT_MS,
+ [have_curlopt_timeout="yes"],
+ [have_curlopt_timeout="no"],
+ [[#include <curl/curl.h>]])
fi
fi
if test "x$with_libcurl" = "xyes"
then
AC_DEFINE(HAVE_CURLOPT_USERNAME, 1, [Define if libcurl supports CURLOPT_USERNAME option.])
fi
+
+ if test "x$have_curlopt_timeout" = "xyes"
+ then
+ AC_DEFINE(HAVE_CURLOPT_TIMEOUT_MS, 1, [Define if libcurl supports CURLOPT_TIMEOUT_MS option.])
+ fi
fi
AM_CONDITIONAL(BUILD_WITH_LIBCURL, test "x$with_libcurl" = "xyes")
# }}}
LDFLAGS="$LDFLAGS $with_libdbi_ldflags"
AC_CHECK_LIB(dbi, dbi_initialize, [with_libdbi="yes"], [with_libdbi="no (Symbol 'dbi_initialize' not found)"])
- AC_CHECK_LIB(dbi, dbi_driver_open_r, [with_libdbi_r="yes"], [with_libdbi_r="no"])
CPPFLAGS="$SAVE_CPPFLAGS"
LDFLAGS="$SAVE_LDFLAGS"
AC_SUBST(BUILD_WITH_LIBDBI_CPPFLAGS)
AC_SUBST(BUILD_WITH_LIBDBI_LDFLAGS)
AC_SUBST(BUILD_WITH_LIBDBI_LIBS)
-
- if test "x$with_libdbi_r" = "xyes"
- then
- AC_DEFINE(HAVE_LIBDBI_R, 1, [Define if reentrant dbi facility is present and usable.])
- fi
fi
AM_CONDITIONAL(BUILD_WITH_LIBDBI, test "x$with_libdbi" = "xyes")
# }}}
# --with-java {{{
with_java_home="$JAVA_HOME"
+if test "x$with_java_home" = "x"
+then
+ with_java_home="/usr/lib/jvm"
+fi
with_java_vmtype="client"
with_java_cflags=""
with_java_libs=""
if test -d "$with_java_home"
then
AC_MSG_CHECKING([for jni.h])
- TMPVAR=`find "$with_java_home" -name jni.h -type f -exec 'dirname' '{}' ';' 2>/dev/null | head -n 1`
+ TMPVAR=`find -L "$with_java_home" -name jni.h -type f -exec 'dirname' '{}' ';' 2>/dev/null | head -n 1`
if test "x$TMPVAR" != "x"
then
AC_MSG_RESULT([found in $TMPVAR])
fi
AC_MSG_CHECKING([for jni_md.h])
- TMPVAR=`find "$with_java_home" -name jni_md.h -type f -exec 'dirname' '{}' ';' 2>/dev/null | head -n 1`
+ TMPVAR=`find -L "$with_java_home" -name jni_md.h -type f -exec 'dirname' '{}' ';' 2>/dev/null | head -n 1`
if test "x$TMPVAR" != "x"
then
AC_MSG_RESULT([found in $TMPVAR])
fi
AC_MSG_CHECKING([for libjvm.so])
- TMPVAR=`find "$with_java_home" -name libjvm.so -type f -exec 'dirname' '{}' ';' 2>/dev/null | head -n 1`
+ TMPVAR=`find -L "$with_java_home" -name libjvm.so -type f -exec 'dirname' '{}' ';' 2>/dev/null | head -n 1`
if test "x$TMPVAR" != "x"
then
AC_MSG_RESULT([found in $TMPVAR])
if test "x$JAVAC" = "x"
then
AC_MSG_CHECKING([for javac])
- TMPVAR=`find "$with_java_home" -name javac -type f 2>/dev/null | head -n 1`
+ TMPVAR=`find -L "$with_java_home" -name javac -type f 2>/dev/null | head -n 1`
if test "x$TMPVAR" != "x"
then
JAVAC="$TMPVAR"
if test "x$JAR" = "x"
then
AC_MSG_CHECKING([for jar])
- TMPVAR=`find "$with_java_home" -name jar -type f 2>/dev/null | head -n 1`
+ TMPVAR=`find -L "$with_java_home" -name jar -type f 2>/dev/null | head -n 1`
if test "x$TMPVAR" != "x"
then
JAR="$TMPVAR"
]]],
[[[
int val = PCAP_ERROR_IFACE_NOT_UP;
+ return(val);
]]]
)],
[c_cv_libpcap_have_pcap_error_iface_not_up="yes"],
&& test -n "$perl_interpreter"
then
SAVE_CFLAGS="$CFLAGS"
- SAVE_LDFLAGS="$LDFLAGS"
+ SAVE_LIBS="$LIBS"
dnl ARCHFLAGS="" -> disable multi -arch on OSX (see Config_heavy.pl:fetch_string)
PERL_CFLAGS=`ARCHFLAGS="" $perl_interpreter -MExtUtils::Embed -e ccopts`
- PERL_LDFLAGS=`ARCHFLAGS="" $perl_interpreter -MExtUtils::Embed -e ldopts`
+ PERL_LIBS=`ARCHFLAGS="" $perl_interpreter -MExtUtils::Embed -e ldopts`
CFLAGS="$CFLAGS $PERL_CFLAGS"
- LDFLAGS="$LDFLAGS $PERL_LDFLAGS"
+ LIBS="$LIBS $PERL_LIBS"
AC_CACHE_CHECK([for libperl],
[c_cv_have_libperl],
then
AC_DEFINE(HAVE_LIBPERL, 1, [Define if libperl is present and usable.])
AC_SUBST(PERL_CFLAGS)
- AC_SUBST(PERL_LDFLAGS)
+ AC_SUBST(PERL_LIBS)
else
with_libperl="no"
fi
CFLAGS="$SAVE_CFLAGS"
- LDFLAGS="$SAVE_LDFLAGS"
+ LIBS="$SAVE_LIBS"
else if test -z "$perl_interpreter"; then
with_libperl="no (no perl interpreter found)"
c_cv_have_libperl="no"
if test "x$with_libperl" = "xyes"
then
SAVE_CFLAGS="$CFLAGS"
- SAVE_LDFLAGS="$LDFLAGS"
+ SAVE_LIBS="$LIBS"
CFLAGS="$CFLAGS $PERL_CFLAGS"
- LDFLAGS="$LDFLAGS $PERL_LDFLAGS"
+ LIBS="$LIBS $PERL_LIBS"
AC_CACHE_CHECK([if perl supports ithreads],
[c_cv_have_perl_ithreads],
fi
CFLAGS="$SAVE_CFLAGS"
- LDFLAGS="$SAVE_LDFLAGS"
+ LIBS="$SAVE_LIBS"
fi
if test "x$with_libperl" = "xyes"
then
SAVE_CFLAGS="$CFLAGS"
- SAVE_LDFLAGS="$LDFLAGS"
+ SAVE_LIBS="$LIBS"
# trigger an error if Perl_load_module*() uses __attribute__nonnull__(3)
# (see issues #41 and #42)
CFLAGS="$CFLAGS $PERL_CFLAGS -Wall -Werror"
- LDFLAGS="$LDFLAGS $PERL_LDFLAGS"
+ LIBS="$LIBS $PERL_LIBS"
AC_CACHE_CHECK([for broken Perl_load_module()],
[c_cv_have_broken_perl_load_module],
)
CFLAGS="$SAVE_CFLAGS"
- LDFLAGS="$SAVE_LDFLAGS"
+ LIBS="$SAVE_LIBS"
fi
AM_CONDITIONAL(HAVE_BROKEN_PERL_LOAD_MODULE,
test "x$c_cv_have_broken_perl_load_module" = "xyes")
if test "x$with_libperl" = "xyes"
then
SAVE_CFLAGS="$CFLAGS"
- SAVE_LDFLAGS="$LDFLAGS"
+ SAVE_LIBS="$LIBS"
CFLAGS="$CFLAGS $PERL_CFLAGS"
- LDFLAGS="$LDFLAGS $PERL_LDFLAGS"
+ LIBS="$LIBS $PERL_LIBS"
AC_CHECK_MEMBER(
[struct mgvtbl.svt_local],
fi
CFLAGS="$SAVE_CFLAGS"
- LDFLAGS="$SAVE_LDFLAGS"
+ LIBS="$SAVE_LIBS"
fi
# }}}
# --with-librdkafka {{{
AC_ARG_WITH(librdkafka, [AS_HELP_STRING([--with-librdkafka@<:@=PREFIX@:>@], [Path to librdkafka.])],
[
- if test "x$withval" = "xno" && test "x$withval" != "xyes"
+ if test "x$withval" != "xno" && test "x$withval" != "xyes"
then
with_librdkafka_cppflags="-I$withval/include"
with_librdkafka_ldflags="-L$withval/lib"
+ with_librdkafka_rpath="$withval/lib"
with_librdkafka="yes"
else
with_librdkafka="$withval"
SAVE_CPPFLAGS="$CPPFLAGS"
SAVE_LDFLAGS="$LDFLAGS"
+CPPFLAGS="$CPPFLAGS $with_librdkafka_cppflags"
+LDFLAGS="$LDFLAGS $with_librdkafka_ldflags"
+
if test "x$with_librdkafka" = "xyes"
then
AC_CHECK_HEADERS(librdkafka/rdkafka.h, [with_librdkafka="yes"], [with_librdkafka="no (librdkafka/rdkafka.h not found)"])
then
AC_CHECK_LIB(rdkafka, rd_kafka_new, [with_librdkafka="yes"], [with_librdkafka="no (Symbol 'rd_kafka_new' not found)"])
AC_CHECK_LIB(rdkafka, rd_kafka_conf_set_log_cb, [with_librdkafka_log_cb="yes"], [with_librdkafka_log_cb="no"])
- AC_CHECK_LIB(rdkafka, rd_kafka_conf_set_logger, [with_librdkafka_logger="yes"], [with_librdkafka_logger="no"])
+ AC_CHECK_LIB(rdkafka, rd_kafka_set_logger, [with_librdkafka_logger="yes"], [with_librdkafka_logger="no"])
fi
if test "x$with_librdkafka" = "xyes"
then
BUILD_WITH_LIBRDKAFKA_CPPFLAGS="$with_librdkafka_cppflags"
BUILD_WITH_LIBRDKAFKA_LDFLAGS="$with_librdkafka_ldflags"
- BUILD_WITH_LIBRDKAFKA_LIBS="-lrdkafka"
+ if test "x$with_librdkafka_rpath" != "x"
+ then
+ BUILD_WITH_LIBRDKAFKA_LIBS="-Wl,-rpath,$with_librdkafka_rpath -lrdkafka"
+ else
+ BUILD_WITH_LIBRDKAFKA_LIBS="-lrdkafka"
+ fi
AC_SUBST(BUILD_WITH_LIBRDKAFKA_CPPFLAGS)
AC_SUBST(BUILD_WITH_LIBRDKAFKA_LDFLAGS)
AC_SUBST(BUILD_WITH_LIBRDKAFKA_LIBS)
if test "x$with_libstatgrab" = "xyes"
then
SAVE_CFLAGS="$CFLAGS"
- SAVE_LDFLAGS="$LDFLAGS"
+ SAVE_LIBS="$LIBS"
CFLAGS="$CFLAGS $with_libstatgrab_cflags"
LDFLAGS="$LDFLAGS $with_libstatgrab_ldflags"
+ LIBS="-lstatgrab $LIBS"
AC_CACHE_CHECK([if libstatgrab >= 0.90],
[c_cv_have_libstatgrab_0_90],
CFLAGS="$SAVE_CFLAGS"
LDFLAGS="$SAVE_LDFLAGS"
+ LIBS="$SAVE_LIBS"
fi
AM_CONDITIONAL(BUILD_WITH_LIBSTATGRAB, test "x$with_libstatgrab" = "xyes")
plugin_barometer="no"
plugin_battery="no"
plugin_bind="no"
+plugin_ceph="no"
plugin_cgroups="no"
plugin_conntrack="no"
plugin_contextswitch="no"
plugin_entropy="yes"
plugin_fscache="yes"
plugin_interface="yes"
+ plugin_ipc="yes"
plugin_irq="yes"
plugin_load="yes"
plugin_lvm="yes"
if test "x$ac_system" = "xAIX"
then
plugin_tcpconns="yes"
+ plugin_ipc="yes"
fi
# FreeBSD
plugin_curl_xml="yes"
fi
+if test "x$with_libyajl" = "xyes"
+then
+ plugin_ceph="yes"
+fi
+
if test "x$have_processor_info" = "xyes"
then
plugin_cpu="yes"
AC_PLUGIN([barometer], [$plugin_barometer], [Barometer sensor on I2C])
AC_PLUGIN([battery], [$plugin_battery], [Battery statistics])
AC_PLUGIN([bind], [$plugin_bind], [ISC Bind nameserver statistics])
+AC_PLUGIN([ceph], [$plugin_ceph], [Ceph daemon statistics])
AC_PLUGIN([conntrack], [$plugin_conntrack], [nf_conntrack statistics])
AC_PLUGIN([contextswitch], [$plugin_contextswitch], [context switch statistics])
AC_PLUGIN([cpufreq], [$plugin_cpufreq], [CPU frequency statistics])
AC_PLUGIN([gmond], [$with_libganglia], [Ganglia plugin])
AC_PLUGIN([hddtemp], [yes], [Query hddtempd])
AC_PLUGIN([interface], [$plugin_interface], [Interface traffic statistics])
+AC_PLUGIN([ipc], [$plugin_ipc], [IPC statistics])
AC_PLUGIN([ipmi], [$plugin_ipmi], [IPMI sensor statistics])
AC_PLUGIN([iptables], [$with_libiptc], [IPTables rule counters])
AC_PLUGIN([ipvs], [$plugin_ipvs], [IPVS connection statistics])
AC_PLUGIN([write_mongodb], [$with_libmongoc], [MongoDB output plugin])
AC_PLUGIN([write_redis], [$with_libhiredis], [Redis output plugin])
AC_PLUGIN([write_riemann], [$have_protoc_c], [Riemann output plugin])
+AC_PLUGIN([write_sensu], [yes], [Sensu output plugin])
AC_PLUGIN([write_tsdb], [yes], [TSDB output plugin])
AC_PLUGIN([xmms], [$with_libxmms], [XMMS statistics])
AC_PLUGIN([zfs_arc], [$plugin_zfs_arc], [ZFS ARC statistics])
libatasmart . . . . . $with_libatasmart
libcurl . . . . . . . $with_libcurl
libdbi . . . . . . . $with_libdbi
- libhiredis . . . . . $with_libhiredis
libesmtp . . . . . . $with_libesmtp
libganglia . . . . . $with_libganglia
libgcrypt . . . . . . $with_libgcrypt
+ libhal . . . . . . . $with_libhal
+ libhiredis . . . . . $with_libhiredis
libi2c-dev . . . . . $with_libi2c
libiokit . . . . . . $with_libiokit
libiptc . . . . . . . $with_libiptc
libmemcached . . . . $with_libmemcached
libmnl . . . . . . . $with_libmnl
libmodbus . . . . . . $with_libmodbus
+ libmongoc . . . . . . $with_libmongoc
libmysql . . . . . . $with_libmysql
libnetapp . . . . . . $with_libnetapp
libnetsnmp . . . . . $with_libnetsnmp
liboconfig . . . . . $with_liboconfig
libopenipmi . . . . . $with_libopenipmipthread
liboping . . . . . . $with_liboping
+ libowcapi . . . . . . $with_libowcapi
libpcap . . . . . . . $with_libpcap
libperfstat . . . . . $with_perfstat
libperl . . . . . . . $with_libperl
libxml2 . . . . . . . $with_libxml2
libxmms . . . . . . . $with_libxmms
libyajl . . . . . . . $with_libyajl
- libevent . . . . . . $with_libevent
- protobuf-c . . . . . $have_protoc_c
oracle . . . . . . . $with_oracle
+ protobuf-c . . . . . $have_protoc_c
python . . . . . . . $with_python
Features:
amqp . . . . . . . $enable_amqp
apache . . . . . . . $enable_apache
apcups . . . . . . . $enable_apcups
- aquaero . . . . . . . $enable_aquaero
apple_sensors . . . . $enable_apple_sensors
+ aquaero . . . . . . . $enable_aquaero
ascent . . . . . . . $enable_ascent
barometer . . . . . . $enable_barometer
battery . . . . . . . $enable_battery
bind . . . . . . . . $enable_bind
+ ceph . . . . . . . . $enable_ceph
+ cgroups . . . . . . . $enable_cgroups
conntrack . . . . . . $enable_conntrack
contextswitch . . . . $enable_contextswitch
- cgroups . . . . . . . $enable_cgroups
cpu . . . . . . . . . $enable_cpu
cpufreq . . . . . . . $enable_cpufreq
csv . . . . . . . . . $enable_csv
gmond . . . . . . . . $enable_gmond
hddtemp . . . . . . . $enable_hddtemp
interface . . . . . . $enable_interface
+ ipc . . . . . . . . . $enable_ipc
ipmi . . . . . . . . $enable_ipmi
iptables . . . . . . $enable_iptables
ipvs . . . . . . . . $enable_ipvs
java . . . . . . . . $enable_java
load . . . . . . . . $enable_load
logfile . . . . . . . $enable_logfile
- lpar . . . . . . . . $enable_lpar
log_logstash . . . . $enable_log_logstash
+ lpar . . . . . . . . $enable_lpar
lvm . . . . . . . . . $enable_lvm
madwifi . . . . . . . $enable_madwifi
match_empty_counter . $enable_match_empty_counter
swap . . . . . . . . $enable_swap
syslog . . . . . . . $enable_syslog
table . . . . . . . . $enable_table
- tail . . . . . . . . $enable_tail
tail_csv . . . . . . $enable_tail_csv
+ tail . . . . . . . . $enable_tail
tape . . . . . . . . $enable_tape
target_notification . $enable_target_notification
target_replace . . . $enable_target_replace
write_mongodb . . . . $enable_write_mongodb
write_redis . . . . . $enable_write_redis
write_riemann . . . . $enable_write_riemann
+ write_sensu . . . . . $enable_write_sensu
write_tsdb . . . . . $enable_write_tsdb
xmms . . . . . . . . $enable_xmms
zfs_arc . . . . . . . $enable_zfs_arc
# - enable the EPEL repository (http://dl.fedoraproject.org/pub/epel/) in the
# configuration files for your target systems (/etc/mock/*.cfg).
#
-# - copy this file in your ~/rpmbuild/SPECS/ directory
-#
# - fetch the desired collectd release file from https://collectd.org/files/
-# and save it in your ~/rpmbuild/SOURCES/ directory
+# and save it in your ~/rpmbuild/SOURCES/ directory (or build your own out of
+# the git repository: ./build.sh && ./configure && make dist-bz2)
+#
+# - copy this file in your ~/rpmbuild/SPECS/ directory. Make sure the
+# "Version:" tag matches the version from the tarball.
#
# - build the SRPM first:
# mock -r centos-6-x86_64 --buildsrpm --spec ~/rpmbuild/SPECS/collectd.spec \
#
%global _hardened_build 1
+%{?perl_default_filter}
# plugins only buildable on RHEL6
# (NB: %{elN} macro is not available on RHEL < 6)
%define with_ascent 0%{!?_without_ascent:1}
%define with_battery 0%{!?_without_battery:1}
%define with_bind 0%{!?_without_bind:1}
+%define with_ceph 0%{!?_without_ceph:0%{?_has_libyajl}}
%define with_cgroups 0%{!?_without_cgroups:1}
%define with_conntrack 0%{!?_without_conntrack:1}
%define with_contextswitch 0%{!?_without_contextswitch:1}
%define with_gmond 0%{!?_without_gmond:0%{?_has_recent_libganglia}}
%define with_hddtemp 0%{!?_without_hddtemp:1}
%define with_interface 0%{!?_without_interface:1}
+%define with_ipc 0%{!?_without_ipc:1}
%define with_ipmi 0%{!?_without_ipmi:1}
%define with_iptables 0%{!?_without_iptables:0%{?_has_working_libiptc}}
%define with_ipvs 0%{!?_without_ipvs:0%{?_has_ip_vs_h}}
%define with_write_log 0%{!?_without_write_log:1}
%define with_write_redis 0%{!?_without_write_redis:0%{?_has_hiredis}}
%define with_write_riemann 0%{!?_without_write_riemann:1}
+%define with_write_sensu 0%{!?_without_write_sensu:1}
%define with_write_tsdb 0%{!?_without_write_tsdb:1}
%define with_zfs_arc 0%{!?_without_zfs_arc:1}
%define with_zookeeper 0%{!?_without_zookeeper:1}
# plugin xmms disabled, requires xmms
%define with_xmms 0%{!?_without_xmms:0}
-Summary: Statistics collection daemon for filling RRD files
+Summary: Statistics collection and monitoring daemon
Name: collectd
-Version: 5.4.0
+Version: 5.4.2
Release: 1%{?dist}
URL: http://collectd.org
Source: http://collectd.org/files/%{name}-%{version}.tar.bz2
via HTTP and submits the values to collectd.
%endif
+%if %{with_ceph}
+%package ceph
+Summary: Ceph plugin for collectd
+Group: System Environment/Daemons
+Requires: %{name}%{?_isa} = %{version}-%{release}
+BuildRequires: yajl-devel
+%description ceph
+Ceph plugin for collectd
+%endif
+
%if %{with_curl}
%package curl
Summary: Curl plugin for collectd
%define _with_csv --disable-csv
%endif
+%if %{with_ceph}
+%define _with_ceph --enable-ceph
+%else
+%define _with_ceph --disable-ceph
+%endif
+
%if %{with_curl}
%define _with_curl --enable-curl
%else
%if %{with_dbi}
%define _with_dbi --enable-dbi
%else
-%define _with_dbi --disable-dbi --without-libdbi
+%define _with_dbi --disable-dbi
%endif
%if %{with_df}
%define _with_interface --disable-interface
%endif
+%if %{with_ipc}
+%define _with_ipc --enable-ipc
+%else
+%define _with_ipc --disable-ipc
+%endif
+
%if %{with_ipmi}
%define _with_ipmi --enable-ipmi
%else
%if %{with_notify_email}
%define _with_notify_email --enable-notify_email
%else
-%define _with_notify_email --disable-notify_email --without-libesmpt
+%define _with_notify_email --disable-notify_email
%endif
%if %{with_ntpd}
%if %{with_perl}
%define _with_perl --enable-perl --with-perl-bindings="INSTALLDIRS=vendor"
%else
-%define _with_perl --disable-perl --without-libperl
+%define _with_perl --disable-perl
%endif
%if %{with_pf}
%if %{with_write_mongodb}
%define _with_write_mongodb --enable-write_mongodb
%else
-%define _with_write_mongodb --disable-write_mongodb --without-libmongoc
+%define _with_write_mongodb --disable-write_mongodb
%endif
%if %{with_write_redis}
%define _with_write_riemann --disable-write_riemann
%endif
+%if %{with_write_sensu}
+%define _with_write_sensu --enable-write_sensu
+%else
+%define _with_write_sensu --disable-write_sensu
+%endif
+
%if %{with_write_tsdb}
%define _with_write_tsdb --enable-write_tsdb
%else
%{?_with_barometer} \
%{?_with_battery} \
%{?_with_bind} \
+ %{?_with_ceph} \
%{?_with_cgroups} \
%{?_with_conntrack} \
%{?_with_contextswitch} \
%{?_with_gmond} \
%{?_with_hddtemp} \
%{?_with_interface} \
+ %{?_with_ipc} \
%{?_with_ipmi} \
%{?_with_iptables} \
%{?_with_ipvs} \
%{?_with_write_http} \
%{?_with_write_log} \
%{?_with_write_riemann} \
+ %{?_with_write_sensu} \
%{?_with_write_tsdb}
%{__mkdir} -p %{buildroot}%{_localstatedir}/www
%{__mkdir} -p %{buildroot}/%{_sysconfdir}/httpd/conf.d
-%{__cp} -a contrib/collection3 %{buildroot}%{_localstatedir}/www
-%{__cp} -a contrib/redhat/collection3.conf %{buildroot}/%{_sysconfdir}/httpd/conf.d/
+%{__mv} contrib/collection3 %{buildroot}%{_localstatedir}/www
+%{__mv} contrib/redhat/collection3.conf %{buildroot}/%{_sysconfdir}/httpd/conf.d/
-%{__cp} -a contrib/php-collection %{buildroot}%{_localstatedir}/www
-%{__cp} -a contrib/redhat/php-collection.conf %{buildroot}/%{_sysconfdir}/httpd/conf.d/
+%{__mv} contrib/php-collection %{buildroot}%{_localstatedir}/www
+%{__mv} contrib/redhat/php-collection.conf %{buildroot}/%{_sysconfdir}/httpd/conf.d/
### Clean up docs
find contrib/ -type f -exec %{__chmod} a-x {} \;
# *.la files shouldn't be distributed.
rm -f %{buildroot}/%{_libdir}/{collectd/,}*.la
-# Move the Perl examples to a separate directory.
-mkdir perl-examples
-find contrib -name '*.p[lm]' -exec mv {} perl-examples/ \;
-
# Remove Perl hidden .packlist files.
find %{buildroot} -type f -name .packlist -delete
# Remove Perl temporary file perllocal.pod
%if ! %{with_perl}
rm -f %{buildroot}%{_mandir}/man5/collectd-perl.5*
rm -f %{buildroot}%{_mandir}/man3/Collectd::Unixsock.3pm*
-rm -fr perl-examples/
rm -fr %{buildroot}/usr/lib/perl5/
%endif
%if %{with_interface}
%{_libdir}/%{name}/interface.so
%endif
+%if %{with_ipc}
+%{_libdir}/%{name}/ipc.so
+%endif
%if %{with_ipvs}
%{_libdir}/%{name}/ipvs.so
%endif
%if %{with_thermal}
%{_libdir}/%{name}/thermal.so
%endif
-%if %{with_load}
+%if %{with_threshold}
%{_libdir}/%{name}/threshold.so
%endif
%if %{with_unixsock}
%if %{with_write_log}
%{_libdir}/%{name}/write_log.so
%endif
+%if %{with_write_sensu}
+%{_libdir}/%{name}/write_sensu.so
+%endif
%if %{with_write_tsdb}
%{_libdir}/%{name}/write_tsdb.so
%endif
%{_libdir}/%{name}/bind.so
%endif
+%if %{with_ceph}
+%files ceph
+%{_libdir}/%{name}/ceph.so
+%endif
+
%if %{with_curl}
%files curl
%{_libdir}/%{name}/curl.so
%if %{with_perl}
%files perl
-%doc perl-examples/*
%{perl_vendorlib}/Collectd.pm
%{perl_vendorlib}/Collectd/
%{_mandir}/man3/Collectd::Unixsock.3pm*
%changelog
# * TODO 5.5.0-1
# - New upstream version
-# - New plugins enabled by default: drbd, log_logstash, write_tsdb, smart, openldap, redis, write_redis, zookeeper, write_log
+# - New plugins enabled by default: ceph, drbd, log_logstash, write_tsdb, smart, openldap, redis, write_redis, zookeeper, write_log, write_sensu, ipc
# - New plugins disabled by default: barometer, write_kafka
# - Enable zfs_arc, now supported on Linux
# - Install disk plugin in a dedicated package, as it depends on libudev
- Enabled netlink plugin on RHEL6 and RHEL7
- Allow perl plugin to build on RHEL5
- Add support for RHEL7
+- Misc perl-related improvements:
+ * prevent rpmbuild from extracting dependencies from files in /usr/share/doc
+ * don't package collection3 and php-collection twice
+ * keep perl scripts from contrib/ in collectd-contrib
* Wed Apr 10 2013 Marc Fournier <marc.fournier@camptocamp.com> 5.3.0-1
- New upstream version
bind_la_LIBADD = $(BUILD_WITH_LIBCURL_LIBS) $(BUILD_WITH_LIBXML2_LIBS)
endif
+if BUILD_PLUGIN_CEPH
+pkglib_LTLIBRARIES += ceph.la
+ceph_la_SOURCES = ceph.c
+ceph_la_CFLAGS = $(AM_CFLAGS)
+ceph_la_LDFLAGS = $(PLUGIN_LDFLAGS) $(BUILD_WITH_LIBYAJL_LDFLAGS)
+ceph_la_CPPFLAGS = $(AM_CPPFLAGS) $(BUILD_WITH_LIBYAJL_CPPFLAGS)
+ceph_la_LIBADD = $(BUILD_WITH_LIBYAJL_LIBS)
+endif
+
if BUILD_PLUGIN_CGROUPS
pkglib_LTLIBRARIES += cgroups.la
cgroups_la_SOURCES = cgroups.c \
endif
endif # BUILD_PLUGIN_INTERFACE
+if BUILD_PLUGIN_IPC
+pkglib_LTLIBRARIES += ipc.la
+ipc_la_SOURCES = ipc.c
+ipc_la_CFLAGS = $(AM_CFLAGS)
+ipc_la_LDFLAGS = $(PLUGIN_LDFLAGS)
+endif
+
if BUILD_PLUGIN_IPTABLES
pkglib_LTLIBRARIES += iptables.la
iptables_la_SOURCES = iptables.c
pkglib_LTLIBRARIES += notify_email.la
notify_email_la_SOURCES = notify_email.c
notify_email_la_LDFLAGS = $(PLUGIN_LDFLAGS)
-notify_email_la_LIBADD = -lesmtp -lssl -lcrypto -lpthread -ldl
+notify_email_la_LIBADD = -lesmtp -lssl -lcrypto -lpthread
endif
if BUILD_PLUGIN_NTPD
endif
perl_la_LDFLAGS = $(PLUGIN_LDFLAGS) \
$(PERL_LDFLAGS)
+perl_la_LIBADD = $(PERL_LIBS)
endif
if BUILD_PLUGIN_PF
utils_format_json.c utils_format_json.h \
utils_cmd_putval.c utils_cmd_putval.h \
utils_crc32.c utils_crc32.h
+write_kafka_la_CPPFLAGS = $(AM_CPPFLAGS) $(BUILD_WITH_LIBRDKAFKA_CPPFLAGS)
write_kafka_la_LDFLAGS = $(PLUGIN_LDFLAGS) $(BUILD_WITH_LIBRDKAFKA_LDFLAGS)
write_kafka_la_LIBADD = $(BUILD_WITH_LIBRDKAFKA_LIBS)
endif
write_riemann_la_LIBADD = -lprotobuf-c
endif
+if BUILD_PLUGIN_WRITE_SENSU
+pkglib_LTLIBRARIES += write_sensu.la
+write_sensu_la_SOURCES = write_sensu.c
+write_sensu_la_LDFLAGS = $(PLUGIN_LDFLAGS)
+endif
+
if BUILD_PLUGIN_WRITE_TSDB
pkglib_LTLIBRARIES += write_tsdb.la
write_tsdb_la_SOURCES = write_tsdb.c
test_common_SOURCES = tests/test_common.c \
daemon/common.h daemon/common.c \
+ tests/macros.h \
tests/mock/plugin.c \
tests/mock/utils_cache.c \
tests/mock/utils_time.c
char *exchange;
char *routing_key;
+ /* Number of seconds to wait before connection is retried */
+ int connection_retry_delay;
+
/* publish only */
uint8_t delivery_mode;
_Bool store_rates;
/* type = */ amqp_cstring_bytes (conf->exchange_type),
/* passive = */ 0,
/* durable = */ 0,
+#if defined(AMQP_VERSION) && AMQP_VERSION >= 0x00060000
+ /* auto delete = */ 0,
+ /* internal = */ 0,
+#endif
/* arguments = */ argument_table);
if ((ed_ret == NULL) && camqp_is_error (conf))
{
static int camqp_connect (camqp_config_t *conf) /* {{{ */
{
+ static time_t last_connect_time = 0;
+
amqp_rpc_reply_t reply;
int status;
#ifdef HAVE_AMQP_TCP_SOCKET
if (conf->connection != NULL)
return (0);
+ time_t now = time(NULL);
+ if (now < (last_connect_time + conf->connection_retry_delay))
+ {
+ DEBUG("amqp plugin: skipping connection retry, "
+ "ConnectionRetryDelay: %d", conf->connection_retry_delay);
+ return(1);
+ }
+ else
+ {
+ DEBUG ("amqp plugin: retrying connection");
+ last_connect_time = now;
+ }
+
conf->connection = amqp_new_connection ();
if (conf->connection == NULL)
{
conf->password = NULL;
conf->exchange = NULL;
conf->routing_key = NULL;
+ conf->connection_retry_delay = 0;
+
/* publish only */
conf->delivery_mode = CAMQP_DM_VOLATILE;
conf->store_rates = 0;
conf->escape_char = tmp_buff[0];
sfree (tmp_buff);
}
+ else if (strcasecmp ("ConnectionRetryDelay", child->key) == 0)
+ status = cf_util_get_int (child, &conf->connection_retry_delay);
else
WARNING ("amqp plugin: Ignoring unknown "
"configuration option \"%s\".", child->key);
_Bool verify_peer;
_Bool verify_host;
char *cacert;
+ char *ssl_ciphers;
char *server; /* user specific server type */
char *apache_buffer;
char apache_curl_error[CURL_ERROR_SIZE];
size_t apache_buffer_size;
size_t apache_buffer_fill;
+ int timeout;
CURL *curl;
}; /* apache_s */
sfree (st->user);
sfree (st->pass);
sfree (st->cacert);
+ sfree (st->ssl_ciphers);
sfree (st->server);
sfree (st->apache_buffer);
if (st->curl) {
}
memset (st, 0, sizeof (*st));
+ st->timeout = -1;
+
status = cf_util_get_string (ci, &st->name);
if (status != 0)
{
status = cf_util_get_boolean (child, &st->verify_host);
else if (strcasecmp ("CACert", child->key) == 0)
status = cf_util_get_string (child, &st->cacert);
+ else if (strcasecmp ("SSLCiphers", child->key) == 0)
+ status = cf_util_get_string (child, &st->ssl_ciphers);
else if (strcasecmp ("Server", child->key) == 0)
status = cf_util_get_string (child, &st->server);
+ else if (strcasecmp ("Timeout", child->key) == 0)
+ status = cf_util_get_int (child, &st->timeout);
else
{
WARNING ("apache plugin: Option `%s' not allowed here.",
/* initialize curl for each host */
static int init_host (apache_t *st) /* {{{ */
{
- static char credentials[1024];
-
assert (st->url != NULL);
/* (Assured by `config_add') */
if (st->user != NULL)
{
+#ifdef HAVE_CURLOPT_USERNAME
+ curl_easy_setopt (st->curl, CURLOPT_USERNAME, st->user);
+ curl_easy_setopt (st->curl, CURLOPT_PASSWORD,
+ (st->pass == NULL) ? "" : st->pass);
+#else
+ static char credentials[1024];
int status;
status = ssnprintf (credentials, sizeof (credentials), "%s:%s",
}
curl_easy_setopt (st->curl, CURLOPT_USERPWD, credentials);
+#endif
}
curl_easy_setopt (st->curl, CURLOPT_URL, st->url);
st->verify_host ? 2L : 0L);
if (st->cacert != NULL)
curl_easy_setopt (st->curl, CURLOPT_CAINFO, st->cacert);
+ if (st->ssl_ciphers != NULL)
+ curl_easy_setopt (st->curl, CURLOPT_SSL_CIPHER_LIST,st->ssl_ciphers);
+
+#ifdef HAVE_CURLOPT_TIMEOUT_MS
+ if (st->timeout >= 0)
+ curl_easy_setopt (st->curl, CURLOPT_TIMEOUT_MS, (long) st->timeout);
+ else
+ curl_easy_setopt (st->curl, CURLOPT_TIMEOUT_MS,
+ CDTIME_T_TO_MS(plugin_get_interval()));
+#endif
return (0);
} /* }}} int init_host */
static char *verify_peer = NULL;
static char *verify_host = NULL;
static char *cacert = NULL;
+static char *timeout = NULL;
static CURL *curl = NULL;
"Password",
"VerifyPeer",
"VerifyHost",
- "CACert"
+ "CACert",
+ "Timeout",
};
static int config_keys_num = STATIC_ARRAY_SIZE (config_keys);
return (config_set (&verify_host, value));
else if (strcasecmp (key, "CACert") == 0)
return (config_set (&cacert, value));
+ else if (strcasecmp (key, "Timeout") == 0)
+ return (config_set (&timeout, value));
else
return (-1);
} /* }}} int ascent_config */
static int ascent_init (void) /* {{{ */
{
- static char credentials[1024];
-
if (url == NULL)
{
WARNING ("ascent plugin: ascent_init: No URL configured, "
if (user != NULL)
{
+#ifdef HAVE_CURLOPT_USERNAME
+ curl_easy_setopt (curl, CURLOPT_USERNAME, user);
+ curl_easy_setopt (curl, CURLOPT_PASSWORD, (pass == NULL) ? "" : pass);
+#else
+ static char credentials[1024];
int status;
status = ssnprintf (credentials, sizeof (credentials), "%s:%s",
}
curl_easy_setopt (curl, CURLOPT_USERPWD, credentials);
+#endif
}
curl_easy_setopt (curl, CURLOPT_URL, url);
if (cacert != NULL)
curl_easy_setopt (curl, CURLOPT_CAINFO, cacert);
+#ifdef HAVE_CURLOPT_TIMEOUT_MS
+ if (timeout != NULL)
+ curl_easy_setopt (curl, CURLOPT_TIMEOUT_MS, atol(timeout));
+ else
+ curl_easy_setopt (curl, CURLOPT_TIMEOUT_MS,
+ CDTIME_T_TO_MS(plugin_get_interval()));
+#endif
+
return (0);
} /* }}} int ascent_init */
#define MPL3115_NUM_CONV_VALS 5
+/* ------------ BMP085 defines ------------ */
+/* I2C address of the BMP085 sensor */
+#define BMP085_I2C_ADDRESS 0x77
+
+/* register addresses */
+#define BMP085_ADDR_ID_REG 0xD0
+#define BMP085_ADDR_VERSION 0xD1
+
+#define BMP085_ADDR_CONV 0xF6
+
+#define BMP085_ADDR_CTRL_REG 0xF4
+#define BMP085_ADDR_COEFFS 0xAA
+
+/* register sizes */
+#define BMP085_NUM_COEFFS 22
+
+/* commands, values */
+#define BMP085_CHIP_ID 0x55
+
+#define BMP085_CMD_CONVERT_TEMP 0x2E
+
+#define BMP085_CMD_CONVERT_PRESS_0 0x34
+#define BMP085_CMD_CONVERT_PRESS_1 0x74
+#define BMP085_CMD_CONVERT_PRESS_2 0xB4
+#define BMP085_CMD_CONVERT_PRESS_3 0xF4
+
+/* conversion times, in microseconds */
+#define BMP085_TIME_CNV_TEMP 4500
+
+#define BMP085_TIME_CNV_PRESS_0 4500
+#define BMP085_TIME_CNV_PRESS_1 7500
+#define BMP085_TIME_CNV_PRESS_2 13500
+#define BMP085_TIME_CNV_PRESS_3 25500
+
+
/* ------------ Normalization ------------ */
/* Mean sea level pressure normalization methods */
#define MSLP_NONE 0
/** Temperature reference history depth for averaging. See #get_reference_temperature */
#define REF_TEMP_AVG_NUM 5
+
/* ------------------------------------------ */
+
+/** Supported sensor types */
+enum Sensor_type {
+ Sensor_none = 0,
+ Sensor_MPL115,
+ Sensor_MPL3115,
+ Sensor_BMP085
+};
+
static const char *config_keys[] =
{
"Device",
static _Bool configured = 0; /**< the whole plugin config status */
static int i2c_bus_fd = -1; /**< I2C bus device FD */
-
-static _Bool is_MPL3115 = 0; /**< is this MPL3115? */
-static __s32 oversample_MPL3115 = 0; /**< MPL3115 CTRL1 oversample setting */
+
+static enum Sensor_type sensor_type = Sensor_none; /**< detected/used sensor type */
+
+static __s32 mpl3115_oversample = 0; /**< MPL3115 CTRL1 oversample setting */
+
+// BMP085 configuration
+static unsigned bmp085_oversampling; /**< BMP085 oversampling (0-3) */
+static unsigned long bmp085_timeCnvPress; /**< BMP085 conversion time for pressure in us */
+static __u8 bmp085_cmdCnvPress; /**< BMP085 pressure conversion command */
/* MPL115 conversion coefficients */
static double mpl115_coeffC11;
static double mpl115_coeffC22;
+/* BMP085 conversion coefficients */
+static short bmp085_AC1;
+static short bmp085_AC2;
+static short bmp085_AC3;
+static unsigned short bmp085_AC4;
+static unsigned short bmp085_AC5;
+static unsigned short bmp085_AC6;
+static short bmp085_B1;
+static short bmp085_B2;
+static short bmp085_MB;
+static short bmp085_MC;
+static short bmp085_MD;
+
+
+
/* ------------------------ averaging ring buffer ------------------------ */
/* Used only for MPL115. MPL3115 supports real oversampling in the device so */
/* no need for any postprocessing. */
return 0;
}
+
/* ------------------------ MPL115 access ------------------------ */
/**
+ * Detect presence of a MPL115 pressure sensor.
+ *
+ * Unfortunately there seems to be no ID register so we just try to read first
+ * conversion coefficient from device at MPL115 address and hope it is really
+ * MPL115. We should use this check as the last resort (which would be the typical
+ * case anyway since MPL115 is the least accurate sensor).
+ * As a side effect, this leaves the I2C slave address set.
+ *
+ * @return 1 if MPL115, 0 otherwise
+ */
+static int MPL115_detect(void)
+{
+ __s32 res;
+ char errbuf[1024];
+
+ if (ioctl(i2c_bus_fd, I2C_SLAVE_FORCE, MPL115_I2C_ADDRESS) < 0)
+ {
+ ERROR("barometer: MPL115_detect problem setting i2c slave address to 0x%02X: %s",
+ MPL115_I2C_ADDRESS,
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return 0 ;
+ }
+
+ res = i2c_smbus_read_byte_data(i2c_bus_fd, MPL115_ADDR_COEFFS);
+ if(res >= 0)
+ {
+ DEBUG ("barometer: MPL115_detect - positive detection");
+ return 1;
+ }
+
+ DEBUG ("barometer: MPL115_detect - negative detection");
+ return 0;
+}
+
+/**
* Read the MPL115 sensor conversion coefficients.
*
* These are (device specific) constants so we can read them just once.
mpl115_coeffs);
if (res < 0)
{
- ERROR ("barometer: read_mpl115_coeffs - problem reading data: %s",
+ ERROR ("barometer: MPL115_read_coeffs - problem reading data: %s",
sstrerror (errno, errbuf, sizeof (errbuf)));
return -1;
}
mpl115_coeffC22 /= 32.0; //16-11=5
mpl115_coeffC22 /= 33554432.0; /* 10+15=25 fract */
- DEBUG("barometer: read_mpl115_coeffs: a0=%lf, b1=%lf, b2=%lf, c12=%lf, c11=%lf, c22=%lf",
+ DEBUG("barometer: MPL115_read_coeffs: a0=%lf, b1=%lf, b2=%lf, c12=%lf, c11=%lf, c22=%lf",
mpl115_coeffA0,
mpl115_coeffB1,
mpl115_coeffB2,
}
-/*
+/**
* Convert raw adc values to real data using the sensor coefficients.
*
* @param adc_pressure adc pressure value to be converted
*pressure = ((1150.0-500.0) * Pcomp / 1023.0) + 500.0;
*temperature = (472.0 - adc_temp) / 5.35 + 25.0;
- DEBUG ("barometer: convert_adc_to_real - got %lf hPa, %lf C",
+ DEBUG ("barometer: MPL115_convert_adc_to_real - got %lf hPa, %lf C",
*pressure,
*temperature);
}
/**
* Detect presence of a MPL3115 pressure sensor by checking register "WHO AM I"
+ *
+ * As a side effect, this leaves the I2C slave address set.
*
* @return 1 if MPL3115, 0 otherwise
*/
static int MPL3115_detect(void)
{
__s32 res;
+ char errbuf[1024];
+
+ if (ioctl(i2c_bus_fd, I2C_SLAVE_FORCE, MPL3115_I2C_ADDRESS) < 0)
+ {
+ ERROR("barometer: MPL3115_detect problem setting i2c slave address to 0x%02X: %s",
+ MPL3115_I2C_ADDRESS,
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return 0 ;
+ }
res = i2c_smbus_read_byte_data(i2c_bus_fd, MPL3115_REG_WHO_AM_I);
if(res == MPL3115_WHO_AM_I_RESP)
if(config_oversample > 100)
{
new_val = 128;
- oversample_MPL3115 = MPL3115_CTRL_REG1_OST_128;
+ mpl3115_oversample = MPL3115_CTRL_REG1_OST_128;
}
else if(config_oversample > 48)
{
new_val = 64;
- oversample_MPL3115 = MPL3115_CTRL_REG1_OST_64;
+ mpl3115_oversample = MPL3115_CTRL_REG1_OST_64;
}
else if(config_oversample > 24)
{
new_val = 32;
- oversample_MPL3115 = MPL3115_CTRL_REG1_OST_32;
+ mpl3115_oversample = MPL3115_CTRL_REG1_OST_32;
}
else if(config_oversample > 12)
{
new_val = 16;
- oversample_MPL3115 = MPL3115_CTRL_REG1_OST_16;
+ mpl3115_oversample = MPL3115_CTRL_REG1_OST_16;
}
else if(config_oversample > 6)
{
new_val = 8;
- oversample_MPL3115 = MPL3115_CTRL_REG1_OST_8;
+ mpl3115_oversample = MPL3115_CTRL_REG1_OST_8;
}
else if(config_oversample > 3)
{
new_val = 4;
- oversample_MPL3115 = MPL3115_CTRL_REG1_OST_4;
+ mpl3115_oversample = MPL3115_CTRL_REG1_OST_4;
}
else if(config_oversample > 1)
{
new_val = 2;
- oversample_MPL3115 = MPL3115_CTRL_REG1_OST_2;
+ mpl3115_oversample = MPL3115_CTRL_REG1_OST_2;
}
else
{
new_val = 1;
- oversample_MPL3115 = MPL3115_CTRL_REG1_OST_1;
+ mpl3115_oversample = MPL3115_CTRL_REG1_OST_1;
}
- DEBUG("barometer: correcting oversampling for MPL3115 from %d to %d",
+ DEBUG("barometer: MPL3115_adjust_oversampling - correcting oversampling from %d to %d",
config_oversample,
new_val);
config_oversample = new_val;
tmp_value = (data[0] << 16) | (data[1] << 8) | data[2];
*pressure = ((double) tmp_value) / 4.0 / 16.0 / 100.0;
- DEBUG ("barometer: MPL3115_read, absolute pressure = %lf hPa", *pressure);
+ DEBUG ("barometer: MPL3115_read - absolute pressure = %lf hPa", *pressure);
if(data[3] > 0x7F)
{
}
*temperature += (double)(data[4]) / 256.0;
- DEBUG ("barometer: MPL3115_read, temperature = %lf C", *temperature);
+ DEBUG ("barometer: MPL3115_read - temperature = %lf C", *temperature);
return 0;
}
/* Set to barometer with an OSR */
res = i2c_smbus_write_byte_data(i2c_bus_fd,
MPL3115_REG_CTRL_REG1,
- oversample_MPL3115);
+ mpl3115_oversample);
if (res < 0)
{
ERROR ("barometer: MPL3115_init_sensor - problem configuring CTRL_REG1: %s",
return 0;
}
+/* ------------------------ BMP085 access ------------------------ */
+
+/**
+ * Detect presence of a BMP085 pressure sensor by checking its ID register
+ *
+ * As a side effect, this leaves the I2C slave address set.
+ *
+ * @return 1 if BMP085, 0 otherwise
+ */
+static int BMP085_detect(void)
+{
+ __s32 res;
+ char errbuf[1024];
+
+ if (ioctl(i2c_bus_fd, I2C_SLAVE_FORCE, BMP085_I2C_ADDRESS) < 0)
+ {
+ ERROR("barometer: BMP085_detect - problem setting i2c slave address to 0x%02X: %s",
+ BMP085_I2C_ADDRESS,
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return 0 ;
+ }
+
+ res = i2c_smbus_read_byte_data(i2c_bus_fd, BMP085_ADDR_ID_REG);
+ if(res == BMP085_CHIP_ID)
+ {
+ DEBUG ("barometer: BMP085_detect - positive detection");
+
+ /* get version */
+ res = i2c_smbus_read_byte_data(i2c_bus_fd, BMP085_ADDR_VERSION );
+ if (res < 0)
+ {
+ ERROR("barometer: BMP085_detect - problem checking chip version: %s",
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return 0 ;
+ }
+ DEBUG ("barometer: BMP085_detect - chip version ML:0x%02X AL:0x%02X",
+ res & 0x0f,
+ (res & 0xf0) >> 4);
+ return 1;
+ }
+
+ DEBUG ("barometer: BMP085_detect - negative detection");
+ return 0;
+}
+
+
+/**
+ * Adjusts oversampling settings to values supported by BMP085
+ *
+ * BMP085 supports only 1,2,4 or 8 samples.
+ */
+static void BMP085_adjust_oversampling(void)
+{
+ int new_val = 0;
+
+ if( config_oversample > 6 ) /* 8 */
+ {
+ new_val = 8;
+ bmp085_oversampling = 3;
+ bmp085_cmdCnvPress = BMP085_CMD_CONVERT_PRESS_3;
+ bmp085_timeCnvPress = BMP085_TIME_CNV_PRESS_3;
+ }
+ else if( config_oversample > 3 ) /* 4 */
+ {
+ new_val = 4;
+ bmp085_oversampling = 2;
+ bmp085_cmdCnvPress = BMP085_CMD_CONVERT_PRESS_2;
+ bmp085_timeCnvPress = BMP085_TIME_CNV_PRESS_2;
+ }
+ else if( config_oversample > 1 ) /* 2 */
+ {
+ new_val = 2;
+ bmp085_oversampling = 1;
+ bmp085_cmdCnvPress = BMP085_CMD_CONVERT_PRESS_1;
+ bmp085_timeCnvPress = BMP085_TIME_CNV_PRESS_1;
+ }
+ else /* 1 */
+ {
+ new_val = 1;
+ bmp085_oversampling = 0;
+ bmp085_cmdCnvPress = BMP085_CMD_CONVERT_PRESS_0;
+ bmp085_timeCnvPress = BMP085_TIME_CNV_PRESS_0;
+ }
+
+ DEBUG("barometer: BMP085_adjust_oversampling - correcting oversampling from %d to %d",
+ config_oversample,
+ new_val);
+ config_oversample = new_val;
+}
+
+
+/**
+ * Read the BMP085 sensor conversion coefficients.
+ *
+ * These are (device specific) constants so we can read them just once.
+ *
+ * @return Zero when successful
+ */
+static int BMP085_read_coeffs(void)
+{
+ __s32 res;
+ __u8 coeffs[BMP085_NUM_COEFFS];
+ char errbuf[1024];
+
+ res = i2c_smbus_read_i2c_block_data(i2c_bus_fd,
+ BMP085_ADDR_COEFFS,
+ BMP085_NUM_COEFFS,
+ coeffs);
+ if (res < 0)
+ {
+ ERROR ("barometer: BMP085_read_coeffs - problem reading data: %s",
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return -1;
+ }
+
+ bmp085_AC1 = ((int16_t) coeffs[0] <<8) | (int16_t) coeffs[1];
+ bmp085_AC2 = ((int16_t) coeffs[2] <<8) | (int16_t) coeffs[3];
+ bmp085_AC3 = ((int16_t) coeffs[4] <<8) | (int16_t) coeffs[5];
+ bmp085_AC4 = ((uint16_t) coeffs[6] <<8) | (uint16_t) coeffs[7];
+ bmp085_AC5 = ((uint16_t) coeffs[8] <<8) | (uint16_t) coeffs[9];
+ bmp085_AC6 = ((uint16_t) coeffs[10] <<8) | (uint16_t) coeffs[11];
+ bmp085_B1 = ((int16_t) coeffs[12] <<8) | (int16_t) coeffs[13];
+ bmp085_B2 = ((int16_t) coeffs[14] <<8) | (int16_t) coeffs[15];
+ bmp085_MB = ((int16_t) coeffs[16] <<8) | (int16_t) coeffs[17];
+ bmp085_MC = ((int16_t) coeffs[18] <<8) | (int16_t) coeffs[19];
+ bmp085_MD = ((int16_t) coeffs[20] <<8) | (int16_t) coeffs[21];
+
+ DEBUG("barometer: BMP085_read_coeffs - AC1=%d, AC2=%d, AC3=%d, AC4=%u,"\
+ " AC5=%u, AC6=%u, B1=%d, B2=%d, MB=%d, MC=%d, MD=%d",
+ bmp085_AC1,
+ bmp085_AC2,
+ bmp085_AC3,
+ bmp085_AC4,
+ bmp085_AC5,
+ bmp085_AC6,
+ bmp085_B1,
+ bmp085_B2,
+ bmp085_MB,
+ bmp085_MC,
+ bmp085_MD);
+
+ return 0;
+}
+
+
+/**
+ * Convert raw BMP085 adc values to real data using the sensor coefficients.
+ *
+ * @param adc_pressure adc pressure value to be converted
+ * @param adc_temp adc temperature value to be converted
+ * @param pressure computed real pressure
+ * @param temperature computed real temperature
+ */
+static void BMP085_convert_adc_to_real(long adc_pressure,
+ long adc_temperature,
+ double * pressure,
+ double * temperature)
+
+{
+ long X1, X2, X3;
+ long B3, B5, B6;
+ unsigned long B4, B7;
+
+ long T;
+ long P;
+
+
+ /* calculate real temperature */
+ X1 = ( (adc_temperature - bmp085_AC6) * bmp085_AC5) >> 15;
+ X2 = (bmp085_MC << 11) / (X1 + bmp085_MD);
+
+ /* B5, T */
+ B5 = X1 + X2;
+ T = (B5 + 8) >> 4;
+ *temperature = (double)T * 0.1;
+
+ /* calculate real pressure */
+ /* in general X1, X2, X3 are recycled while values of B3, B4, B5, B6 are kept */
+
+ /* B6, B3 */
+ B6 = B5 - 4000;
+ X1 = ((bmp085_B2 * ((B6 * B6)>>12)) >> 11 );
+ X2 = (((long)bmp085_AC2 * B6) >> 11);
+ X3 = X1 + X2;
+ B3 = (((((long)bmp085_AC1 * 4) + X3) << bmp085_oversampling) + 2) >> 2;
+
+ /* B4 */
+ X1 = (((long)bmp085_AC3*B6) >> 13);
+ X2 = (bmp085_B1*((B6*B6) >> 12) ) >> 16;
+ X3 = ((X1 + X2) + 2 ) >> 2;
+ B4 = ((long)bmp085_AC4* (unsigned long)(X3 + 32768)) >> 15;
+
+ /* B7, P */
+ B7 = (unsigned long)(adc_pressure - B3)*(50000>>bmp085_oversampling);
+ if( B7 < 0x80000000 )
+ {
+ P = (B7 << 1) / B4;
+ }
+ else
+ {
+ P = (B7/B4) << 1;
+ }
+ X1 = (P >> 8) * (P >> 8);
+ X1 = (X1 * 3038) >> 16;
+ X2 = ((-7357) * P) >> 16;
+ P = P + ( ( X1 + X2 + 3791 ) >> 4);
+
+ *pressure = P / 100.0; // in [hPa]
+ DEBUG ("barometer: BMP085_convert_adc_to_real - got %lf hPa, %lf C",
+ *pressure,
+ *temperature);
+}
+
+
+/**
+ * Read compensated sensor measurements
+ *
+ * @param pressure averaged measured pressure
+ * @param temperature averaged measured temperature
+ *
+ * @return Zero when successful
+ */
+static int BMP085_read(double * pressure, double * temperature)
+{
+ __s32 res;
+ __u8 measBuff[3];
+
+ long adc_pressure;
+ long adc_temperature;
+
+ char errbuf[1024];
+
+ /* start conversion of temperature */
+ res = i2c_smbus_write_byte_data( i2c_bus_fd,
+ BMP085_ADDR_CTRL_REG,
+ BMP085_CMD_CONVERT_TEMP );
+ if (res < 0)
+ {
+ ERROR ("barometer: BMP085_read - problem requesting temperature conversion: %s",
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return 1;
+ }
+
+ usleep(BMP085_TIME_CNV_TEMP); /* wait for the conversion */
+
+ res = i2c_smbus_read_i2c_block_data( i2c_bus_fd,
+ BMP085_ADDR_CONV,
+ 2,
+ measBuff);
+ if (res < 0)
+ {
+ ERROR ("barometer: BMP085_read - problem reading temperature data: %s",
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return 1;
+ }
+
+ adc_temperature = ( (unsigned short)measBuff[0] << 8 ) + measBuff[1];
+
+
+    /* get pressure */
+ res = i2c_smbus_write_byte_data( i2c_bus_fd,
+ BMP085_ADDR_CTRL_REG,
+ bmp085_cmdCnvPress );
+ if (res < 0)
+ {
+ ERROR ("barometer: BMP085_read - problem requesting pressure conversion: %s",
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return 1;
+ }
+
+ usleep(bmp085_timeCnvPress); /* wait for the conversion */
+
+ res = i2c_smbus_read_i2c_block_data( i2c_bus_fd,
+ BMP085_ADDR_CONV,
+ 3,
+ measBuff );
+ if (res < 0)
+ {
+ ERROR ("barometer: BMP085_read - problem reading pressure data: %s",
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return 1;
+ }
+
+ adc_pressure = (long)((((ulong)measBuff[0]<<16) | ((ulong)measBuff[1]<<8) | (ulong)measBuff[2] ) >> (8 - bmp085_oversampling));
+
+
+ DEBUG ("barometer: BMP085_read - raw pressure ADC value = %ld, " \
+ "raw temperature ADC value = %ld",
+ adc_pressure,
+ adc_temperature);
+
+ BMP085_convert_adc_to_real(adc_pressure, adc_temperature, pressure, temperature);
+
+ return 0;
+}
+
+
+
+/* ------------------------ Sensor detection ------------------------ */
+/**
+ * Detect presence of a supported sensor.
+ *
+ * As a side effect, this leaves the I2C slave address set.
+ * The detection is done in the order BMP085, MPL3115, MPL115 and stops after
+ * the first sensor being found.
+ *
+ * @return detected sensor type
+ */
+enum Sensor_type Detect_sensor_type(void)
+{
+ if(BMP085_detect())
+ return Sensor_BMP085;
+
+ else if(MPL3115_detect())
+ return Sensor_MPL3115;
+
+ else if(MPL115_detect())
+ return Sensor_MPL115;
+
+ return Sensor_none;
+}
/* ------------------------ Common functionality ------------------------ */
double temp = 0.0;
int result = 0;
- DEBUG ("barometer: abs_to_mean_sea_level_pressure: absPressure = %lf, method = %d",
- abs_pressure,
- config_normalize);
-
if (config_normalize >= MSLP_DEU_WETT)
{
result = get_reference_temperature(&temp);
case MSLP_INTERNATIONAL:
mean = abs_pressure / \
- pow(1.0 - 0.0065*config_altitude/288.15, 0.0065*0.0289644/(8.31447*0.0065));
+ pow(1.0 - 0.0065*config_altitude/288.15, 9.80665*0.0289644/(8.31447*0.0065));
break;
case MSLP_DEU_WETT:
break;
}
+ DEBUG ("barometer: abs_to_mean_sea_level_pressure: absPressure = %lf hPa, method = %d, meanPressure = %lf hPa",
+ abs_pressure,
+ config_normalize,
+ mean);
+
return mean;
}
if (oversampling_tmp < 1 || oversampling_tmp > 1024)
{
WARNING ("barometer: collectd_barometer_config: invalid oversampling: %d." \
- " Allowed values are 1 to 1024 (for MPL115) or 128 (for MPL3115).",
+ " Allowed values are 1 to 1024 (for MPL115) or 1 to 128 (for MPL3115) or 1 to 8 (for BMP085).",
oversampling_tmp);
return 1;
}
{
DEBUG ("barometer: collectd_barometer_shutdown");
- if(!is_MPL3115)
+ if(sensor_type == Sensor_MPL115)
{
averaging_delete (&pressure_averaging);
averaging_delete (&temperature_averaging);
/**
+ * Plugin read callback for BMP085.
+ *
+ * Dispatching will create values:
+ * - <hostname>/barometer-bmp085/pressure-normalized
+ * - <hostname>/barometer-bmp085/pressure-absolute
+ * - <hostname>/barometer-bmp085/temperature
+ *
+ * @return Zero when successful.
+ */
+static int BMP085_collectd_barometer_read (void)
+{
+ int result = 0;
+
+ double pressure = 0.0;
+ double temperature = 0.0;
+ double norm_pressure = 0.0;
+
+ value_list_t vl = VALUE_LIST_INIT;
+ value_t values[1];
+
+ DEBUG("barometer: BMP085_collectd_barometer_read");
+
+ if (!configured)
+ {
+ return -1;
+ }
+
+ result = BMP085_read(&pressure, &temperature);
+ if(result)
+ return result;
+
+ norm_pressure = abs_to_mean_sea_level_pressure(pressure);
+
+ sstrncpy (vl.host, hostname_g, sizeof (vl.host));
+ sstrncpy (vl.plugin, "barometer", sizeof (vl.plugin));
+ sstrncpy (vl.plugin_instance, "bmp085", sizeof (vl.plugin_instance));
+
+ vl.values_len = 1;
+ vl.values = values;
+
+ /* dispatch normalized air pressure */
+ sstrncpy (vl.type, "pressure", sizeof (vl.type));
+ sstrncpy (vl.type_instance, "normalized", sizeof (vl.type_instance));
+ values[0].gauge = norm_pressure;
+ plugin_dispatch_values (&vl);
+
+ /* dispatch absolute air pressure */
+ sstrncpy (vl.type, "pressure", sizeof (vl.type));
+ sstrncpy (vl.type_instance, "absolute", sizeof (vl.type_instance));
+ values[0].gauge = pressure;
+ plugin_dispatch_values (&vl);
+
+ /* dispatch sensor temperature */
+ sstrncpy (vl.type, "temperature", sizeof (vl.type));
+ sstrncpy (vl.type_instance, "", sizeof (vl.type_instance));
+ values[0].gauge = temperature;
+ plugin_dispatch_values (&vl);
+
+ return 0;
+}
+
+
+/**
* Initialization callback
*
* Check config, initialize I2C bus access, conversion coefficients and averaging
return -1;
}
- if (ioctl(i2c_bus_fd, I2C_SLAVE_FORCE, MPL115_I2C_ADDRESS) < 0)
- {
- ERROR("barometer: collectd_barometer_init problem setting i2c slave address to 0x%02X: %s",
- MPL115_I2C_ADDRESS,
- sstrerror (errno, errbuf, sizeof (errbuf)));
- return -1;
- }
-
- /* detect sensor type - MPL115 or MPL3115 */
- is_MPL3115 = MPL3115_detect();
+ /* detect sensor type - this will also set slave address */
+ sensor_type = Detect_sensor_type();
/* init correct sensor type */
- if(is_MPL3115) /* MPL3115 */
+ switch(sensor_type)
+ {
+/* MPL3115 */
+ case Sensor_MPL3115:
{
MPL3115_adjust_oversampling();
-
+
if(MPL3115_init_sensor())
return -1;
-
+
plugin_register_read ("barometer", MPL3115_collectd_barometer_read);
}
- else /* MPL115 */
+ break;
+
+/* MPL115 */
+ case Sensor_MPL115:
{
if (averaging_create (&pressure_averaging, config_oversample))
{
if (MPL115_read_coeffs() < 0)
return -1;
-
+
plugin_register_read ("barometer", MPL115_collectd_barometer_read);
}
+ break;
+
+/* BMP085 */
+ case Sensor_BMP085:
+ {
+ BMP085_adjust_oversampling();
+
+ if (BMP085_read_coeffs() < 0)
+ return -1;
+
+ plugin_register_read ("barometer", BMP085_collectd_barometer_read);
+ }
+ break;
+
+/* anything else -> error */
+ default:
+ ERROR("barometer: collectd_barometer_init - no supported sensor found");
+ return -1;
+ }
+
configured = 1;
return 0;
if (fgets (buffer, buffer_size, fp) == NULL)
{
- char errbuf[1024];
status = errno;
- WARNING ("battery plugin: fgets failed: %s",
- sstrerror (status, errbuf, sizeof (errbuf)));
+ if (status != ENODEV)
+ {
+ char errbuf[1024];
+ WARNING ("battery plugin: fgets (%s) failed: %s", filename,
+ sstrerror (status, errbuf, sizeof (errbuf)));
+ }
fclose (fp);
return status;
}
v *= -1.0;
battery_submit (plugin_instance, "power", v * SYSFS_FACTOR);
}
+ if (sysfs_file_to_gauge (dir, power_supply, "current_now", &v) == 0)
+ {
+ if (discharging)
+ v *= -1.0;
+ battery_submit (plugin_instance, "current", v * SYSFS_FACTOR);
+ }
if (sysfs_file_to_gauge (dir, power_supply, "voltage_now", &v) == 0)
battery_submit (plugin_instance, "voltage", v * SYSFS_FACTOR);
-#if 0
- if (sysfs_file_to_gauge (dir, power_supply, "voltage_min_design", &v) == 0)
- battery_submit (plugin_instance, "voltage", v * SYSFS_FACTOR);
-#endif
return (0);
} /* }}} int read_sysfs_callback */
static int global_zone_maint_stats = 1;
static int global_resolver_stats = 0;
static int global_memory_stats = 1;
+static int timeout = -1;
static cb_view_t *views = NULL;
static size_t views_num = 0;
if (type_instance) {
sstrncpy(vl.type_instance, type_instance,
sizeof(vl.type_instance));
- replace_special (vl.plugin_instance, sizeof (vl.plugin_instance));
+ replace_special (vl.type_instance, sizeof (vl.type_instance));
}
plugin_dispatch_values(&vl);
} /* }}} void submit */
{
ERROR ("bind plugin: Parsing string \"%s\" to derive value failed.",
str_ptr);
+ xmlFree(str_ptr);
return (-1);
}
+ xmlFree(str_ptr);
*ret_value = value.derive;
return (0);
} /* }}} int bind_xml_read_derive */
int i;
size_t j;
- path_obj = xmlXPathEvalExpression (BAD_CAST "name", path_ctx);
- if (path_obj == NULL)
+ if (version >= 3)
{
- ERROR ("bind plugin: xmlXPathEvalExpression failed.");
- return (-1);
+ char *n = (char *) xmlGetProp (node, BAD_CAST "name");
+ char *c = (char *) xmlGetProp (node, BAD_CAST "rdataclass");
+ if (n && c)
+ {
+ zone_name = (char *) xmlMalloc(strlen(n) + strlen(c) + 2);
+ snprintf(zone_name, strlen(n) + strlen(c) + 2, "%s/%s", n, c);
+ }
+ xmlFree(n);
+ xmlFree(c);
}
-
- for (i = 0; path_obj->nodesetval && (i < path_obj->nodesetval->nodeNr); i++)
+ else
{
- zone_name = (char *) xmlNodeListGetString (doc,
- path_obj->nodesetval->nodeTab[i]->xmlChildrenNode, 1);
- if (zone_name != NULL)
- break;
+ path_obj = xmlXPathEvalExpression (BAD_CAST "name", path_ctx);
+ if (path_obj == NULL)
+ {
+ ERROR ("bind plugin: xmlXPathEvalExpression failed.");
+ return (-1);
+ }
+
+ for (i = 0; path_obj->nodesetval && (i < path_obj->nodesetval->nodeNr); i++)
+ {
+ zone_name = (char *) xmlNodeListGetString (doc,
+ path_obj->nodesetval->nodeTab[i]->xmlChildrenNode, 1);
+ if (zone_name != NULL)
+ break;
+ }
+ xmlXPathFreeObject (path_obj);
}
if (zone_name == NULL)
{
ERROR ("bind plugin: Could not determine zone name.");
- xmlXPathFreeObject (path_obj);
return (-1);
}
zone_name = NULL;
if (j >= views->zones_num)
- {
- xmlXPathFreeObject (path_obj);
return (0);
- }
zone_name = view->zones[j];
ssnprintf (plugin_instance, sizeof (plugin_instance), "%s-zone-%s",
view->name, zone_name);
- bind_parse_generic_value_list (/* xpath = */ "counters",
+ if (version == 3)
+ {
+ list_info_ptr_t list_info =
+ {
+ plugin_instance,
+ /* type = */ "dns_qtype"
+ };
+ bind_parse_generic_name_attr_value_list (/* xpath = */ "counters[@type='rcode']",
/* callback = */ bind_xml_table_callback,
/* user_data = */ &table_ptr,
doc, path_ctx, current_time, DS_TYPE_COUNTER);
+ bind_parse_generic_name_attr_value_list (/* xpath = */ "counters[@type='qtype']",
+ /* callback = */ bind_xml_list_callback,
+ /* user_data = */ &list_info,
+ doc, path_ctx, current_time, DS_TYPE_COUNTER);
+ }
+ else
+ {
+ bind_parse_generic_value_list (/* xpath = */ "counters",
+ /* callback = */ bind_xml_table_callback,
+ /* user_data = */ &table_ptr,
+ doc, path_ctx, current_time, DS_TYPE_COUNTER);
+ }
} /* }}} */
- xmlXPathFreeObject (path_obj);
-
return (0);
} /* }}} int bind_xml_stats_handle_zone */
doc, path_ctx, current_time, DS_TYPE_GAUGE);
} /* }}} */
- // v3 does not provide per-zone stats any more
- if (version < 3 && view->zones_num > 0)
+ if (view->zones_num > 0)
bind_xml_stats_search_zones (version, doc, path_ctx, node, view,
current_time);
bind_config_add_view (child);
else if (strcasecmp ("ParseTime", child->key) == 0)
cf_util_get_boolean (child, &config_parse_time);
+ else if (strcasecmp ("Timeout", child->key) == 0)
+ cf_util_get_int (child, &timeout);
else
{
WARNING ("bind plugin: Unknown configuration option "
curl_easy_setopt (curl, CURLOPT_URL, (url != NULL) ? url : BIND_DEFAULT_URL);
curl_easy_setopt (curl, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt (curl, CURLOPT_MAXREDIRS, 50L);
+#ifdef HAVE_CURLOPT_TIMEOUT_MS
+ curl_easy_setopt (curl, CURLOPT_TIMEOUT_MS, (timeout >= 0) ?
+ (long) timeout : CDTIME_T_TO_MS(plugin_get_interval()));
+#endif
+
return (0);
} /* }}} int bind_init */
--- /dev/null
+/**
+ * collectd - src/ceph.c
+ * Copyright (C) 2011 New Dream Network
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; only version 2 of the License is applicable.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors:
+ * Colin McCabe <cmccabe@alumni.cmu.edu>
+ * Dennis Zou <yunzou@cisco.com>
+ * Dan Ryder <daryder@cisco.com>
+ **/
+
+#define _DEFAULT_SOURCE
+#define _BSD_SOURCE
+
+#include "collectd.h"
+#include "common.h"
+#include "plugin.h"
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <yajl/yajl_parse.h>
+#if HAVE_YAJL_YAJL_VERSION_H
+#include <yajl/yajl_version.h>
+#endif
+
+#include <limits.h>
+#include <poll.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <math.h>
+#include <inttypes.h>
+
+#define RETRY_AVGCOUNT -1
+
+#if defined(YAJL_MAJOR) && (YAJL_MAJOR > 1)
+# define HAVE_YAJL_V2 1
+#endif
+
+/**
+ * Evaluate `expr` repeatedly until it either succeeds (result >= 0) or
+ * fails with an errno other than EINTR. On failure `ret` ends up holding
+ * -errno; on success it holds the (non-negative) result of `expr`.
+ */
+#define RETRY_ON_EINTR(ret, expr) \
+ while(1) { \
+ ret = expr; \
+ if(ret >= 0) \
+ break; \
+ ret = -errno; \
+ if(ret != -EINTR) \
+ break; \
+ }
+
+/** Timeout interval in seconds */
+#define CEPH_TIMEOUT_INTERVAL 1
+
+/** Maximum path length for a UNIX domain socket on this system */
+#define UNIX_DOMAIN_SOCK_PATH_MAX (sizeof(((struct sockaddr_un*)0)->sun_path))
+
+/** Yajl callback returns */
+#define CEPH_CB_CONTINUE 1
+#define CEPH_CB_ABORT 0
+
+#if HAVE_YAJL_V2
+typedef size_t yajl_len_t;
+#else
+typedef unsigned int yajl_len_t;
+#endif
+
+/** Number of types for ceph defined in types.db */
+#define CEPH_DSET_TYPES_NUM 3
+/** ceph types enum */
+enum ceph_dset_type_d
+{
+ DSET_LATENCY = 0,
+ DSET_BYTES = 1,
+ DSET_RATE = 2,
+ DSET_TYPE_UNFOUND = 1000
+};
+
+/** Valid types for ceph defined in types.db */
+const char * ceph_dset_types [CEPH_DSET_TYPES_NUM] =
+ {"ceph_latency", "ceph_bytes", "ceph_rate"};
+
+/******* ceph_daemon *******/
+struct ceph_daemon
+{
+ /** Version of the admin_socket interface */
+ uint32_t version;
+ /** daemon name **/
+ char name[DATA_MAX_NAME_LEN];
+
+ /** Path to the socket that we use to talk to the ceph daemon */
+ char asok_path[UNIX_DOMAIN_SOCK_PATH_MAX];
+
+ /** Number of counters; ds_types and ds_names below are parallel
+ * arrays with ds_num entries each. */
+ int ds_num;
+ /** Track ds types */
+ uint32_t *ds_types;
+ /** Track ds names to match with types */
+ char **ds_names;
+
+ /**
+ * Keep track of last data for latency values so we can calculate rate
+ * since last poll.
+ */
+ struct last_data **last_poll_data;
+ /** index of last poll data */
+ int last_idx;
+};
+
+/******* JSON parsing *******/
+/* Handler invoked once per parsed <value, dotted-key> pair. Handlers
+ * return 0 on success; see ceph_cb_number() for the RETRY_AVGCOUNT and
+ * -ENOMEM conventions. */
+typedef int (*node_handler_t)(void *, const char*, const char*);
+
+/** Track state and handler while parsing JSON */
+struct yajl_struct
+{
+ node_handler_t handler;
+ void * handler_arg;
+ struct {
+ char key[DATA_MAX_NAME_LEN];
+ int key_len;
+ } state[YAJL_MAX_DEPTH];
+ int depth;
+};
+typedef struct yajl_struct yajl_struct;
+
+/** Bits of the perfcounter type reported by the ceph schema */
+enum perfcounter_type_d
+{
+ PERFCOUNTER_LATENCY = 0x4, PERFCOUNTER_DERIVE = 0x8,
+};
+
+/** Give user option to use default (long run = since daemon started) avg */
+static int long_run_latency_avg = 0;
+
+/**
+ * Give user option to use default type for special cases -
+ * filestore.journal_wr_bytes is currently only metric here. Ceph reports the
+ * type as a sum/count pair and will calculate it the same as a latency value.
+ * All other "bytes" metrics (excluding the used/capacity bytes for the OSD)
+ * use the DERIVE type. Unless user specifies to use given type, convert this
+ * metric to use DERIVE.
+ */
+static int convert_special_metrics = 1;
+
+/** Array of daemons to monitor; entries are heap-allocated and released
+ * via ceph_daemon_free(). */
+static struct ceph_daemon **g_daemons = NULL;
+
+/** Number of elements in g_daemons */
+static int g_num_daemons = 0;
+
+/**
+ * A set of data that we build up in memory while parsing the JSON.
+ */
+struct values_tmp
+{
+ /** ceph daemon we are processing data for*/
+ struct ceph_daemon *d;
+ /** track avgcount across counters for avgcount/sum latency pairs */
+ uint64_t avgcount;
+ /** current index of counters - used to get type of counter */
+ int index;
+ /** do we already have an avgcount for latency pair */
+ int avgcount_exists;
+ /**
+ * similar to index, but current index of latency type counters -
+ * used to get last poll data of counter
+ */
+ int latency_index;
+ /**
+ * values list - maintain across counters since
+ * host/plugin/plugin instance are always the same
+ */
+ value_list_t vlist;
+};
+
+/**
+ * A set of count/sum pairs to keep track of latency types and get difference
+ * between this poll data and last poll data.
+ */
+struct last_data
+{
+ char ds_name[DATA_MAX_NAME_LEN];
+ double last_sum;
+ uint64_t last_count;
+};
+
+/******* network I/O *******/
+/** States of the admin-socket request state machine (see cconn_handle_event) */
+enum cstate_t
+{
+ CSTATE_UNCONNECTED = 0,
+ CSTATE_WRITE_REQUEST,
+ CSTATE_READ_VERSION,
+ CSTATE_READ_AMT,
+ CSTATE_READ_JSON,
+};
+
+/** Request codes sent to the ceph admin socket */
+enum request_type_t
+{
+ ASOK_REQ_VERSION = 0,
+ ASOK_REQ_DATA = 1,
+ ASOK_REQ_SCHEMA = 2,
+ ASOK_REQ_NONE = 1000,
+};
+
+struct cconn
+{
+ /** The Ceph daemon that we're talking to */
+ struct ceph_daemon *d;
+
+ /** Request type */
+ uint32_t request_type;
+
+ /** The connection state */
+ enum cstate_t state;
+
+ /** The socket we use to talk to this daemon */
+ int asok;
+
+ /** The amount of data remaining to read / write. */
+ uint32_t amt;
+
+ /** Length of the JSON to read */
+ uint32_t json_len;
+
+ /** Buffer containing JSON data; heap-allocated, freed in cconn_close() */
+ unsigned char *json;
+
+ /** Keep data important to yajl processing */
+ struct yajl_struct yajl;
+};
+
+/* yajl callback: JSON nulls carry no counter data; skip them. */
+static int ceph_cb_null(void *ctx)
+{
+ return CEPH_CB_CONTINUE;
+}
+
+/* yajl callback: JSON booleans are likewise ignored. */
+static int ceph_cb_boolean(void *ctx, int bool_val)
+{
+ return CEPH_CB_CONTINUE;
+}
+
+/**
+ * yajl callback for numeric values: build the dotted counter name from
+ * the current key stack and hand the <value, key> pair to the registered
+ * handler. For latency (avgcount/sum) pairs the trailing component is
+ * first omitted and only re-appended if the handler asks for a retry.
+ *
+ * Returns CEPH_CB_CONTINUE, or CEPH_CB_ABORT on memory exhaustion.
+ */
+static int
+ceph_cb_number(void *ctx, const char *number_val, yajl_len_t number_len)
+{
+    yajl_struct *yajl = (yajl_struct*)ctx;
+    char buffer[number_len+1];
+    int i, latency_type = 0, result;
+    char key[128];
+    size_t key_used;
+
+    memcpy(buffer, number_val, number_len);
+    buffer[sizeof(buffer) - 1] = 0;
+
+    /* Every append below is bounded by sizeof(key). The previous
+     * strncat()-based code had no bound relative to `key` and could
+     * overflow it with deeply nested or long schema keys. */
+    ssnprintf(key, sizeof(key), "%s", yajl->state[0].key);
+    for(i = 1; i < yajl->depth; i++)
+    {
+        if((i == yajl->depth-1) && ((strcmp(yajl->state[i].key,"avgcount") == 0)
+                || (strcmp(yajl->state[i].key,"sum") == 0)))
+        {
+            /* The i-2 access below requires i > 1; without the guard a
+             * depth-2 document read state[-1]. */
+            if(convert_special_metrics && (i > 1))
+            {
+                /**
+                 * Special case for filestore:JournalWrBytes. For some reason,
+                 * Ceph schema encodes this as a count/sum pair while all
+                 * other "Bytes" data (excluding used/capacity bytes for OSD
+                 * space) uses a single "Derive" type. To spare further
+                 * confusion, keep this KPI as the same type of other "Bytes".
+                 * Instead of keeping an "average" or "rate", use the "sum" in
+                 * the pair and assign that to the derive value.
+                 */
+                if((strcmp(yajl->state[i-1].key, "journal_wr_bytes") == 0) &&
+                        (strcmp(yajl->state[i-2].key,"filestore") == 0) &&
+                        (strcmp(yajl->state[i].key,"avgcount") == 0))
+                {
+                    DEBUG("ceph plugin: Skipping avgcount for filestore.JournalWrBytes");
+                    yajl->depth = (yajl->depth - 1);
+                    return CEPH_CB_CONTINUE;
+                }
+            }
+            //probably a avgcount/sum pair. if not - we'll try full key later
+            latency_type = 1;
+            break;
+        }
+        key_used = strlen(key);
+        ssnprintf(key + key_used, sizeof(key) - key_used, ".%s",
+                yajl->state[i].key);
+    }
+
+    result = yajl->handler(yajl->handler_arg, buffer, key);
+
+    if((result == RETRY_AVGCOUNT) && latency_type)
+    {
+        /* Latency guess was wrong: retry with the full key including the
+         * trailing "avgcount"/"sum" component. */
+        key_used = strlen(key);
+        ssnprintf(key + key_used, sizeof(key) - key_used, ".%s",
+                yajl->state[yajl->depth-1].key);
+        result = yajl->handler(yajl->handler_arg, buffer, key);
+    }
+
+    if(result == -ENOMEM)
+    {
+        ERROR("ceph plugin: memory allocation failed");
+        return CEPH_CB_ABORT;
+    }
+
+    yajl->depth = (yajl->depth - 1);
+    return CEPH_CB_CONTINUE;
+}
+
+/* yajl callback: string values are ignored — all counter data is numeric. */
+static int ceph_cb_string(void *ctx, const unsigned char *string_val,
+ yajl_len_t string_len)
+{
+ return CEPH_CB_CONTINUE;
+}
+
+/* yajl callback: nothing to do on map open; keys are tracked per-key in
+ * ceph_cb_map_key() and popped in ceph_cb_end_map(). */
+static int ceph_cb_start_map(void *ctx)
+{
+ return CEPH_CB_CONTINUE;
+}
+
+/**
+ * yajl callback for a map key: push the key onto the parse state stack.
+ * The key is truncated to the capacity of the per-level buffer
+ * (DATA_MAX_NAME_LEN) and key_len always equals strlen(key) + 1.
+ *
+ * Returns CEPH_CB_CONTINUE, or CEPH_CB_ABORT when nesting exceeds
+ * YAJL_MAX_DEPTH.
+ */
+static int
+ceph_cb_map_key(void *ctx, const unsigned char *key, yajl_len_t string_len)
+{
+    yajl_struct *yajl = (yajl_struct*)ctx;
+
+    if((yajl->depth+1) >= YAJL_MAX_DEPTH)
+    {
+        ERROR("ceph plugin: depth exceeds max, aborting.");
+        return CEPH_CB_ABORT;
+    }
+
+    char buffer[string_len+1];
+
+    memcpy(buffer, key, string_len);
+    buffer[sizeof(buffer) - 1] = 0;
+
+    /* Bound the copy by the destination, not by the incoming key: using
+     * sizeof(buffer) as the limit (as before) overflowed state[].key for
+     * keys longer than DATA_MAX_NAME_LEN - 1 characters. */
+    snprintf(yajl->state[yajl->depth].key,
+            sizeof(yajl->state[yajl->depth].key), "%s", buffer);
+    yajl->state[yajl->depth].key_len =
+            strlen(yajl->state[yajl->depth].key) + 1;
+    yajl->depth = (yajl->depth + 1);
+
+    return CEPH_CB_CONTINUE;
+}
+
+/* yajl callback: leaving a map pops one level off the key stack. */
+static int ceph_cb_end_map(void *ctx)
+{
+ yajl_struct *yajl = (yajl_struct*)ctx;
+
+ yajl->depth = (yajl->depth - 1);
+ return CEPH_CB_CONTINUE;
+}
+
+/* yajl callbacks: arrays carry no counter data; ignore them. */
+static int ceph_cb_start_array(void *ctx)
+{
+ return CEPH_CB_CONTINUE;
+}
+
+static int ceph_cb_end_array(void *ctx)
+{
+ return CEPH_CB_CONTINUE;
+}
+
+/* Callback table passed to yajl_alloc(); slots follow yajl's fixed
+ * yajl_callbacks order. The integer/double slots are NULL so every number
+ * is delivered as a string through ceph_cb_number(). */
+static yajl_callbacks callbacks = {
+ ceph_cb_null,
+ ceph_cb_boolean,
+ NULL,
+ NULL,
+ ceph_cb_number,
+ ceph_cb_string,
+ ceph_cb_start_map,
+ ceph_cb_map_key,
+ ceph_cb_end_map,
+ ceph_cb_start_array,
+ ceph_cb_end_array
+};
+
+/** Log one daemon's name and admin socket path (DEBUG level only). */
+static void ceph_daemon_print(const struct ceph_daemon *d)
+{
+ DEBUG("ceph plugin: name=%s, asok_path=%s", d->name, d->asok_path);
+}
+
+/** Log the configuration of every daemon in g_daemons. */
+static void ceph_daemons_print(void)
+{
+ int i;
+ for(i = 0; i < g_num_daemons; ++i)
+ {
+ ceph_daemon_print(g_daemons[i]);
+ }
+}
+
+/**
+ * Free one ceph_daemon and everything it owns: the last-poll latency
+ * snapshots, the counter name strings, the type array, and finally the
+ * struct itself. The pointer is invalid after this returns.
+ */
+static void ceph_daemon_free(struct ceph_daemon *d)
+{
+ int i = 0;
+ for(; i < d->last_idx; i++)
+ {
+ sfree(d->last_poll_data[i]);
+ }
+ sfree(d->last_poll_data);
+ d->last_poll_data = NULL;
+ d->last_idx = 0;
+ for(i = 0; i < d->ds_num; i++)
+ {
+ sfree(d->ds_names[i]);
+ }
+ sfree(d->ds_types);
+ sfree(d->ds_names);
+ sfree(d);
+}
+
+/**
+ * Compact ds name by removing special characters and trimming length to
+ * DATA_MAX_NAME_LEN if necessary.
+ *
+ * The source is split on ":_-+", each component is capitalized and the
+ * parts are concatenated. A trailing '-'/'+' on the source is encoded as
+ * a "Minus"/"Plus" suffix, and a truncated name additionally gets the
+ * original length appended so distinct long names stay distinguishable.
+ *
+ * `dest` must point to a zero-filled buffer of at least DATA_MAX_NAME_LEN
+ * bytes (all callers memset it first).
+ */
+static void compact_ds_name(char *source, char *dest)
+{
+    int keys_num = 0, i;
+    char *save_ptr = NULL, *tmp_ptr = source;
+    char *keys[16];
+    char len_str[3];
+    char tmp[DATA_MAX_NAME_LEN];
+    size_t key_chars_remaining = (DATA_MAX_NAME_LEN-1);
+    int reserved = 0;
+    int offset = 0;
+    int truncated = 0;
+    memset(tmp, 0, sizeof(tmp));
+    if(source == NULL || dest == NULL || source[0] == '\0' || dest[0] != '\0')
+    {
+        return;
+    }
+    size_t src_len = strlen(source);
+    snprintf(len_str, sizeof(len_str), "%zu", src_len);
+    unsigned char append_status = 0x0;
+    append_status |= (source[src_len - 1] == '-') ? 0x1 : 0x0;
+    append_status |= (source[src_len - 1] == '+') ? 0x2 : 0x0;
+    while ((keys[keys_num] = strtok_r(tmp_ptr, ":_-+", &save_ptr)) != NULL)
+    {
+        tmp_ptr = NULL;
+        /** capitalize 1st char (cast: toupper() takes an unsigned char) **/
+        keys[keys_num][0] = toupper((unsigned char)keys[keys_num][0]);
+        keys_num++;
+        if(keys_num >= 16)
+        {
+            break;
+        }
+    }
+    /** concatenate each part of source string **/
+    for(i = 0; i < keys_num; i++)
+    {
+        /* Clamp each part to the space left. The previous code subtracted
+         * the full part length from key_chars_remaining (a size_t), which
+         * underflowed for long parts and turned the following strncat()
+         * into an unbounded append past the end of tmp. */
+        size_t part_len = strlen(keys[i]);
+        if(part_len > key_chars_remaining)
+        {
+            part_len = key_chars_remaining;
+            truncated = 1;
+        }
+        strncat(tmp, keys[i], part_len);
+        key_chars_remaining -= part_len;
+        if(key_chars_remaining == 0)
+        {
+            if(i < keys_num - 1)
+                truncated = 1;
+            break;
+        }
+    }
+    tmp[DATA_MAX_NAME_LEN - 1] = '\0';
+    /** To coordinate with the length limit of a type instance we truncate
+     * the ds name to DATA_MAX_NAME_LEN and mark the truncation. (The old
+     * check `strlen(tmp) > DATA_MAX_NAME_LEN - 1` could never be true
+     * because tmp itself is only DATA_MAX_NAME_LEN bytes.)
+     */
+    if(truncated)
+    {
+        append_status |= 0x4;
+        /** reserve space for the length suffix (up to two digits) */
+        reserved += 2;
+    }
+    if(append_status & 0x1)
+    {
+        /** reserve space for "Minus" */
+        reserved += 5;
+    }
+    if(append_status & 0x2)
+    {
+        /** reserve space for "Plus" */
+        reserved += 4;
+    }
+    snprintf(dest, DATA_MAX_NAME_LEN - reserved, "%s", tmp);
+    offset = strlen(dest);
+    /* All suffix copies below include the terminating NUL, so dest is
+     * always a valid string even if the caller's buffer was not zeroed
+     * beyond dest[0]. The reserved bytes above guarantee the space. */
+    switch (append_status)
+    {
+        case 0x1:
+            memcpy(dest + offset, "Minus", 6);
+            break;
+        case 0x2:
+            memcpy(dest + offset, "Plus", 5);
+            break;
+        case 0x4:
+            memcpy(dest + offset, len_str, strlen(len_str) + 1);
+            break;
+        case 0x5:
+            memcpy(dest + offset, "Minus", 5);
+            memcpy(dest + offset + 5, len_str, strlen(len_str) + 1);
+            break;
+        case 0x6:
+            memcpy(dest + offset, "Plus", 4);
+            memcpy(dest + offset + 4, len_str, strlen(len_str) + 1);
+            break;
+        default:
+            break;
+    }
+}
+
+/**
+ * Parse key to remove "type" if this is for schema and initiate compaction.
+ * Returns 0 on success, -1 on invalid arguments.
+ */
+static int parse_keys(const char *key_str, char *ds_name)
+{
+    char *ptr, *rptr;
+    size_t ds_name_len = 0;
+    /**
+     * allow up to 100 characters before compaction - compact_ds_name will not
+     * allow more than DATA_MAX_NAME_LEN chars
+     */
+    int max_str_len = 100;
+    char tmp_ds_name[max_str_len];
+    memset(tmp_ds_name, 0, sizeof(tmp_ds_name));
+    if(ds_name == NULL || key_str == NULL || key_str[0] == '\0' ||
+            ds_name[0] != '\0')
+    {
+        return -1;
+    }
+    if((ptr = strchr(key_str, '.')) == NULL
+            || (rptr = strrchr(key_str, '.')) == NULL)
+    {
+        /* Bounded copy that stops at the NUL: the previous fixed-size
+         * memcpy(..., max_str_len - 1) read past the end of short keys. */
+        snprintf(tmp_ds_name, sizeof(tmp_ds_name), "%s", key_str);
+        goto compact;
+    }
+
+    ds_name_len = (rptr - ptr) > max_str_len ? max_str_len : (rptr - ptr);
+    if((ds_name_len == 0) || strncmp(rptr + 1, "type", 4))
+    { /** copy whole key **/
+        snprintf(tmp_ds_name, sizeof(tmp_ds_name), "%s", key_str);
+    }
+    else
+    {/** more than two keys: drop the trailing ".type" component.
+        This memcpy is safe — rptr points inside key_str, so at least
+        (rptr - key_str) readable bytes exist. **/
+        memcpy(tmp_ds_name, key_str, ((rptr - key_str) > (max_str_len - 1) ?
+                (max_str_len - 1) : (rptr - key_str)));
+    }
+
+    compact: compact_ds_name(tmp_ds_name, ds_name);
+    return 0;
+}
+
+/**
+ * While parsing the ceph admin socket schema, save a counter's name and
+ * collectd data-set type for later data processing.
+ *
+ * Returns 0 on success, 1 when the key cannot be parsed (entry skipped)
+ * and -ENOMEM on allocation failure.
+ */
+static int ceph_daemon_add_ds_entry(struct ceph_daemon *d, const char *name,
+        int pc_type)
+{
+    uint32_t type;
+    char ds_name[DATA_MAX_NAME_LEN];
+    char **names_tmp;
+    uint32_t *types_tmp;
+    memset(ds_name, 0, sizeof(ds_name));
+
+    if(convert_special_metrics)
+    {
+        /**
+         * Special case for filestore:JournalWrBytes. For some reason, Ceph
+         * schema encodes this as a count/sum pair while all other "Bytes" data
+         * (excluding used/capacity bytes for OSD space) uses a single "Derive"
+         * type. To spare further confusion, keep this KPI as the same type of
+         * other "Bytes". Instead of keeping an "average" or "rate", use the
+         * "sum" in the pair and assign that to the derive value.
+         */
+        if((strcmp(name,"filestore.journal_wr_bytes.type") == 0))
+        {
+            pc_type = 10;
+        }
+    }
+
+    /* Grow both arrays through temporaries so the originals are not
+     * leaked when realloc() fails (the old `p = realloc(p, ...)` pattern
+     * dropped the only reference to the existing block). */
+    names_tmp = realloc(d->ds_names, sizeof(char *) * (d->ds_num + 1));
+    if(!names_tmp)
+    {
+        return -ENOMEM;
+    }
+    d->ds_names = names_tmp;
+
+    types_tmp = realloc(d->ds_types, sizeof(uint32_t) * (d->ds_num + 1));
+    if(!types_tmp)
+    {
+        return -ENOMEM;
+    }
+    d->ds_types = types_tmp;
+
+    d->ds_names[d->ds_num] = malloc(sizeof(char) * DATA_MAX_NAME_LEN);
+    if(!d->ds_names[d->ds_num])
+    {
+        return -ENOMEM;
+    }
+
+    /* Map the perfcounter type bits onto our data-set types. */
+    type = (pc_type & PERFCOUNTER_DERIVE) ? DSET_RATE :
+            ((pc_type & PERFCOUNTER_LATENCY) ? DSET_LATENCY : DSET_BYTES);
+    d->ds_types[d->ds_num] = type;
+
+    if(parse_keys(name, ds_name))
+    {
+        return 1;
+    }
+
+    sstrncpy(d->ds_names[d->ds_num], ds_name, DATA_MAX_NAME_LEN -1);
+    d->ds_num = (d->ds_num + 1);
+
+    return 0;
+}
+
+/******* ceph_config *******/
+/**
+ * Copy a single string option value into dest (at most dest_len bytes
+ * including the NUL). Returns 0 on success, -ENOTSUP when the option is
+ * not exactly one string, -ENAMETOOLONG when the value was truncated.
+ */
+static int cc_handle_str(struct oconfig_item_s *item, char *dest, int dest_len)
+{
+ const char *val;
+ if(item->values_num != 1)
+ {
+ return -ENOTSUP;
+ }
+ if(item->values[0].type != OCONFIG_TYPE_STRING)
+ {
+ return -ENOTSUP;
+ }
+ val = item->values[0].value.string;
+ /* snprintf returns the length the full string would need; anything
+ * larger than dest_len - 1 means the copy was truncated. */
+ if(snprintf(dest, dest_len, "%s", val) > (dest_len - 1))
+ {
+ ERROR("ceph plugin: configuration parameter '%s' is too long.\n",
+ item->key);
+ return -ENAMETOOLONG;
+ }
+ return 0;
+}
+
+/**
+ * Read a single boolean option value into *dest (as 1 or 0).
+ * Returns 0 on success, -ENOTSUP on a malformed option.
+ */
+static int cc_handle_bool(struct oconfig_item_s *item, int *dest)
+{
+ if(item->values_num != 1)
+ {
+ return -ENOTSUP;
+ }
+
+ if(item->values[0].type != OCONFIG_TYPE_BOOLEAN)
+ {
+ return -ENOTSUP;
+ }
+
+ *dest = (item->values[0].value.boolean) ? 1 : 0;
+ return 0;
+}
+
+/**
+ * Parse one <Daemon "name"> configuration block: validate the daemon name
+ * and admin socket path, then append a new ceph_daemon to g_daemons.
+ *
+ * Returns 0 on success, a negative value on configuration errors (the
+ * caller skips the daemon) and positive ENOMEM on allocation failure
+ * (runtime error, fatal).
+ */
+static int cc_add_daemon_config(oconfig_item_t *ci)
+{
+    int ret, i;
+    /* g_daemons is an array of pointers, so the realloc temporary must be
+     * struct ceph_daemon ** — the old code declared it with the wrong
+     * pointer type and cast it back. */
+    struct ceph_daemon **array, *nd, cd;
+    memset(&cd, 0, sizeof(struct ceph_daemon));
+
+    if((ci->values_num != 1) || (ci->values[0].type != OCONFIG_TYPE_STRING))
+    {
+        WARNING("ceph plugin: `Daemon' blocks need exactly one string "
+                "argument.");
+        return (-1);
+    }
+
+    ret = cc_handle_str(ci, cd.name, DATA_MAX_NAME_LEN);
+    if(ret)
+    {
+        return ret;
+    }
+
+    for(i=0; i < ci->children_num; i++)
+    {
+        oconfig_item_t *child = ci->children + i;
+
+        if(strcasecmp("SocketPath", child->key) == 0)
+        {
+            ret = cc_handle_str(child, cd.asok_path, sizeof(cd.asok_path));
+            if(ret)
+            {
+                return ret;
+            }
+        }
+        else
+        {
+            WARNING("ceph plugin: ignoring unknown option %s", child->key);
+        }
+    }
+    if(cd.name[0] == '\0')
+    {
+        ERROR("ceph plugin: you must configure a daemon name.\n");
+        return -EINVAL;
+    }
+    else if(cd.asok_path[0] == '\0')
+    {
+        ERROR("ceph plugin(name=%s): you must configure an administrative "
+                "socket path.\n", cd.name);
+        return -EINVAL;
+    }
+    else if(!((cd.asok_path[0] == '/') ||
+            (cd.asok_path[0] == '.' && cd.asok_path[1] == '/')))
+    {
+        ERROR("ceph plugin(name=%s): administrative socket paths must begin "
+                "with '/' or './' Can't parse: '%s'\n", cd.name, cd.asok_path);
+        return -EINVAL;
+    }
+
+    array = realloc(g_daemons,
+            sizeof(struct ceph_daemon *) * (g_num_daemons + 1));
+    if(array == NULL)
+    {
+        /* The positive return value here indicates that this is a
+         * runtime error, not a configuration error. */
+        return ENOMEM;
+    }
+    g_daemons = array;
+    nd = malloc(sizeof(struct ceph_daemon));
+    if(!nd)
+    {
+        return ENOMEM;
+    }
+    memcpy(nd, &cd, sizeof(struct ceph_daemon));
+    g_daemons[g_num_daemons++] = nd;
+    return 0;
+}
+
+/**
+ * Plugin config callback: handle <Daemon> blocks plus the global
+ * LongRunAvgLatency and ConvertSpecialMetricTypes booleans. A daemon
+ * block with a configuration error is skipped; only ENOMEM aborts.
+ */
+static int ceph_config(oconfig_item_t *ci)
+{
+ int ret, i;
+
+ for(i = 0; i < ci->children_num; ++i)
+ {
+ oconfig_item_t *child = ci->children + i;
+ if(strcasecmp("Daemon", child->key) == 0)
+ {
+ ret = cc_add_daemon_config(child);
+ if(ret == ENOMEM)
+ {
+ ERROR("ceph plugin: Couldn't allocate memory");
+ return ret;
+ }
+ else if(ret)
+ {
+ //process other daemons and ignore this one
+ continue;
+ }
+ }
+ else if(strcasecmp("LongRunAvgLatency", child->key) == 0)
+ {
+ ret = cc_handle_bool(child, &long_run_latency_avg);
+ if(ret)
+ {
+ return ret;
+ }
+ }
+ else if(strcasecmp("ConvertSpecialMetricTypes", child->key) == 0)
+ {
+ ret = cc_handle_bool(child, &convert_special_metrics);
+ if(ret)
+ {
+ return ret;
+ }
+ }
+ else
+ {
+ WARNING("ceph plugin: ignoring unknown option %s", child->key);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Feed a JSON buffer to the yajl parser and log a detailed message if a
+ * parse error occurs. Returns 0 when yajl accepted the input, 1 on a
+ * parse error or when a callback aborted the parse.
+ */
+static int
+traverse_json(const unsigned char *json, uint32_t json_len, yajl_handle hand)
+{
+ yajl_status status = yajl_parse(hand, json, json_len);
+ unsigned char *msg;
+
+ switch(status)
+ {
+ case yajl_status_error:
+ msg = yajl_get_error(hand, /* verbose = */ 1,
+ /* jsonText = */ (unsigned char *) json,
+ (unsigned int) json_len);
+ ERROR ("ceph plugin: yajl_parse failed: %s", msg);
+ yajl_free_error(hand, msg);
+ return 1;
+ case yajl_status_client_canceled:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Add entry for each counter while parsing schema. `val` is the
+ * perfcounter type bitmask as a decimal string.
+ * Returns what ceph_daemon_add_ds_entry() returns: 0, 1 or -ENOMEM.
+ */
+static int
+node_handler_define_schema(void *arg, const char *val, const char *key)
+{
+ struct ceph_daemon *d = (struct ceph_daemon *) arg;
+ int pc_type;
+ pc_type = atoi(val);
+ return ceph_daemon_add_ds_entry(d, key, pc_type);
+}
+
+/**
+ * Latency counter does not yet have an entry in last poll data - add it.
+ * NOTE(review): assumes the caller (update_last) has already grown
+ * last_poll_data so that slot last_idx exists.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int add_last(struct ceph_daemon *d, const char *ds_n, double cur_sum,
+ uint64_t cur_count)
+{
+ d->last_poll_data[d->last_idx] = malloc(1 * sizeof(struct last_data));
+ if(!d->last_poll_data[d->last_idx])
+ {
+ return -ENOMEM;
+ }
+ sstrncpy(d->last_poll_data[d->last_idx]->ds_name,ds_n,
+ sizeof(d->last_poll_data[d->last_idx]->ds_name));
+ d->last_poll_data[d->last_idx]->last_sum = cur_sum;
+ d->last_poll_data[d->last_idx]->last_count = cur_count;
+ d->last_idx = (d->last_idx + 1);
+ return 0;
+}
+
+/**
+ * Update latency counter or add new entry if it doesn't exist.
+ *
+ * `index` is a hint: when it names a matching existing entry that entry
+ * is updated in place, otherwise the array is grown by one slot and a new
+ * entry is appended. Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int update_last(struct ceph_daemon *d, const char *ds_n, int index,
+        double cur_sum, uint64_t cur_count)
+{
+    /* Guard index >= 0: get_last_avg() passes -1 when its backup search
+     * found no entry, which previously indexed last_poll_data[-1]. */
+    if((index >= 0) && (d->last_idx > index)
+            && (strcmp(d->last_poll_data[index]->ds_name, ds_n) == 0))
+    {
+        d->last_poll_data[index]->last_sum = cur_sum;
+        d->last_poll_data[index]->last_count = cur_count;
+        return 0;
+    }
+
+    /* No entry for this counter yet: make room for one more slot before
+     * delegating to add_last(). */
+    if(!d->last_poll_data)
+    {
+        d->last_poll_data = malloc(1 * sizeof(struct last_data *));
+        if(!d->last_poll_data)
+        {
+            return -ENOMEM;
+        }
+    }
+    else
+    {
+        struct last_data **tmp_last = realloc(d->last_poll_data,
+                ((d->last_idx+1) * sizeof(struct last_data *)));
+        if(!tmp_last)
+        {
+            return -ENOMEM;
+        }
+        d->last_poll_data = tmp_last;
+    }
+    return add_last(d, ds_n, cur_sum, cur_count);
+}
+
+/**
+ * If using index guess failed (shouldn't happen, but possible if counters
+ * get rearranged), resort to searching for counter name.
+ * Returns the index of the matching entry, or -1 if it is not present.
+ */
+static int backup_search_for_last_avg(struct ceph_daemon *d, const char *ds_n)
+{
+ int i = 0;
+ for(; i < d->last_idx; i++)
+ {
+ if(strcmp(d->last_poll_data[i]->ds_name, ds_n) == 0)
+ {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/**
+ * Calculate average b/t current data and last poll data
+ * if last poll data exists.
+ * Returns the average, NAN when no usable previous sample exists, or
+ * -ENOMEM (smuggled through the double return) when storing the new
+ * sample fails — callers compare against -ENOMEM explicitly.
+ */
+static double get_last_avg(struct ceph_daemon *d, const char *ds_n, int index,
+ double cur_sum, uint64_t cur_count)
+{
+ double result = -1.1, sum_delt = 0.0;
+ uint64_t count_delt = 0;
+ int tmp_index = 0;
+ if(d->last_idx > index)
+ {
+ if(strcmp(d->last_poll_data[index]->ds_name, ds_n) == 0)
+ {
+ tmp_index = index;
+ }
+ //test previous index
+ else if((index > 0) && (strcmp(d->last_poll_data[index-1]->ds_name, ds_n) == 0))
+ {
+ tmp_index = (index - 1);
+ }
+ else
+ {
+ /* may return -1 when the counter is unknown */
+ tmp_index = backup_search_for_last_avg(d, ds_n);
+ }
+
+ if((tmp_index > -1) && (cur_count > d->last_poll_data[tmp_index]->last_count))
+ {
+ sum_delt = (cur_sum - d->last_poll_data[tmp_index]->last_sum);
+ count_delt = (cur_count - d->last_poll_data[tmp_index]->last_count);
+ result = (sum_delt / count_delt);
+ }
+ }
+
+ /* -1.1 is the "no average computed" sentinel; the exact float compare
+ * is safe because the value is only ever assigned literally above. */
+ if(result == -1.1)
+ {
+ result = NAN;
+ }
+ if(update_last(d, ds_n, tmp_index, cur_sum, cur_count) == -ENOMEM)
+ {
+ return -ENOMEM;
+ }
+ return result;
+}
+
+/**
+ * If using index guess failed, resort to searching for counter name.
+ * Returns the data-set type, or DSET_TYPE_UNFOUND when the name is not
+ * part of the parsed schema.
+ */
+static uint32_t backup_search_for_type(struct ceph_daemon *d, char *ds_name)
+{
+ int idx = 0;
+ for(; idx < d->ds_num; idx++)
+ {
+ if(strcmp(d->ds_names[idx], ds_name) == 0)
+ {
+ return d->ds_types[idx];
+ }
+ }
+ return DSET_TYPE_UNFOUND;
+}
+
+/**
+ * Process counter data and dispatch values.
+ *
+ * Latency counters arrive as an avgcount/sum pair: the avgcount is cached
+ * in vtmp and the value is only dispatched when the matching "sum"
+ * arrives. Returns 0 on success, 1 if the key could not be parsed,
+ * -ENOMEM on allocation failure and -1 for a counter of unknown type.
+ */
+static int node_handler_fetch_data(void *arg, const char *val, const char *key)
+{
+ value_t uv;
+ double tmp_d;
+ uint64_t tmp_u;
+ struct values_tmp *vtmp = (struct values_tmp*) arg;
+ uint32_t type = DSET_TYPE_UNFOUND;
+ int index = vtmp->index;
+
+ char ds_name[DATA_MAX_NAME_LEN];
+ memset(ds_name, 0, sizeof(ds_name));
+
+ if(parse_keys(key, ds_name))
+ {
+ return 1;
+ }
+
+ if(index >= vtmp->d->ds_num)
+ {
+ //don't overflow bounds of array
+ index = (vtmp->d->ds_num - 1);
+ }
+
+ /**
+ * counters should remain in same order we parsed schema... we maintain the
+ * index variable to keep track of current point in list of counters. first
+ * use index to guess point in array for retrieving type. if that doesn't
+ * work, use the old way to get the counter type
+ */
+ if(strcmp(ds_name, vtmp->d->ds_names[index]) == 0)
+ {
+ //found match
+ type = vtmp->d->ds_types[index];
+ }
+ else if((index > 0) && (strcmp(ds_name, vtmp->d->ds_names[index-1]) == 0))
+ {
+ //try previous key
+ type = vtmp->d->ds_types[index-1];
+ }
+
+ if(type == DSET_TYPE_UNFOUND)
+ {
+ //couldn't find right type by guessing, check the old way
+ type = backup_search_for_type(vtmp->d, ds_name);
+ }
+
+ switch(type)
+ {
+ case DSET_LATENCY:
+ if(vtmp->avgcount_exists == -1)
+ {
+ sscanf(val, "%" PRIu64, &vtmp->avgcount);
+ vtmp->avgcount_exists = 0;
+ //return after saving avgcount - don't dispatch value
+ //until latency calculation
+ return 0;
+ }
+ else
+ {
+ double sum, result;
+ sscanf(val, "%lf", &sum);
+
+ /* avoid division by zero below */
+ if(vtmp->avgcount == 0)
+ {
+ vtmp->avgcount = 1;
+ }
+
+ /** User wants latency values as long run avg */
+ if(long_run_latency_avg)
+ {
+ result = (sum / vtmp->avgcount);
+ }
+ else
+ {
+ result = get_last_avg(vtmp->d, ds_name, vtmp->latency_index, sum, vtmp->avgcount);
+ if(result == -ENOMEM)
+ {
+ return -ENOMEM;
+ }
+ }
+
+ uv.gauge = result;
+ vtmp->avgcount_exists = -1;
+ vtmp->latency_index = (vtmp->latency_index + 1);
+ }
+ break;
+ case DSET_BYTES:
+ sscanf(val, "%lf", &tmp_d);
+ uv.gauge = tmp_d;
+ break;
+ case DSET_RATE:
+ sscanf(val, "%" PRIu64, &tmp_u);
+ uv.derive = tmp_u;
+ break;
+ case DSET_TYPE_UNFOUND:
+ default:
+ ERROR("ceph plugin: ds %s was not properly initialized.", ds_name);
+ return -1;
+ }
+
+ sstrncpy(vtmp->vlist.type, ceph_dset_types[type], sizeof(vtmp->vlist.type));
+ sstrncpy(vtmp->vlist.type_instance, ds_name, sizeof(vtmp->vlist.type_instance));
+ vtmp->vlist.values = &uv;
+ vtmp->vlist.values_len = 1;
+
+ vtmp->index = (vtmp->index + 1);
+ plugin_dispatch_values(&vtmp->vlist);
+
+ return 0;
+}
+
+/**
+ * Open and connect a non-blocking UNIX domain socket to the daemon's
+ * admin socket. On success the connection moves to CSTATE_WRITE_REQUEST.
+ *
+ * Returns 0 on success and a negative errno value on failure. The file
+ * descriptor is closed on every error path, so nothing is leaked.
+ */
+static int cconn_connect(struct cconn *io)
+{
+    struct sockaddr_un address;
+    int flags, fd, err;
+    if(io->state != CSTATE_UNCONNECTED)
+    {
+        ERROR("ceph plugin: cconn_connect: io->state != CSTATE_UNCONNECTED");
+        return -EDOM;
+    }
+    fd = socket(PF_UNIX, SOCK_STREAM, 0);
+    if(fd < 0)
+    {
+        err = -errno;
+        ERROR("ceph plugin: cconn_connect: socket(PF_UNIX, SOCK_STREAM, 0) "
+            "failed: error %d", err);
+        return err;
+    }
+    memset(&address, 0, sizeof(struct sockaddr_un));
+    address.sun_family = AF_UNIX;
+    snprintf(address.sun_path, sizeof(address.sun_path), "%s",
+            io->d->asok_path);
+    RETRY_ON_EINTR(err,
+        connect(fd, (struct sockaddr *) &address, sizeof(struct sockaddr_un)));
+    if(err < 0)
+    {
+        ERROR("ceph plugin: cconn_connect: connect(%d) failed: error %d",
+            fd, err);
+        close(fd); /* previously leaked on this path */
+        return err;
+    }
+
+    flags = fcntl(fd, F_GETFL, 0);
+    if(fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0)
+    {
+        err = -errno;
+        ERROR("ceph plugin: cconn_connect: fcntl(%d, O_NONBLOCK) error %d",
+            fd, err);
+        close(fd); /* previously leaked on this path */
+        return err;
+    }
+    io->asok = fd;
+    io->state = CSTATE_WRITE_REQUEST;
+    io->amt = 0;
+    io->json_len = 0;
+    io->json = NULL;
+    return 0;
+}
+
+/** Close the admin-socket connection and reset all per-request state,
+ * including the heap JSON buffer. Safe to call on an already-closed
+ * connection (asok == -1). */
+static void cconn_close(struct cconn *io)
+{
+ io->state = CSTATE_UNCONNECTED;
+ if(io->asok != -1)
+ {
+ int res;
+ RETRY_ON_EINTR(res, close(io->asok));
+ }
+ io->asok = -1;
+ io->amt = 0;
+ io->json_len = 0;
+ sfree(io->json);
+ io->json = NULL;
+}
+
+/* Process incoming JSON counter data: set up the shared value list and
+ * per-parse bookkeeping, then run the JSON through the yajl handler
+ * (node_handler_fetch_data). Returns traverse_json()'s result or -ENOMEM. */
+static int
+cconn_process_data(struct cconn *io, yajl_struct *yajl, yajl_handle hand)
+{
+ int ret;
+ struct values_tmp *vtmp = calloc(1, sizeof(struct values_tmp) * 1);
+ if(!vtmp)
+ {
+ return -ENOMEM;
+ }
+
+ /* host/plugin/plugin_instance are identical for every counter of this
+ * daemon, so they are set once on the shared value list. */
+ vtmp->vlist = (value_list_t)VALUE_LIST_INIT;
+ sstrncpy(vtmp->vlist.host, hostname_g, sizeof(vtmp->vlist.host));
+ sstrncpy(vtmp->vlist.plugin, "ceph", sizeof(vtmp->vlist.plugin));
+ sstrncpy(vtmp->vlist.plugin_instance, io->d->name, sizeof(vtmp->vlist.plugin_instance));
+
+ vtmp->d = io->d;
+ vtmp->avgcount_exists = -1;
+ vtmp->latency_index = 0;
+ vtmp->index = 0;
+ yajl->handler_arg = vtmp;
+ ret = traverse_json(io->json, io->json_len, hand);
+ sfree(vtmp);
+ return ret;
+}
+
+/**
+ * Initiate JSON parsing and print error if one occurs.
+ * Selects the handler by request type (counter data vs. schema), runs the
+ * buffered JSON through yajl and finalizes the parse. Returns 0 on
+ * success, non-zero on any parse or allocation failure.
+ */
+static int cconn_process_json(struct cconn *io)
+{
+ if((io->request_type != ASOK_REQ_DATA) &&
+ (io->request_type != ASOK_REQ_SCHEMA))
+ {
+ return -EDOM;
+ }
+
+ int result = 1;
+ yajl_handle hand;
+ yajl_status status;
+
+ hand = yajl_alloc(&callbacks,
+#if HAVE_YAJL_V2
+ /* alloc funcs = */ NULL,
+#else
+ /* alloc funcs = */ NULL, NULL,
+#endif
+ /* context = */ (void *)(&io->yajl));
+
+ if(!hand)
+ {
+ ERROR ("ceph plugin: yajl_alloc failed.");
+ return ENOMEM;
+ }
+
+ io->yajl.depth = 0;
+
+ switch(io->request_type)
+ {
+ case ASOK_REQ_DATA:
+ io->yajl.handler = node_handler_fetch_data;
+ result = cconn_process_data(io, &io->yajl, hand);
+ break;
+ case ASOK_REQ_SCHEMA:
+ //init daemon specific variables
+ io->d->ds_num = 0;
+ io->d->last_idx = 0;
+ io->d->last_poll_data = NULL;
+ io->yajl.handler = node_handler_define_schema;
+ io->yajl.handler_arg = io->d;
+ result = traverse_json(io->json, io->json_len, hand);
+ break;
+ }
+
+ if(result)
+ {
+ goto done;
+ }
+
+#if HAVE_YAJL_V2
+ status = yajl_complete_parse(hand);
+#else
+ status = yajl_parse_complete(hand);
+#endif
+
+ if (status != yajl_status_ok)
+ {
+ unsigned char *errmsg = yajl_get_error (hand, /* verbose = */ 0,
+ /* jsonText = */ NULL, /* jsonTextLen = */ 0);
+ ERROR ("ceph plugin: yajl_parse_complete failed: %s",
+ (char *) errmsg);
+ yajl_free_error (hand, errmsg);
+ yajl_free (hand);
+ return 1;
+ }
+
+ done:
+ yajl_free (hand);
+ return result;
+}
+
+/**
+ * Sanity-check poll(2) revents against the connection's current state.
+ *
+ * Returns 0 when the event matches what the state machine expects
+ * (POLLOUT while writing the request, POLLIN while reading the reply),
+ * -EIO on POLLERR, -EINVAL on a mismatched event, and -EDOM when the
+ * connection is in a state that should never be polled.
+ */
+static int cconn_validate_revents(struct cconn *io, int revents)
+{
+    if(revents & POLLERR)
+    {
+        ERROR("ceph plugin: cconn_validate_revents(name=%s): got POLLERR",
+            io->d->name);
+        return -EIO;
+    }
+    switch (io->state)
+    {
+        case CSTATE_WRITE_REQUEST:
+            return (revents & POLLOUT) ? 0 : -EINVAL;
+        case CSTATE_READ_VERSION:
+        case CSTATE_READ_AMT:
+        case CSTATE_READ_JSON:
+            return (revents & POLLIN) ? 0 : -EINVAL;
+        default:
+            ERROR("ceph plugin: cconn_validate_revents(name=%s) got to "
+                "illegal state on line %d", io->d->name, __LINE__);
+            return -EDOM;
+    }
+}
+
+/** Handle a network event for a connection.
+ *
+ * Advances the per-connection state machine by one step: write the
+ * request, then read the protocol version (ASOK_REQ_VERSION) or the
+ * 4-byte payload length followed by the JSON payload itself. Partial
+ * reads/writes are resumable: io->amt tracks how many bytes have been
+ * transferred so far in the current state.
+ *
+ * Returns 0 to keep polling this connection, a negative errno on
+ * failure.
+ */
+static int cconn_handle_event(struct cconn *io)
+{
+    int ret;
+    switch (io->state)
+    {
+        case CSTATE_UNCONNECTED:
+            ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
+                "state on line %d", io->d->name, __LINE__);
+
+            return -EDOM;
+        case CSTATE_WRITE_REQUEST:
+        {
+            /* The admin socket takes the numeric request type wrapped in
+             * a JSON "prefix" string, e.g. { "prefix": "2" }\n. */
+            char cmd[32];
+            snprintf(cmd, sizeof(cmd), "%s%d%s", "{ \"prefix\": \"",
+                io->request_type, "\" }\n");
+            size_t cmd_len = strlen(cmd);
+            RETRY_ON_EINTR(ret,
+                write(io->asok, ((char*)&cmd) + io->amt, cmd_len - io->amt));
+            DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,amt=%d,ret=%d)",
+                io->d->name, io->state, io->amt, ret);
+            if(ret < 0)
+            {
+                return ret;
+            }
+            io->amt += ret;
+            if(io->amt >= cmd_len)
+            {
+                /* request fully written; reset the transfer counter and
+                 * move to the matching read state */
+                io->amt = 0;
+                switch (io->request_type)
+                {
+                    case ASOK_REQ_VERSION:
+                        io->state = CSTATE_READ_VERSION;
+                        break;
+                    default:
+                        io->state = CSTATE_READ_AMT;
+                        break;
+                }
+            }
+            return 0;
+        }
+        case CSTATE_READ_VERSION:
+        {
+            RETRY_ON_EINTR(ret,
+                read(io->asok, ((char*)(&io->d->version)) + io->amt,
+                    sizeof(io->d->version) - io->amt));
+            DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
+                io->d->name, io->state, ret);
+            if(ret < 0)
+            {
+                return ret;
+            }
+            io->amt += ret;
+            if(io->amt >= sizeof(io->d->version))
+            {
+                /* version arrives in network byte order; only protocol
+                 * version 1 is supported */
+                io->d->version = ntohl(io->d->version);
+                if(io->d->version != 1)
+                {
+                    ERROR("ceph plugin: cconn_handle_event(name=%s) not "
+                        "expecting version %d!", io->d->name, io->d->version);
+                    return -ENOTSUP;
+                }
+                DEBUG("ceph plugin: cconn_handle_event(name=%s): identified as "
+                    "version %d", io->d->name, io->d->version);
+                io->amt = 0;
+                /* version check done: close and chain straight into a
+                 * schema request on the next main-loop pass */
+                cconn_close(io);
+                io->request_type = ASOK_REQ_SCHEMA;
+            }
+            return 0;
+        }
+        case CSTATE_READ_AMT:
+        {
+            RETRY_ON_EINTR(ret,
+                read(io->asok, ((char*)(&io->json_len)) + io->amt,
+                    sizeof(io->json_len) - io->amt));
+            DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
+                io->d->name, io->state, ret);
+            if(ret < 0)
+            {
+                return ret;
+            }
+            io->amt += ret;
+            if(io->amt >= sizeof(io->json_len))
+            {
+                io->json_len = ntohl(io->json_len);
+                io->amt = 0;
+                io->state = CSTATE_READ_JSON;
+                /* NOTE(review): io->json_len comes straight off the wire
+                 * and is not bounded before this allocation -- a
+                 * misbehaving daemon could make us allocate a huge
+                 * buffer; confirm whether an upper limit is wanted.
+                 * +1 leaves room for a terminating NUL (calloc zeroes). */
+                io->json = calloc(1, io->json_len + 1);
+                if(!io->json)
+                {
+                    ERROR("ceph plugin: error callocing io->json");
+                    return -ENOMEM;
+                }
+            }
+            return 0;
+        }
+        case CSTATE_READ_JSON:
+        {
+            RETRY_ON_EINTR(ret,
+                read(io->asok, io->json + io->amt, io->json_len - io->amt));
+            DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
+                io->d->name, io->state, ret);
+            if(ret < 0)
+            {
+                return ret;
+            }
+            io->amt += ret;
+            if(io->amt >= io->json_len)
+            {
+                /* complete payload received: parse it, then mark this
+                 * connection's request as serviced */
+                ret = cconn_process_json(io);
+                if(ret)
+                {
+                    return ret;
+                }
+                cconn_close(io);
+                io->request_type = ASOK_REQ_NONE;
+            }
+            return 0;
+        }
+        default:
+            ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
+                "state on line %d", io->d->name, __LINE__);
+            return -EDOM;
+    }
+}
+
+/**
+ * Prepare a connection for the next poll(2) round.
+ *
+ * Fills in *fds with the socket and the event mask appropriate for the
+ * connection's state, connecting first if necessary.
+ *
+ * Returns 1 when fds was populated and should be polled, 0 when this
+ * connection needs no I/O (request already serviced, or no counters to
+ * fetch), and a negative value on error.
+ */
+static int cconn_prepare(struct cconn *io, struct pollfd* fds)
+{
+    int ret;
+    if(io->request_type == ASOK_REQ_NONE)
+    {
+        /* The request has already been serviced. */
+        return 0;
+    }
+    else if((io->request_type == ASOK_REQ_DATA) && (io->d->ds_num == 0))
+    {
+        /* If there are no counters to report on, don't bother
+         * connecting */
+        return 0;
+    }
+
+    switch (io->state)
+    {
+        case CSTATE_UNCONNECTED:
+            ret = cconn_connect(io);
+            if(ret > 0)
+            {
+                /* normalize: positive error codes become negative */
+                return -ret;
+            }
+            else if(ret < 0)
+            {
+                return ret;
+            }
+            fds->fd = io->asok;
+            fds->events = POLLOUT;
+            return 1;
+        case CSTATE_WRITE_REQUEST:
+            fds->fd = io->asok;
+            fds->events = POLLOUT;
+            return 1;
+        case CSTATE_READ_VERSION:
+        case CSTATE_READ_AMT:
+        case CSTATE_READ_JSON:
+            fds->fd = io->asok;
+            fds->events = POLLIN;
+            return 1;
+        default:
+            ERROR("ceph plugin: cconn_prepare(name=%s) got to illegal state "
+                "on line %d", io->d->name, __LINE__);
+            return -EDOM;
+    }
+}
+
+/** Returns the difference between two struct timevals in milliseconds.
+ * On overflow, we return max/min int.
+ */
+static int milli_diff(const struct timeval *t1, const struct timeval *t2)
+{
+    int64_t ret;
+    /* NOTE(review): both differences and sec_diff * 1000 are computed in
+     * plain int before the widening to int64_t, so timestamps more than
+     * roughly 24 days apart would overflow early -- harmless for the
+     * short timeouts used here, but worth confirming. */
+    int sec_diff = t1->tv_sec - t2->tv_sec;
+    int usec_diff = t1->tv_usec - t2->tv_usec;
+    ret = usec_diff / 1000;
+    ret += (sec_diff * 1000);
+    /* clamp to the int range as documented above */
+    return (ret > INT_MAX) ? INT_MAX : ((ret < INT_MIN) ? INT_MIN : (int)ret);
+}
+
+/** This handles the actual network I/O to talk to the Ceph daemons.
+ *
+ * Builds one connection per configured daemon, all issuing the same
+ * request_type, and multiplexes them with poll(2) until every request
+ * has been serviced or CEPH_TIMEOUT_INTERVAL seconds have elapsed.
+ * Daemons that error out are closed and skipped, not retried within
+ * this call.
+ *
+ * Returns 0 on success, -ETIMEDOUT on timeout, or a negative poll error.
+ */
+static int cconn_main_loop(uint32_t request_type)
+{
+    int i, ret, some_unreachable = 0;
+    struct timeval end_tv;
+    /* NOTE(review): variable-length array sized by the runtime value
+     * g_num_daemons (as are fds/polled_io_array below) -- fine for a
+     * modest daemon count, but stack usage grows with configuration. */
+    struct cconn io_array[g_num_daemons];
+
+    DEBUG("ceph plugin: entering cconn_main_loop(request_type = %d)", request_type);
+
+    /* create cconn array */
+    memset(io_array, 0, sizeof(io_array));
+    for(i = 0; i < g_num_daemons; ++i)
+    {
+        io_array[i].d = g_daemons[i];
+        io_array[i].request_type = request_type;
+        io_array[i].state = CSTATE_UNCONNECTED;
+    }
+
+    /** Calculate the time at which we should give up */
+    gettimeofday(&end_tv, NULL);
+    end_tv.tv_sec += CEPH_TIMEOUT_INTERVAL;
+
+    while (1)
+    {
+        int nfds, diff;
+        struct timeval tv;
+        struct cconn *polled_io_array[g_num_daemons];
+        struct pollfd fds[g_num_daemons];
+        memset(fds, 0, sizeof(fds));
+        nfds = 0;
+        /* gather the connections that still need I/O this round */
+        for(i = 0; i < g_num_daemons; ++i)
+        {
+            struct cconn *io = io_array + i;
+            ret = cconn_prepare(io, fds + nfds);
+            if(ret < 0)
+            {
+                WARNING("ceph plugin: cconn_prepare(name=%s,i=%d,st=%d)=%d",
+                    io->d->name, i, io->state, ret);
+                cconn_close(io);
+                io->request_type = ASOK_REQ_NONE;
+                some_unreachable = 1;
+            }
+            else if(ret == 1)
+            {
+                polled_io_array[nfds++] = io_array + i;
+            }
+        }
+        if(nfds == 0)
+        {
+            /* finished */
+            ret = 0;
+            goto done;
+        }
+        /* remaining time until the deadline becomes the poll timeout */
+        gettimeofday(&tv, NULL);
+        diff = milli_diff(&end_tv, &tv);
+        if(diff <= 0)
+        {
+            /* Timed out */
+            ret = -ETIMEDOUT;
+            WARNING("ceph plugin: cconn_main_loop: timed out.");
+            goto done;
+        }
+        RETRY_ON_EINTR(ret, poll(fds, nfds, diff));
+        if(ret < 0)
+        {
+            ERROR("ceph plugin: poll(2) error: %d", ret);
+            goto done;
+        }
+        /* service every connection that poll(2) flagged; a failure only
+         * disables that one daemon, the loop keeps going */
+        for(i = 0; i < nfds; ++i)
+        {
+            struct cconn *io = polled_io_array[i];
+            int revents = fds[i].revents;
+            if(revents == 0)
+            {
+                /* do nothing */
+            }
+            else if(cconn_validate_revents(io, revents))
+            {
+                WARNING("ceph plugin: cconn(name=%s,i=%d,st=%d): "
+                "revents validation error: "
+                "revents=0x%08x", io->d->name, i, io->state, revents);
+                cconn_close(io);
+                io->request_type = ASOK_REQ_NONE;
+                some_unreachable = 1;
+            }
+            else
+            {
+                int ret = cconn_handle_event(io);
+                if(ret)
+                {
+                    WARNING("ceph plugin: cconn_handle_event(name=%s,"
+                    "i=%d,st=%d): error %d", io->d->name, i, io->state, ret);
+                    cconn_close(io);
+                    io->request_type = ASOK_REQ_NONE;
+                    some_unreachable = 1;
+                }
+            }
+        }
+    }
+    done: for(i = 0; i < g_num_daemons; ++i)
+    {
+        cconn_close(io_array + i);
+    }
+    if(some_unreachable)
+    {
+        DEBUG("ceph plugin: cconn_main_loop: some Ceph daemons were unreachable.");
+    }
+    else
+    {
+        DEBUG("ceph plugin: cconn_main_loop: reached all Ceph daemons :)");
+    }
+    return ret;
+}
+
+/* Read callback: poll every configured daemon for fresh counter data. */
+static int ceph_read(void)
+{
+    return cconn_main_loop(ASOK_REQ_DATA);
+}
+
+/******* lifecycle *******/
+/* Init callback: log the configured daemons, then query each daemon's
+ * admin-socket protocol version (which, on success, chains into fetching
+ * the schema -- see cconn_handle_event). */
+static int ceph_init(void)
+{
+    int ret;
+    ceph_daemons_print();
+
+    ret = cconn_main_loop(ASOK_REQ_VERSION);
+
+    return (ret) ? ret : 0;
+}
+
+/* Shutdown callback: free every daemon descriptor and the daemon array,
+ * and reset the globals so a re-init starts clean. */
+static int ceph_shutdown(void)
+{
+    int i;
+    for(i = 0; i < g_num_daemons; ++i)
+    {
+        ceph_daemon_free(g_daemons[i]);
+    }
+    sfree(g_daemons);
+    g_daemons = NULL;
+    g_num_daemons = 0;
+    DEBUG("ceph plugin: finished ceph_shutdown");
+    return 0;
+}
+
+/* Register the plugin's config, init, read and shutdown callbacks with
+ * the collectd core. */
+void module_register(void)
+{
+    plugin_register_complex_config("ceph", ceph_config);
+    plugin_register_init("ceph", ceph_init);
+    plugin_register_read("ceph", ceph_read);
+    plugin_register_shutdown("ceph", ceph_shutdown);
+}
HTTP: At first there is a "header" with one line per field. Every line consists
of a field name, ended by a colon, and the associated value until end-of-line.
The "header" is ended by two newlines immediately following another,
-i.E<nbsp>e. an empty line. The rest, basically the "body", is the message of
-the notification.
+i.e. an empty line. The rest, basically the "body", is the message of the
+notification.
The following is an example notification passed to a program:
Severity: FAILURE
- Time: 1200928930
+ Time: 1200928930.515
Host: myhost.mydomain.org
\n
This is a test notification to demonstrate the format
=item B<Time>
-The time in epoch, i.E<nbsp>e. as seconds since 1970-01-01 00:00:00 UTC.
+The time in epoch, i.e. as seconds since 1970-01-01 00:00:00 UTC. The value
+currently has millisecond precision (i.e. three decimal places), but scripts
+should accept arbitrary numbers of decimal places, including no decimal places.
=item B<Host>
=head1 SYNOPSIS
- <LoadPlugin perl>
- Globals true
- </LoadPlugin>
+ LoadPlugin perl
# ...
<Plugin perl>
IncludeDir "/path/to/perl/plugins"
Perl-script every time you want to read a value with the C<exec plugin> (see
L<collectd-exec(5)>) and provides a lot more functionality, too.
-When loading the C<perl plugin>, the B<Globals> option should be enabled.
-Else, the perl plugin will fail to load any Perl modules implemented in C,
-which includes, amongst many others, the B<threads> module used by the plugin
-itself. See the documentation of the B<Globals> option in L<collectd.conf(5)>
-for details.
-
=head1 CONFIGURATION
=over 4
=head1 SYNOPSIS
- <LoadPlugin python>
- Globals true
- </LoadPlugin>
+ LoadPlugin python
# ...
<Plugin python>
ModulePath "/path/to/your/python/modules"
=item B<LoadPlugin> I<Plugin>
-Loads the Python plugin I<Plugin>. Unlike most other LoadPlugin lines, this one
-should be a block containing the line "Globals true". This will cause collectd
-to export the name of all objects in the Python interpreter for all plugins to
-see. If you don't do this or your platform does not support it, the embedded
-interpreter will start anyway but you won't be able to load certain Python
-modules, e.g. "time".
+Loads the Python plugin I<Plugin>.
=item B<Encoding> I<Name>
The default encoding for Unicode objects you pass to collectd. If you omit this
-option it will default to B<ascii> on I<Python 2> and B<utf-8> on I<Python 3>.
-This is hardcoded in Python and will ignore everything else, including your
-locale.
+option it will default to B<ascii> on I<Python 2>. On I<Python 3> it will
+always be B<utf-8>, as this function was removed, so this will be silently
+ignored.
+These defaults are hardcoded in Python and will ignore everything else,
+including your locale.
=item B<ModulePath> I<Name>
#@BUILD_PLUGIN_BAROMETER_TRUE@LoadPlugin barometer
#@BUILD_PLUGIN_BATTERY_TRUE@LoadPlugin battery
#@BUILD_PLUGIN_BIND_TRUE@LoadPlugin bind
+#@BUILD_PLUGIN_CEPH_TRUE@LoadPlugin ceph
#@BUILD_PLUGIN_CONNTRACK_TRUE@LoadPlugin conntrack
#@BUILD_PLUGIN_CONTEXTSWITCH_TRUE@LoadPlugin contextswitch
#@BUILD_PLUGIN_CGROUPS_TRUE@LoadPlugin cgroups
#@BUILD_PLUGIN_GMOND_TRUE@LoadPlugin gmond
#@BUILD_PLUGIN_HDDTEMP_TRUE@LoadPlugin hddtemp
@BUILD_PLUGIN_INTERFACE_TRUE@@BUILD_PLUGIN_INTERFACE_TRUE@LoadPlugin interface
+#@BUILD_PLUGIN_IPC_TRUE@@BUILD_PLUGIN_IPC_TRUE@LoadPlugin ipc
#@BUILD_PLUGIN_IPTABLES_TRUE@LoadPlugin iptables
#@BUILD_PLUGIN_IPMI_TRUE@LoadPlugin ipmi
#@BUILD_PLUGIN_IPVS_TRUE@LoadPlugin ipvs
#@BUILD_PLUGIN_OPENLDAP_TRUE@LoadPlugin openldap
#@BUILD_PLUGIN_OPENVPN_TRUE@LoadPlugin openvpn
#@BUILD_PLUGIN_ORACLE_TRUE@LoadPlugin oracle
-#@BUILD_PLUGIN_PERL_TRUE@<LoadPlugin perl>
-#@BUILD_PLUGIN_PERL_TRUE@ Globals true
-#@BUILD_PLUGIN_PERL_TRUE@</LoadPlugin>
+#@BUILD_PLUGIN_PERL_TRUE@LoadPlugin perl
#@BUILD_PLUGIN_PINBA_TRUE@LoadPlugin pinba
#@BUILD_PLUGIN_PING_TRUE@LoadPlugin ping
#@BUILD_PLUGIN_POSTGRESQL_TRUE@LoadPlugin postgresql
#@BUILD_PLUGIN_POWERDNS_TRUE@LoadPlugin powerdns
#@BUILD_PLUGIN_PROCESSES_TRUE@LoadPlugin processes
#@BUILD_PLUGIN_PROTOCOLS_TRUE@LoadPlugin protocols
-#@BUILD_PLUGIN_PYTHON_TRUE@<LoadPlugin python>
-#@BUILD_PLUGIN_PYTHON_TRUE@ Globals true
-#@BUILD_PLUGIN_PYTHON_TRUE@</LoadPlugin>
+#@BUILD_PLUGIN_PYTHON_TRUE@LoadPlugin python
#@BUILD_PLUGIN_REDIS_TRUE@LoadPlugin redis
#@BUILD_PLUGIN_ROUTEROS_TRUE@LoadPlugin routeros
#@BUILD_PLUGIN_RRDCACHED_TRUE@LoadPlugin rrdcached
#@BUILD_PLUGIN_WRITE_MONGODB_TRUE@LoadPlugin write_mongodb
#@BUILD_PLUGIN_WRITE_REDIS_TRUE@LoadPlugin write_redis
#@BUILD_PLUGIN_WRITE_RIEMANN_TRUE@LoadPlugin write_riemann
+#@BUILD_PLUGIN_WRITE_SENSU_TRUE@LoadPlugin write_sensu
#@BUILD_PLUGIN_WRITE_TSDB_TRUE@LoadPlugin write_tsdb
#@BUILD_PLUGIN_XMMS_TRUE@LoadPlugin xmms
#@BUILD_PLUGIN_ZFS_ARC_TRUE@LoadPlugin zfs_arc
# RoutingKey "collectd"
# Persistent false
# StoreRates false
+# ConnectionRetryDelay 0
# </Publish>
#</Plugin>
# </View>
#</Plugin>
+#<Plugin ceph>
+# LongRunAvgLatency false
+# ConvertSpecialMetricTypes true
+# <Daemon "osd.0">
+# SocketPath "/var/run/ceph/ceph-osd.0.asok"
+# </Daemon>
+# <Daemon "osd.1">
+# SocketPath "/var/run/ceph/ceph-osd.1.asok"
+# </Daemon>
+# <Daemon "mon.a">
+# SocketPath "/var/run/ceph/ceph-mon.ceph1.asok"
+# </Daemon>
+# <Daemon "mds.a">
+# SocketPath "/var/run/ceph/ceph-mds.ceph1.asok"
+# </Daemon>
+#</Plugin>
+
#<Plugin cgroups>
# CGroup "libvirt"
# IgnoreSelected false
#<Plugin modbus>
# <Data "data_name">
# RegisterBase 1234
+# RegisterCmd ReadHolding
# RegisterType float
-# ModbusRegisterType holding
# Type gauge
# Instance "..."
# </Data>
#</Plugin>
#<Plugin write_http>
-# <URL "http://example.com/collectd-post">
+# <Node "example">
+# URL "http://example.com/collectd-post"
# User "collectd"
# Password "weCh3ik0"
# VerifyPeer true
# Format "Command"
# StoreRates false
# BufferSize 4096
-# </URL>
+# LowSpeedLimit 0
+# Timeout 0
+# </Node>
#</Plugin>
#<Plugin write_kafka>
# Attribute "foo" "bar"
#</Plugin>
+#<Plugin write_sensu>
+# <Node "example">
+# Host "localhost"
+# Port 3030
+# StoreRates true
+# AlwaysAppendDS false
+# Notifications true
+# Metrics true
+# EventServicePrefix ""
+# MetricHandler "influx"
+# MetricHandler "default"
+# NotificationHandler "flapjack"
+# NotificationHandler "howling_monkey"
+# </Node>
+# Tag "foobar"
+# Attribute "foo" "bar"
+#</Plugin>
+
#<Plugin write_tsdb>
# <Node>
# Host "localhost"
influence the way plugins are loaded, e.g.:
<LoadPlugin perl>
- Globals true
Interval 60
</LoadPlugin>
# ExchangeType "fanout"
# RoutingKey "collectd"
# Persistent false
+ # ConnectionRetryDelay 0
# Format "command"
# StoreRates false
# GraphitePrefix "collectd."
# QueueDurable false
# QueueAutoDelete true
# RoutingKey "collectd.#"
+ # ConnectionRetryDelay 0
</Subscribe>
</Plugin>
default), the I<transient> delivery mode will be used, i.e. messages may be
lost due to high load, overflowing queues or similar issues.
+=item B<ConnectionRetryDelay> I<Delay>
+
+When the connection to the AMQP broker is lost, defines the time in seconds to
+wait before attempting to reconnect. Defaults to 0, which implies collectd will
+attempt to reconnect at each read interval (in Subscribe mode) or each time
+values are ready for submission (in Publish mode).
+
=item B<Format> B<Command>|B<JSON>|B<Graphite> (Publish only)
Selects the format in which messages are sent to the broker. If set to
possibly need this option. What CA certificates come bundled with C<libcurl>
and are checked by default depends on the distribution you use.
+=item B<SSLCiphers> I<list of ciphers>
+
+Specifies which ciphers to use in the connection. The list of ciphers
+must specify valid ciphers. See
+L<http://www.openssl.org/docs/apps/ciphers.html> for details.
+
+=item B<Timeout> I<Milliseconds>
+
+The B<Timeout> option sets the overall timeout for HTTP requests to B<URL>, in
+milliseconds. By default, the configured B<Interval> is used to set the
+timeout.
+
=back
=head2 Plugin C<apcups>
possibly need this option. What CA certificates come bundled with C<libcurl>
and are checked by default depends on the distribution you use.
+=item B<Timeout> I<Milliseconds>
+
+The B<Timeout> option sets the overall timeout for HTTP requests to B<URL>, in
+milliseconds. By default, the configured B<Interval> is used to set the
+timeout.
+
=back
=head2 Plugin C<barometer>
-This plugin reads absolute air pressure using digital barometer sensor MPL115A2
-or MPL3115 from Freescale (sensor attached to any I2C bus available in
-the computer, for HW details see
-I<http://www.freescale.com/webapp/sps/site/prod_summary.jsp?code=MPL115A> or
-I<http://www.freescale.com/webapp/sps/site/prod_summary.jsp?code=MPL3115A2>).
-The sensor type - one fo these two - is detected automatically by the plugin
-and indicated in the plugin_instance (typically you will see subdirectory
-"barometer-mpl115" or "barometer-mpl3115").
+This plugin reads absolute air pressure using digital barometer sensor on a I2C
+bus. Supported sensors are:
+
+=over 5
+
+=item I<MPL115A2> from Freescale,
+see L<http://www.freescale.com/webapp/sps/site/prod_summary.jsp?code=MPL115A>.
+
+
+=item I<MPL3115> from Freescale,
+see L<http://www.freescale.com/webapp/sps/site/prod_summary.jsp?code=MPL3115A2>.
+
+
+=item I<BMP085> from Bosch Sensortec
+
+=back
+
+The sensor type - one of the above - is detected automatically by the plugin
+and indicated in the plugin_instance (you will see subdirectory
+"barometer-mpl115" or "barometer-mpl3115", or "barometer-bmp085"). The order of
+detection is BMP085 -> MPL3115 -> MPL115A2, the first one found will be used
+(only one sensor can be used by the plugin).
The plugin provides absolute barometric pressure, air pressure reduced to sea
level (several possible approximations) and as an auxiliary value also internal
the standard Linux i2c-dev interface (the particular bus driver has to
support the SM Bus command subset).
-The reduction or normalization to mean sea level pressure requires (depedning on
-selected method/approximation) also altitude and reference to temperature sensor(s).
-When multiple temperature sensors are configured the minumum of their values is
-always used (expecting that the warmer ones are affected by e.g. direct sun light
-at that moment).
+The reduction or normalization to mean sea level pressure requires (depending
+on selected method/approximation) also altitude and reference to temperature
+sensor(s). When multiple temperature sensors are configured the minimum of
+their values is always used (expecting that the warmer ones are affected by
+e.g. direct sun light at that moment).
Synopsis:
=item B<Device> I<device>
-Device name of the I2C bus to which the sensor is connected. Note that typically
-you need to have loaded the i2c-dev module.
+The only mandatory configuration parameter.
+
+Device name of the I2C bus to which the sensor is connected. Note that
+typically you need to have loaded the i2c-dev module.
Using i2c-tools you can check/list i2c buses available on your system by:
i2cdetect -l
=item B<Oversampling> I<value>
-For MPL115 this is the size of the averaging window. To filter out sensor noise
-a simple averaging using floating window of configurable size is used. The plugin
-will use average of the last C<value> measurements (value of 1 means no averaging).
-Minimal size is 1, maximal 1024.
+Optional parameter controlling the oversampling/accuracy. Default value
+is 1 providing fastest and least accurate reading.
+
+For I<MPL115> this is the size of the averaging window. To filter out sensor
+noise a simple averaging using floating window of this configurable size is
+used. The plugin will use average of the last C<value> measurements (value of 1
+means no averaging). Minimal size is 1, maximal 1024.
-For MPL3115 this is the oversampling value. The actual oversampling is performed
-by the sensor and the higher value the higher accuracy and longer conversion time
-(although nothing to worry about in the collectd context). Supported values are:
-1, 2, 4, 8, 16, 32, 64 and 128. Any other value is adjusted by the plugin to
-the closest supported one. Default is 128.
+For I<MPL3115> this is the oversampling value. The actual oversampling is
+performed by the sensor and the higher value the higher accuracy and longer
+conversion time (although nothing to worry about in the collectd context).
+Supported values are: 1, 2, 4, 8, 16, 32, 64 and 128. Any other value is
+adjusted by the plugin to the closest supported one.
+
+For I<BMP085> this is the oversampling value. The actual oversampling is
+performed by the sensor and the higher value the higher accuracy and longer
+conversion time (although nothing to worry about in the collectd context).
+Supported values are: 1, 2, 4, 8. Any other value is adjusted by the plugin to
+the closest supported one.
=item B<PressureOffset> I<offset>
-You can further calibrate the sensor by supplying pressure and/or temperature offsets.
-This is added to the measured/caclulated value (i.e. if the measured value is too high
-then use negative offset).
+Optional parameter for MPL3115 only.
+
+You can further calibrate the sensor by supplying pressure and/or temperature
+offsets. This is added to the measured/calculated value (i.e. if the measured
+value is too high then use negative offset).
In hPa, default is 0.0.
=item B<TemperatureOffset> I<offset>
-You can further calibrate the sensor by supplying pressure and/or temperature offsets.
-This is added to the measured/caclulated value (i.e. if the measured value is too high
-then use negative offset).
+Optional parameter for MPL3115 only.
+
+You can further calibrate the sensor by supplying pressure and/or temperature
+offsets. This is added to the measured/calculated value (i.e. if the measured
+value is too high then use negative offset).
In C, default is 0.0.
=item B<Normalization> I<method>
-Normalization method - what approximation/model is used to compute mean sea
+Optional parameter, default value is 0.
+
+Normalization method - what approximation/model is used to compute the mean sea
level pressure from the air absolute pressure.
Supported values of the C<method> (integer between from 0 to 2) are:
=over 5
-=item B<0> - no conversion, absolute pressrure is simply copied over. For this method you
+=item B<0> - no conversion, absolute pressure is simply copied over. For this method you
do not need to configure C<Altitude> or C<TemperatureSensor>.
=item B<1> - international formula for conversion ,
-See I<http://en.wikipedia.org/wiki/Atmospheric_pressure#Altitude_atmospheric_pressure_variation>.
-For this method you have to configure C<Altitude> but do not need C<TemperatureSensor>
-(uses fixed global temperature average instead).
+See
+L<http://en.wikipedia.org/wiki/Atmospheric_pressure#Altitude_atmospheric_pressure_variation>.
+For this method you have to configure C<Altitude> but do not need
+C<TemperatureSensor> (uses fixed global temperature average instead).
=item B<2> - formula as recommended by the Deutsche Wetterdienst (German
Meteorological Service).
-See I<http://de.wikipedia.org/wiki/Barometrische_H%C3%B6henformel#Theorie>
-For this method you have to configure both C<Altitude> and C<TemperatureSensor>.
+See L<http://de.wikipedia.org/wiki/Barometrische_H%C3%B6henformel#Theorie>.
+For this method you have to configure both C<Altitude> and
+C<TemperatureSensor>.
=back
=item B<TemperatureSensor> I<reference>
-Temperature sensor which should be used as a reference when normalizing the pressure.
-When specified more sensors a minumum is found and uses each time.
-The temperature reading directly from this pressure sensor/plugin
-is typically not suitable as the pressure sensor
-will be probably inside while we want outside temperature.
-The collectd reference name is something like
+Temperature sensor(s) which should be used as a reference when normalizing the
+pressure using C<Normalization> method 2.
+When more sensors are specified, a minimum is found and used each time. The
+temperature reading directly from this pressure sensor/plugin is typically not
+suitable as the pressure sensor will be probably inside while we want outside
+temperature. The collectd reference name is something like
<hostname>/<plugin_name>-<plugin_instance>/<type>-<type_instance>
-(<type_instance> is usually omitted when there is just single value type).
-Or you can figure it out from the path of the output data files.
+(<type_instance> is usually omitted when there is just single value type). Or
+you can figure it out from the path of the output data files.
=back
Default: Enabled.
+=item B<Timeout> I<Milliseconds>
+
+The B<Timeout> option sets the overall timeout for HTTP requests to B<URL>, in
+milliseconds. By default, the configured B<Interval> is used to set the
+timeout.
+
=item B<View> I<Name>
Collect statistics about a specific I<"view">. BIND can behave different,
=back
+=head2 Plugin C<ceph>
+
+The ceph plugin collects values from JSON data to be parsed by B<libyajl>
+(L<https://lloyd.github.io/yajl/>) retrieved from ceph daemon admin sockets.
+
+A separate B<Daemon> block must be configured for each ceph daemon to be
+monitored. The following example will read daemon statistics from four
+separate ceph daemons running on the same device (two OSDs, one MON, one MDS):
+
+ <Plugin ceph>
+ LongRunAvgLatency false
+ ConvertSpecialMetricTypes true
+ <Daemon "osd.0">
+ SocketPath "/var/run/ceph/ceph-osd.0.asok"
+ </Daemon>
+ <Daemon "osd.1">
+ SocketPath "/var/run/ceph/ceph-osd.1.asok"
+ </Daemon>
+ <Daemon "mon.a">
+ SocketPath "/var/run/ceph/ceph-mon.ceph1.asok"
+ </Daemon>
+ <Daemon "mds.a">
+ SocketPath "/var/run/ceph/ceph-mds.ceph1.asok"
+ </Daemon>
+ </Plugin>
+
+The ceph plugin accepts the following configuration options:
+
+=over 4
+
+=item B<LongRunAvgLatency> B<true>|B<false>
+
+If enabled, latency values (sum,count pairs) are calculated as the long run
+average - average since the ceph daemon was started = (sum / count).
+When disabled, latency values are calculated as the average since the last
+collection = (sum_now - sum_last) / (count_now - count_last).
+
+Default: Disabled
+
+=item B<ConvertSpecialMetricTypes> B<true>|B<false>
+
+If enabled, special metrics (metrics that differ in type from similar counters)
+are converted to the type of those similar counters. This currently only
+applies to filestore.journal_wr_bytes which is a counter for OSD daemons. The
+ceph schema reports this metric type as a sum,count pair while similar counters
+are treated as derive types. When converted, the sum is used as the counter
+value and is treated as a derive type.
+When disabled, all metrics are treated as the types received from the ceph schema.
+
+Default: Enabled
+
+=back
+
+Each B<Daemon> block must have a string argument for the plugin instance name.
+A B<SocketPath> is also required for each B<Daemon> block:
+
+=over 4
+
+=item B<Daemon> I<DaemonName>
+
+Name to be used as the instance name for this daemon.
+
+=item B<SocketPath> I<SocketPath>
+
+Specifies the path to the UNIX admin socket of the ceph daemon.
+
+=back
+
=head2 Plugin C<cgroups>
This plugin collects the CPU user/system time for each I<cgroup> by reading the
Measure response time for the request. If this setting is enabled, B<Match>
blocks (see below) are optional. Disabled by default.
+Beware that requests will get aborted if they take too long to complete. Adjust
+B<Timeout> accordingly if you expect B<MeasureResponseTime> to report such slow
+requests.
+
=item B<MeasureResponseCode> B<true>|B<false>
Measure response code for the request. If this setting is enabled, B<Match>
B<MeasureResponseCode> options are set to B<true>, B<Match> blocks are
optional.
+=item B<Timeout> I<Milliseconds>
+
+The B<Timeout> option sets the overall timeout for HTTP requests to B<URL>, in
+milliseconds. By default, the configured B<Interval> is used to set the
+timeout. Prior to version 5.5.0, there was no timeout and requests could hang
+indefinitely. This legacy behaviour can be achieved by setting the value of
+B<Timeout> to 0.
+
+If B<Timeout> is 0 or bigger than the B<Interval>, keep in mind that each slow
+network connection will stall one read thread. Adjust the B<ReadThreads> global
+setting accordingly to prevent this from blocking other plugins.
+
=back
=head2 Plugin C<curl_json>
=item B<Post> I<Body>
+=item B<Timeout> I<Milliseconds>
+
These options behave exactly equivalent to the appropriate options of the
I<cURL> plugin. Please see there for a detailed description.
=item B<Post> I<Body>
+=item B<Timeout> I<Milliseconds>
+
These options behave exactly equivalent to the appropriate options of the
I<cURL plugin>. Please see there for a detailed description.
<Data "voltage-input-1">
RegisterBase 0
RegisterType float
- ModbusRegisterType holding
+ RegisterCmd ReadHolding
Type voltage
Instance "input-1"
</Data>
<Data "voltage-input-2">
RegisterBase 2
RegisterType float
- ModbusRegisterType holding
+ RegisterCmd ReadHolding
Type voltage
Instance "input-2"
</Data>
<Data "supply-temperature-1">
RegisterBase 0
RegisterType Int16
- ModbusRegisterType holding
+ RegisterCmd ReadHolding
Type temperature
Instance "temp-1"
</Data>
B<Uint32> or B<Float>, two 16E<nbsp>bit registers will be read and the data is
combined into one value. Defaults to B<Uint16>.
-=item B<ModbusRegisterType> B<holding>|B<input>
+=item B<RegisterCmd> B<ReadHolding>|B<ReadInput>
Specifies register type to be collected from device. Works only with libmodbus
-2.9.2 or higher. Defaults to B<holding>.
+2.9.2 or higher. Defaults to B<ReadHolding>.
=item B<Type> I<Type>
Enable the collection of master / slave statistics in a replication setup. In
order to be able to get access to these statistics, the user needs special
-privileges. See the B<User> documentation above.
+privileges. See the B<User> documentation above. Defaults to B<false>.
=item B<SlaveNotifications> I<true|false>
If enabled, the plugin sends a notification if the replication slave I/O and /
-or SQL threads are not running.
+or SQL threads are not running. Defaults to B<false>.
=item B<ConnectTimeout> I<Seconds>
possibly need this option. What CA certificates come bundled with C<libcurl>
and are checked by default depends on the distribution you use.
+=item B<Timeout> I<Milliseconds>
+
+The B<Timeout> option sets the overall timeout for HTTP requests to B<URL>, in
+milliseconds. By default, the configured B<Interval> is used to set the
+timeout.
+
=back
=head2 Plugin C<notify_desktop>
amount of time will be lost, for example, if a single statement within the
transaction fails or if the database server crashes.
+=item B<Instance> I<name>
+
+Specify the plugin instance name that should be used instead of the database
+name (which is the default, if this option has not been specified). This
+allows to query multiple databases of the same name on the same host (e.g.
+when running multiple database server versions in parallel).
+
=item B<Host> I<hostname>
Specify the hostname or IP of the PostgreSQL server to connect to. If the
Use I<Password> to authenticate when connecting to I<Redis>.
-=item B<Timeout> I<Timeout in miliseconds>
+=item B<Timeout> I<Milliseconds>
The B<Timeout> option set the socket timeout for node response. Since the Redis
read function is blocking, you should keep this value as low as possible. Keep
I<Factor> must be in the range C<[0.0-1.0)>, i.e. between zero (inclusive) and
one (exclusive).
+=item B<CollectStatistics> B<false>|B<true>
+
+When set to B<true>, various statistics about the I<rrdcached> daemon will be
+collected, with "rrdcached" as the I<plugin name>. Defaults to B<false>.
+
+Statistics are read via I<rrdcached>s socket using the STATS command.
+See L<rrdcached(1)> for details.
+
=back
=head2 Plugin C<rrdtool>
Service name or port number to connect to. Defaults to C<27017>.
-=item B<Timeout> I<Timeout>
+=item B<Timeout> I<Milliseconds>
Set the timeout for each operation on I<MongoDB> to I<Timeout> milliseconds.
Setting this option to zero means no timeout, which is the default.
This output plugin submits values to an HTTP server using POST requests and
encoding metrics with JSON or using the C<PUTVAL> command described in
-L<collectd-unixsock(5)>. Each destination you want to post data to needs to
-have one B<URL> block, within which the destination can be configured further,
-for example by specifying authentication data.
+L<collectd-unixsock(5)>.
Synopsis:
<Plugin "write_http">
- <URL "http://example.com/post-collectd">
+ <Node "example">
+ URL "http://example.com/post-collectd"
User "collectd"
Password "weCh3ik0"
Format JSON
- </URL>
+ </Node>
</Plugin>
-B<URL> blocks need one string argument which is used as the URL to which data
-is posted. The following options are understood within B<URL> blocks.
+The plugin can send values to multiple HTTP servers by specifying one
+E<lt>B<Node>E<nbsp>I<Name>E<gt> block for each server. Within each B<Node>
+block, the following options are available:
=over 4
+=item B<URL> I<URL>
+
+URL to which the values are submitted. Mandatory.
+
=item B<User> I<Username>
Optional user name needed for authentication.
exceed the size of an C<int>, i.e. 2E<nbsp>GByte.
Defaults to C<4096>.
+=item B<LowSpeedLimit> I<Bytes per Second>
+
+Sets the minimal transfer rate in I<Bytes per Second> below which the
+connection with the HTTP server will be considered too slow and aborted. All
+the data submitted over this connection will probably be lost. Defaults to 0,
+which means no minimum transfer rate is enforced.
+
+=item B<Timeout> I<Timeout>
+
+Sets the maximum time in milliseconds given for HTTP POST operations to
+complete. When this limit is reached, the POST operation will be aborted, and
+all the data in the current send buffer will probably be lost. Defaults to 0,
+which means the connection never times out.
+
+The C<write_http> plugin regularly submits the collected values to the HTTP
+server. How frequently this happens depends on how much data you are collecting
+and the size of B<BufferSize>. The optimal value to set B<Timeout> to is
+slightly below this interval, which you can estimate by monitoring the network
+traffic between collectd and the HTTP server.
+
=back
=head2 Plugin C<write_kafka>
connections. Either a service name of a port number may be given. Please note
that numerical port numbers must be given as a string, too.
-=item B<Timeout> I<Timeout in miliseconds>
+=item B<Timeout> I<Milliseconds>
The B<Timeout> option sets the socket connection timeout, in milliseconds.
=back
+=head2 Plugin C<write_sensu>
+
+The I<write_sensu plugin> will send values to I<Sensu>, a powerful stream
+aggregation and monitoring system. The plugin sends I<JSON> encoded data to
+a local I<Sensu> client using a TCP socket.
+
+At the moment, the I<write_sensu plugin> does not send over a collectd_host
+parameter so it is not possible to use one collectd instance as a gateway for
+others. Each collectd host must pair with one I<Sensu> client.
+
+Synopsis:
+
+ <Plugin "write_sensu">
+ <Node "example">
+ Host "localhost"
+ Port "3030"
+ StoreRates true
+ AlwaysAppendDS false
+ MetricHandler "influx"
+ MetricHandler "default"
+ NotificationHandler "flapjack"
+ NotificationHandler "howling_monkey"
+ Notifications true
+ </Node>
+ Tag "foobar"
+ Attribute "foo" "bar"
+ </Plugin>
+
+The following options are understood by the I<write_sensu plugin>:
+
+=over 4
+
+=item E<lt>B<Node> I<Name>E<gt>
+
+The plugin's configuration consists of one or more B<Node> blocks. Each block
+is given a unique I<Name> and specifies one connection to an instance of
+I<Sensu>. Inside the B<Node> block, the following per-connection options are
+understood:
+
+=over 4
+
+=item B<Host> I<Address>
+
+Hostname or address to connect to. Defaults to C<localhost>.
+
+=item B<Port> I<Service>
+
+Service name or port number to connect to. Defaults to C<3030>.
+
+=item B<StoreRates> B<true>|B<false>
+
+If set to B<true> (the default), convert counter values to rates. If set to
+B<false> counter values are stored as is, i.e. as an increasing integer number.
+
+This will be reflected in the C<collectd_data_source_type> tag: If
+B<StoreRates> is enabled, converted values will have "rate" appended to the
+data source type, e.g. C<collectd_data_source_type:derive:rate>.
+
+=item B<AlwaysAppendDS> B<false>|B<true>
+
+If set to B<true>, append the name of the I<Data Source> (DS) to the
+"service", i.e. the field that, together with the "host" field, uniquely
+identifies a metric in I<Sensu>. If set to B<false> (the default), this is
+only done when there is more than one DS.
+
+=item B<Notifications> B<false>|B<true>
+
+If set to B<true>, create I<Sensu> events for notifications. This is B<false>
+by default. At least one of B<Notifications> or B<Metrics> should be enabled.
+
+=item B<Metrics> B<false>|B<true>
+
+If set to B<true>, create I<Sensu> events for metrics. This is B<false>
+by default. At least one of B<Notifications> or B<Metrics> should be enabled.
+
+
+=item B<Separator> I<String>
+
+Sets the separator for I<Sensu> metrics name or checks. Defaults to "/".
+
+=item B<MetricHandler> I<String>
+
+Add a handler that will be set when metrics are sent to I<Sensu>. You can add
+several of them, one per line. Defaults to no handler.
+
+=item B<NotificationHandler> I<String>
+
+Add a handler that will be set when notifications are sent to I<Sensu>. You can
+add several of them, one per line. Defaults to no handler.
+
+=item B<EventServicePrefix> I<String>
+
+Add the given string as a prefix to the event service name.
+If B<EventServicePrefix> is not set or is set to an empty string (""),
+no prefix will be used.
+
+=back
+
+=item B<Tag> I<String>
+
+Add the given string as an additional tag to the metric being sent to
+I<Sensu>.
+
+=item B<Attribute> I<String> I<String>
+
+Consider the two given strings to be the key and value of an additional
+attribute for each metric being sent out to I<Sensu>.
+
+=back
+
=head2 Plugin C<zookeeper>
The I<zookeeper plugin> will collect statistics from a I<Zookeeper> server
char *post_body;
_Bool response_time;
_Bool response_code;
+ int timeout;
CURL *curl;
char curl_errbuf[CURL_ERROR_SIZE];
if (wp->post_body != NULL)
curl_easy_setopt (wp->curl, CURLOPT_POSTFIELDS, wp->post_body);
+#ifdef HAVE_CURLOPT_TIMEOUT_MS
+ if (wp->timeout >= 0)
+ curl_easy_setopt (wp->curl, CURLOPT_TIMEOUT_MS, (long) wp->timeout);
+ else
+ curl_easy_setopt (wp->curl, CURLOPT_TIMEOUT_MS,
+ CDTIME_T_TO_MS(plugin_get_interval()));
+#endif
+
return (0);
} /* }}} int cc_page_init_curl */
page->verify_host = 1;
page->response_time = 0;
page->response_code = 0;
+ page->timeout = -1;
page->instance = strdup (ci->values[0].value.string);
if (page->instance == NULL)
status = cc_config_append_string ("Header", &page->headers, child);
else if (strcasecmp ("Post", child->key) == 0)
status = cf_util_get_string (child, &page->post_body);
+ else if (strcasecmp ("Timeout", child->key) == 0)
+ status = cf_util_get_int (child, &page->timeout);
else
{
WARNING ("curl plugin: Option `%s' not allowed here.", child->key);
status = curl_easy_perform (wp->curl);
if (status != CURLE_OK)
{
- ERROR ("curl plugin: curl_easy_perform failed with staus %i: %s",
+ ERROR ("curl plugin: curl_easy_perform failed with status %i: %s",
status, wp->curl_errbuf);
return (-1);
}
long response_code = 0;
status = curl_easy_getinfo(wp->curl, CURLINFO_RESPONSE_CODE, &response_code);
if(status != CURLE_OK) {
- ERROR ("curl plugin: Fetching response code failed with staus %i: %s",
+ ERROR ("curl plugin: Fetching response code failed with status %i: %s",
status, wp->curl_errbuf);
} else {
cc_submit_response_code(wp, response_code);
struct curl_slist *headers;
char *post_body;
cdtime_t interval;
+ int timeout;
CURL *curl;
char curl_errbuf[CURL_ERROR_SIZE];
if (db->post_body != NULL)
curl_easy_setopt (db->curl, CURLOPT_POSTFIELDS, db->post_body);
+#ifdef HAVE_CURLOPT_TIMEOUT_MS
+ if (db->timeout >= 0)
+ curl_easy_setopt (db->curl, CURLOPT_TIMEOUT_MS, (long) db->timeout);
+ else if (db->interval > 0)
+ curl_easy_setopt (db->curl, CURLOPT_TIMEOUT_MS,
+ CDTIME_T_TO_MS(db->interval));
+ else
+ curl_easy_setopt (db->curl, CURLOPT_TIMEOUT_MS,
+ CDTIME_T_TO_MS(plugin_get_interval()));
+#endif
+
return (0);
} /* }}} int cj_init_curl */
}
memset (db, 0, sizeof (*db));
+ db->timeout = -1;
+
if (strcasecmp ("URL", ci->key) == 0)
status = cf_util_get_string (ci, &db->url);
else if (strcasecmp ("Sock", ci->key) == 0)
status = cj_config_add_key (db, child);
else if (strcasecmp ("Interval", child->key) == 0)
status = cf_util_get_cdtime(child, &db->interval);
+ else if (strcasecmp ("Timeout", child->key) == 0)
+ status = cf_util_get_int (child, &db->timeout);
else
{
WARNING ("curl_json plugin: Option `%s' not allowed here.", child->key);
_Bool verify_host;
char *cacert;
char *post_body;
+ int timeout;
struct curl_slist *headers;
cx_namespace_t *namespaces;
if (db->post_body != NULL)
curl_easy_setopt (db->curl, CURLOPT_POSTFIELDS, db->post_body);
+#ifdef HAVE_CURLOPT_TIMEOUT_MS
+ if (db->timeout >= 0)
+ curl_easy_setopt (db->curl, CURLOPT_TIMEOUT_MS, (long) db->timeout);
+ else
+ curl_easy_setopt (db->curl, CURLOPT_TIMEOUT_MS,
+ CDTIME_T_TO_MS(plugin_get_interval()));
+#endif
+
return (0);
} /* }}} int cx_init_curl */
}
memset (db, 0, sizeof (*db));
+ db->timeout = -1;
+
if (strcasecmp ("URL", ci->key) == 0)
{
status = cf_util_get_string (ci, &db->url);
status = cf_util_get_string (child, &db->post_body);
else if (strcasecmp ("Namespace", child->key) == 0)
status = cx_config_add_namespace (db, child);
+ else if (strcasecmp ("Timeout", child->key) == 0)
+ status = cf_util_get_int (child, &db->timeout);
else
{
WARNING ("curl_xml plugin: Option `%s' not allowed here.", child->key);
if (e->type != MD_TYPE_STRING)
{
- ERROR ("meta_data_get_signed_int: Type mismatch for key `%s'", e->key);
+ ERROR ("meta_data_get_string: Type mismatch for key `%s'", e->key);
pthread_mutex_unlock (&md->lock);
return (-ENOENT);
}
const char *dir;
char filename[BUFSIZE] = "";
char typename[BUFSIZE];
- int typename_len;
int ret;
struct stat statbuf;
struct dirent *de;
WARNING ("plugin_load: Filename too long: \"%s.so\"", plugin_name);
return (-1);
}
- typename_len = strlen (typename);
if ((dh = opendir (dir)) == NULL)
{
while ((de = readdir (dh)) != NULL)
{
- if (strncasecmp (de->d_name, typename, typename_len))
+ if (strcasecmp (de->d_name, typename))
continue;
status = ssnprintf (filename, sizeof (filename),
/**
* collectd - src/dbi.c
- * Copyright (C) 2008-2013 Florian octo Forster
+ * Copyright (C) 2008-2015 Florian octo Forster
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
#include <dbi/dbi.h>
-#ifdef HAVE_LIBDBI_R
- dbi_inst inst = NULL;
+/* libdbi 0.9.0 introduced a new thread-safe interface and marked the old
+ * functions "deprecated". These macros convert the new functions to their old
+ * counterparts for backwards compatibility. */
+#if !defined(LIBDBI_VERSION) || (LIBDBI_VERSION < 900)
+# define HAVE_LEGACY_LIBDBI 1
+# define dbi_initialize_r(a,inst) dbi_initialize(a)
+# define dbi_shutdown_r(inst) dbi_shutdown()
+# define dbi_set_verbosity_r(a,inst) dbi_set_verbosity(a)
+# define dbi_driver_list_r(a,inst) dbi_driver_list(a)
+# define dbi_driver_open_r(a,inst) dbi_driver_open(a)
#endif
+
/*
* Data types
*/
/*
* Global variables
*/
+#if !defined(HAVE_LEGACY_LIBDBI) || !HAVE_LEGACY_LIBDBI
+static dbi_inst dbi_instance = 0;
+#endif
static udb_query_t **queries = NULL;
static size_t queries_num = 0;
static cdbi_database_t **databases = NULL;
else if (src_type == DBI_TYPE_STRING)
{
const char *value;
-
+
value = dbi_result_get_string_idx (res, index);
if (value == NULL)
sstrncpy (buffer, "", buffer_size);
* </Result>
* ...
* </Query>
- *
+ *
* <Database "plugin_instance1">
* Driver "mysql"
* DriverOption "hostname" "localhost"
return (-1);
}
-#ifdef HAVE_LIBDBI_R
- status = dbi_initialize_r (NULL, &inst);
-#else
- status = dbi_initialize (NULL);
-#endif
+ status = dbi_initialize_r (/* driverdir = */ NULL, &dbi_instance);
if (status < 0)
{
- ERROR ("dbi plugin: cdbi_init: dbi_initialize failed with status %i.",
+ ERROR ("dbi plugin: cdbi_init: dbi_initialize_r failed with status %i.",
status);
return (-1);
}
else if (status == 0)
{
- ERROR ("dbi plugin: `dbi_initialize' could not load any drivers. Please "
+ ERROR ("dbi plugin: `dbi_initialize_r' could not load any drivers. Please "
"install at least one `DBD' or check your installation.");
return (-1);
}
- DEBUG ("dbi plugin: cdbi_init: dbi_initialize reports %i driver%s.",
+ DEBUG ("dbi plugin: cdbi_init: dbi_initialize_r reports %i driver%s.",
status, (status == 1) ? "" : "s");
return (0);
db->connection = NULL;
}
-#ifdef HAVE_LIBDBI_R
- driver = dbi_driver_open_r (db->driver, inst);
-#else
- driver = dbi_driver_open (db->driver);
-#endif
+ driver = dbi_driver_open_r (db->driver, dbi_instance);
if (driver == NULL)
{
- ERROR ("dbi plugin: cdbi_connect_database: dbi_driver_open (%s) failed.",
+ ERROR ("dbi plugin: cdbi_connect_database: dbi_driver_open_r (%s) failed.",
db->driver);
INFO ("dbi plugin: Maybe the driver isn't installed? "
"Known drivers are:");
-#ifdef HAVE_LIBDBI_R
- for (driver = dbi_driver_list_r (NULL, inst);
+ for (driver = dbi_driver_list_r (NULL, dbi_instance);
driver != NULL;
- driver = dbi_driver_list_r (driver, inst))
-#else
- for (driver = dbi_driver_list (NULL);
- driver != NULL;
- driver = dbi_driver_list (driver))
-#endif
+ driver = dbi_driver_list_r (driver, dbi_instance))
{
INFO ("dbi plugin: * %s", dbi_driver_get_name (driver));
}
fprintf (fh,
"Severity: %s\n"
- "Time: %u\n",
- severity, (unsigned int)CDTIME_T_TO_TIME_T(n->time));
+ "Time: %.3f\n",
+ severity, CDTIME_T_TO_DOUBLE (n->time));
/* Print the optional fields */
if (strlen (n->host) > 0)
--- /dev/null
+/**
+ * collectd - src/ipc.c, based on src/memcached.c
+ * Copyright (C) 2010 Andres J. Diaz <ajdiaz@connectical.com>
+ * Copyright (C) 2010 Manuel L. Sanmartin <manuel.luis@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors:
+ * Andres J. Diaz <ajdiaz@connectical.com>
+ * Manuel L. Sanmartin <manuel.luis@gmail>
+ **/
+
+/* Much of this code is based on the busybox ipc implementation, which is:
+ * (C) Rodney Radford <rradford@mindspring.com> and distributed under GPLv2.
+ */
+
+#include "collectd.h"
+#include "common.h"
+#include "plugin.h"
+#include "configfile.h"
+
+#if KERNEL_LINUX
+ /* X/OPEN tells us to use <sys/{types,ipc,sem}.h> for semctl() */
+ /* X/OPEN tells us to use <sys/{types,ipc,msg}.h> for msgctl() */
+ /* X/OPEN tells us to use <sys/{types,ipc,shm}.h> for shmctl() */
+# include <sys/types.h>
+# include <sys/ipc.h>
+# include <sys/sem.h>
+# include <sys/msg.h>
+# include <sys/shm.h>
+
+ /* For older kernels the same holds for the defines below */
+# ifndef MSG_STAT
+# define MSG_STAT 11
+# define MSG_INFO 12
+# endif
+
+# ifndef SHM_STAT
+# define SHM_STAT 13
+# define SHM_INFO 14
+ struct shm_info {
+ int used_ids;
+ ulong shm_tot; /* total allocated shm */
+ ulong shm_rss; /* total resident shm */
+ ulong shm_swp; /* total swapped shm */
+ ulong swap_attempts;
+ ulong swap_successes;
+ };
+# endif
+
+# ifndef SEM_STAT
+# define SEM_STAT 18
+# define SEM_INFO 19
+# endif
+
+ /* The last arg of semctl is a union semun, but where is it defined?
+ X/OPEN tells us to define it ourselves, but until recently
+ Linux include files would also define it. */
+# if defined(__GNU_LIBRARY__) && !defined(_SEM_SEMUN_UNDEFINED)
+ /* union semun is defined by including <sys/sem.h> */
+# else
+ /* according to X/OPEN we have to define it ourselves */
+ union semun {
+ int val;
+ struct semid_ds *buf;
+ unsigned short *array;
+ struct seminfo *__buf;
+ };
+# endif
+static long pagesize_g;
+/* #endif KERNEL_LINUX */
+#elif KERNEL_AIX
+# include <sys/ipc_info.h>
+/* #endif KERNEL_AIX */
+#else
+# error "No applicable input method."
+#endif
+
+__attribute__ ((nonnull(1)))
+static void ipc_submit_g (const char *plugin_instance,
+ const char *type,
+ const char *type_instance,
+ gauge_t value) /* {{{ */
+{
+ value_t values[1];
+ value_list_t vl = VALUE_LIST_INIT;
+
+ values[0].gauge = value;
+
+ vl.values = values;
+ vl.values_len = 1;
+ sstrncpy (vl.host, hostname_g, sizeof (vl.host));
+ sstrncpy (vl.plugin, "ipc", sizeof (vl.plugin));
+ sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance));
+ sstrncpy (vl.type, type, sizeof (vl.type));
+ if (type_instance != NULL)
+ sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance));
+
+ plugin_dispatch_values (&vl);
+} /* }}} */
+
+#if KERNEL_AIX
+static caddr_t ipc_get_info (cid_t cid, int cmd, int version, int stsize, int *nmemb) /* {{{ */
+{
+ int size = 0;
+ caddr_t buff = NULL;
+
+ if (get_ipc_info(cid, cmd, version, buff, &size) < 0)
+ {
+ if (errno != ENOSPC) {
+ char errbuf[1024];
+ WARNING ("ipc plugin: get_ipc_info: %s",
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ return (NULL);
+ }
+ }
+
+ if (size == 0)
+ return NULL;
+
+  if (size % stsize) {
+    ERROR ("ipc plugin: ipc_get_info: mismatch between struct size and buffer size");
+    return (NULL);
+  }
+
+ *nmemb = size / stsize;
+
+ buff = (caddr_t)malloc (size);
+ if (buff == NULL) {
+ ERROR ("ipc plugin: ipc_get_info malloc failed.");
+ return (NULL);
+ }
+
+ if (get_ipc_info(cid, cmd, version, buff, &size) < 0)
+ {
+ char errbuf[1024];
+ WARNING ("ipc plugin: get_ipc_info: %s",
+ sstrerror (errno, errbuf, sizeof (errbuf)));
+ free(buff);
+ return (NULL);
+ }
+
+ return buff;
+} /* }}} */
+#endif /* KERNEL_AIX */
+
+static int ipc_read_sem (void) /* {{{ */
+{
+#if KERNEL_LINUX
+ struct seminfo seminfo;
+ union semun arg;
+
+ arg.array = (ushort *) (void *) &seminfo;
+
+ if ( semctl(0, 0, SEM_INFO, arg) < 0 )
+ {
+ ERROR("Kernel is not configured for semaphores");
+ return (-1);
+ }
+
+ ipc_submit_g("sem", "count", "arrays", seminfo.semusz);
+ ipc_submit_g("sem", "count", "total", seminfo.semaem);
+
+/* #endif KERNEL_LINUX */
+#elif KERNEL_AIX
+ ipcinfo_sem_t *ipcinfo_sem;
+ unsigned short sem_nsems=0;
+ unsigned short sems=0;
+ int i,n;
+
+ ipcinfo_sem = (ipcinfo_sem_t *)ipc_get_info(0,
+ GET_IPCINFO_SEM_ALL, IPCINFO_SEM_VERSION, sizeof(ipcinfo_sem_t), &n);
+ if (ipcinfo_sem == NULL)
+ return -1;
+
+ for (i=0; i<n; i++) {
+ sem_nsems += ipcinfo_sem[i].sem_nsems;
+ sems++;
+ }
+ free(ipcinfo_sem);
+
+ ipc_submit_g("sem", "count", "arrays", sem_nsems);
+ ipc_submit_g("sem", "count", "total", sems);
+#endif /* KERNEL_AIX */
+
+ return (0);
+}
+/* }}} */
+
+static int ipc_read_shm (void) /* {{{ */
+{
+#if KERNEL_LINUX
+ struct shm_info shm_info;
+
+ if ( shmctl(0, SHM_INFO, (struct shmid_ds *) (void *) &shm_info) < 0 )
+ {
+ ERROR("Kernel is not configured for shared memory");
+ return (-1);
+ }
+ ipc_submit_g("shm", "segments", NULL, shm_info.used_ids);
+ ipc_submit_g("shm", "bytes", "total", shm_info.shm_tot * pagesize_g);
+ ipc_submit_g("shm", "bytes", "rss", shm_info.shm_rss * pagesize_g);
+ ipc_submit_g("shm", "bytes", "swapped", shm_info.shm_swp * pagesize_g);
+/* #endif KERNEL_LINUX */
+#elif KERNEL_AIX
+ ipcinfo_shm_t *ipcinfo_shm;
+ ipcinfo_shm_t *pshm;
+ unsigned int shm_segments=0;
+ size64_t shm_bytes=0;
+ int i,n;
+
+ ipcinfo_shm = (ipcinfo_shm_t *)ipc_get_info(0,
+ GET_IPCINFO_SHM_ALL, IPCINFO_SHM_VERSION, sizeof(ipcinfo_shm_t), &n);
+ if (ipcinfo_shm == NULL)
+ return -1;
+
+ for (i=0, pshm=ipcinfo_shm; i<n; i++, pshm++) {
+ shm_segments++;
+ shm_bytes += pshm->shm_segsz;
+ }
+ free(ipcinfo_shm);
+
+ ipc_submit_g("shm", "segments", NULL, shm_segments);
+ ipc_submit_g("shm", "bytes", "total", shm_bytes);
+
+#endif /* KERNEL_AIX */
+ return (0);
+}
+/* }}} */
+
+static int ipc_read_msg (void) /* {{{ */
+{
+#if KERNEL_LINUX
+ struct msginfo msginfo;
+
+ if ( msgctl(0, MSG_INFO, (struct msqid_ds *) (void *) &msginfo) < 0 )
+ {
+ ERROR("Kernel is not configured for message queues");
+ return (-1);
+ }
+ ipc_submit_g("msg", "count", "queues", msginfo.msgmni);
+ ipc_submit_g("msg", "count", "headers", msginfo.msgmap);
+ ipc_submit_g("msg", "count", "space", msginfo.msgtql);
+/* #endif KERNEL_LINUX */
+#elif KERNEL_AIX
+ ipcinfo_msg_t *ipcinfo_msg;
+ uint32_t msg_used_space=0;
+ uint32_t msg_alloc_queues=0;
+ msgqnum32_t msg_qnum=0;
+ int i,n;
+
+ ipcinfo_msg = (ipcinfo_msg_t *)ipc_get_info(0,
+ GET_IPCINFO_MSG_ALL, IPCINFO_MSG_VERSION, sizeof(ipcinfo_msg_t), &n);
+ if (ipcinfo_msg == NULL)
+ return -1;
+
+ for (i=0; i<n; i++) {
+ msg_alloc_queues++;
+ msg_used_space += ipcinfo_msg[i].msg_cbytes;
+ msg_qnum += ipcinfo_msg[i].msg_qnum;
+ }
+ free(ipcinfo_msg);
+
+ ipc_submit_g("msg", "count", "queues", msg_alloc_queues);
+ ipc_submit_g("msg", "count", "headers", msg_qnum);
+ ipc_submit_g("msg", "count", "space", msg_used_space);
+#endif /* KERNEL_AIX */
+ return (0);
+}
+/* }}} */
+
+static int ipc_read (void) /* {{{ */
+{
+ int x = 0;
+ x |= ipc_read_shm();
+ x |= ipc_read_sem();
+ x |= ipc_read_msg();
+
+ return (x);
+}
+/* }}} */
+
+#ifdef KERNEL_LINUX
+static int ipc_init (void) /* {{{ */
+{
+ pagesize_g = sysconf(_SC_PAGESIZE);
+ return (0);
+}
+/* }}} */
+#endif /* KERNEL_LINUX */
+
+void module_register (void) /* {{{ */
+{
+#ifdef KERNEL_LINUX
+ plugin_register_init ("ipc", ipc_init);
+#endif
+ plugin_register_read ("ipc", ipc_read);
+}
+/* }}} */
+
+/* vim: set sw=2 sts=2 et fdm=marker : */
if (config_block != NULL)
{
-
cjni_config_perform (config_block);
oconfig_free (config_block);
- config_block = NULL;
}
if (jvm == NULL)
/**
* collectd - src/libcollectdclient/network.c
- * Copyright (C) 2005-2013 Florian Forster
+ * Copyright (C) 2005-2015 Florian Forster
* Copyright (C) 2010 Max Henkel
*
* Permission is hereby granted, free of charge, to any person obtaining a
memset (buffer, 0, sizeof (buffer));
buffer_size = sizeof (buffer);
- lcc_network_buffer_finalize (srv->buffer);
+ status = lcc_network_buffer_finalize (srv->buffer);
+ if (status != 0)
+ {
+ lcc_network_buffer_initialize (srv->buffer);
+ return (status);
+ }
+
status = lcc_network_buffer_get (srv->buffer, buffer, &buffer_size);
lcc_network_buffer_initialize (srv->buffer);
/**
* collectd - src/libcollectdclient/network_buffer.c
- * Copyright (C) 2010-2012 Florian octo Forster
+ * Copyright (C) 2010-2015 Florian octo Forster
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
#if HAVE_LIBGCRYPT
if (nb->seclevel == SIGN)
- nb_add_signature (nb);
+ return nb_add_signature (nb);
else if (nb->seclevel == ENCRYPT)
- nb_add_encryption (nb);
+ return nb_add_encryption (nb);
#endif
return (0);
return (ci_copy);
} /* oconfig_item_t *oconfig_clone */
-void oconfig_free (oconfig_item_t *ci)
+void oconfig_free_all (oconfig_item_t *ci)
{
int i;
free (ci->values);
for (i = 0; i < ci->children_num; i++)
- oconfig_free (ci->children + i);
+ oconfig_free_all (ci->children + i);
if (ci->children != NULL)
free (ci->children);
}
+void oconfig_free (oconfig_item_t *ci)
+{
+ oconfig_free_all (ci);
+ free (ci);
+ ci = NULL;
+}
+
/*
* vim:shiftwidth=2:tabstop=8:softtabstop=2:fdm=marker
*/
unsigned int len;
#endif
- if (yajl_gen_string(g, (u_char *)"@level", strlen("@level")) !=
+ if (yajl_gen_string(g, (u_char *)"level", strlen("level")) !=
yajl_gen_status_ok)
goto err;
if (yajl_gen_map_open(g) != yajl_gen_status_ok)
goto err;
- if (yajl_gen_string(g, (u_char *)"@message", strlen("@message")) !=
+ if (yajl_gen_string(g, (u_char *)"message", strlen("message")) !=
yajl_gen_status_ok)
goto err;
if (yajl_gen_string(g, (u_char *)msg, strlen(msg)) !=
if (yajl_gen_map_open(g) != yajl_gen_status_ok)
goto err;
- if (yajl_gen_string(g, (u_char *)"@message", strlen("@message")) !=
+ if (yajl_gen_string(g, (u_char *)"message", strlen("message")) !=
yajl_gen_status_ok)
goto err;
if (strlen(n->message) > 0) {
goto err;
}
-
- if (yajl_gen_string(g, (u_char *)"@fields", strlen("@fields")) !=
- yajl_gen_status_ok)
- goto err;
- if (yajl_gen_map_open(g) !=
- yajl_gen_status_ok)
- goto err;
-
if (strlen(n->host) > 0) {
if (yajl_gen_string(g, (u_char *)"host", strlen("host")) !=
yajl_gen_status_ok)
goto err;
break;
}
- if (yajl_gen_map_close(g) != yajl_gen_status_ok)
- goto err;
log_logstash_print (g, LOG_INFO, (n->time != 0) ? n->time : cdtime ());
return (0);
/*
* <Data "data_name">
* RegisterBase 1234
+ * RegisterCmd ReadHolding
* RegisterType float
* Type gauge
- * ModbusRegisterType holding
* Instance "..."
* </Data>
*
status = -1;
}
}
- else if (strcasecmp ("ModbusRegisterType", child->key) == 0)
+ else if (strcasecmp ("RegisterCmd", child->key) == 0)
{
#if LEGACY_LIBMODBUS
- ERROR("Modbus plugin: ModbusRegisterType parameter can not be used "
+ ERROR("Modbus plugin: RegisterCmd parameter can not be used "
"with your libmodbus version");
#else
char tmp[16];
status = cf_util_get_string_buffer (child, tmp, sizeof (tmp));
if (status != 0)
/* do nothing */;
- else if (strcasecmp ("holding", tmp) == 0)
+ else if (strcasecmp ("ReadHolding", tmp) == 0)
data.modbus_register_type = MREG_HOLDING;
- else if (strcasecmp ("input", tmp) == 0)
+ else if (strcasecmp ("ReadInput", tmp) == 0)
data.modbus_register_type = MREG_INPUT;
else
{
static char *verify_peer = NULL;
static char *verify_host = NULL;
static char *cacert = NULL;
+static char *timeout = NULL;
static CURL *curl = NULL;
"Password",
"VerifyPeer",
"VerifyHost",
- "CACert"
+ "CACert",
+ "Timeout"
};
static int config_keys_num = STATIC_ARRAY_SIZE (config_keys);
return (config_set (&verify_host, value));
else if (strcasecmp (key, "cacert") == 0)
return (config_set (&cacert, value));
+ else if (strcasecmp (key, "timeout") == 0)
+ return (config_set (&timeout, value));
else
return (-1);
} /* int config */
static int init (void)
{
- static char credentials[1024];
-
if (curl != NULL)
curl_easy_cleanup (curl);
if (user != NULL)
{
+#ifdef HAVE_CURLOPT_USERNAME
+ curl_easy_setopt (curl, CURLOPT_USERNAME, user);
+ curl_easy_setopt (curl, CURLOPT_PASSWORD, (pass == NULL) ? "" : pass);
+#else
+ static char credentials[1024];
int status = ssnprintf (credentials, sizeof (credentials),
"%s:%s", user, pass == NULL ? "" : pass);
if ((status < 0) || ((size_t) status >= sizeof (credentials)))
}
curl_easy_setopt (curl, CURLOPT_USERPWD, credentials);
+#endif
}
if (url != NULL)
curl_easy_setopt (curl, CURLOPT_CAINFO, cacert);
}
+#ifdef HAVE_CURLOPT_TIMEOUT_MS
+ if (timeout != NULL)
+ {
+ curl_easy_setopt (curl, CURLOPT_TIMEOUT_MS, atol(timeout));
+ }
+ else
+ {
+ curl_easy_setopt (curl, CURLOPT_TIMEOUT_MS,
+ CDTIME_T_TO_MS(plugin_get_interval()));
+ }
+#endif
+
return (0);
} /* void init */
/* Description of statistics returned by the recursor: {{{
all-outqueries counts the number of outgoing UDP queries since starting
-answers0-1 counts the number of queries answered within 1 milisecond
+answers0-1 counts the number of queries answered within 1 millisecond
answers100-1000 counts the number of queries answered within 1 second
-answers10-100 counts the number of queries answered within 100 miliseconds
-answers1-10 counts the number of queries answered within 10 miliseconds
+answers10-100 counts the number of queries answered within 100 milliseconds
+answers1-10 counts the number of queries answered within 10 milliseconds
answers-slow counts the number of queries answered after 1 second
cache-entries shows the number of entries in the cache
cache-hits counts the number of cache hits since starting
{"recursing-questions", "dns_question", "recurse"},
{"tcp-queries", "dns_question", "tcp"},
{"udp-queries", "dns_question", "udp"},
+ {"rd-queries", "dns_question", "rd"},
/* Answers */
{"recursing-answers", "dns_answer", "recurse"},
{"tcp-answers", "dns_answer", "tcp"},
{"udp-answers", "dns_answer", "udp"},
+ {"recursion-unanswered", "dns_answer", "recursion-unanswered"},
+ {"udp-answers-bytes", "total_bytes", "udp-answers-bytes"},
/* Cache stuff */
{"packetcache-hit", "cache_result", "packet-hit"},
{"packetcache-miss", "cache_result", "packet-miss"},
{"packetcache-size", "cache_size", "packet"},
+ {"key-cache-size", "cache_size", "key"},
+ {"meta-cache-size", "cache_size", "meta"},
+ {"signature-cache-size", "cache_size", "signature"},
{"query-cache-hit", "cache_result", "query-hit"},
{"query-cache-miss", "cache_result", "query-miss"},
/* Latency */
{"latency", "latency", NULL},
+ /* DNS updates */
+ {"dnsupdate-answers", "dns_answer", "dnsupdate-answer"},
+ {"dnsupdate-changes", "dns_question", "dnsupdate-changes"},
+ {"dnsupdate-queries", "dns_question", "dnsupdate-queries"},
+ {"dnsupdate-refused", "dns_answer", "dnsupdate-refused"},
+
/* Other stuff.. */
{"corrupt-packets", "ipt_packets", "corrupt"},
{"deferred-cache-inserts", "counter", "cache-deferred_insert"},
{"udp4-queries", "dns_question", "queries-udp4"},
{"udp6-answers", "dns_answer", "udp6"},
{"udp6-queries", "dns_question", "queries-udp6"},
+ {"security-status", "dns_question", "security-status"},
+ {"udp-do-queries", "dns_question", "udp-do_queries"},
+ {"signatures", "counter", "signatures"},
/***********************
* Recursor statistics *
{"throttle-entries", "gauge", "entries-throttle"},
{"unauthorized-tcp", "counter", "denied-unauthorized_tcp"},
{"unauthorized-udp", "counter", "denied-unauthorized_udp"},
- {"unexpected-packets", "dns_answer", "unexpected"}
- /* {"uptime", "", ""} */
+ {"unexpected-packets", "dns_answer", "unexpected"},
+ {"uptime", "uptime", NULL}
}; /* }}} */
int lookup_table_length = STATIC_ARRAY_SIZE (lookup_table);
if (strcmp (lookup_table[i].name, pdns_type) == 0)
break;
- if (lookup_table[i].type == NULL)
- return;
-
if (i >= lookup_table_length)
{
INFO ("powerdns plugin: submit: Not found in lookup table: %s = %s;",
return;
}
+ if (lookup_table[i].type == NULL)
+ return;
+
type = lookup_table[i].type;
type_instance = lookup_table[i].type_instance;
int wait = 0;
kvm_t *kd;
- char errbuf[1024];
+ char errbuf[_POSIX2_LINE_MAX];
struct kinfo_proc *procs; /* array of processes */
struct kinfo_proc *proc_ptr = NULL;
int count; /* returns number of processes */
ps_list_reset ();
/* Open the kvm interface, get a descriptor */
- kd = kvm_open (NULL, NULL, NULL, 0, errbuf);
+ kd = kvm_openfiles (NULL, "/dev/null", NULL, 0, errbuf);
if (kd == NULL)
{
ERROR ("processes plugin: Cannot open kvm interface: %s",
#endif
static int cpy_init_python() {
- char *argv = "";
PyObject *sys;
PyObject *module;
#ifdef IS_PY3K
+ wchar_t *argv = L"";
/* Add a builtin module, before Py_Initialize */
PyImport_AppendInittab("collectd", PyInit_collectd);
+#else
+ char *argv = "";
#endif
Py_Initialize();
} else if (strcasecmp(item->key, "Encoding") == 0) {
if (item->values_num != 1 || item->values[0].type != OCONFIG_TYPE_STRING)
continue;
+#ifdef IS_PY3K
+ NOTICE("python: \"Encoding\" was used in the config file but Python3 was used, which does not support changing encodings. Ignoring this.");
+#else
/* Why is this even necessary? And undocumented? */
if (PyUnicode_SetDefaultEncoding(item->values[0].value.string))
cpy_log_exception("setting default encoding");
+#endif
} else if (strcasecmp(item->key, "LogTraces") == 0) {
if (item->values_num != 1 || item->values[0].type != OCONFIG_TYPE_BOOLEAN)
continue;
status = parse_value (string, &ret, type);
if (status != 0)
{
- ERROR ("snmp plugin: csnmp_value_list_to_value: Parsing string as %s failed: %s",
+ ERROR ("snmp plugin: host %s: csnmp_value_list_to_value: Parsing string as %s failed: %s",
+ (host_name != NULL) ? host_name : "UNKNOWN",
DS_TYPE_TO_STRING (type), string);
}
}
if (csnmp_instance_list_add (&instance_list_head, &instance_list_tail,
res, host, data) != 0)
{
- ERROR ("snmp plugin: csnmp_instance_list_add failed.");
+ ERROR ("snmp plugin: host %s: csnmp_instance_list_add failed.",
+ host->name);
status = -1;
break;
}
/* #endif defined(VM_SWAPUSAGE) */
#elif HAVE_LIBKVM_GETSWAPINFO
+ char errbuf[_POSIX2_LINE_MAX];
+
if (kvm_obj != NULL)
{
kvm_close (kvm_obj);
kvm_pagesize = getpagesize ();
- if ((kvm_obj = kvm_open (NULL, /* execfile */
- NULL, /* corefile */
- NULL, /* swapfile */
- O_RDONLY, /* flags */
- NULL)) /* errstr */
- == NULL)
+ kvm_obj = kvm_openfiles (NULL, "/dev/null", NULL, O_RDONLY, errbuf);
+
+ if (kvm_obj == NULL)
{
- ERROR ("swap plugin: kvm_open failed.");
+ ERROR ("swap plugin: kvm_openfiles failed, %s", errbuf);
return (-1);
}
/* #endif HAVE_LIBKVM_GETSWAPINFO */
cache_ratio value:GAUGE:0:100
cache_result value:DERIVE:0:U
cache_size value:GAUGE:0:U
+ceph_bytes value:GAUGE:U:U
+ceph_latency value:GAUGE:U:U
+ceph_rate value:DERIVE:0:U
changes_since_last_save value:GAUGE:0:U
charge value:GAUGE:0:U
compression_ratio value:GAUGE:0:2
route_etx value:GAUGE:0:U
route_metric value:GAUGE:0:U
routes value:GAUGE:0:U
+segments value:GAUGE:0:65535
serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U
signal_noise value:GAUGE:U:0
signal_power value:GAUGE:U:0
**/
#include "collectd.h"
+#include "plugin.h"
#include "utils_latency.h"
#include "common.h"
-#ifndef LATENCY_HISTOGRAM_SIZE
-# define LATENCY_HISTOGRAM_SIZE 1000
+#include <math.h>
+
+#ifndef HISTOGRAM_NUM_BINS
+# define HISTOGRAM_NUM_BINS 1000
#endif
+static const int HISTOGRAM_DEFAULT_BIN_WIDTH = 1;
+
struct latency_counter_s
{
cdtime_t start_time;
cdtime_t min;
cdtime_t max;
- int histogram[LATENCY_HISTOGRAM_SIZE];
+ int bin_width;
+ int histogram[HISTOGRAM_NUM_BINS];
};
+/*
+* A histogram represents the distribution of data: it has a list of "bins",
+* where each bin represents an interval and holds a count (frequency) of
+* the values that fall within that interval.
+*
+* The histogram's range is determined by the number of bins and the bin width.
+* There are 1000 bins, and all bins have the same width (1 millisecond by
+* default). When a value above this range is added, the histogram's range is
+* increased by increasing the bin width (note that the number of bins always
+* remains 1000). This operation of increasing the bin width is somewhat
+* expensive, as each bin needs to be visited to update its count. To reduce
+* frequent changes of the bin width, the new bin width is the next nearest
+* power of 2. Example: 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, ...
+*
+* So, if the required bin width is 300, then new bin width will be 512 as it is
+* the next nearest power of 2.
+*
+*/
+/* change_bin_width: widen the histogram's bin width so that `val` (latency in
+ * ms) falls within the histogram's range, then redistribute existing counts
+ * into the new, wider bins. The new width is the next power of two that is
+ * >= (val + 1) / HISTOGRAM_NUM_BINS, which keeps rescaling infrequent. */
+void change_bin_width (latency_counter_t *lc, size_t val) /* {{{ */
+{
+  int i = 0;
+  /* First find the required bin width:
+   *   required_bin_width = (value + 1) / num_bins
+   * then round up to the next power of 2:
+   *   new_bin_width = 2^(ceil(log2(required_bin_width)))
+   */
+  double required_bin_width = (double) (val + 1) / HISTOGRAM_NUM_BINS;
+  double required_bin_width_logbase2 = log (required_bin_width) / log (2.0);
+  int new_bin_width = (int) pow (2.0, ceil (required_bin_width_logbase2));
+  int old_bin_width = lc->bin_width;
+
+  lc->bin_width = new_bin_width;
+
+  /* The bin width has been increased; move each old bin's count into the
+   * bin it maps to under the new, wider width. */
+  if (lc->num > 0) /* if the histogram has data then rescale, else skip */
+  {
+    /* BUGFIX: this division must be done in floating point. With integer
+     * division (old_bin_width / new_bin_width), old < new always yields 0,
+     * which collapsed every count into bin 0. */
+    double width_change_ratio = ((double) old_bin_width) / ((double) new_bin_width);
+    for (i = 0; i < HISTOGRAM_NUM_BINS; i++)
+    {
+      int new_bin = (int) (i * width_change_ratio);
+      if (i == new_bin)
+        continue;
+      lc->histogram[new_bin] += lc->histogram[i];
+      lc->histogram[i] = 0;
+    }
+    DEBUG("utils_latency: change_bin_width: fixed all bins");
+  }
+
+  DEBUG("utils_latency: change_bin_width: val-[%zu], oldBinWidth-[%d], "
+      "newBinWidth-[%d], required_bin_width-[%f], "
+      "required_bin_width_logbase2-[%f]",
+      val, old_bin_width, new_bin_width, required_bin_width,
+      required_bin_width_logbase2);
+
+} /* }}} void change_bin_width */
+
latency_counter_t *latency_counter_create () /* {{{ */
{
latency_counter_t *lc;
return (NULL);
latency_counter_reset (lc);
+ lc->bin_width = HISTOGRAM_DEFAULT_BIN_WIDTH;
return (lc);
} /* }}} latency_counter_t *latency_counter_create */
* subtract one from the cdtime_t value so that exactly 1.0 ms get sorted
* accordingly. */
latency_ms = (size_t) CDTIME_T_TO_MS (latency - 1);
- if (latency_ms < STATIC_ARRAY_SIZE (lc->histogram))
- lc->histogram[latency_ms]++;
+
+ int bin = (int)(latency_ms / lc->bin_width);
+ if (bin >= HISTOGRAM_NUM_BINS)
+ {
+ change_bin_width(lc, latency_ms);
+ bin = (int)(latency_ms / lc->bin_width);
+ if (bin >= HISTOGRAM_NUM_BINS)
+ {
+ ERROR("utils_latency: latency_counter_add: Invalid bin %d", bin);
+ return;
+ }
+ }
+ lc->histogram[bin]++;
} /* }}} void latency_counter_add */
void latency_counter_reset (latency_counter_t *lc) /* {{{ */
if (lc == NULL)
return;
+ int bin_width = lc->bin_width;
memset (lc, 0, sizeof (*lc));
+
+ /* preserve bin width */
+ lc->bin_width = bin_width;
lc->start_time = cdtime ();
} /* }}} void latency_counter_reset */
percent_upper = 0.0;
percent_lower = 0.0;
sum = 0;
- for (i = 0; i < LATENCY_HISTOGRAM_SIZE; i++)
+ for (i = 0; i < HISTOGRAM_NUM_BINS; i++)
{
percent_lower = percent_upper;
sum += lc->histogram[i];
break;
}
- if (i >= LATENCY_HISTOGRAM_SIZE)
+ if (i >= HISTOGRAM_NUM_BINS)
return (0);
assert (percent_upper >= percent);
assert (percent_lower < percent);
- ms_upper = (double) (i + 1);
- ms_lower = (double) i;
+ ms_upper = (double) ( (i + 1) * lc->bin_width );
+ ms_lower = (double) ( i * lc->bin_width );
if (i == 0)
return (MS_TO_CDTIME_T (ms_upper));
*/
struct wh_callback_s
{
- char *location;
+ char *name;
+ char *location;
char *user;
char *pass;
char *credentials;
char *clientkeypass;
long sslversion;
_Bool store_rates;
+ int low_speed_limit;
+ time_t low_speed_time;
+ int timeout;
#define WH_FORMAT_COMMAND 0
#define WH_FORMAT_JSON 1
return (-1);
}
+ if (cb->low_speed_limit > 0 && cb->low_speed_time > 0)
+ {
+ curl_easy_setopt (cb->curl, CURLOPT_LOW_SPEED_LIMIT,
+ (long) (cb->low_speed_limit * cb->low_speed_time));
+ curl_easy_setopt (cb->curl, CURLOPT_LOW_SPEED_TIME,
+ (long) cb->low_speed_time);
+ }
+
+#ifdef HAVE_CURLOPT_TIMEOUT_MS
+ if (cb->timeout > 0)
+ curl_easy_setopt (cb->curl, CURLOPT_TIMEOUT_MS, (long) cb->timeout);
+#endif
+
curl_easy_setopt (cb->curl, CURLOPT_NOSIGNAL, 1L);
curl_easy_setopt (cb->curl, CURLOPT_USERAGENT, COLLECTD_USERAGENT);
if (cb->user != NULL)
{
+#ifdef HAVE_CURLOPT_USERNAME
+ curl_easy_setopt (cb->curl, CURLOPT_USERNAME, cb->user);
+ curl_easy_setopt (cb->curl, CURLOPT_PASSWORD,
+ (cb->pass == NULL) ? "" : cb->pass);
+#else
size_t credentials_size;
credentials_size = strlen (cb->user) + 2;
ssnprintf (cb->credentials, credentials_size, "%s:%s",
cb->user, (cb->pass == NULL) ? "" : cb->pass);
curl_easy_setopt (cb->curl, CURLOPT_USERPWD, cb->credentials);
+#endif
curl_easy_setopt (cb->curl, CURLOPT_HTTPAUTH, CURLAUTH_ANY);
}
curl_easy_cleanup (cb->curl);
cb->curl = NULL;
}
+ sfree (cb->name);
sfree (cb->location);
sfree (cb->user);
sfree (cb->pass);
return (0);
} /* }}} int config_set_format */
-static int wh_config_url (oconfig_item_t *ci) /* {{{ */
+static int wh_config_node (oconfig_item_t *ci) /* {{{ */
{
wh_callback_t *cb;
int buffer_size = 0;
user_data_t user_data;
+ char callback_name[DATA_MAX_NAME_LEN];
int i;
cb = malloc (sizeof (*cb));
cb->verify_host = 1;
cb->format = WH_FORMAT_COMMAND;
cb->sslversion = CURL_SSLVERSION_DEFAULT;
+ cb->low_speed_limit = 0;
+ cb->timeout = 0;
pthread_mutex_init (&cb->send_lock, /* attr = */ NULL);
- cf_util_get_string (ci, &cb->location);
- if (cb->location == NULL)
- return (-1);
+ cf_util_get_string (ci, &cb->name);
+
+ /* FIXME: Remove this legacy mode in version 6. */
+ if (strcasecmp ("URL", ci->key) == 0)
+ cf_util_get_string (ci, &cb->location);
for (i = 0; i < ci->children_num; i++)
{
oconfig_item_t *child = ci->children + i;
- if (strcasecmp ("User", child->key) == 0)
+ if (strcasecmp ("URL", child->key) == 0)
+ cf_util_get_string (child, &cb->location);
+ else if (strcasecmp ("User", child->key) == 0)
cf_util_get_string (child, &cb->user);
else if (strcasecmp ("Password", child->key) == 0)
cf_util_get_string (child, &cb->pass);
cf_util_get_boolean (child, &cb->store_rates);
else if (strcasecmp ("BufferSize", child->key) == 0)
cf_util_get_int (child, &buffer_size);
+ else if (strcasecmp ("LowSpeedLimit", child->key) == 0)
+ cf_util_get_int (child, &cb->low_speed_limit);
+ else if (strcasecmp ("Timeout", child->key) == 0)
+ cf_util_get_int (child, &cb->timeout);
else
{
ERROR ("write_http plugin: Invalid configuration "
}
}
+ if (cb->location == NULL)
+ {
+ ERROR ("write_http plugin: no URL defined for instance '%s'",
+ cb->name);
+ wh_callback_free (cb);
+ return (-1);
+ }
+
+ if (cb->low_speed_limit > 0)
+ cb->low_speed_time = CDTIME_T_TO_TIME_T(plugin_get_interval());
+
/* Determine send_buffer_size. */
cb->send_buffer_size = WRITE_HTTP_DEFAULT_BUFFER_SIZE;
if (buffer_size >= 1024)
/* Nulls the buffer and sets ..._free and ..._fill. */
wh_reset_buffer (cb);
- DEBUG ("write_http: Registering write callback with URL %s",
- cb->location);
+ ssnprintf (callback_name, sizeof (callback_name), "write_http/%s",
+ cb->name);
+ DEBUG ("write_http: Registering write callback '%s' with URL '%s'",
+ callback_name, cb->location);
memset (&user_data, 0, sizeof (user_data));
user_data.data = cb;
user_data.free_func = NULL;
- plugin_register_flush ("write_http", wh_flush, &user_data);
+ plugin_register_flush (callback_name, wh_flush, &user_data);
user_data.free_func = wh_callback_free;
- plugin_register_write ("write_http", wh_write, &user_data);
+ plugin_register_write (callback_name, wh_write, &user_data);
return (0);
-} /* }}} int wh_config_url */
+} /* }}} int wh_config_node */
static int wh_config (oconfig_item_t *ci) /* {{{ */
{
{
oconfig_item_t *child = ci->children + i;
- if (strcasecmp ("URL", child->key) == 0)
- wh_config_url (child);
+ if (strcasecmp ("Node", child->key) == 0)
+ wh_config_node (child);
+ /* FIXME: Remove this legacy mode in version 6. */
+ else if (strcasecmp ("URL", child->key) == 0) {
+ WARNING ("write_http plugin: Legacy <URL> block found. "
+ "Please use <Node> instead.");
+ wh_config_node (child);
+ }
else
{
ERROR ("write_http plugin: Invalid configuration "
#include <librdkafka/rdkafka.h>
#include <pthread.h>
#include <zlib.h>
+#include <errno.h>
struct kafka_topic_context {
#define KAFKA_FORMAT_JSON 0
_Bool store_rates;
rd_kafka_topic_conf_t *conf;
rd_kafka_topic_t *topic;
+ rd_kafka_conf_t *kafka_conf;
rd_kafka_t *kafka;
int has_key;
u_int32_t key;
char *postfix;
char escape_char;
char *topic_name;
+ pthread_mutex_t lock;
};
+static int kafka_handle(struct kafka_topic_context *);
static int kafka_write(const data_set_t *, const value_list_t *, user_data_t *);
static int32_t kafka_partition(const rd_kafka_topic_t *, const void *, size_t,
int32_t, void *, void *);
return target;
}
+/* kafka_handle: lazily create the librdkafka producer handle (ctx->kafka)
+ * and topic handle (ctx->topic) on first use, consuming the saved
+ * configuration templates (ctx->kafka_conf, ctx->conf).
+ * Returns 0 on success or when both handles already exist; non-zero on
+ * failure. Callers are expected to hold ctx->lock (kafka_write does). */
+static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
+{
+ char errbuf[1024];
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+
+ /* Fast path: both handles already created. */
+ if (ctx->kafka != NULL && ctx->topic != NULL)
+ return(0);
+
+ if (ctx->kafka == NULL) {
+ /* Duplicate the saved template; presumably rd_kafka_new() takes
+ * ownership of the conf it is handed -- verify against librdkafka docs. */
+ if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
+ ERROR("write_kafka plugin: cannot duplicate kafka config");
+ return(1);
+ }
+
+ if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+ errbuf, sizeof(errbuf))) == NULL) {
+ ERROR("write_kafka plugin: cannot create kafka handle.");
+ return 1;
+ }
+
+ /* The config template is no longer needed once the handle exists. */
+ rd_kafka_conf_destroy(ctx->kafka_conf);
+ ctx->kafka_conf = NULL;
+
+ INFO ("write_kafka plugin: created KAFKA handle : %s", rd_kafka_name(ctx->kafka));
+
+#ifdef HAVE_LIBRDKAFKA_LOGGER
+ rd_kafka_set_logger(ctx->kafka, kafka_log);
+#endif
+ }
+
+ if (ctx->topic == NULL ) {
+ if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
+ ERROR("write_kafka plugin: cannot duplicate kafka topic config");
+ return 1;
+ }
+
+ if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
+ topic_conf)) == NULL) {
+ ERROR("write_kafka plugin: cannot create topic : %s\n",
+ rd_kafka_err2str(rd_kafka_errno2err(errno)));
+ return errno;
+ }
+
+ /* The topic config template is consumed as well. */
+ rd_kafka_topic_conf_destroy(ctx->conf);
+ ctx->conf = NULL;
+
+ INFO ("write_kafka plugin: handle created for topic : %s", rd_kafka_topic_name(ctx->topic));
+ }
+
+ return(0);
+
+} /* }}} int kafka_handle */
+
static int kafka_write(const data_set_t *ds, /* {{{ */
const value_list_t *vl,
user_data_t *ud)
if ((ds == NULL) || (vl == NULL) || (ctx == NULL))
return EINVAL;
+ pthread_mutex_lock (&ctx->lock);
+ status = kafka_handle(ctx);
+ pthread_mutex_unlock (&ctx->lock);
+ if( status != 0 )
+ return status;
+
bzero(buffer, sizeof(buffer));
switch (ctx->format) {
rd_kafka_topic_destroy(ctx->topic);
if (ctx->conf != NULL)
rd_kafka_topic_conf_destroy(ctx->conf);
+ if (ctx->kafka_conf != NULL)
+ rd_kafka_conf_destroy(ctx->kafka_conf);
+ if (ctx->kafka != NULL)
+ rd_kafka_destroy(ctx->kafka);
sfree(ctx);
} /* }}} void kafka_topic_context_free */
tctx->store_rates = 1;
tctx->format = KAFKA_FORMAT_JSON;
-#ifdef HAVE_LIBRDKAFKA_LOG_CB
- rd_kafka_conf_set_log_cb(conf, kafka_log);
-#endif
- if ((tctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
- errbuf, sizeof(errbuf))) == NULL) {
+ if ((tctx->kafka_conf = rd_kafka_conf_dup(conf)) == NULL) {
sfree(tctx);
- ERROR("write_kafka plugin: cannot create kafka handle.");
+ ERROR("write_kafka plugin: cannot allocate memory for kafka config");
return;
}
-#ifdef HAVE_LIBRDKAFKA_LOGGER
- rd_kafka_conf_set_logger(tctx->kafka, kafka_log);
+
+#ifdef HAVE_LIBRDKAFKA_LOG_CB
+ rd_kafka_conf_set_log_cb(tctx->kafka_conf, kafka_log);
#endif
- conf = NULL;
if ((tctx->conf = rd_kafka_topic_conf_new()) == NULL) {
- rd_kafka_destroy(tctx->kafka);
+ rd_kafka_conf_destroy(tctx->kafka_conf);
sfree(tctx);
ERROR ("write_kafka plugin: cannot create topic configuration.");
return;
rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);
- if ((tctx->topic = rd_kafka_topic_new(tctx->kafka, tctx->topic_name,
- tctx->conf)) == NULL) {
- ERROR("write_kafka plugin: cannot create topic.");
- goto errout;
- }
- tctx->conf = NULL;
-
ssnprintf(callback_name, sizeof(callback_name),
"write_kafka/%s", tctx->topic_name);
callback_name, status);
goto errout;
}
+
+ pthread_mutex_init (&tctx->lock, /* attr = */ NULL);
+
return;
errout:
- if (conf != NULL)
- rd_kafka_conf_destroy(conf);
- if (tctx->kafka != NULL)
- rd_kafka_destroy(tctx->kafka);
- if (tctx->topic != NULL)
- rd_kafka_topic_destroy(tctx->topic);
if (tctx->topic_name != NULL)
free(tctx->topic_name);
if (tctx->conf != NULL)
rd_kafka_topic_conf_destroy(tctx->conf);
+ if (tctx->kafka_conf != NULL)
+ rd_kafka_conf_destroy(tctx->kafka_conf);
sfree(tctx);
} /* }}} int kafka_config_topic */
int i;
oconfig_item_t *child;
rd_kafka_conf_t *conf;
- rd_kafka_conf_t *cloned;
rd_kafka_conf_res_t ret;
char errbuf[1024];
WARNING("cannot allocate kafka configuration.");
return -1;
}
-
for (i = 0; i < ci->children_num; i++) {
child = &ci->children[i];
if (strcasecmp("Topic", child->key) == 0) {
- if ((cloned = rd_kafka_conf_dup(conf)) == NULL) {
- WARNING("write_kafka plugin: cannot allocate memory for kafka config");
- goto errout;
- }
- kafka_config_topic (cloned, child);
+ kafka_config_topic (conf, child);
} else if (strcasecmp(child->key, "Property") == 0) {
char *key = NULL;
char *val = NULL;
#endif
#include <mongo.h>
+#if (MONGO_MAJOR == 0) && (MONGO_MINOR < 8)
+# define bson_alloc() bson_create()
+# define bson_dealloc(b) bson_dispose(b)
+#endif
+
struct wm_node_s
{
char name[DATA_MAX_NAME_LEN];
gauge_t *rates;
int i;
- ret = bson_create ();
+ ret = bson_alloc (); /* matched by bson_dealloc() */
if (ret == NULL)
{
ERROR ("write_mongodb plugin: bson_create failed.");
rates = NULL;
}
- bson_init (ret);
+ bson_init (ret); /* matched by bson_destroy() */
bson_append_date (ret, "time", (bson_date_t) CDTIME_T_TO_MS (vl->time));
bson_append_string (ret, "host", vl->host);
bson_append_string (ret, "plugin", vl->plugin);
pthread_mutex_unlock (&node->lock);
/* free our resource as not to leak memory */
- bson_dispose (bson_record);
+ bson_destroy (bson_record); /* matches bson_init() */
+ bson_dealloc (bson_record); /* matches bson_alloc() */
return (0);
} /* }}} int wm_write */
--- /dev/null
+/**
+ * collectd - src/write_sensu.c
+ * Copyright (C) 2015 Fabrice A. Marie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Fabrice A. Marie <fabrice at kibinlabs.com>
+ */
+
+#include "collectd.h"
+#include "plugin.h"
+#include "common.h"
+#include "configfile.h"
+#include "utils_cache.h"
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <netdb.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stddef.h>
+
+#include <stdlib.h>
+#ifndef HAVE_ASPRINTF
+/*
+ * Uses asprintf() portable implementation from
+ * https://github.com/littlstar/asprintf.c/blob/master/
+ * copyright (c) 2014 joseph werle <joseph.werle@gmail.com> under MIT license.
+ */
+#include <stdio.h>
+#include <stdarg.h>
+
+int vasprintf(char **str, const char *fmt, va_list args) {
+ int size = 0;
+ va_list tmpa;
+ // copy
+ va_copy(tmpa, args);
+ // apply variadic arguments to
+ // sprintf with format to get size
+ size = vsnprintf(NULL, size, fmt, tmpa);
+ // toss args
+ va_end(tmpa);
+ // return -1 to be compliant if
+ // size is less than 0
+ if (size < 0) { return -1; }
+ // alloc with size plus 1 for `\0'
+ *str = (char *) malloc(size + 1);
+ // return -1 to be compliant
+ // if pointer is `NULL'
+ if (NULL == *str) { return -1; }
+ // format string with original
+ // variadic arguments and set new size
+ size = vsprintf(*str, fmt, args);
+ return size;
+}
+
+int asprintf(char **str, const char *fmt, ...) {
+ int size = 0;
+ va_list args;
+ // init variadic argumens
+ va_start(args, fmt);
+ // format and get size
+ size = vasprintf(str, fmt, args);
+ // toss args
+ va_end(args);
+ return size;
+}
+
+#endif
+
+#define SENSU_HOST "localhost"
+#define SENSU_PORT "3030"
+
+struct str_list {
+ int nb_strs;
+ char **strs;
+};
+
+struct sensu_host {
+ char *name;
+ char *event_service_prefix;
+ struct str_list metric_handlers;
+ struct str_list notification_handlers;
+#define F_READY 0x01
+ uint8_t flags;
+ pthread_mutex_t lock;
+ _Bool notifications;
+ _Bool metrics;
+ _Bool store_rates;
+ _Bool always_append_ds;
+ char *separator;
+ char *node;
+ char *service;
+ int s;
+ struct addrinfo *res;
+ int reference_count;
+};
+
+static char *sensu_tags;
+static char **sensu_attrs;
+static size_t sensu_attrs_num;
+
+/* add_str_to_list: append a copy of str_to_add to the string list.
+ * Returns 0 on success, -1 on allocation failure (list left unchanged). */
+static int add_str_to_list(struct str_list *strs,
+ const char *str_to_add) /* {{{ */
+{
+ /* Keep the old array pointer so it can be restored if realloc fails;
+ * realloc returns NULL without freeing the original block. */
+ char **old_strs_ptr = strs->strs;
+ char *newstr = strdup(str_to_add);
+ if (newstr == NULL) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return -1;
+ }
+ strs->strs = realloc(strs->strs, sizeof(char *) *(strs->nb_strs + 1));
+ if (strs->strs == NULL) {
+ strs->strs = old_strs_ptr;
+ free(newstr);
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return -1;
+ }
+ strs->strs[strs->nb_strs] = newstr;
+ strs->nb_strs++;
+ return 0;
+}
+/* }}} int add_str_to_list */
+
+/* free_str_list: free every string in the list and then the pointer array
+ * itself. NOTE(review): nb_strs/strs are not reset, so the struct must not
+ * be reused after this call without reinitialization. */
+static void free_str_list(struct str_list *strs) /* {{{ */
+{
+ int i;
+ for (i=0; i<strs->nb_strs; i++)
+ free(strs->strs[i]);
+ free(strs->strs);
+}
+/* }}} void free_str_list */
+
+/* sensu_connect: establish a TCP connection to the sensu client.
+ * host->node/host->service are resolved once (cached via the F_READY flag,
+ * with the getaddrinfo() result kept in host->res); each returned address
+ * is then tried until one connects. On success host->s holds the connected
+ * descriptor and 0 is returned; -1 on resolution or connection failure. */
+static int sensu_connect(struct sensu_host *host) /* {{{ */
+{
+ int e;
+ struct addrinfo *ai, hints;
+ char const *node;
+ char const *service;
+
+ // Resolve the target if we haven't done already
+ if (!(host->flags & F_READY)) {
+ memset(&hints, 0, sizeof(hints));
+ /* NOTE(review): this memset zeroes the local pointer variable itself,
+ * not any pointed-to data; harmless (service is assigned below) but
+ * looks unintentional. */
+ memset(&service, 0, sizeof(service));
+ host->res = NULL;
+ hints.ai_family = AF_INET;
+ hints.ai_socktype = SOCK_STREAM;
+#ifdef AI_ADDRCONFIG
+ hints.ai_flags |= AI_ADDRCONFIG;
+#endif
+
+ node = (host->node != NULL) ? host->node : SENSU_HOST;
+ service = (host->service != NULL) ? host->service : SENSU_PORT;
+
+ if ((e = getaddrinfo(node, service, &hints, &(host->res))) != 0) {
+ ERROR("write_sensu plugin: Unable to resolve host \"%s\": %s",
+ node, gai_strerror(e));
+ return -1;
+ }
+ DEBUG("write_sensu plugin: successfully resolved host/port: %s/%s",
+ node, service);
+ host->flags |= F_READY;
+ }
+
+ struct linger so_linger;
+ host->s = -1;
+ for (ai = host->res; ai != NULL; ai = ai->ai_next) {
+ // create the socket
+ if ((host->s = socket(ai->ai_family,
+ ai->ai_socktype,
+ ai->ai_protocol)) == -1) {
+ continue;
+ }
+
+ // Set very low close() lingering
+ so_linger.l_onoff = 1;
+ so_linger.l_linger = 3;
+ if (setsockopt(host->s, SOL_SOCKET, SO_LINGER, &so_linger, sizeof so_linger) != 0)
+ WARNING("write_sensu plugin: failed to set socket close() lingering");
+
+ // connect the socket
+ if (connect(host->s, ai->ai_addr, ai->ai_addrlen) != 0) {
+ close(host->s);
+ host->s = -1;
+ continue;
+ }
+ DEBUG("write_sensu plugin: connected");
+ break;
+ }
+
+ if (host->s < 0) {
+ WARNING("write_sensu plugin: Unable to connect to sensu client");
+ return -1;
+ }
+ return 0;
+} /* }}} int sensu_connect */
+
+/* sensu_close_socket: close the host's socket if open and mark it closed
+ * by setting host->s to -1. Safe to call when no socket is open. */
+static void sensu_close_socket(struct sensu_host *host) /* {{{ */
+{
+ if (host->s != -1)
+ close(host->s);
+ host->s = -1;
+
+} /* }}} void sensu_close_socket */
+
+/* build_json_str_list: build a JSON fragment of the form
+ *   "<tag>": ["<str0>", "<str1>", ...]
+ * from the given string list. Returns a newly allocated string the caller
+ * must free, an empty string ("") when the list is empty (callers test
+ * strlen() == 0 for this case), or NULL on allocation failure. */
+static char *build_json_str_list(const char *tag, struct str_list const *list) /* {{{ */
+{
+	int res;
+	char *ret_str;
+	char *temp_str;
+	int i;
+
+	if (list->nb_strs == 0) {
+		ret_str = malloc(sizeof(char));
+		if (ret_str == NULL) {
+			ERROR("write_sensu plugin: Unable to alloc memory");
+			return NULL;
+		}
+		ret_str[0] = '\0';
+		/* BUGFIX: return here. Falling through would dereference
+		 * list->strs[0] of an empty list (NULL/out-of-bounds access). */
+		return ret_str;
+	}
+
+	/* Open the JSON list with the first element... */
+	res = asprintf(&temp_str, "\"%s\": [\"%s\"", tag, list->strs[0]);
+	if (res == -1) {
+		ERROR("write_sensu plugin: Unable to alloc memory");
+		return NULL;
+	}
+	/* ...append each remaining element... */
+	for (i=1; i<list->nb_strs; i++) {
+		res = asprintf(&ret_str, "%s, \"%s\"", temp_str, list->strs[i]);
+		free(temp_str);
+		if (res == -1) {
+			ERROR("write_sensu plugin: Unable to alloc memory");
+			return NULL;
+		}
+		temp_str = ret_str;
+	}
+	/* ...and close the bracket. */
+	res = asprintf(&ret_str, "%s]", temp_str);
+	free(temp_str);
+	if (res == -1) {
+		ERROR("write_sensu plugin: Unable to alloc memory");
+		return NULL;
+	}
+
+	return ret_str;
+} /* }}} char *build_json_str_list */
+
+/* sensu_format_name2: format an identifier of the form
+ *   <hostname><sep><plugin>[-<plugin_instance>]<sep><type>[-<type_instance>]
+ * into ret (ret_len bytes, always NUL-terminated on success). Returns 0 on
+ * success or ENOBUFS when the buffer is too small. hostname and separator
+ * are assumed non-NULL (only plugin and type are asserted).
+ * NOTE(review): could be declared static like its siblings in this file. */
+int sensu_format_name2(char *ret, int ret_len,
+ const char *hostname,
+ const char *plugin, const char *plugin_instance,
+ const char *type, const char *type_instance,
+ const char *separator)
+{
+ char *buffer;
+ size_t buffer_size;
+
+ buffer = ret;
+ buffer_size = (size_t) ret_len;
+
+/* APPEND: copy str into the output buffer, advancing the write cursor;
+ * returns ENOBUFS from the enclosing function when str does not fit. */
+#define APPEND(str) do { \
+ size_t l = strlen (str); \
+ if (l >= buffer_size) \
+ return (ENOBUFS); \
+ memcpy (buffer, (str), l); \
+ buffer += l; buffer_size -= l; \
+} while (0)
+
+ assert (plugin != NULL);
+ assert (type != NULL);
+
+ APPEND (hostname);
+ APPEND (separator);
+ APPEND (plugin);
+ if ((plugin_instance != NULL) && (plugin_instance[0] != 0))
+ {
+ APPEND ("-");
+ APPEND (plugin_instance);
+ }
+ APPEND (separator);
+ APPEND (type);
+ if ((type_instance != NULL) && (type_instance[0] != 0))
+ {
+ APPEND ("-");
+ APPEND (type_instance);
+ }
+ assert (buffer_size > 0);
+ buffer[0] = 0;
+
+#undef APPEND
+ return (0);
+} /* int sensu_format_name2 */
+
+/* in_place_replace_sensu_name_reserved: replace characters that are not
+ * acceptable in sensu/time-series metric names with underscores, modifying
+ * the string in place. */
+static void in_place_replace_sensu_name_reserved(char *orig_name) /* {{{ */
+{
+ int i;
+ int len=strlen(orig_name);
+ for (i=0; i<len; i++) {
+ // some plugins like ipmi generate special characters in metric name
+ switch(orig_name[i]) {
+ case '(': orig_name[i] = '_'; break;
+ case ')': orig_name[i] = '_'; break;
+ case ' ': orig_name[i] = '_'; break;
+ case '"': orig_name[i] = '_'; break;
+ case '\'': orig_name[i] = '_'; break;
+ case '+': orig_name[i] = '_'; break;
+ }
+ }
+} /* }}} void in_place_replace_sensu_name_reserved */
+
+static char *sensu_value_to_json(struct sensu_host const *host, /* {{{ */
+ data_set_t const *ds,
+ value_list_t const *vl, size_t index,
+ gauge_t const *rates,
+ int status)
+{
+ char name_buffer[5 * DATA_MAX_NAME_LEN];
+ char service_buffer[6 * DATA_MAX_NAME_LEN];
+ int i;
+ char *ret_str;
+ char *temp_str;
+ char *value_str;
+ int res;
+ // First part of the JSON string
+ const char *part1 = "{\"name\": \"collectd\", \"type\": \"metric\"";
+
+ char *handlers_str = build_json_str_list("handlers", &(host->metric_handlers));
+ if (handlers_str == NULL) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+
+ // incorporate the handlers
+ if (strlen(handlers_str) == 0) {
+ free(handlers_str);
+ ret_str = strdup(part1);
+ if (ret_str == NULL) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ }
+ else {
+ res = asprintf(&ret_str, "%s, %s", part1, handlers_str);
+ free(handlers_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ }
+
+ // incorporate the plugin name information
+ res = asprintf(&temp_str, "%s, \"collectd_plugin\": \"%s\"", ret_str, vl->plugin);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+
+ // incorporate the plugin type
+ res = asprintf(&temp_str, "%s, \"collectd_plugin_type\": \"%s\"", ret_str, vl->type);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+
+ // incorporate the plugin instance if any
+ if (vl->plugin_instance[0] != 0) {
+ res = asprintf(&temp_str, "%s, \"collectd_plugin_instance\": \"%s\"", ret_str, vl->plugin_instance);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate the plugin type instance if any
+ if (vl->type_instance[0] != 0) {
+ res = asprintf(&temp_str, "%s, \"collectd_plugin_type_instance\": \"%s\"", ret_str, vl->type_instance);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate the data source type
+ if ((ds->ds[index].type != DS_TYPE_GAUGE) && (rates != NULL)) {
+ char ds_type[DATA_MAX_NAME_LEN];
+ ssnprintf (ds_type, sizeof (ds_type), "%s:rate", DS_TYPE_TO_STRING(ds->ds[index].type));
+ res = asprintf(&temp_str, "%s, \"collectd_data_source_type\": \"%s\"", ret_str, ds_type);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ } else {
+ res = asprintf(&temp_str, "%s, \"collectd_data_source_type\": \"%s\"", ret_str, DS_TYPE_TO_STRING(ds->ds[index].type));
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate the data source name
+ res = asprintf(&temp_str, "%s, \"collectd_data_source_name\": \"%s\"", ret_str, ds->ds[index].name);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+
+ // incorporate the data source index
+ {
+ char ds_index[DATA_MAX_NAME_LEN];
+ ssnprintf (ds_index, sizeof (ds_index), "%zu", index);
+ res = asprintf(&temp_str, "%s, \"collectd_data_source_index\": %s", ret_str, ds_index);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // add key value attributes from config if any
+ for (i = 0; i < sensu_attrs_num; i += 2) {
+ res = asprintf(&temp_str, "%s, \"%s\": \"%s\"", ret_str, sensu_attrs[i], sensu_attrs[i+1]);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate sensu tags from config if any
+ if (strlen(sensu_tags) != 0) {
+ res = asprintf(&temp_str, "%s, %s", ret_str, sensu_tags);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // calculate the value and set to a string
+ if (ds->ds[index].type == DS_TYPE_GAUGE) {
+ double tmp_v = (double) vl->values[index].gauge;
+ res = asprintf(&value_str, "%.8f", tmp_v);
+ if (res == -1) {
+ free(ret_str);
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ } else if (rates != NULL) {
+ double tmp_v = (double) rates[index];
+ res = asprintf(&value_str, "%.8f", tmp_v);
+ if (res == -1) {
+ free(ret_str);
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ } else {
+ if (ds->ds[index].type == DS_TYPE_DERIVE) {
+ res = asprintf(&value_str, "%"PRIi64, vl->values[index].derive);
+ if (res == -1) {
+ free(ret_str);
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ }
+ else if (ds->ds[index].type == DS_TYPE_ABSOLUTE) {
+ res = asprintf(&value_str, "%"PRIu64, vl->values[index].absolute);
+ if (res == -1) {
+ free(ret_str);
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ }
+ else {
+ res = asprintf(&value_str, "%llu", vl->values[index].counter);
+ if (res == -1) {
+ free(ret_str);
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ }
+ }
+
+ // Generate the full service name
+ sensu_format_name2(name_buffer, sizeof(name_buffer),
+ vl->host, vl->plugin, vl->plugin_instance,
+ vl->type, vl->type_instance, host->separator);
+ if (host->always_append_ds || (ds->ds_num > 1)) {
+ if (host->event_service_prefix == NULL)
+ ssnprintf(service_buffer, sizeof(service_buffer), "%s.%s",
+ name_buffer, ds->ds[index].name);
+ else
+ ssnprintf(service_buffer, sizeof(service_buffer), "%s%s.%s",
+ host->event_service_prefix, name_buffer, ds->ds[index].name);
+ } else {
+ if (host->event_service_prefix == NULL)
+ sstrncpy(service_buffer, name_buffer, sizeof(service_buffer));
+ else
+ ssnprintf(service_buffer, sizeof(service_buffer), "%s%s",
+ host->event_service_prefix, name_buffer);
+ }
+
+ // Replace collectd sensor name reserved characters so that time series DB is happy
+ in_place_replace_sensu_name_reserved(service_buffer);
+
+ // finalize the buffer by setting the output and closing curly bracket
+ res = asprintf(&temp_str, "%s, \"output\": \"%s %s %ld\"}\n", ret_str, service_buffer, value_str, CDTIME_T_TO_TIME_T(vl->time));
+ free(ret_str);
+ free(value_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+
+ DEBUG("write_sensu plugin: Successfully created json for metric: "
+ "host = \"%s\", service = \"%s\"",
+ vl->host, service_buffer);
+ return ret_str;
+} /* }}} char *sensu_value_to_json */
+
+/*
+ * Uses replace_str2() implementation from
+ * http://creativeandcritical.net/str-replace-c/
+ * copyright (c) Laird Shaw, under public domain.
+ */
+char *replace_str(const char *str, const char *old, /* {{{ */
+ const char *new)
+{
+ char *ret, *r;
+ const char *p, *q;
+ size_t oldlen = strlen(old);
+ size_t count = strlen(new);
+ size_t retlen = count;
+ size_t newlen = count;
+ int samesize = (oldlen == newlen);
+
+ if (!samesize) {
+ for (count = 0, p = str; (q = strstr(p, old)) != NULL; p = q + oldlen)
+ count++;
+ /* This is undefined if p - str > PTRDIFF_MAX */
+ retlen = p - str + strlen(p) + count * (newlen - oldlen);
+ } else
+ retlen = strlen(str);
+
+ ret = malloc(retlen + 1);
+ if (ret == NULL)
+ return NULL;
+ // added to original: not optimized, but keeps valgrind happy.
+ memset(ret, 0, retlen + 1);
+
+ r = ret;
+ p = str;
+ while (1) {
+ /* If the old and new strings are different lengths - in other
+ * words we have already iterated through with strstr above,
+ * and thus we know how many times we need to call it - then we
+ * can avoid the final (potentially lengthy) call to strstr,
+ * which we already know is going to return NULL, by
+ * decrementing and checking count.
+ */
+ if (!samesize && !count--)
+ break;
+ /* Otherwise i.e. when the old and new strings are the same
+ * length, and we don't know how many times to call strstr,
+ * we must check for a NULL return here (we check it in any
+ * event, to avoid further conditions, and because there's
+ * no harm done with the check even when the old and new
+ * strings are different lengths).
+ */
+ if ((q = strstr(p, old)) == NULL)
+ break;
+ /* This is undefined if q - p > PTRDIFF_MAX */
+ ptrdiff_t l = q - p;
+ memcpy(r, p, l);
+ r += l;
+ memcpy(r, new, newlen);
+ r += newlen;
+ p = q + oldlen;
+ }
+ strncpy(r, p, strlen(p));
+
+ return ret;
+} /* }}} char *replace_str */
+
/*
 * Escape the characters that are reserved inside JSON strings:
 * backslash, double quote and newline. Returns a newly allocated
 * string (caller frees) or NULL on allocation failure.
 */
static char *replace_json_reserved(const char *message) /* {{{ */
{
	char *msg = replace_str(message, "\\", "\\\\");
	if (msg == NULL) {
		ERROR("write_sensu plugin: Unable to alloc memory");
		return NULL;
	}
	char *tmp = replace_str(msg, "\"", "\\\"");
	free(msg);
	if (tmp == NULL) {
		ERROR("write_sensu plugin: Unable to alloc memory");
		return NULL;
	}
	/* A raw newline is not allowed inside a JSON string (RFC 8259);
	 * emit the two-character escape sequence \n, not a backslash
	 * followed by a literal newline as the previous "\\\n" did. */
	msg = replace_str(tmp, "\n", "\\n");
	free(tmp);
	if (msg == NULL) {
		ERROR("write_sensu plugin: Unable to alloc memory");
		return NULL;
	}
	return msg;
} /* }}} char *replace_json_reserved */
+
+static char *sensu_notification_to_json(struct sensu_host *host, /* {{{ */
+ notification_t const *n)
+{
+ char service_buffer[6 * DATA_MAX_NAME_LEN];
+ char const *severity;
+ notification_meta_t *meta;
+ char *ret_str;
+ char *temp_str;
+ int status;
+ int i;
+ int res;
+ // add the severity/status
+ switch (n->severity) {
+ case NOTIF_OKAY:
+ severity = "OK";
+ status = 0;
+ break;
+ case NOTIF_WARNING:
+ severity = "WARNING";
+ status = 1;
+ break;
+ case NOTIF_FAILURE:
+ severity = "CRITICAL";
+ status = 2;
+ break;
+ default:
+ severity = "UNKNOWN";
+ status = 3;
+ }
+ res = asprintf(&temp_str, "{\"status\": %d", status);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+
+ // incorporate the timestamp
+ res = asprintf(&temp_str, "%s, \"timestamp\": %ld", ret_str, CDTIME_T_TO_TIME_T(n->time));
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+
+ char *handlers_str = build_json_str_list("handlers", &(host->notification_handlers));
+ if (handlers_str == NULL) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ // incorporate the handlers
+ if (strlen(handlers_str) != 0) {
+ res = asprintf(&temp_str, "%s, %s", ret_str, handlers_str);
+ free(ret_str);
+ free(handlers_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ } else {
+ free(handlers_str);
+ }
+
+ // incorporate the plugin name information if any
+ if (n->plugin[0] != 0) {
+ res = asprintf(&temp_str, "%s, \"collectd_plugin\": \"%s\"", ret_str, n->plugin);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate the plugin type if any
+ if (n->type[0] != 0) {
+ res = asprintf(&temp_str, "%s, \"collectd_plugin_type\": \"%s\"", ret_str, n->type);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate the plugin instance if any
+ if (n->plugin_instance[0] != 0) {
+ res = asprintf(&temp_str, "%s, \"collectd_plugin_instance\": \"%s\"", ret_str, n->plugin_instance);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate the plugin type instance if any
+ if (n->type_instance[0] != 0) {
+ res = asprintf(&temp_str, "%s, \"collectd_plugin_type_instance\": \"%s\"", ret_str, n->type_instance);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // add key value attributes from config if any
+ for (i = 0; i < sensu_attrs_num; i += 2) {
+ res = asprintf(&temp_str, "%s, \"%s\": \"%s\"", ret_str, sensu_attrs[i], sensu_attrs[i+1]);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate sensu tags from config if any
+ if (strlen(sensu_tags) != 0) {
+ res = asprintf(&temp_str, "%s, %s", ret_str, sensu_tags);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // incorporate the service name
+ sensu_format_name2(service_buffer, sizeof(service_buffer),
+ /* host */ "", n->plugin, n->plugin_instance,
+ n->type, n->type_instance, host->separator);
+ // replace sensu event name chars that are considered illegal
+ in_place_replace_sensu_name_reserved(service_buffer);
+ res = asprintf(&temp_str, "%s, \"name\": \"%s\"", ret_str, &service_buffer[1]);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+
+ // incorporate the check output
+ if (n->message[0] != 0) {
+ char *msg = replace_json_reserved(n->message);
+ if (msg == NULL) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ res = asprintf(&temp_str, "%s, \"output\": \"%s - %s\"", ret_str, severity, msg);
+ free(ret_str);
+ free(msg);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+
+ // Pull in values from threshold and add extra attributes
+ for (meta = n->meta; meta != NULL; meta = meta->next) {
+ if (strcasecmp("CurrentValue", meta->name) == 0 && meta->type == NM_TYPE_DOUBLE) {
+ res = asprintf(&temp_str, "%s, \"current_value\": \"%.8f\"", ret_str, meta->nm_value.nm_double);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+ if (meta->type == NM_TYPE_STRING) {
+ res = asprintf(&temp_str, "%s, \"%s\": \"%s\"", ret_str, meta->name, meta->nm_value.nm_string);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+ }
+ }
+
+ // close the curly bracket
+ res = asprintf(&temp_str, "%s}\n", ret_str);
+ free(ret_str);
+ if (res == -1) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return NULL;
+ }
+ ret_str = temp_str;
+
+ DEBUG("write_sensu plugin: Successfully created JSON for notification: "
+ "host = \"%s\", service = \"%s\", state = \"%s\"",
+ n->host, service_buffer, severity);
+ return ret_str;
+} /* }}} char *sensu_notification_to_json */
+
+static int sensu_send_msg(struct sensu_host *host, const char *msg) /* {{{ */
+{
+ int status = 0;
+ size_t buffer_len;
+
+ status = sensu_connect(host);
+ if (status != 0)
+ return status;
+
+ buffer_len = strlen(msg);
+
+ status = (int) swrite(host->s, msg, buffer_len);
+ sensu_close_socket(host);
+
+ if (status != 0) {
+ char errbuf[1024];
+ ERROR("write_sensu plugin: Sending to Sensu at %s:%s failed: %s",
+ (host->node != NULL) ? host->node : SENSU_HOST,
+ (host->service != NULL) ? host->service : SENSU_PORT,
+ sstrerror(errno, errbuf, sizeof(errbuf)));
+ return -1;
+ }
+
+ return 0;
+} /* }}} int sensu_send_msg */
+
+
+static int sensu_send(struct sensu_host *host, char const *msg) /* {{{ */
+{
+ int status = 0;
+
+ status = sensu_send_msg(host, msg);
+ if (status != 0) {
+ host->flags &= ~F_READY;
+ if (host->res != NULL) {
+ freeaddrinfo(host->res);
+ host->res = NULL;
+ }
+ return status;
+ }
+
+ return 0;
+} /* }}} int sensu_send */
+
+
+static int sensu_write(const data_set_t *ds, /* {{{ */
+ const value_list_t *vl,
+ user_data_t *ud)
+{
+ int status = 0;
+ int statuses[vl->values_len];
+ struct sensu_host *host = ud->data;
+ gauge_t *rates = NULL;
+ int i;
+ char *msg;
+
+ pthread_mutex_lock(&host->lock);
+ memset(statuses, 0, vl->values_len * sizeof(*statuses));
+
+ if (host->store_rates) {
+ rates = uc_get_rate(ds, vl);
+ if (rates == NULL) {
+ ERROR("write_sensu plugin: uc_get_rate failed.");
+ pthread_mutex_unlock(&host->lock);
+ return -1;
+ }
+ }
+ for (i = 0; i < (size_t) vl->values_len; i++) {
+ msg = sensu_value_to_json(host, ds, vl, (int) i, rates, statuses[i]);
+ if (msg == NULL) {
+ sfree(rates);
+ pthread_mutex_unlock(&host->lock);
+ return -1;
+ }
+ status = sensu_send(host, msg);
+ free(msg);
+ if (status != 0) {
+ ERROR("write_sensu plugin: sensu_send failed with status %i", status);
+ pthread_mutex_unlock(&host->lock);
+ sfree(rates);
+ return status;
+ }
+ }
+ sfree(rates);
+ pthread_mutex_unlock(&host->lock);
+ return status;
+} /* }}} int sensu_write */
+
+static int sensu_notification(const notification_t *n, user_data_t *ud) /* {{{ */
+{
+ int status;
+ struct sensu_host *host = ud->data;
+ char *msg;
+
+ pthread_mutex_lock(&host->lock);
+
+ msg = sensu_notification_to_json(host, n);
+ if (msg == NULL) {
+ pthread_mutex_unlock(&host->lock);
+ return -1;
+ }
+
+ status = sensu_send(host, msg);
+ free(msg);
+ if (status != 0)
+ ERROR("write_sensu plugin: sensu_send failed with status %i", status);
+ pthread_mutex_unlock(&host->lock);
+
+ return status;
+} /* }}} int sensu_notification */
+
+static void sensu_free(void *p) /* {{{ */
+{
+ struct sensu_host *host = p;
+
+ if (host == NULL)
+ return;
+
+ pthread_mutex_lock(&host->lock);
+
+ host->reference_count--;
+ if (host->reference_count > 0) {
+ pthread_mutex_unlock(&host->lock);
+ return;
+ }
+
+ sensu_close_socket(host);
+ if (host->res != NULL) {
+ freeaddrinfo(host->res);
+ host->res = NULL;
+ }
+ sfree(host->service);
+ sfree(host->event_service_prefix);
+ sfree(host->name);
+ sfree(host->node);
+ sfree(host->separator);
+ free_str_list(&(host->metric_handlers));
+ free_str_list(&(host->notification_handlers));
+ pthread_mutex_destroy(&host->lock);
+ sfree(host);
+} /* }}} void sensu_free */
+
+
/*
 * Parse one <Node> block of the write_sensu configuration, allocate and
 * populate a sensu_host, and register the write and/or notification
 * callbacks for it. Returns 0 on success, non-zero on error (the host
 * is freed on every error path via sensu_free()).
 */
static int sensu_config_node(oconfig_item_t *ci) /* {{{ */
{
	struct sensu_host *host = NULL;
	int status = 0;
	int i;
	oconfig_item_t *child;
	char callback_name[DATA_MAX_NAME_LEN];
	user_data_t ud;

	if ((host = calloc(1, sizeof(*host))) == NULL) {
		ERROR("write_sensu plugin: calloc failed.");
		return ENOMEM;
	}
	pthread_mutex_init(&host->lock, NULL);
	// start with one reference owned by this function; each successful
	// callback registration below adds one, and the local reference is
	// dropped again before returning
	host->reference_count = 1;
	host->node = NULL;
	host->service = NULL;
	host->notifications = 0;
	host->metrics = 0;
	host->store_rates = 1;
	host->always_append_ds = 0;
	host->metric_handlers.nb_strs = 0;
	host->metric_handlers.strs = NULL;
	host->notification_handlers.nb_strs = 0;
	host->notification_handlers.strs = NULL;
	host->separator = strdup("/");
	if (host->separator == NULL) {
		ERROR("write_sensu plugin: Unable to alloc memory");
		sensu_free(host);
		return -1;
	}

	// the block's argument is the mandatory host name
	status = cf_util_get_string(ci, &host->name);
	if (status != 0) {
		WARNING("write_sensu plugin: Required host name is missing.");
		sensu_free(host);
		return -1;
	}

	// walk the child options; any parse error breaks out of the loop
	// with a non-zero status and frees the host below
	for (i = 0; i < ci->children_num; i++) {
		child = &ci->children[i];
		status = 0;

		if (strcasecmp("Host", child->key) == 0) {
			status = cf_util_get_string(child, &host->node);
			if (status != 0)
				break;
		} else if (strcasecmp("Notifications", child->key) == 0) {
			status = cf_util_get_boolean(child, &host->notifications);
			if (status != 0)
				break;
		} else if (strcasecmp("Metrics", child->key) == 0) {
			status = cf_util_get_boolean(child, &host->metrics);
			if (status != 0)
				break;
		} else if (strcasecmp("EventServicePrefix", child->key) == 0) {
			status = cf_util_get_string(child, &host->event_service_prefix);
			if (status != 0)
				break;
		} else if (strcasecmp("Separator", child->key) == 0) {
			status = cf_util_get_string(child, &host->separator);
			if (status != 0)
				break;
		} else if (strcasecmp("MetricHandler", child->key) == 0) {
			char *temp_str = NULL;
			status = cf_util_get_string(child, &temp_str);
			if (status != 0)
				break;
			// add_str_to_list copies the string, so free our copy
			status = add_str_to_list(&(host->metric_handlers), temp_str);
			free(temp_str);
			if (status != 0)
				break;
		} else if (strcasecmp("NotificationHandler", child->key) == 0) {
			char *temp_str = NULL;
			status = cf_util_get_string(child, &temp_str);
			if (status != 0)
				break;
			status = add_str_to_list(&(host->notification_handlers), temp_str);
			free(temp_str);
			if (status != 0)
				break;
		} else if (strcasecmp("Port", child->key) == 0) {
			status = cf_util_get_service(child, &host->service);
			if (status != 0) {
				ERROR("write_sensu plugin: Invalid argument "
						"configured for the \"Port\" "
						"option.");
				break;
			}
		} else if (strcasecmp("StoreRates", child->key) == 0) {
			status = cf_util_get_boolean(child, &host->store_rates);
			if (status != 0)
				break;
		} else if (strcasecmp("AlwaysAppendDS", child->key) == 0) {
			status = cf_util_get_boolean(child,
					&host->always_append_ds);
			if (status != 0)
				break;
		} else {
			WARNING("write_sensu plugin: ignoring unknown config "
					"option: \"%s\"", child->key);
		}
	}
	if (status != 0) {
		sensu_free(host);
		return status;
	}

	// sanity-check the metrics/notifications vs. handler combinations:
	// an enabled mode without a handler is fatal, a handler without the
	// mode enabled implicitly enables it
	if (host->metrics && (host->metric_handlers.nb_strs == 0)) {
		sensu_free(host);
		WARNING("write_sensu plugin: metrics enabled but no MetricHandler defined. Giving up.");
		return -1;
	}

	if (host->notifications && (host->notification_handlers.nb_strs == 0)) {
		sensu_free(host);
		WARNING("write_sensu plugin: notifications enabled but no NotificationHandler defined. Giving up.");
		return -1;
	}

	if ((host->notification_handlers.nb_strs > 0) && (host->notifications == 0)) {
		WARNING("write_sensu plugin: NotificationHandler given so forcing notifications to be enabled");
		host->notifications = 1;
	}

	if ((host->metric_handlers.nb_strs > 0) && (host->metrics == 0)) {
		WARNING("write_sensu plugin: MetricHandler given so forcing metrics to be enabled");
		host->metrics = 1;
	}

	if (!(host->notifications || host->metrics)) {
		WARNING("write_sensu plugin: neither metrics nor notifications enabled. Giving up.");
		sensu_free(host);
		return -1;
	}

	ssnprintf(callback_name, sizeof(callback_name), "write_sensu/%s", host->name);
	ud.data = host;
	ud.free_func = sensu_free;

	// hold the lock while registering so the reference count and the
	// callbacks stay consistent
	pthread_mutex_lock(&host->lock);

	if (host->metrics) {
		status = plugin_register_write(callback_name, sensu_write, &ud);
		if (status != 0)
			WARNING("write_sensu plugin: plugin_register_write (\"%s\") "
					"failed with status %i.",
					callback_name, status);
		else /* success */
			host->reference_count++;
	}

	if (host->notifications) {
		status = plugin_register_notification(callback_name, sensu_notification, &ud);
		if (status != 0)
			WARNING("write_sensu plugin: plugin_register_notification (\"%s\") "
					"failed with status %i.",
					callback_name, status);
		else
			host->reference_count++;
	}

	if (host->reference_count <= 1) {
		/* Both callbacks failed => free memory.
		 * We need to unlock here, because sensu_free() will lock.
		 * This is not a race condition, because we're the only one
		 * holding a reference. */
		pthread_mutex_unlock(&host->lock);
		sensu_free(host);
		return -1;
	}

	// drop this function's local reference; the registered callbacks
	// now keep the host alive
	host->reference_count--;
	pthread_mutex_unlock(&host->lock);

	return status;
} /* }}} int sensu_config_node */
+
+static int sensu_config(oconfig_item_t *ci) /* {{{ */
+{
+ int i;
+ oconfig_item_t *child;
+ int status;
+ struct str_list sensu_tags_arr;
+
+ sensu_tags_arr.nb_strs = 0;
+ sensu_tags_arr.strs = NULL;
+ sensu_tags = malloc(sizeof(char));
+ if (sensu_tags == NULL) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return -1;
+ }
+ sensu_tags[0] = '\0';
+
+ for (i = 0; i < ci->children_num; i++) {
+ child = &ci->children[i];
+
+ if (strcasecmp("Node", child->key) == 0) {
+ sensu_config_node(child);
+ } else if (strcasecmp(child->key, "attribute") == 0) {
+ char *key = NULL;
+ char *val = NULL;
+
+ if (child->values_num != 2) {
+ WARNING("sensu attributes need both a key and a value.");
+ free(sensu_tags);
+ return -1;
+ }
+ if (child->values[0].type != OCONFIG_TYPE_STRING ||
+ child->values[1].type != OCONFIG_TYPE_STRING) {
+ WARNING("sensu attribute needs string arguments.");
+ free(sensu_tags);
+ return -1;
+ }
+ if ((key = strdup(child->values[0].value.string)) == NULL) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ free(sensu_tags);
+ return -1;
+ }
+ if ((val = strdup(child->values[1].value.string)) == NULL) {
+ free(sensu_tags);
+ free(key);
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return -1;
+ }
+ strarray_add(&sensu_attrs, &sensu_attrs_num, key);
+ strarray_add(&sensu_attrs, &sensu_attrs_num, val);
+ DEBUG("write_sensu: got attr: %s => %s", key, val);
+ sfree(key);
+ sfree(val);
+ } else if (strcasecmp(child->key, "tag") == 0) {
+ char *tmp = NULL;
+ status = cf_util_get_string(child, &tmp);
+ if (status != 0)
+ continue;
+
+ status = add_str_to_list(&sensu_tags_arr, tmp);
+ sfree(tmp);
+ if (status != 0)
+ continue;
+ DEBUG("write_sensu plugin: Got tag: %s", tmp);
+ } else {
+ WARNING("write_sensu plugin: Ignoring unknown "
+ "configuration option \"%s\" at top level.",
+ child->key);
+ }
+ }
+ if (sensu_tags_arr.nb_strs > 0) {
+ free(sensu_tags);
+ sensu_tags = build_json_str_list("tags", &sensu_tags_arr);
+ free_str_list(&sensu_tags_arr);
+ if (sensu_tags == NULL) {
+ ERROR("write_sensu plugin: Unable to alloc memory");
+ return -1;
+ }
+ }
+ return 0;
+} /* }}} int sensu_config */
+
+void module_register(void)
+{
+ plugin_register_complex_config("write_sensu", sensu_config);
+}
+
+/* vim: set sw=8 sts=8 ts=8 noet : */
if (ds_name != NULL) {
if (vl->plugin_instance[0] == '\0') {
- ssnprintf(ret, ret_len, "%s%s.%s",
- prefix, vl->plugin, ds_name);
- } else if (vl->type_instance[0] == '\0') {
- ssnprintf(ret, ret_len, "%s%s.%s.%s.%s",
- prefix, vl->plugin, vl->plugin_instance,
- vl->type_instance, ds_name);
- } else {
- ssnprintf(ret, ret_len, "%s%s.%s.%s.%s",
- prefix, vl->plugin, vl->plugin_instance, vl->type,
- ds_name);
+ if (vl->type_instance[0] == '\0') {
+ ssnprintf(ret, ret_len, "%s%s.%s.%s", prefix, vl->plugin,
+ vl->type, ds_name);
+ } else {
+ ssnprintf(ret, ret_len, "%s%s.%s.%s.%s", prefix, vl->plugin,
+ vl->type, vl->type_instance, ds_name);
+ }
+ } else { /* vl->plugin_instance != "" */
+ if (vl->type_instance[0] == '\0') {
+ ssnprintf(ret, ret_len, "%s%s.%s.%s.%s", prefix, vl->plugin,
+ vl->plugin_instance, vl->type, ds_name);
+ } else {
+ ssnprintf(ret, ret_len, "%s%s.%s.%s.%s.%s", prefix,
+ vl->plugin, vl->plugin_instance, vl->type,
+ vl->type_instance, ds_name);
+ }
+ }
+ } else { /* ds_name == NULL */
+ if (vl->plugin_instance[0] == '\0') {
+ if (vl->type_instance[0] == '\0') {
+ ssnprintf(ret, ret_len, "%s%s.%s", prefix, vl->plugin,
+ vl->type);
+ } else {
+ ssnprintf(ret, ret_len, "%s%s.%s.%s", prefix, vl->plugin,
+ vl->type_instance, vl->type);
+ }
+ } else { /* vl->plugin_instance != "" */
+ if (vl->type_instance[0] == '\0') {
+ ssnprintf(ret, ret_len, "%s%s.%s.%s", prefix, vl->plugin,
+ vl->plugin_instance, vl->type);
+ } else {
+ ssnprintf(ret, ret_len, "%s%s.%s.%s.%s", prefix, vl->plugin,
+ vl->plugin_instance, vl->type, vl->type_instance);
+ }
}
- } else if (vl->plugin_instance[0] == '\0') {
- if (vl->type_instance[0] == '\0')
- ssnprintf(ret, ret_len, "%s%s.%s",
- prefix, vl->plugin, vl->type);
- else
- ssnprintf(ret, ret_len, "%s%s.%s",
- prefix, vl->plugin, vl->type_instance);
- } else if (vl->type_instance[0] == '\0') {
- ssnprintf(ret, ret_len, "%s%s.%s.%s",
- prefix, vl->plugin, vl->plugin_instance, vl->type);
- } else {
- ssnprintf(ret, ret_len, "%s%s.%s.%s.%s",
- prefix, vl->plugin, vl->plugin_instance, vl->type, vl->type_instance);
}
sfree(temp);
-#!/usr/bin/env bash
+#!/bin/sh
-DEFAULT_VERSION="5.4.1.git"
+DEFAULT_VERSION="5.4.2.git"
VERSION="`git describe 2> /dev/null | grep collectd | sed -e 's/^collectd-//'`"
VERSION="`echo \"$VERSION\" | sed -e 's/-/./g'`"
-echo -n "$VERSION"
+printf "%s" "$VERSION"