#AutoLoadPlugin false
#----------------------------------------------------------------------------#
+# When enabled, internal statistics are collected, using "collectd" as the #
+# plugin name. #
+# Disabled by default. #
+#----------------------------------------------------------------------------#
+#CollectInternalStats false
+
+#----------------------------------------------------------------------------#
# Interval at which to query values. This may be overwritten on a per-plugin #
# base by using the 'Interval' option of the LoadPlugin block: #
# <LoadPlugin foo> #
B<LoadPlugin> statement. B<LoadPlugin> statements are still required for
plugins that don't provide any configuration, e.g. the I<Load plugin>.
+=item B<CollectInternalStats> B<false>|B<true>
+
+When set to B<true>, various statistics about the I<collectd> daemon will be
+collected, with "collectd" as the I<plugin name>. Defaults to B<false>.
+
+The "write_queue" I<plugin instance> reports the number of elements currently
+queued and the number of elements dropped off the queue by the
+B<WriteQueueLimitLow>/B<WriteQueueLimitHigh> mechanism.
+
+The "cache" I<plugin instance> reports the number of elements in the value list
+cache (the cache you can interact with using L<collectd-unixsock(5)>).
+
=item B<Include> I<Path> [I<pattern>]
If I<Path> points to a file, includes that file. If I<Path> points to a
unset, the latter will default to half of B<WriteQueueLimitHigh>.
If you do not want to randomly drop values when the queue size is between
-I<LowNum> and I<HighNum>, set If B<WriteQueueLimitHigh> and
-B<WriteQueueLimitLow> to same value.
+I<LowNum> and I<HighNum>, set B<WriteQueueLimitHigh> and B<WriteQueueLimitLow>
+to the same value.
+
+Enabling the B<CollectInternalStats> option is of great help in figuring out
+which values to set B<WriteQueueLimitHigh> and B<WriteQueueLimitLow> to.
=item B<Hostname> I<Name>
{"WriteQueueLimitLow", NULL, NULL},
{"Timeout", NULL, "2"},
{"AutoLoadPlugin", NULL, "false"},
+ {"CollectInternalStats", NULL, "false"},
{"PreCacheChain", NULL, "PreCache"},
{"PostCacheChain", NULL, "PostCache"},
{"MaxReadInterval", NULL, "86400"}
static long write_limit_high = 0;
static long write_limit_low = 0;
+/* Number of values dropped by the WriteQueueLimitLow/WriteQueueLimitHigh
+ * mechanism since startup; incremented under statistics_lock in
+ * plugin_dispatch_values(). */
+static derive_t stats_values_dropped = 0;
+/* Set once during initialization when the "CollectInternalStats" global
+ * option is true; only read afterwards. */
+static _Bool record_statistics = 0;
+
/*
* Static functions
*/
return (plugindir);
}
+/* Dispatches the daemon's internal statistics under the "collectd" plugin
+ * name: the current write-queue length, the number of values dropped by
+ * the WriteQueueLimit mechanism (plugin instance "write_queue"), and the
+ * number of entries in the value cache (plugin instance "cache").
+ *
+ * NOTE(review): write_queue_length and stats_values_dropped are read here
+ * without holding a lock -- presumably a slightly stale value is acceptable
+ * for statistics; confirm. */
+static void plugin_update_internal_statistics (void) { /* {{{ */
+ derive_t copy_write_queue_length;
+ value_list_t vl = VALUE_LIST_INIT;
+ value_t values[2];
+
+ copy_write_queue_length = write_queue_length;
+
+ /* Initialize `vl'; every dispatch below reuses this value list and
+  * overwrites only the fields that differ. */
+ vl.values = values;
+ vl.values_len = 2;
+ vl.time = 0;
+ sstrncpy (vl.host, hostname_g, sizeof (vl.host));
+ sstrncpy (vl.plugin, "collectd", sizeof (vl.plugin));
+
+ vl.type_instance[0] = 0;
+ /* Only one value is submitted per dispatch; this overrides the
+  * values_len = 2 assignment above (dead store). */
+ vl.values_len = 1;
+
+ /* Write queue */
+ sstrncpy (vl.plugin_instance, "write_queue",
+ sizeof (vl.plugin_instance));
+
+ /* Write queue : current queue length (gauge) */
+ vl.values[0].gauge = (gauge_t) copy_write_queue_length;
+ sstrncpy (vl.type, "queue_length", sizeof (vl.type));
+ vl.type_instance[0] = 0;
+ plugin_dispatch_values (&vl);
+
+ /* Write queue : values dropped because the queue length exceeded the
+  * low limit (monotonic counter, hence type "derive") */
+ vl.values[0].derive = (derive_t) stats_values_dropped;
+ sstrncpy (vl.type, "derive", sizeof (vl.type));
+ sstrncpy (vl.type_instance, "dropped", sizeof (vl.type_instance));
+ plugin_dispatch_values (&vl);
+
+ /* Cache */
+ sstrncpy (vl.plugin_instance, "cache",
+ sizeof (vl.plugin_instance));
+
+ /* Cache : number of entries in the value cache tree */
+ vl.values[0].gauge = (gauge_t) uc_get_size();
+ sstrncpy (vl.type, "cache_size", sizeof (vl.type));
+ vl.type_instance[0] = 0;
+ plugin_dispatch_values (&vl);
+
+ return;
+} /* }}} void plugin_update_internal_statistics */
+
static void destroy_callback (callback_func_t *cf) /* {{{ */
{
if (cf == NULL)
/* Init the value cache */
uc_init ();
+ if (IS_TRUE (global_option_get ("CollectInternalStats")))
+ record_statistics = 1;
+
chain_name = global_option_get ("PreCacheChain");
pre_cache_chain = fc_chain_get_by_name (chain_name);
/* TODO: Rename this function. */
void plugin_read_all (void)
{
+ if(record_statistics) {
+ plugin_update_internal_statistics ();
+ }
uc_check_timeout ();
return;
int plugin_dispatch_values (value_list_t const *vl)
{
int status;
+ static pthread_mutex_t statistics_lock = PTHREAD_MUTEX_INITIALIZER;
- if (check_drop_value ())
+ if (check_drop_value ()) {
+ if(record_statistics) {
+ pthread_mutex_lock(&statistics_lock);
+ stats_values_dropped++;
+ pthread_mutex_unlock(&statistics_lock);
+ }
return (0);
+ }
status = plugin_write_enqueue (vl);
if (status != 0)
return (ret);
} /* gauge_t *uc_get_rate */
+/* Returns the number of entries currently held in the value cache tree.
+ * Takes cache_lock for the duration of the size lookup, so it is safe to
+ * call from any thread. */
+size_t uc_get_size (void) {
+ size_t size_arrays = 0;
+
+ pthread_mutex_lock (&cache_lock);
+ size_arrays = (size_t) c_avl_size (cache_tree);
+ pthread_mutex_unlock (&cache_lock);
+
+ return (size_arrays);
+}
+
int uc_get_names (char ***ret_names, cdtime_t **ret_times, size_t *ret_number)
{
c_avl_iterator_t *iter;
int uc_get_rate_by_name (const char *name, gauge_t **ret_values, size_t *ret_values_num);
gauge_t *uc_get_rate (const data_set_t *ds, const value_list_t *vl);
+/* Number of entries in the value cache; thread-safe. */
+size_t uc_get_size (void);
int uc_get_names (char ***ret_names, cdtime_t **ret_times, size_t *ret_number);
int uc_get_state (const data_set_t *ds, const value_list_t *vl);