+/* Data corresponding to <Disks /> */
+/* Parses one "perf-object-get-instances" response for the "disk" object,
+ * updates the per-disk state cached in cfg_disk and, when the "Busiest"
+ * option is enabled, submits the busy percentage of the busiest disk.
+ *
+ * Returns zero on success, EINVAL on invalid arguments and minus one if
+ * the response lacks the expected "instances" element. */
+static int cna_handle_disk_data (const char *hostname, /* {{{ */
+		cfg_disk_t *cfg_disk, na_elem_t *data)
+{
+	time_t timestamp;
+	na_elem_t *instances;
+	na_elem_t *instance;
+	na_elem_iter_t instance_iter;
+	disk_t *worst_disk = NULL;
+
+	if ((cfg_disk == NULL) || (data == NULL))
+		return (EINVAL);
+
+	timestamp = (time_t) na_child_get_uint64 (data, "timestamp", 0);
+
+	instances = na_elem_child (data, "instances");
+	if (instances == NULL)
+	{
+		ERROR ("netapp plugin: cna_handle_disk_data: "
+				"na_elem_child (\"instances\") failed.");
+		return (-1);
+	}
+
+	/* Iterate over all children, i.e. over all disk instances. */
+	instance_iter = na_child_iterator (instances);
+	for (instance = na_iterator_next (&instance_iter);
+			instance != NULL;
+			instance = na_iterator_next (&instance_iter))
+	{
+		disk_t *old_data;
+		disk_t new_data;
+
+		na_elem_iter_t counter_iterator;
+		na_elem_t *counter;
+
+		memset (&new_data, 0, sizeof (new_data));
+		new_data.timestamp = timestamp;
+		/* NAN marks "no valid percentage"; it is only overwritten below
+		 * when both counters are present and did not wrap. */
+		new_data.disk_busy_percent = NAN;
+
+		/* Look up (or create) the cached state for this disk instance. */
+		old_data = get_disk (cfg_disk, na_child_get_string (instance, "name"));
+		if (old_data == NULL)
+			continue;
+
+		/* Look for the "disk_busy" and "base_for_disk_busy" counters */
+		counter_iterator = na_child_iterator (na_elem_child (instance, "counters"));
+		for (counter = na_iterator_next (&counter_iterator);
+				counter != NULL;
+				counter = na_iterator_next (&counter_iterator))
+		{
+			const char *name;
+			uint64_t value;
+
+			name = na_child_get_string (counter, "name");
+			if (name == NULL)
+				continue;
+
+			/* UINT64_MAX doubles as the "counter not present" sentinel. */
+			value = na_child_get_uint64 (counter, "value", UINT64_MAX);
+			if (value == UINT64_MAX)
+				continue;
+
+			if (strcmp (name, "disk_busy") == 0)
+			{
+				new_data.disk_busy = value;
+				new_data.flags |= HAVE_DISK_BUSY;
+			}
+			else if (strcmp (name, "base_for_disk_busy") == 0)
+			{
+				new_data.base_for_disk_busy = value;
+				new_data.flags |= HAVE_DISK_BASE;
+			}
+			else
+			{
+				DEBUG ("netapp plugin: cna_handle_disk_data: "
+						"Counter not handled: %s = %"PRIu64,
+						name, value);
+			}
+		}
+
+		/* If all required counters are available and did not just wrap around,
+		 * calculate the busy percentage. Otherwise, the value is initialized to
+		 * NAN at the top of the for-loop. */
+		if (HAS_ALL_FLAGS (old_data->flags, HAVE_DISK_BUSY | HAVE_DISK_BASE)
+				&& HAS_ALL_FLAGS (new_data.flags, HAVE_DISK_BUSY | HAVE_DISK_BASE)
+				&& (new_data.disk_busy >= old_data->disk_busy)
+				&& (new_data.base_for_disk_busy > old_data->base_for_disk_busy))
+		{
+			uint64_t busy_diff;
+			uint64_t base_diff;
+
+			busy_diff = new_data.disk_busy - old_data->disk_busy;
+			base_diff = new_data.base_for_disk_busy - old_data->base_for_disk_busy;
+
+			new_data.disk_busy_percent = 100.0
+				* ((gauge_t) busy_diff) / ((gauge_t) base_diff);
+		}
+
+		/* Clear HAVE_* flags */
+		old_data->flags &= ~HAVE_DISK_ALL;
+
+		/* Copy data */
+		old_data->timestamp = new_data.timestamp;
+		old_data->disk_busy = new_data.disk_busy;
+		old_data->base_for_disk_busy = new_data.base_for_disk_busy;
+		old_data->disk_busy_percent = new_data.disk_busy_percent;
+
+		/* Copy flags */
+		old_data->flags |= (new_data.flags & HAVE_DISK_ALL);
+
+		/* Track the busiest disk. The isnan() escape is required because
+		 * every comparison involving NAN is false: without it, a disk whose
+		 * percentage is NAN could be captured as worst_disk and would then
+		 * never be displaced by a disk with a valid percentage, causing NAN
+		 * to be submitted below. */
+		if ((worst_disk == NULL)
+				|| isnan (worst_disk->disk_busy_percent)
+				|| (worst_disk->disk_busy_percent < old_data->disk_busy_percent))
+			worst_disk = old_data;
+	} /* for (all disks) */
+
+	if ((cfg_disk->flags & CFG_DISK_BUSIEST) && (worst_disk != NULL))
+		submit_double (hostname, "system", "percent", "disk_busy",
+				worst_disk->disk_busy_percent, timestamp);
+
+	return (0);
+} /* }}} int cna_handle_disk_data */
+
+/* Lazily builds and caches the "perf-object-get-instances" query used to
+ * fetch disk statistics. Returns zero on success (including the already-
+ * initialized case), EINVAL on a NULL argument and minus one if the SDK
+ * fails to allocate an element. */
+static int cna_setup_disk (cfg_disk_t *cd) /* {{{ */
+{
+	na_elem_t *counters;
+
+	if (cd == NULL)
+		return (EINVAL);
+
+	/* The query is built only once and then reused. */
+	if (cd->query != NULL)
+		return (0);
+
+	/* Request instance data for the "disk" performance object ... */
+	cd->query = na_elem_new ("perf-object-get-instances");
+	if (cd->query == NULL)
+	{
+		ERROR ("netapp plugin: na_elem_new failed.");
+		return (-1);
+	}
+	na_child_add_string (cd->query, "objectname", "disk");
+
+	/* ... and restrict the returned counters to the two we evaluate. */
+	counters = na_elem_new ("counters");
+	if (counters == NULL)
+	{
+		ERROR ("netapp plugin: na_elem_new failed.");
+		na_elem_free (cd->query);
+		cd->query = NULL;
+		return (-1);
+	}
+	na_child_add_string (counters, "foo", "disk_busy");
+	na_child_add_string (counters, "foo", "base_for_disk_busy");
+	na_child_add (cd->query, counters);
+
+	return (0);
+} /* }}} int cna_setup_disk */
+
+/* Queries disk statistics from one host, honoring the configured query
+ * interval, and hands the response to cna_handle_disk_data(). Returns zero
+ * on success or when nothing needed to be done, EINVAL on a NULL argument
+ * and a non-zero status on setup, query or parse failures. */
+static int cna_query_disk (host_config_t *host) /* {{{ */
+{
+	na_elem_t *data;
+	int status;
+	time_t now;
+
+	if (host == NULL)
+		return (EINVAL);
+
+	/* Disk statistics are optional; without a configuration there is
+	 * nothing to query. */
+	if (host->cfg_disk == NULL)
+		return (0);
+
+	/* Skip this run if the query interval has not yet elapsed. */
+	now = time (NULL);
+	if (now < (host->cfg_disk->interval.last_read + host->cfg_disk->interval.interval))
+		return (0);
+
+	status = cna_setup_disk (host->cfg_disk);
+	if (status != 0)
+		return (status);
+	assert (host->cfg_disk->query != NULL);
+
+	/* NOTE(review): the result of na_server_invoke_elem() is passed to
+	 * na_results_status() without a NULL check — presumably the SDK accepts
+	 * NULL there; verify against the Manage ONTAP SDK documentation. */
+	data = na_server_invoke_elem (host->srv, host->cfg_disk->query);
+	if (na_results_status (data) != NA_OK)
+	{
+		ERROR ("netapp plugin: cna_query_disk: na_server_invoke_elem failed: %s",
+				na_results_reason (data));
+		na_elem_free (data);
+		return (-1);
+	}
+
+	status = cna_handle_disk_data (host->name, host->cfg_disk, data);
+	if (status == 0)
+		host->cfg_disk->interval.last_read = now;
+
+	na_elem_free (data);
+	return (status);
+} /* }}} int cna_query_disk */
+
+/* Data corresponding to <VolumeUsage /> */
+/* Submits one "df_complex" value per available usage metric for every
+ * volume in cfg_volume, then clears the HAVE_* flags so stale data is not
+ * re-submitted on the next iteration. Always returns zero. */
+static int cna_submit_volume_usage_data (const char *hostname, /* {{{ */
+		cfg_volume_usage_t *cfg_volume)
+{
+	data_volume_usage_t *v;
+
+	for (v = cfg_volume->volumes; v != NULL; v = v->next)
+	{
+		/* Table of (availability flag, type instance, value) triples;
+		 * the order matches the order in which values are submitted. */
+		struct {
+			uint32_t flag;
+			const char *type_instance;
+			double value;
+		} metrics[] = {
+			{ HAVE_VOLUME_USAGE_NORM_FREE, "free",          (double) v->norm_free },
+			{ HAVE_VOLUME_USAGE_NORM_USED, "used",          (double) v->norm_used },
+			{ HAVE_VOLUME_USAGE_SNAP_RSVD, "snap_reserved", (double) v->snap_reserved },
+			{ HAVE_VOLUME_USAGE_SNAP_USED, "snap_used",     (double) v->snap_used },
+			{ HAVE_VOLUME_USAGE_SIS_SAVED, "sis_saved",     (double) v->sis_saved }
+		};
+		size_t i;
+
+		for (i = 0; i < (sizeof (metrics) / sizeof (metrics[0])); i++)
+		{
+			if (!HAS_ALL_FLAGS (v->flags, metrics[i].flag))
+				continue;
+			submit_double (hostname, /* plugin instance = */ v->name,
+					"df_complex", metrics[i].type_instance,
+					metrics[i].value, /* timestamp = */ 0);
+		}
+
+		/* Clear all the HAVE_* flags */
+		v->flags &= ~HAVE_VOLUME_USAGE_ALL;
+	} /* for (v = cfg_volume->volumes) */
+
+	return (0);
+} /* }}} int cna_submit_volume_usage_data */
+
+static int cna_handle_volume_usage_data (const char *hostname, /* {{{ */
+ cfg_volume_usage_t *cfg_volume, na_elem_t *data)
+{
+ na_elem_t *elem_volume;
+ na_elem_t *elem_volumes;
+ na_elem_iter_t iter_volume;
+
+ elem_volumes = na_elem_child (data, "volumes");
+ if (elem_volumes == NULL)
+ {
+ ERROR ("netapp plugin: cna_handle_volume_usage_data: "
+ "na_elem_child (\"volumes\") failed.");
+ return (-1);
+ }
+
+ iter_volume = na_child_iterator (elem_volumes);
+ for (elem_volume = na_iterator_next (&iter_volume);
+ elem_volume != NULL;
+ elem_volume = na_iterator_next (&iter_volume))
+ {
+ const char *volume_name;
+
+ data_volume_usage_t *v;
+ uint64_t value;
+
+ na_elem_t *sis;
+ const char *sis_state;