1 /*****************************************************************************
2 * RRDtool 1.4.3 Copyright by Tobi Oetiker, 1997-2010
3 * Copyright by Florian Forster, 2008
4 *****************************************************************************
5 * rrd_update.c RRD Update Function
6 *****************************************************************************
8 *****************************************************************************/
12 #if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
13 #include <sys/locking.h>
21 #include "rrd_rpncalc.h"
23 #include "rrd_is_thread_safe.h"
26 #include "rrd_client.h"
28 #if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
30 * WIN32 does not have gettimeofday and struct timeval. This is a quick and dirty
33 #include <sys/timeb.h>
37 time_t tv_sec; /* seconds */
38 long tv_usec; /* microseconds */
43 int tz_minuteswest; /* minutes W of Greenwich */
44 int tz_dsttime; /* type of dst correction */
47 static int gettimeofday(
49 struct __timezone *tz)
52 struct _timeb current_time;
54 _ftime(¤t_time);
56 t->tv_sec = current_time.time;
57 t->tv_usec = current_time.millitm * 1000;
64 /* FUNCTION PROTOTYPES */
78 static int allocate_data_structures(
81 rrd_value_t **pdp_temp,
84 unsigned long *tmpl_cnt,
85 unsigned long **rra_step_cnt,
86 unsigned long **skip_update,
87 rrd_value_t **pdp_new);
89 static int parse_template(
92 unsigned long *tmpl_cnt,
95 static int process_arg(
99 unsigned long rra_begin,
100 time_t *current_time,
101 unsigned long *current_time_usec,
102 rrd_value_t *pdp_temp,
103 rrd_value_t *pdp_new,
104 unsigned long *rra_step_cnt,
107 unsigned long tmpl_cnt,
108 rrd_info_t ** pcdp_summary,
110 unsigned long *skip_update,
111 int *schedule_smooth);
118 unsigned long tmpl_cnt,
119 time_t *current_time,
120 unsigned long *current_time_usec,
123 static int get_time_from_reading(
127 time_t *current_time,
128 unsigned long *current_time_usec,
131 static int update_pdp_prep(
134 rrd_value_t *pdp_new,
137 static int calculate_elapsed_steps(
139 unsigned long current_time,
140 unsigned long current_time_usec,
144 unsigned long *proc_pdp_cnt);
146 static void simple_update(
149 rrd_value_t *pdp_new);
151 static int process_all_pdp_st(
156 unsigned long elapsed_pdp_st,
157 rrd_value_t *pdp_new,
158 rrd_value_t *pdp_temp);
160 static int process_pdp_st(
162 unsigned long ds_idx,
167 rrd_value_t *pdp_new,
168 rrd_value_t *pdp_temp);
170 static int update_all_cdp_prep(
172 unsigned long *rra_step_cnt,
173 unsigned long rra_begin,
174 rrd_file_t *rrd_file,
175 unsigned long elapsed_pdp_st,
176 unsigned long proc_pdp_cnt,
177 rrd_value_t **last_seasonal_coef,
178 rrd_value_t **seasonal_coef,
179 rrd_value_t *pdp_temp,
180 unsigned long *skip_update,
181 int *schedule_smooth);
183 static int do_schedule_smooth(
185 unsigned long rra_idx,
186 unsigned long elapsed_pdp_st);
188 static int update_cdp_prep(
190 unsigned long elapsed_pdp_st,
191 unsigned long start_pdp_offset,
192 unsigned long *rra_step_cnt,
194 rrd_value_t *pdp_temp,
195 rrd_value_t *last_seasonal_coef,
196 rrd_value_t *seasonal_coef,
199 static void update_cdp(
202 rrd_value_t pdp_temp_val,
203 unsigned long rra_step_cnt,
204 unsigned long elapsed_pdp_st,
205 unsigned long start_pdp_offset,
206 unsigned long pdp_cnt,
211 static void initialize_cdp_val(
214 rrd_value_t pdp_temp_val,
215 unsigned long start_pdp_offset,
216 unsigned long pdp_cnt);
218 static void reset_cdp(
220 unsigned long elapsed_pdp_st,
221 rrd_value_t *pdp_temp,
222 rrd_value_t *last_seasonal_coef,
223 rrd_value_t *seasonal_coef,
227 enum cf_en current_cf);
229 static rrd_value_t initialize_carry_over(
230 rrd_value_t pdp_temp_val,
232 unsigned long elapsed_pdp_st,
233 unsigned long start_pdp_offset,
234 unsigned long pdp_cnt);
236 static rrd_value_t calculate_cdp_val(
238 rrd_value_t pdp_temp_val,
239 unsigned long elapsed_pdp_st,
244 static int update_aberrant_cdps(
246 rrd_file_t *rrd_file,
247 unsigned long rra_begin,
248 unsigned long elapsed_pdp_st,
249 rrd_value_t *pdp_temp,
250 rrd_value_t **seasonal_coef);
252 static int write_to_rras(
254 rrd_file_t *rrd_file,
255 unsigned long *rra_step_cnt,
256 unsigned long rra_begin,
258 unsigned long *skip_update,
259 rrd_info_t ** pcdp_summary);
261 static int write_RRA_row(
262 rrd_file_t *rrd_file,
264 unsigned long rra_idx,
265 unsigned short CDP_scratch_idx,
266 rrd_info_t ** pcdp_summary,
269 static int smooth_all_rras(
271 rrd_file_t *rrd_file,
272 unsigned long rra_begin);
275 static int write_changes_to_disk(
277 rrd_file_t *rrd_file,
282 * normalize time as returned by gettimeofday. usec part must
/*
 * Normalize a time value as returned by gettimeofday(): the tv_usec
 * part must be non-negative.  If it has gone negative (e.g. after a
 * subtraction elsewhere), borrow one second and repay it in
 * microseconds so the (sec, usec) pair represents the same instant.
 */
static void normalize_time(
    struct timeval *t)
{
    if (t->tv_usec < 0) {
        t->tv_sec--;             /* borrow one second ... */
        t->tv_usec += 1000000L;  /* ... and add it back as microseconds */
    }
}
295 * Sets current_time and current_time_usec based on the current time.
296 * current_time_usec is set to 0 if the version number is 1 or 2.
/*
 * Set *current_time and *current_time_usec from the wall clock.
 * The sub-second part is only meaningful for RRD file format
 * version >= 3; for versions 1 and 2 current_time_usec is forced
 * to 0 (those formats cannot store sub-second update times).
 */
static void initialize_time(
    time_t *current_time,
    unsigned long *current_time_usec,
    int version)
{
    struct timeval tmp_time;    /* used for time conversion */

    gettimeofday(&tmp_time, 0);
    normalize_time(&tmp_time);  /* make sure tv_usec is non-negative */
    *current_time = tmp_time.tv_sec;
    if (version >= 3) {
        *current_time_usec = tmp_time.tv_usec;
    } else {
        *current_time_usec = 0;
    }
}
315 #define IFDNAN(X,Y) (isnan(X) ? (Y) : (X));
317 rrd_info_t *rrd_update_v(
322 rrd_info_t *result = NULL;
324 char *opt_daemon = NULL;
325 struct option long_options[] = {
326 {"template", required_argument, 0, 't'},
332 opterr = 0; /* initialize getopt */
335 int option_index = 0;
338 opt = getopt_long(argc, argv, "t:", long_options, &option_index);
349 rrd_set_error("unknown option '%s'", argv[optind - 1]);
354 opt_daemon = getenv (ENV_RRDCACHED_ADDRESS);
355 if (opt_daemon != NULL && ! strcmp(opt_daemon,"")) {
356 rrd_set_error ("The \"%s\" environment variable is defined, "
357 "but \"%s\" cannot work with rrdcached. Either unset "
358 "the environment variable or use \"update\" instead.",
359 ENV_RRDCACHED_ADDRESS, argv[0]);
363 /* need at least 2 arguments: filename, data. */
364 if (argc - optind < 2) {
365 rrd_set_error("Not enough arguments");
369 result = rrd_info_push(NULL, sprintf_alloc("return_value"), RD_I_INT, rc);
370 rc.u_int = _rrd_update(argv[optind], tmplt,
372 (const char **) (argv + optind + 1), result);
373 result->value.u_int = rc.u_int;
382 struct option long_options[] = {
383 {"template", required_argument, 0, 't'},
384 {"daemon", required_argument, 0, 'd'},
387 int option_index = 0;
391 char *opt_daemon = NULL;
394 opterr = 0; /* initialize getopt */
397 opt = getopt_long(argc, argv, "t:d:", long_options, &option_index);
404 tmplt = strdup(optarg);
408 if (opt_daemon != NULL)
410 opt_daemon = strdup (optarg);
411 if (opt_daemon == NULL)
413 rrd_set_error("strdup failed.");
419 rrd_set_error("unknown option '%s'", argv[optind - 1]);
424 /* need at least 2 arguments: filename, data. */
425 if (argc - optind < 2) {
426 rrd_set_error("Not enough arguments");
430 { /* try to connect to rrdcached */
431 int status = rrdc_connect(opt_daemon);
438 if ((tmplt != NULL) && rrdc_is_connected(opt_daemon))
440 rrd_set_error("The caching daemon cannot be used together with "
445 if (! rrdc_is_connected(opt_daemon))
447 rc = rrd_update_r(argv[optind], tmplt,
448 argc - optind - 1, (const char **) (argv + optind + 1));
450 else /* we are connected */
452 rc = rrdc_update (argv[optind], /* file */
453 argc - optind - 1, /* values_num */
454 (const char *const *) (argv + optind + 1)); /* values */
456 rrd_set_error("Failed sending the values to rrdcached: %s",
466 if (opt_daemon != NULL)
475 const char *filename,
480 return _rrd_update(filename, tmplt, argc, argv, NULL);
484 const char *filename,
488 rrd_info_t * pcdp_summary)
490 return _rrd_update(filename, tmplt, argc, argv, pcdp_summary);
494 const char *filename,
498 rrd_info_t * pcdp_summary)
503 unsigned long rra_begin; /* byte pointer to the rra
504 * area in the rrd file. this
505 * pointer never changes value */
506 rrd_value_t *pdp_new; /* prepare the incoming data to be added
507 * to the existing entry */
508 rrd_value_t *pdp_temp; /* prepare the pdp values to be added
509 * to the cdp values */
511 long *tmpl_idx; /* index representing the settings
512 * transported by the tmplt index */
513 unsigned long tmpl_cnt = 2; /* time and data */
515 time_t current_time = 0;
516 unsigned long current_time_usec = 0; /* microseconds part of current time */
518 int schedule_smooth = 0;
520 /* number of elapsed PDP steps since last update */
521 unsigned long *rra_step_cnt = NULL;
523 int version; /* rrd version */
524 rrd_file_t *rrd_file;
525 char *arg_copy; /* for processing the argv */
526 unsigned long *skip_update; /* RRAs to advance but not write */
528 /* need at least 1 arguments: data. */
530 rrd_set_error("Not enough arguments");
535 if ((rrd_file = rrd_open(filename, &rrd, RRD_READWRITE)) == NULL) {
538 /* We are now at the beginning of the rra's */
539 rra_begin = rrd_file->header_len;
541 version = atoi(rrd.stat_head->version);
543 initialize_time(¤t_time, ¤t_time_usec, version);
545 /* get exclusive lock to whole file.
546 * lock gets removed when we close the file.
548 if (rrd_lock(rrd_file) != 0) {
549 rrd_set_error("could not lock RRD");
553 if (allocate_data_structures(&rrd, &updvals,
554 &pdp_temp, tmplt, &tmpl_idx, &tmpl_cnt,
555 &rra_step_cnt, &skip_update,
560 /* loop through the arguments. */
561 for (arg_i = 0; arg_i < argc; arg_i++) {
562 if ((arg_copy = strdup(argv[arg_i])) == NULL) {
563 rrd_set_error("failed duplication argv entry");
566 if (process_arg(arg_copy, &rrd, rrd_file, rra_begin,
567 ¤t_time, ¤t_time_usec, pdp_temp, pdp_new,
568 rra_step_cnt, updvals, tmpl_idx, tmpl_cnt,
569 &pcdp_summary, version, skip_update,
570 &schedule_smooth) == -1) {
571 if (rrd_test_error()) { /* Should have error string always here */
574 /* Prepend file name to error message */
575 if ((save_error = strdup(rrd_get_error())) != NULL) {
576 rrd_set_error("%s: %s", filename, save_error);
588 /* if we got here and if there is an error and if the file has not been
589 * written to, then close things up and return. */
590 if (rrd_test_error()) {
591 goto err_free_structures;
594 if (write_changes_to_disk(&rrd, rrd_file, version) == -1) {
595 goto err_free_structures;
599 /* calling the smoothing code here guarantees at most one smoothing
600 * operation per rrd_update call. Unfortunately, it is possible with bulk
601 * updates, or a long-delayed update for smoothing to occur off-schedule.
602 * This really isn't critical except during the burn-in cycles. */
603 if (schedule_smooth) {
604 smooth_all_rras(&rrd, rrd_file, rra_begin);
607 /* rrd_dontneed(rrd_file,&rrd); */
633 * Allocate some important arrays used, and initialize the template.
635 * When it returns, either all of the structures are allocated
636 * or none of them are.
638 * Returns 0 on success, -1 on error.
640 static int allocate_data_structures(
643 rrd_value_t **pdp_temp,
646 unsigned long *tmpl_cnt,
647 unsigned long **rra_step_cnt,
648 unsigned long **skip_update,
649 rrd_value_t **pdp_new)
652 if ((*updvals = (char **) malloc(sizeof(char *)
653 * (rrd->stat_head->ds_cnt + 1))) == NULL) {
654 rrd_set_error("allocating updvals pointer array.");
657 if ((*pdp_temp = (rrd_value_t *) malloc(sizeof(rrd_value_t)
658 * rrd->stat_head->ds_cnt)) ==
660 rrd_set_error("allocating pdp_temp.");
661 goto err_free_updvals;
663 if ((*skip_update = (unsigned long *) malloc(sizeof(unsigned long)
665 rrd->stat_head->rra_cnt)) ==
667 rrd_set_error("allocating skip_update.");
668 goto err_free_pdp_temp;
670 if ((*tmpl_idx = (long *) malloc(sizeof(unsigned long)
671 * (rrd->stat_head->ds_cnt + 1))) == NULL) {
672 rrd_set_error("allocating tmpl_idx.");
673 goto err_free_skip_update;
675 if ((*rra_step_cnt = (unsigned long *) malloc(sizeof(unsigned long)
678 rra_cnt))) == NULL) {
679 rrd_set_error("allocating rra_step_cnt.");
680 goto err_free_tmpl_idx;
683 /* initialize tmplt redirector */
684 /* default config example (assume DS 1 is a CDEF DS)
685 tmpl_idx[0] -> 0; (time)
686 tmpl_idx[1] -> 1; (DS 0)
687 tmpl_idx[2] -> 3; (DS 2)
688 tmpl_idx[3] -> 4; (DS 3) */
689 (*tmpl_idx)[0] = 0; /* time */
690 for (i = 1, ii = 1; i <= rrd->stat_head->ds_cnt; i++) {
691 if (dst_conv(rrd->ds_def[i - 1].dst) != DST_CDEF)
692 (*tmpl_idx)[ii++] = i;
697 if (parse_template(rrd, tmplt, tmpl_cnt, *tmpl_idx) == -1) {
698 goto err_free_rra_step_cnt;
702 if ((*pdp_new = (rrd_value_t *) malloc(sizeof(rrd_value_t)
703 * rrd->stat_head->ds_cnt)) == NULL) {
704 rrd_set_error("allocating pdp_new.");
705 goto err_free_rra_step_cnt;
710 err_free_rra_step_cnt:
714 err_free_skip_update:
724 * Parses tmplt and puts an ordered list of DS's into tmpl_idx.
726 * Returns 0 on success.
728 static int parse_template(
731 unsigned long *tmpl_cnt,
734 char *dsname, *tmplt_copy;
735 unsigned int tmpl_len, i;
738 *tmpl_cnt = 1; /* the first entry is the time */
740 /* we should work on a writeable copy here */
741 if ((tmplt_copy = strdup(tmplt)) == NULL) {
742 rrd_set_error("error copying tmplt '%s'", tmplt);
748 tmpl_len = strlen(tmplt_copy);
749 for (i = 0; i <= tmpl_len; i++) {
750 if (tmplt_copy[i] == ':' || tmplt_copy[i] == '\0') {
751 tmplt_copy[i] = '\0';
752 if (*tmpl_cnt > rrd->stat_head->ds_cnt) {
753 rrd_set_error("tmplt contains more DS definitions than RRD");
755 goto out_free_tmpl_copy;
757 if ((tmpl_idx[(*tmpl_cnt)++] = ds_match(rrd, dsname) + 1) == 0) {
758 rrd_set_error("unknown DS name '%s'", dsname);
760 goto out_free_tmpl_copy;
762 /* go to the next entry on the tmplt_copy */
764 dsname = &tmplt_copy[i + 1];
774 * Parse an update string, updates the primary data points (PDPs)
775 * and consolidated data points (CDPs), and writes changes to the RRAs.
777 * Returns 0 on success, -1 on error.
779 static int process_arg(
782 rrd_file_t *rrd_file,
783 unsigned long rra_begin,
784 time_t *current_time,
785 unsigned long *current_time_usec,
786 rrd_value_t *pdp_temp,
787 rrd_value_t *pdp_new,
788 unsigned long *rra_step_cnt,
791 unsigned long tmpl_cnt,
792 rrd_info_t ** pcdp_summary,
794 unsigned long *skip_update,
795 int *schedule_smooth)
797 rrd_value_t *seasonal_coef = NULL, *last_seasonal_coef = NULL;
799 /* a vector of future Holt-Winters seasonal coefs */
800 unsigned long elapsed_pdp_st;
802 double interval, pre_int, post_int; /* interval between this and
804 unsigned long proc_pdp_cnt;
806 if (parse_ds(rrd, updvals, tmpl_idx, step_start, tmpl_cnt,
807 current_time, current_time_usec, version) == -1) {
811 interval = (double) (*current_time - rrd->live_head->last_up)
812 + (double) ((long) *current_time_usec -
813 (long) rrd->live_head->last_up_usec) / 1e6f;
815 /* process the data sources and update the pdp_prep
816 * area accordingly */
817 if (update_pdp_prep(rrd, updvals, pdp_new, interval) == -1) {
821 elapsed_pdp_st = calculate_elapsed_steps(rrd,
823 *current_time_usec, interval,
827 /* has a pdp_st moment occurred since the last run ? */
828 if (elapsed_pdp_st == 0) {
829 /* no we have not passed a pdp_st moment. therefore update is simple */
830 simple_update(rrd, interval, pdp_new);
832 /* an pdp_st has occurred. */
833 if (process_all_pdp_st(rrd, interval,
835 elapsed_pdp_st, pdp_new, pdp_temp) == -1) {
838 if (update_all_cdp_prep(rrd, rra_step_cnt,
845 skip_update, schedule_smooth) == -1) {
846 goto err_free_coefficients;
848 if (update_aberrant_cdps(rrd, rrd_file, rra_begin,
849 elapsed_pdp_st, pdp_temp,
850 &seasonal_coef) == -1) {
851 goto err_free_coefficients;
853 if (write_to_rras(rrd, rrd_file, rra_step_cnt, rra_begin,
854 *current_time, skip_update,
855 pcdp_summary) == -1) {
856 goto err_free_coefficients;
858 } /* endif a pdp_st has occurred */
859 rrd->live_head->last_up = *current_time;
860 rrd->live_head->last_up_usec = *current_time_usec;
863 *rrd->legacy_last_up = rrd->live_head->last_up;
866 free(last_seasonal_coef);
869 err_free_coefficients:
871 free(last_seasonal_coef);
876 * Parse a DS string (time + colon-separated values), storing the
877 * results in current_time, current_time_usec, and updvals.
879 * Returns 0 on success, -1 on error.
886 unsigned long tmpl_cnt,
887 time_t *current_time,
888 unsigned long *current_time_usec,
896 /* initialize all ds input to unknown except the first one
897 which has always got to be set */
898 for (i = 1; i <= rrd->stat_head->ds_cnt; i++)
901 /* separate all ds elements; first must be examined separately
902 due to alternate time syntax */
903 if ((p = strchr(input, '@')) != NULL) {
905 } else if ((p = strchr(input, ':')) != NULL) {
908 rrd_set_error("expected timestamp not found in data source from %s",
914 updvals[tmpl_idx[i++]] = p + 1;
919 updvals[tmpl_idx[i++]] = p + 1;
922 rrd_set_error("found extra data on update argument: %s",p+1);
929 rrd_set_error("expected %lu data source readings (got %lu) from %s",
930 tmpl_cnt - 1, i - 1, input);
934 if (get_time_from_reading(rrd, timesyntax, updvals,
935 current_time, current_time_usec,
943 * Parse the time in a DS string, store it in current_time and
944 * current_time_usec and verify that it's later than the last
945 * update for this DS.
947 * Returns 0 on success, -1 on error.
949 static int get_time_from_reading(
953 time_t *current_time,
954 unsigned long *current_time_usec,
958 char *parsetime_error = NULL;
960 rrd_time_value_t ds_tv;
961 struct timeval tmp_time; /* used for time conversion */
963 /* get the time from the reading ... handle N */
964 if (timesyntax == '@') { /* at-style */
965 if ((parsetime_error = rrd_parsetime(updvals[0], &ds_tv))) {
966 rrd_set_error("ds time: %s: %s", updvals[0], parsetime_error);
969 if (ds_tv.type == RELATIVE_TO_END_TIME ||
970 ds_tv.type == RELATIVE_TO_START_TIME) {
971 rrd_set_error("specifying time relative to the 'start' "
972 "or 'end' makes no sense here: %s", updvals[0]);
975 *current_time = mktime(&ds_tv.tm) +ds_tv.offset;
976 *current_time_usec = 0; /* FIXME: how to handle usecs here ? */
977 } else if (strcmp(updvals[0], "N") == 0) {
978 gettimeofday(&tmp_time, 0);
979 normalize_time(&tmp_time);
980 *current_time = tmp_time.tv_sec;
981 *current_time_usec = tmp_time.tv_usec;
983 old_locale = setlocale(LC_NUMERIC, NULL);
984 setlocale(LC_NUMERIC, "C");
986 tmp = strtod(updvals[0], 0);
988 rrd_set_error("converting '%s' to float: %s",
989 updvals[0], rrd_strerror(errno));
992 setlocale(LC_NUMERIC, old_locale);
994 gettimeofday(&tmp_time, 0);
995 tmp = (double)tmp_time.tv_sec + (double)tmp_time.tv_usec * 1e-6f + tmp;
998 *current_time = floor(tmp);
999 *current_time_usec = (long) ((tmp - (double) *current_time) * 1e6f);
1001 /* dont do any correction for old version RRDs */
1003 *current_time_usec = 0;
1005 if (*current_time < rrd->live_head->last_up ||
1006 (*current_time == rrd->live_head->last_up &&
1007 (long) *current_time_usec <= (long) rrd->live_head->last_up_usec)) {
1008 rrd_set_error("illegal attempt to update using time %ld when "
1009 "last update time is %ld (minimum one second step)",
1010 *current_time, rrd->live_head->last_up);
1017 * Update pdp_new by interpreting the updvals according to the DS type
1018 * (COUNTER, GAUGE, etc.).
1020 * Returns 0 on success, -1 on error.
1022 static int update_pdp_prep(
1025 rrd_value_t *pdp_new,
1028 unsigned long ds_idx;
1030 char *endptr; /* used in the conversion */
1033 enum dst_en dst_idx;
1035 for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) {
1036 dst_idx = dst_conv(rrd->ds_def[ds_idx].dst);
1038 /* make sure we do not build diffs with old last_ds values */
1039 if (rrd->ds_def[ds_idx].par[DS_mrhb_cnt].u_cnt < interval) {
1040 strncpy(rrd->pdp_prep[ds_idx].last_ds, "U", LAST_DS_LEN - 1);
1041 rrd->pdp_prep[ds_idx].last_ds[LAST_DS_LEN - 1] = '\0';
1044 /* NOTE: DST_CDEF should never enter this if block, because
1045 * updvals[ds_idx+1][0] is initialized to 'U'; unless the caller
1046 * accidently specified a value for the DST_CDEF. To handle this case,
1047 * an extra check is required. */
1049 if ((updvals[ds_idx + 1][0] != 'U') &&
1050 (dst_idx != DST_CDEF) &&
1051 rrd->ds_def[ds_idx].par[DS_mrhb_cnt].u_cnt >= interval) {
1054 /* pdp_new contains rate * time ... eg the bytes transferred during
1055 * the interval. Doing it this way saves a lot of math operations
1060 /* Check if this is a valid integer. `U' is already handled in
1061 * another branch. */
1062 for (ii = 0; updvals[ds_idx + 1][ii] != 0; ii++) {
1063 if ((ii == 0) && (dst_idx == DST_DERIVE)
1064 && (updvals[ds_idx + 1][ii] == '-'))
1067 if ((updvals[ds_idx + 1][ii] < '0')
1068 || (updvals[ds_idx + 1][ii] > '9')) {
1069 rrd_set_error("not a simple %s integer: '%s'",
1070 (dst_idx == DST_DERIVE) ? "signed" : "unsigned",
1071 updvals[ds_idx + 1]);
1074 } /* for (ii = 0; updvals[ds_idx + 1][ii] != 0; ii++) */
1076 if (rrd->pdp_prep[ds_idx].last_ds[0] != 'U') {
1078 rrd_diff(updvals[ds_idx + 1],
1079 rrd->pdp_prep[ds_idx].last_ds);
1080 if (dst_idx == DST_COUNTER) {
1081 /* simple overflow catcher. This will fail
1082 * terribly for non 32 or 64 bit counters
1083 * ... are there any others in SNMP land?
1085 if (pdp_new[ds_idx] < (double) 0.0)
1086 pdp_new[ds_idx] += (double) 4294967296.0; /* 2^32 */
1087 if (pdp_new[ds_idx] < (double) 0.0)
1088 pdp_new[ds_idx] += (double) 18446744069414584320.0; /* 2^64-2^32 */
1090 rate = pdp_new[ds_idx] / interval;
1092 pdp_new[ds_idx] = DNAN;
1096 old_locale = setlocale(LC_NUMERIC, NULL);
1097 setlocale(LC_NUMERIC, "C");
1099 pdp_new[ds_idx] = strtod(updvals[ds_idx + 1], &endptr);
1101 rrd_set_error("converting '%s' to float: %s",
1102 updvals[ds_idx + 1], rrd_strerror(errno));
1105 setlocale(LC_NUMERIC, old_locale);
1106 if (endptr[0] != '\0') {
1108 ("conversion of '%s' to float not complete: tail '%s'",
1109 updvals[ds_idx + 1], endptr);
1112 rate = pdp_new[ds_idx] / interval;
1115 old_locale = setlocale(LC_NUMERIC, NULL);
1116 setlocale(LC_NUMERIC, "C");
1119 strtod(updvals[ds_idx + 1], &endptr) * interval;
1121 rrd_set_error("converting '%s' to float: %s",
1122 updvals[ds_idx + 1], rrd_strerror(errno));
1125 setlocale(LC_NUMERIC, old_locale);
1126 if (endptr[0] != '\0') {
1128 ("conversion of '%s' to float not complete: tail '%s'",
1129 updvals[ds_idx + 1], endptr);
1132 rate = pdp_new[ds_idx] / interval;
1135 rrd_set_error("rrd contains unknown DS type : '%s'",
1136 rrd->ds_def[ds_idx].dst);
1139 /* break out of this for loop if the error string is set */
1140 if (rrd_test_error()) {
1143 /* make sure pdp_temp is neither too large or too small
1144 * if any of these occur it becomes unknown ...
1145 * sorry folks ... */
1147 ((!isnan(rrd->ds_def[ds_idx].par[DS_max_val].u_val) &&
1148 rate > rrd->ds_def[ds_idx].par[DS_max_val].u_val) ||
1149 (!isnan(rrd->ds_def[ds_idx].par[DS_min_val].u_val) &&
1150 rate < rrd->ds_def[ds_idx].par[DS_min_val].u_val))) {
1151 pdp_new[ds_idx] = DNAN;
1154 /* no news is news all the same */
1155 pdp_new[ds_idx] = DNAN;
1159 /* make a copy of the command line argument for the next run */
1161 fprintf(stderr, "prep ds[%lu]\t"
1165 ds_idx, rrd->pdp_prep[ds_idx].last_ds, updvals[ds_idx + 1],
1168 strncpy(rrd->pdp_prep[ds_idx].last_ds, updvals[ds_idx + 1],
1170 rrd->pdp_prep[ds_idx].last_ds[LAST_DS_LEN - 1] = '\0';
1176 * How many PDP steps have elapsed since the last update? Returns the answer,
1177 * and stores the time between the last update and the last PDP in pre_time,
1178 * and the time between the last PDP and the current time in post_int.
1180 static int calculate_elapsed_steps(
1182 unsigned long current_time,
1183 unsigned long current_time_usec,
1187 unsigned long *proc_pdp_cnt)
1189 unsigned long proc_pdp_st; /* which pdp_st was the last to be processed */
1190 unsigned long occu_pdp_st; /* when was the pdp_st before the last update
1192 unsigned long proc_pdp_age; /* how old was the data in the pdp prep area
1193 * when it was last updated */
1194 unsigned long occu_pdp_age; /* how long ago was the last pdp_step time */
1196 /* when was the current pdp started */
1197 proc_pdp_age = rrd->live_head->last_up % rrd->stat_head->pdp_step;
1198 proc_pdp_st = rrd->live_head->last_up - proc_pdp_age;
1200 /* when did the last pdp_st occur */
1201 occu_pdp_age = current_time % rrd->stat_head->pdp_step;
1202 occu_pdp_st = current_time - occu_pdp_age;
1204 if (occu_pdp_st > proc_pdp_st) {
1205 /* OK we passed the pdp_st moment */
1206 *pre_int = (long) occu_pdp_st - rrd->live_head->last_up; /* how much of the input data
1207 * occurred before the latest
1209 *pre_int -= ((double) rrd->live_head->last_up_usec) / 1e6f; /* adjust usecs */
1210 *post_int = occu_pdp_age; /* how much after it */
1211 *post_int += ((double) current_time_usec) / 1e6f; /* adjust usecs */
1213 *pre_int = interval;
1217 *proc_pdp_cnt = proc_pdp_st / rrd->stat_head->pdp_step;
1220 printf("proc_pdp_age %lu\t"
1222 "occu_pfp_age %lu\t"
1226 "post_int %lf\n", proc_pdp_age, proc_pdp_st,
1227 occu_pdp_age, occu_pdp_st, interval, *pre_int, *post_int);
1230 /* compute the number of elapsed pdp_st moments */
1231 return (occu_pdp_st - proc_pdp_st) / rrd->stat_head->pdp_step;
1235 * Increment the PDP values by the values in pdp_new, or else initialize them.
1237 static void simple_update(
1240 rrd_value_t *pdp_new)
1244 for (i = 0; i < (signed) rrd->stat_head->ds_cnt; i++) {
1245 if (isnan(pdp_new[i])) {
1246 /* this is not really accurate if we use subsecond data arrival time
1247 should have thought of it when going subsecond resolution ...
1248 sorry next format change we will have it! */
1249 rrd->pdp_prep[i].scratch[PDP_unkn_sec_cnt].u_cnt +=
1252 if (isnan(rrd->pdp_prep[i].scratch[PDP_val].u_val)) {
1253 rrd->pdp_prep[i].scratch[PDP_val].u_val = pdp_new[i];
1255 rrd->pdp_prep[i].scratch[PDP_val].u_val += pdp_new[i];
1264 rrd->pdp_prep[i].scratch[PDP_val].u_val,
1265 rrd->pdp_prep[i].scratch[PDP_unkn_sec_cnt].u_cnt);
1271 * Call process_pdp_st for each DS.
1273 * Returns 0 on success, -1 on error.
1275 static int process_all_pdp_st(
1280 unsigned long elapsed_pdp_st,
1281 rrd_value_t *pdp_new,
1282 rrd_value_t *pdp_temp)
1284 unsigned long ds_idx;
1286 /* in pdp_prep[].scratch[PDP_val].u_val we have collected
1287 rate*seconds which occurred up to the last run.
1288 pdp_new[] contains rate*seconds from the latest run.
1289 pdp_temp[] will contain the rate for cdp */
1291 for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) {
1292 if (process_pdp_st(rrd, ds_idx, interval, pre_int, post_int,
1293 elapsed_pdp_st * rrd->stat_head->pdp_step,
1294 pdp_new, pdp_temp) == -1) {
1298 fprintf(stderr, "PDP UPD ds[%lu]\t"
1299 "elapsed_pdp_st %lu\t"
1302 "new_unkn_sec %5lu\n",
1306 rrd->pdp_prep[ds_idx].scratch[PDP_val].u_val,
1307 rrd->pdp_prep[ds_idx].scratch[PDP_unkn_sec_cnt].u_cnt);
1314 * Process an update that occurs after one of the PDP moments.
1315 * Increments the PDP value, sets NAN if time greater than the
1316 * heartbeats have elapsed, processes CDEFs.
1318 * Returns 0 on success, -1 on error.
1320 static int process_pdp_st(
1322 unsigned long ds_idx,
1326 long diff_pdp_st, /* number of seconds in full steps passed since last update */
1327 rrd_value_t *pdp_new,
1328 rrd_value_t *pdp_temp)
1332 /* update pdp_prep to the current pdp_st. */
1333 double pre_unknown = 0.0;
1334 unival *scratch = rrd->pdp_prep[ds_idx].scratch;
1335 unsigned long mrhb = rrd->ds_def[ds_idx].par[DS_mrhb_cnt].u_cnt;
1337 rpnstack_t rpnstack; /* used for COMPUTE DS */
1339 rpnstack_init(&rpnstack);
1342 if (isnan(pdp_new[ds_idx])) {
1343 /* a final bit of unknown to be added before calculation
1344 we use a temporary variable for this so that we
1345 don't have to turn integer lines before using the value */
1346 pre_unknown = pre_int;
1348 if (isnan(scratch[PDP_val].u_val)) {
1349 scratch[PDP_val].u_val = 0;
1351 scratch[PDP_val].u_val += pdp_new[ds_idx] / interval * pre_int;
1354 /* if too much of the pdp_prep is unknown we dump it */
1355 /* if the interval is larger thatn mrhb we get NAN */
1356 if ((interval > mrhb) ||
1357 (rrd->stat_head->pdp_step / 2.0 <
1358 (signed) scratch[PDP_unkn_sec_cnt].u_cnt)) {
1359 pdp_temp[ds_idx] = DNAN;
1361 pdp_temp[ds_idx] = scratch[PDP_val].u_val /
1362 ((double) (diff_pdp_st - scratch[PDP_unkn_sec_cnt].u_cnt) -
1366 /* process CDEF data sources; remember each CDEF DS can
1367 * only reference other DS with a lower index number */
1368 if (dst_conv(rrd->ds_def[ds_idx].dst) == DST_CDEF) {
1372 rpn_expand((rpn_cdefds_t *) &(rrd->ds_def[ds_idx].par[DS_cdef]));
1374 rpnstack_free(&rpnstack);
1377 /* substitute data values for OP_VARIABLE nodes */
1378 for (i = 0; rpnp[i].op != OP_END; i++) {
1379 if (rpnp[i].op == OP_VARIABLE) {
1380 rpnp[i].op = OP_NUMBER;
1381 rpnp[i].val = pdp_temp[rpnp[i].ptr];
1384 /* run the rpn calculator */
1385 if (rpn_calc(rpnp, &rpnstack, 0, pdp_temp, ds_idx) == -1) {
1387 rpnstack_free(&rpnstack);
1393 /* make pdp_prep ready for the next run */
1394 if (isnan(pdp_new[ds_idx])) {
1395 /* this is not realy accurate if we use subsecond data arival time
1396 should have thought of it when going subsecond resolution ...
1397 sorry next format change we will have it! */
1398 scratch[PDP_unkn_sec_cnt].u_cnt = floor(post_int);
1399 scratch[PDP_val].u_val = DNAN;
1401 scratch[PDP_unkn_sec_cnt].u_cnt = 0;
1402 scratch[PDP_val].u_val = pdp_new[ds_idx] / interval * post_int;
1404 rpnstack_free(&rpnstack);
1409 * Iterate over all the RRAs for a given DS and:
1410 * 1. Decide whether to schedule a smooth later
1411 * 2. Decide whether to skip updating SEASONAL and DEVSEASONAL
1414 * Returns 0 on success, -1 on error
1416 static int update_all_cdp_prep(
1418 unsigned long *rra_step_cnt,
1419 unsigned long rra_begin,
1420 rrd_file_t *rrd_file,
1421 unsigned long elapsed_pdp_st,
1422 unsigned long proc_pdp_cnt,
1423 rrd_value_t **last_seasonal_coef,
1424 rrd_value_t **seasonal_coef,
1425 rrd_value_t *pdp_temp,
1426 unsigned long *skip_update,
1427 int *schedule_smooth)
1429 unsigned long rra_idx;
1431 /* index into the CDP scratch array */
1432 enum cf_en current_cf;
1433 unsigned long rra_start;
1435 /* number of rows to be updated in an RRA for a data value. */
1436 unsigned long start_pdp_offset;
1438 rra_start = rra_begin;
1439 for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; rra_idx++) {
1440 current_cf = cf_conv(rrd->rra_def[rra_idx].cf_nam);
1442 rrd->rra_def[rra_idx].pdp_cnt -
1443 proc_pdp_cnt % rrd->rra_def[rra_idx].pdp_cnt;
1444 skip_update[rra_idx] = 0;
1445 if (start_pdp_offset <= elapsed_pdp_st) {
1446 rra_step_cnt[rra_idx] = (elapsed_pdp_st - start_pdp_offset) /
1447 rrd->rra_def[rra_idx].pdp_cnt + 1;
1449 rra_step_cnt[rra_idx] = 0;
1452 if (current_cf == CF_SEASONAL || current_cf == CF_DEVSEASONAL) {
1453 /* If this is a bulk update, we need to skip ahead in the seasonal arrays
1454 * so that they will be correct for the next observed value; note that for
1455 * the bulk update itself, no update will occur to DEVSEASONAL or SEASONAL;
1456 * futhermore, HWPREDICT and DEVPREDICT will be set to DNAN. */
1457 if (rra_step_cnt[rra_idx] > 1) {
1458 skip_update[rra_idx] = 1;
1459 lookup_seasonal(rrd, rra_idx, rra_start, rrd_file,
1460 elapsed_pdp_st, last_seasonal_coef);
1461 lookup_seasonal(rrd, rra_idx, rra_start, rrd_file,
1462 elapsed_pdp_st + 1, seasonal_coef);
1464 /* periodically run a smoother for seasonal effects */
1465 if (do_schedule_smooth(rrd, rra_idx, elapsed_pdp_st)) {
1468 "schedule_smooth: cur_row %lu, elapsed_pdp_st %lu, smooth idx %lu\n",
1469 rrd->rra_ptr[rra_idx].cur_row, elapsed_pdp_st,
1470 rrd->rra_def[rra_idx].par[RRA_seasonal_smooth_idx].
1473 *schedule_smooth = 1;
1476 if (rrd_test_error())
1480 (rrd, elapsed_pdp_st, start_pdp_offset, rra_step_cnt, rra_idx,
1481 pdp_temp, *last_seasonal_coef, *seasonal_coef,
1482 current_cf) == -1) {
1486 rrd->rra_def[rra_idx].row_cnt * rrd->stat_head->ds_cnt *
1487 sizeof(rrd_value_t);
1493 * Are we due for a smooth? Also increments our position in the burn-in cycle.
/*
 * Decide whether a seasonal-smoothing pass is due for RRA rra_idx after
 * elapsed_pdp_st new PDP steps.  Returns non-zero when the smoother should
 * be scheduled.  While still inside the burn-in period this also advances
 * the burn-in counter as a side effect of the return expression.
 * NOTE(review): some lines of this body are elided in this view of the
 * file — confirm against the full source before modifying.
 */
1495 static int do_schedule_smooth(
1497 unsigned long rra_idx,
1498 unsigned long elapsed_pdp_st)
/* scratch slot of this RRA's first data source; its CDP_init_seasonal
 * counter is borrowed as an RRA-wide burn-in tracker (see comment below) */
1500 unsigned long cdp_idx = rra_idx * (rrd->stat_head->ds_cnt);
1501 unsigned long cur_row = rrd->rra_ptr[rra_idx].cur_row;
1502 unsigned long row_cnt = rrd->rra_def[rra_idx].row_cnt;
1503 unsigned long seasonal_smooth_idx =
1504 rrd->rra_def[rra_idx].par[RRA_seasonal_smooth_idx].u_cnt;
1505 unsigned long *init_seasonal =
1506 &(rrd->cdp_prep[cdp_idx].scratch[CDP_init_seasonal].u_cnt);
1508 /* Need to use first cdp parameter buffer to track burnin (burnin requires
1509 * a specific smoothing schedule). The CDP_init_seasonal parameter is
1510 * really an RRA level, not a data source within RRA level parameter, but
1511 * the rra_def is read only for rrd_update (not flushed to disk). */
1512 if (*init_seasonal > BURNIN_CYCLES) {
1513 /* someone has no doubt invented a trick to deal with this wrap around,
1514 * but at least this code is clear. */
1515 if (seasonal_smooth_idx > cur_row) {
1516 /* here elapsed_pdp_st = rra_step_cnt[rra_idx] because of 1-1 mapping
1517 * between PDP and CDP */
1518 return (cur_row + elapsed_pdp_st >= seasonal_smooth_idx);
1520 /* can't rely on negative numbers because we are working with
1521 * unsigned values */
/* smooth index lies behind cur_row: schedule only after wrapping past
 * row_cnt and reaching the smooth index again */
1522 return (cur_row + elapsed_pdp_st >= row_cnt
1523 && cur_row + elapsed_pdp_st >= row_cnt + seasonal_smooth_idx);
1525 /* mark off one of the burn-in cycles */
1526 return (cur_row + elapsed_pdp_st >= row_cnt && ++(*init_seasonal));
1530 * For a given RRA, iterate over the data sources and call the appropriate
1531 * consolidation function.
1533 * Returns 0 on success, -1 on error.
/*
 * For one RRA, walk every data source and either consolidate its PDPs into
 * the CDP scratch area (pdp_cnt > 1) or, for 1:1 RRAs after a bulk update,
 * reset the CDP bookkeeping.  Error state is checked via rrd_test_error().
 * NOTE(review): parts of the parameter list and body are elided in this
 * view — consult the full source before modifying.
 */
1535 static int update_cdp_prep(
1537 unsigned long elapsed_pdp_st,
1538 unsigned long start_pdp_offset,
1539 unsigned long *rra_step_cnt,
1541 rrd_value_t *pdp_temp,
1542 rrd_value_t *last_seasonal_coef,
1543 rrd_value_t *seasonal_coef,
1546 unsigned long ds_idx, cdp_idx;
1548 /* update CDP_PREP areas */
1549 /* loop over data sources within each RRA */
1550 for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) {
/* flat index into cdp_prep: one scratch area per (RRA, DS) pair */
1552 cdp_idx = rra_idx * rrd->stat_head->ds_cnt + ds_idx;
/* consolidation only happens when several PDPs make up one CDP */
1554 if (rrd->rra_def[rra_idx].pdp_cnt > 1) {
1555 update_cdp(rrd->cdp_prep[cdp_idx].scratch, current_cf,
1556 pdp_temp[ds_idx], rra_step_cnt[rra_idx],
1557 elapsed_pdp_st, start_pdp_offset,
1558 rrd->rra_def[rra_idx].pdp_cnt,
/* xff: fraction of unknown PDPs tolerated before the CDP goes unknown */
1559 rrd->rra_def[rra_idx].par[RRA_cdp_xff_val].u_val,
1562 /* Nothing to consolidate if there's one PDP per CDP. However, if
1563 * we've missed some PDPs, let's update null counters etc. */
1564 if (elapsed_pdp_st > 2) {
1565 reset_cdp(rrd, elapsed_pdp_st, pdp_temp, last_seasonal_coef,
1566 seasonal_coef, rra_idx, ds_idx, cdp_idx,
1567 (enum cf_en)current_cf);
/* propagate any error raised by update_cdp/reset_cdp */
1571 if (rrd_test_error())
1573 } /* endif data sources loop */
1578 * Given the new reading (pdp_temp_val), update or initialize the CDP value,
1579 * primary value, secondary value, and # of unknowns.
/*
 * Update one (RRA, DS) CDP scratch area with the new PDP reading.
 * When at least one CDP boundary was crossed (rra_step_cnt > 0) this
 * finalizes CDP_primary_val (subject to the xff unknown-data threshold),
 * fills CDP_secondary_val for any intermediate rows, and re-seeds the
 * carry-over; otherwise it only accumulates into CDP_val.
 * NOTE(review): some lines (parameters, braces, fprintf arguments) are
 * elided in this view — confirm against the full source before modifying.
 */
1581 static void update_cdp(
1584 rrd_value_t pdp_temp_val,
1585 unsigned long rra_step_cnt,
1586 unsigned long elapsed_pdp_st,
1587 unsigned long start_pdp_offset,
1588 unsigned long pdp_cnt,
1593 /* shorthand variables */
1594 rrd_value_t *cdp_val = &scratch[CDP_val].u_val;
1595 rrd_value_t *cdp_primary_val = &scratch[CDP_primary_val].u_val;
1596 rrd_value_t *cdp_secondary_val = &scratch[CDP_secondary_val].u_val;
1597 unsigned long *cdp_unkn_pdp_cnt = &scratch[CDP_unkn_pdp_cnt].u_cnt;
1600 /* If we are in this block, at least 1 CDP value will be written to
1601 * disk, this is the CDP_primary_val entry. If more than 1 value needs
1602 * to be written, then the "fill in" value is the CDP_secondary_val
1604 if (isnan(pdp_temp_val)) {
1605 *cdp_unkn_pdp_cnt += start_pdp_offset;
1606 *cdp_secondary_val = DNAN;
1608 /* CDP_secondary value is the RRA "fill in" value for intermediary
1609 * CDP data entries. No matter the CF, the value is the same because
1610 * the average, max, min, and last of a list of identical values is
1611 * the same, namely, the value itself. */
1612 *cdp_secondary_val = pdp_temp_val;
/* too many unknown PDPs in this CDP interval: the CDP itself is unknown */
1615 if (*cdp_unkn_pdp_cnt > pdp_cnt * xff) {
1616 *cdp_primary_val = DNAN;
1618 initialize_cdp_val(scratch, current_cf, pdp_temp_val,
1619 start_pdp_offset, pdp_cnt);
/* seed CDP_val with the part of the new reading that belongs to the
 * next, not-yet-complete CDP interval */
1622 initialize_carry_over(pdp_temp_val,current_cf,
1624 start_pdp_offset, pdp_cnt);
1625 /* endif meets xff value requirement for a valid value */
1626 /* initialize carry over CDP_unkn_pdp_cnt, this must happen after CDP_primary_val
1627 * is set because CDP_unkn_pdp_cnt is required to compute that value. */
1628 if (isnan(pdp_temp_val))
1629 *cdp_unkn_pdp_cnt = (elapsed_pdp_st - start_pdp_offset) % pdp_cnt;
1631 *cdp_unkn_pdp_cnt = 0;
1632 } else { /* rra_step_cnt[i] == 0 */
/* no CDP boundary crossed: debug-trace and accumulate only */
1635 if (isnan(*cdp_val)) {
1636 fprintf(stderr, "schedule CDP_val update, RRA %d DS %d, DNAN\n",
1639 fprintf(stderr, "schedule CDP_val update, RRA %d DS %d, %10.2f\n",
1643 if (isnan(pdp_temp_val)) {
1644 *cdp_unkn_pdp_cnt += elapsed_pdp_st;
1647 calculate_cdp_val(*cdp_val, pdp_temp_val, elapsed_pdp_st,
1654 * Set the CDP_primary_val and CDP_val to the appropriate initial value based
1655 * on the type of consolidation function.
/*
 * Compute CDP_primary_val from the accumulated CDP_val plus the portion of
 * the new reading (start_pdp_offset PDPs' worth), per consolidation
 * function: weighted average for CF_AVERAGE, max/min comparison for
 * CF_MAXIMUM/CF_MINIMUM, last value otherwise.  IFDNAN substitutes a
 * CF-neutral identity (0, -DINF, DINF) for unknown operands.
 * NOTE(review): case labels and some diagnostics are elided in this view —
 * confirm against the full source before modifying.
 */
1657 static void initialize_cdp_val(
1660 rrd_value_t pdp_temp_val,
1661 unsigned long start_pdp_offset,
1662 unsigned long pdp_cnt)
1664 rrd_value_t cum_val, cur_val;
1666 switch (current_cf) {
1668 cum_val = IFDNAN(scratch[CDP_val].u_val, 0.0);
1669 cur_val = IFDNAN(pdp_temp_val, 0.0);
/* average over the known PDPs only: unknown ones are excluded from the
 * divisor via CDP_unkn_pdp_cnt */
1670 scratch[CDP_primary_val].u_val =
1671 (cum_val + cur_val * start_pdp_offset) /
1672 (pdp_cnt - scratch[CDP_unkn_pdp_cnt].u_cnt);
1675 cum_val = IFDNAN(scratch[CDP_val].u_val, -DINF);
1676 cur_val = IFDNAN(pdp_temp_val, -DINF);
/* both operands unknown should be impossible here (xff check already
 * passed); report it loudly rather than store -DINF silently */
1680 if (isnan(scratch[CDP_val].u_val) && isnan(pdp_temp)) {
1682 "RRA %lu, DS %lu, both CDP_val and pdp_temp are DNAN!",
1688 if (cur_val > cum_val)
1689 scratch[CDP_primary_val].u_val = cur_val;
1691 scratch[CDP_primary_val].u_val = cum_val;
1694 cum_val = IFDNAN(scratch[CDP_val].u_val, DINF);
1695 cur_val = IFDNAN(pdp_temp_val, DINF);
1698 if (isnan(scratch[CDP_val].u_val) && isnan(pdp_temp)) {
1700 "RRA %lu, DS %lu, both CDP_val and pdp_temp are DNAN!", i,
1706 if (cur_val < cum_val)
1707 scratch[CDP_primary_val].u_val = cur_val;
1709 scratch[CDP_primary_val].u_val = cum_val;
/* default (e.g. CF_LAST and aberrant CFs): latest reading wins */
1713 scratch[CDP_primary_val].u_val = pdp_temp_val;
1719 * Update the consolidation function for Holt-Winters functions as
1720 * well as other functions that don't actually consolidate multiple
/*
 * Reset the CDP scratch state of one (RRA, DS) pair after a bulk update
 * (elapsed_pdp_st > 2) for consolidation functions that do not actually
 * consolidate multiple PDPs (Holt-Winters family and friends).
 * NOTE(review): several case labels and braces are elided in this view —
 * confirm against the full source before modifying.
 */
1723 static void reset_cdp(
1725 unsigned long elapsed_pdp_st,
1726 rrd_value_t *pdp_temp,
1727 rrd_value_t *last_seasonal_coef,
1728 rrd_value_t *seasonal_coef,
1732 enum cf_en current_cf)
1734 unival *scratch = rrd->cdp_prep[cdp_idx].scratch;
1736 switch (current_cf) {
/* value-carrying CFs: primary and fill-in rows both take the new reading */
1739 scratch[CDP_primary_val].u_val = pdp_temp[ds_idx];
1740 scratch[CDP_secondary_val].u_val = pdp_temp[ds_idx];
1743 case CF_DEVSEASONAL:
1744 /* need to update cached seasonal values, so they are consistent
1745 * with the bulk update */
1746 /* WARNING: code relies on the fact that CDP_hw_last_seasonal and
1747 * CDP_last_deviation are the same. */
1748 scratch[CDP_hw_last_seasonal].u_val = last_seasonal_coef[ds_idx];
1749 scratch[CDP_hw_seasonal].u_val = seasonal_coef[ds_idx];
1753 /* need to update the null_count and last_null_count.
1754 * even do this for non-DNAN pdp_temp because the
1755 * algorithm is not learning from batch updates. */
1756 scratch[CDP_null_count].u_cnt += elapsed_pdp_st;
1757 scratch[CDP_last_null_count].u_cnt += elapsed_pdp_st - 1;
/* skipped rows of the prediction RRAs are written as unknown */
1760 scratch[CDP_primary_val].u_val = DNAN;
1761 scratch[CDP_secondary_val].u_val = DNAN;
1764 /* do not count missed bulk values as failures */
1765 scratch[CDP_primary_val].u_val = 0;
1766 scratch[CDP_secondary_val].u_val = 0;
1767 /* need to reset violations buffer.
1768 * could do this more carefully, but for now, just
1769 * assume a bulk update wipes away all violations. */
1770 erase_violations(rrd, cdp_idx, rra_idx);
/*
 * Compute the carry-over value that seeds CDP_val for the next, partially
 * filled CDP interval: pdp_into_cdp_cnt is how many of the elapsed PDPs
 * fall into that new interval.  For CF_AVERAGE the carry-over is weighted
 * by that count; other visible CFs carry the raw reading.
 * NOTE(review): the case labels inside both switches are elided in this
 * view — confirm against the full source before modifying.
 */
1775 static rrd_value_t initialize_carry_over(
1776 rrd_value_t pdp_temp_val,
1778 unsigned long elapsed_pdp_st,
1779 unsigned long start_pdp_offset,
1780 unsigned long pdp_cnt)
1782 unsigned long pdp_into_cdp_cnt = ((elapsed_pdp_st - start_pdp_offset) % pdp_cnt);
/* nothing spills into the next interval, or the reading is unknown */
1783 if ( pdp_into_cdp_cnt == 0 || isnan(pdp_temp_val)){
1784 switch (current_cf) {
1796 switch (current_cf) {
/* CF_AVERAGE accumulates a weighted sum, so scale by the PDP count */
1798 return pdp_temp_val * pdp_into_cdp_cnt ;
1800 return pdp_temp_val;
1806 * Update or initialize a CDP value based on the consolidation
1809 * Returns the new value.
/*
 * Fold the new PDP reading into the running CDP accumulator and return the
 * new accumulator value.  An unknown (DNAN) accumulator is (re)initialized
 * from the reading; CF_AVERAGE keeps a weighted sum (scaled by
 * elapsed_pdp_st), CF_MINIMUM/CF_MAXIMUM keep the extremum, and any other
 * CF keeps the latest reading.
 * NOTE(review): some lines (debug guards, closing braces) are elided in
 * this view — confirm against the full source before modifying.
 */
1811 static rrd_value_t calculate_cdp_val(
1812 rrd_value_t cdp_val,
1813 rrd_value_t pdp_temp_val,
1814 unsigned long elapsed_pdp_st,
1825 if (isnan(cdp_val)) {
1826 if (current_cf == CF_AVERAGE) {
/* averages accumulate value * step-count; the divide happens later */
1827 pdp_temp_val *= elapsed_pdp_st;
1830 fprintf(stderr, "Initialize CDP_val for RRA %d DS %d: %10.2f\n",
1831 i, ii, pdp_temp_val);
1833 return pdp_temp_val;
1835 if (current_cf == CF_AVERAGE)
1836 return cdp_val + pdp_temp_val * elapsed_pdp_st;
1837 if (current_cf == CF_MINIMUM)
1838 return (pdp_temp_val < cdp_val) ? pdp_temp_val : cdp_val;
1839 if (current_cf == CF_MAXIMUM)
1840 return (pdp_temp_val > cdp_val) ? pdp_temp_val : cdp_val;
/* CF_LAST and everything else: newest reading wins */
1842 return pdp_temp_val;
1846 * For each RRA, update the seasonal values and then call update_aberrant_CF
1847 * for each data source.
1849 * Return 0 on success, -1 on error.
/*
 * For every 1-PDP-per-CDP RRA, refresh the cached seasonal coefficients
 * (for SEASONAL/DEVSEASONAL RRAs) and run update_aberrant_CF for each data
 * source.  The outer loop runs at most twice — once for CDP_primary_val
 * and once for CDP_secondary_val — and only when elapsed_pdp_st < 3; bulk
 * updates beyond that are handled elsewhere (reset_cdp).
 */
1851 static int update_aberrant_cdps(
1853 rrd_file_t *rrd_file,
1854 unsigned long rra_begin,
1855 unsigned long elapsed_pdp_st,
1856 rrd_value_t *pdp_temp,
1857 rrd_value_t **seasonal_coef)
1859 unsigned long rra_idx, ds_idx, j;
1861 /* number of PDP steps since the last update that
1862 * are assigned to the first CDP to be generated
1863 * since the last update. */
1864 unsigned short scratch_idx;
1865 unsigned long rra_start;
1866 enum cf_en current_cf;
1868 /* this loop is only entered if elapsed_pdp_st < 3 */
1869 for (j = elapsed_pdp_st, scratch_idx = CDP_primary_val;
1870 j > 0 && j < 3; j--, scratch_idx = CDP_secondary_val) {
1871 rra_start = rra_begin;
1872 for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; rra_idx++) {
/* aberrant CFs map one PDP to one CDP, hence the pdp_cnt == 1 filter */
1873 if (rrd->rra_def[rra_idx].pdp_cnt == 1) {
1874 current_cf = cf_conv(rrd->rra_def[rra_idx].cf_nam);
1875 if (current_cf == CF_SEASONAL || current_cf == CF_DEVSEASONAL) {
/* primary pass looks one step ahead, secondary pass two steps */
1876 if (scratch_idx == CDP_primary_val) {
1877 lookup_seasonal(rrd, rra_idx, rra_start, rrd_file,
1878 elapsed_pdp_st + 1, seasonal_coef);
1880 lookup_seasonal(rrd, rra_idx, rra_start, rrd_file,
1881 elapsed_pdp_st + 2, seasonal_coef);
1884 if (rrd_test_error())
1886 /* loop over data sources within each RRA */
1887 for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) {
1888 update_aberrant_CF(rrd, pdp_temp[ds_idx], current_cf,
1889 rra_idx * (rrd->stat_head->ds_cnt) +
1890 ds_idx, rra_idx, ds_idx, scratch_idx,
/* advance to the next RRA's byte offset within the file */
1894 rra_start += rrd->rra_def[rra_idx].row_cnt
1895 * rrd->stat_head->ds_cnt * sizeof(rrd_value_t);
1902 * Move sequentially through the file, writing one RRA at a time. Note this
1903 * architecture divorces the computation of CDP with flushing updated RRA
1906 * Return 0 on success, -1 on error.
/*
 * Walk the file RRA by RRA and write out every CDP row that became due
 * (rra_step_cnt[rra_idx] rows per RRA): the first row comes from
 * CDP_primary_val, any further rows from CDP_secondary_val.  Advances
 * each RRA's cur_row pointer with wrap-around, seeks only when the file
 * position is wrong, and optionally appends per-row info to *pcdp_summary.
 * NOTE(review): some loop-header and error-path lines are elided in this
 * view — confirm against the full source before modifying.
 */
1908 static int write_to_rras(
1910 rrd_file_t *rrd_file,
1911 unsigned long *rra_step_cnt,
1912 unsigned long rra_begin,
1913 time_t current_time,
1914 unsigned long *skip_update,
1915 rrd_info_t ** pcdp_summary)
1917 unsigned long rra_idx;
1918 unsigned long rra_start;
1919 time_t rra_time = 0; /* time of update for a RRA */
1921 unsigned long ds_cnt = rrd->stat_head->ds_cnt;
1923 /* Ready to write to disk */
1924 rra_start = rra_begin;
1926 for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; rra_idx++) {
1927 rra_def_t *rra_def = &rrd->rra_def[rra_idx];
1928 rra_ptr_t *rra_ptr = &rrd->rra_ptr[rra_idx];
1931 unsigned short scratch_idx;
1932 unsigned long step_subtract;
/* first iteration writes CDP_primary_val, all later ones the fill-in
 * CDP_secondary_val; step_subtract tracks that switch for rra_time */
1934 for (scratch_idx = CDP_primary_val,
1936 rra_step_cnt[rra_idx] > 0;
1937 rra_step_cnt[rra_idx]--,
1938 scratch_idx = CDP_secondary_val,
1939 step_subtract = 2) {
1943 fprintf(stderr, " -- RRA Preseek %ld\n", rrd_file->pos);
1945 /* increment, with wrap-around */
1946 if (++rra_ptr->cur_row >= rra_def->row_cnt)
1947 rra_ptr->cur_row = 0;
1949 /* we know what our position should be */
1950 rra_pos_new = rra_start
1951 + ds_cnt * rra_ptr->cur_row * sizeof(rrd_value_t);
1953 /* re-seek if the position is wrong or we wrapped around */
1954 if ((size_t)rra_pos_new != rrd_file->pos) {
1955 if (rrd_seek(rrd_file, rra_pos_new, SEEK_SET) != 0) {
1956 rrd_set_error("seek error in rrd");
1961 fprintf(stderr, " -- RRA Postseek %ld\n", rrd_file->pos);
/* bulk-update RRAs flagged earlier are advanced but not rewritten */
1964 if (skip_update[rra_idx])
1967 if (*pcdp_summary != NULL) {
1968 unsigned long step_time = rra_def->pdp_cnt * rrd->stat_head->pdp_step;
/* timestamp of this row: align to the CDP step, then back off by the
 * number of rows still pending for this RRA */
1970 rra_time = (current_time - current_time % step_time)
1971 - ((rra_step_cnt[rra_idx] - step_subtract) * step_time);
1975 (rrd_file, rrd, rra_idx, scratch_idx,
1976 pcdp_summary, rra_time) == -1)
1979 rrd_notify_row(rrd_file, rra_idx, rra_pos_new, rra_time);
1982 rra_start += rra_def->row_cnt * ds_cnt * sizeof(rrd_value_t);
1989 * Write out one row of values (one value per DS) to the archive.
1991 * Returns 0 on success, -1 on error.
/*
 * Write one row of the given RRA to the file: one rrd_value_t per data
 * source, taken from the CDP scratch slot selected by CDP_scratch_idx.
 * When a summary is requested, each value is also pushed onto
 * *pcdp_summary keyed by timestamp, CF name, pdp_cnt and DS name.
 * NOTE(review): some lines (debug guards, sprintf buffer setup) are elided
 * in this view — confirm against the full source before modifying.
 */
1993 static int write_RRA_row(
1994 rrd_file_t *rrd_file,
1996 unsigned long rra_idx,
1997 unsigned short CDP_scratch_idx,
1998 rrd_info_t ** pcdp_summary,
2001 unsigned long ds_idx, cdp_idx;
2004 for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) {
2005 /* compute the cdp index */
2006 cdp_idx = rra_idx * (rrd->stat_head->ds_cnt) + ds_idx;
2008 fprintf(stderr, " -- RRA WRITE VALUE %e, at %ld CF:%s\n",
2009 rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val,
2010 rrd_file->pos, rrd->rra_def[rra_idx].cf_nam);
2012 if (*pcdp_summary != NULL) {
2013 iv.u_val = rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val;
2014 /* append info to the return hash */
2015 *pcdp_summary = rrd_info_push(*pcdp_summary,
2017 ("[%lli]RRA[%s][%lu]DS[%s]",
2018 (long long)rra_time,
2019 rrd->rra_def[rra_idx].cf_nam,
2020 rrd->rra_def[rra_idx].pdp_cnt,
2021 rrd->ds_def[ds_idx].ds_nam),
/* sequential write: file position was already established by the caller */
2025 if (rrd_write(rrd_file,
2026 &(rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].
2027 u_val), sizeof(rrd_value_t)) != sizeof(rrd_value_t)) {
2028 rrd_set_error("writing rrd: %s", rrd_strerror(errno));
2036 * Call apply_smoother for all DEVSEASONAL and SEASONAL RRAs.
2038 * Returns 0 on success, -1 otherwise
/*
 * Run apply_smoother over every DEVSEASONAL and SEASONAL RRA in the file.
 * rra_start tracks each RRA's byte offset (row_cnt * ds_cnt values) so
 * the smoother can address its rows directly.  Propagates any error
 * raised by apply_smoother via rrd_test_error().
 */
2040 static int smooth_all_rras(
2042 rrd_file_t *rrd_file,
2043 unsigned long rra_begin)
2045 unsigned long rra_start = rra_begin;
2046 unsigned long rra_idx;
2048 for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; ++rra_idx) {
/* only the seasonal-coefficient RRAs are smoothed */
2049 if (cf_conv(rrd->rra_def[rra_idx].cf_nam) == CF_DEVSEASONAL ||
2050 cf_conv(rrd->rra_def[rra_idx].cf_nam) == CF_SEASONAL) {
2052 fprintf(stderr, "Running smoother for rra %lu\n", rra_idx);
2054 apply_smoother(rrd, rra_idx, rra_start, rrd_file);
2055 if (rrd_test_error())
/* advance to the next RRA's byte offset within the file */
2058 rra_start += rrd->rra_def[rra_idx].row_cnt
2059 * rrd->stat_head->ds_cnt * sizeof(rrd_value_t);
2066 * Flush changes to disk (unless we're using mmap)
2068 * Returns 0 on success, -1 otherwise
2070 static int write_changes_to_disk(
2072 rrd_file_t *rrd_file,
2075 /* we just need to write back the live header portion now */
2076 if (rrd_seek(rrd_file, (sizeof(stat_head_t)
2077 + sizeof(ds_def_t) * rrd->stat_head->ds_cnt
2078 + sizeof(rra_def_t) * rrd->stat_head->rra_cnt),
2080 rrd_set_error("seek rrd for live header writeback");
2084 if (rrd_write(rrd_file, rrd->live_head,
2085 sizeof(live_head_t) * 1) != sizeof(live_head_t) * 1) {
2086 rrd_set_error("rrd_write live_head to rrd");
2090 if (rrd_write(rrd_file, rrd->legacy_last_up,
2091 sizeof(time_t) * 1) != sizeof(time_t) * 1) {
2092 rrd_set_error("rrd_write live_head to rrd");
2098 if (rrd_write(rrd_file, rrd->pdp_prep,
2099 sizeof(pdp_prep_t) * rrd->stat_head->ds_cnt)
2100 != (ssize_t) (sizeof(pdp_prep_t) * rrd->stat_head->ds_cnt)) {
2101 rrd_set_error("rrd_write pdp_prep to rrd");
2105 if (rrd_write(rrd_file, rrd->cdp_prep,
2106 sizeof(cdp_prep_t) * rrd->stat_head->rra_cnt *
2107 rrd->stat_head->ds_cnt)
2108 != (ssize_t) (sizeof(cdp_prep_t) * rrd->stat_head->rra_cnt *
2109 rrd->stat_head->ds_cnt)) {
2111 rrd_set_error("rrd_write cdp_prep to rrd");
2115 if (rrd_write(rrd_file, rrd->rra_ptr,
2116 sizeof(rra_ptr_t) * rrd->stat_head->rra_cnt)
2117 != (ssize_t) (sizeof(rra_ptr_t) * rrd->stat_head->rra_cnt)) {
2118 rrd_set_error("rrd_write rra_ptr to rrd");