2 * turbostat -- Log CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors for collectd.
5 * Based on the 'turbostat' tool of the Linux kernel, found at
6 * linux/tools/power/x86/turbostat/turbostat.c:
8 * Copyright (c) 2013 Intel Corporation.
9 * Len Brown <len.brown@intel.com>
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms and conditions of the GNU General Public License,
13 * version 2, as published by the Free Software Foundation.
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24 * Ported to collectd by Vincent Brillault <git@lerya.net>
28 * _GNU_SOURCE is required because of the following functions:
42 #include "utils_time.h"
44 #include "msr-index.h"
46 #ifdef HAVE_SYS_CAPABILITY_H
47 #include <sys/capability.h>
48 #endif /* HAVE_SYS_CAPABILITY_H */
50 #define PLUGIN_NAME "turbostat"
53 * This tool uses the Model-Specific Registers (MSRs) present on Intel
55 * The general description of each of these registers, depending on the
57 * can be found in the Intel® 64 and IA-32 Architectures Software Developer
59 * Volume 3 Chapter 35.
63 * If set, aperf_mperf_unstable disables a/mperf based stats.
64 * This includes: C0 & C1 states, frequency
66 * This value is automatically set if mperf or aperf go backward
68 static _Bool aperf_mperf_unstable;
71 * If set, use kernel logical core numbering for all "per core" metrics.
73 static _Bool config_lcn;
76 * Bitmask of the list of core C states supported by the processor.
77 * Currently supported C-states (by this plugin): 3, 6, 7
79 static unsigned int do_core_cstate;
80 static unsigned int config_core_cstate;
81 static _Bool apply_config_core_cstate;
84 * Bitmask of the list of package C states supported by the processor.
85 * Currently supported C-states (by this plugin): 2, 3, 6, 7, 8, 9, 10
87 static unsigned int do_pkg_cstate;
88 static unsigned int config_pkg_cstate;
89 static _Bool apply_config_pkg_cstate;
92 * Boolean indicating if the processor supports 'I/O System-Management Interrupt
96 static _Bool config_smi;
97 static _Bool apply_config_smi;
100 * Boolean indicating if the processor supports 'Digital temperature sensor'
101 * This feature enables the monitoring of the temperature of each core
103 * This feature has two limitations:
104 * - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature
106 * - Temperatures above the tcc_activation_temp are not recorded
109 static _Bool config_dts;
110 static _Bool apply_config_dts;
113 * Boolean indicating if the processor supports 'Package thermal management'
114 * This feature allows the monitoring of the temperature of each package
116 * This feature has two limitations:
117 * - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature
119 * - Temperatures above the tcc_activation_temp are not recorded
122 static _Bool config_ptm;
123 static _Bool apply_config_ptm;
126 * Thermal Control Circuit Activation Temperature as configured by the user.
127 * This overrides the automated detection via MSR_IA32_TEMPERATURE_TARGET
128 * and should only be used if the automated detection fails.
130 static unsigned int tcc_activation_temp;
132 static unsigned int do_rapl;
133 static unsigned int config_rapl;
134 static _Bool apply_config_rapl;
135 static double rapl_energy_units;
137 #define RAPL_PKG (1 << 0)
138 /* 0x610 MSR_PKG_POWER_LIMIT */
139 /* 0x611 MSR_PKG_ENERGY_STATUS */
140 #define RAPL_DRAM (1 << 1)
141 /* 0x618 MSR_DRAM_POWER_LIMIT */
142 /* 0x619 MSR_DRAM_ENERGY_STATUS */
143 /* 0x61c MSR_DRAM_POWER_INFO */
144 #define RAPL_CORES (1 << 2)
145 /* 0x638 MSR_PP0_POWER_LIMIT */
146 /* 0x639 MSR_PP0_ENERGY_STATUS */
148 #define RAPL_GFX (1 << 3)
149 /* 0x640 MSR_PP1_POWER_LIMIT */
150 /* 0x641 MSR_PP1_ENERGY_STATUS */
151 /* 0x642 MSR_PP1_POLICY */
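/*
 * Fallback Thermal Control Circuit activation temperature (degrees C), used
 * by set_temperature_target() when MSR_IA32_TEMPERATURE_TARGET cannot be
 * read (or yields no usable value) and no TCCActivationTemp is configured.
 */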
152 #define TJMAX_DEFAULT 100
154 static cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_saved_affinity_set;
155 static size_t cpu_present_setsize, cpu_affinity_setsize,
156 cpu_saved_affinity_setsize;
158 static struct thread_data {
159 unsigned long long tsc;
160 unsigned long long aperf;
161 unsigned long long mperf;
162 unsigned long long c1;
163 unsigned int smi_count;
166 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2
167 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
168 } * thread_delta, *thread_even, *thread_odd;
170 static struct core_data {
171 unsigned long long c3;
172 unsigned long long c6;
173 unsigned long long c7;
174 unsigned int core_temp_c;
175 unsigned int core_id;
176 } * core_delta, *core_even, *core_odd;
178 static struct pkg_data {
179 unsigned long long pc2;
180 unsigned long long pc3;
181 unsigned long long pc6;
182 unsigned long long pc7;
183 unsigned long long pc8;
184 unsigned long long pc9;
185 unsigned long long pc10;
186 unsigned int package_id;
187 uint32_t energy_pkg; /* MSR_PKG_ENERGY_STATUS */
188 uint32_t energy_dram; /* MSR_DRAM_ENERGY_STATUS */
189 uint32_t energy_cores; /* MSR_PP0_ENERGY_STATUS */
190 uint32_t energy_gfx; /* MSR_PP1_ENERGY_STATUS */
191 unsigned int tcc_activation_temp;
192 unsigned int pkg_temp_c;
193 } * package_delta, *package_even, *package_odd;
195 #define DELTA_COUNTERS thread_delta, core_delta, package_delta
196 #define ODD_COUNTERS thread_odd, core_odd, package_odd
197 #define EVEN_COUNTERS thread_even, core_even, package_even
198 static _Bool is_even = 1;
200 static _Bool allocated = 0;
201 static _Bool initialized = 0;
203 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
204 (thread_base + (pkg_no)*topology.num_cores * topology.num_threads + \
205 (core_no)*topology.num_threads + (thread_no))
206 #define GET_CORE(core_base, core_no, pkg_no) \
207 (core_base + (pkg_no)*topology.num_cores + (core_no))
208 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
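/*
 * Worked example (illustrative): with topology.num_cores = 4 and
 * topology.num_threads = 2 there are 4 * 2 = 8 thread slots per package, so
 * GET_THREAD(base, 1, 2, 0) == base + 0*8 + 2*2 + 1 == base + 5, i.e. the
 * second thread of the third core of the first package.
 */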
210 struct cpu_topology {
211 unsigned int package_id;
212 unsigned int core_id;
213 _Bool first_core_in_package;
214 _Bool first_thread_in_core;
217 static struct topology {
218 unsigned int max_cpu_id;
219 unsigned int num_packages;
220 unsigned int num_cores;
221 unsigned int num_threads;
222 struct cpu_topology *cpus;
225 static cdtime_t time_even, time_odd, time_delta;
227 static const char *config_keys[] = {
230 "SystemManagementInterrupt",
231 "DigitalTemperatureSensor",
232 "PackageThermalManagement",
234 "RunningAveragePowerLimit",
237 static const int config_keys_num = STATIC_ARRAY_SIZE(config_keys);
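/*
 * Illustrative collectd.conf snippet (example values only; the option names
 * are the config_keys above and most options merely override what is
 * auto-detected by probe_cpu()):
 *
 *   <Plugin turbostat>
 *     CoreCstates "200"              # (1<<3)|(1<<6)|(1<<7): C3, C6, C7
 *     PackageCstates "204"           # (1<<2)|(1<<3)|(1<<6)|(1<<7)
 *     SystemManagementInterrupt true
 *     DigitalTemperatureSensor true
 *     PackageThermalManagement true
 *     RunningAveragePowerLimit "7"   # RAPL_PKG | RAPL_DRAM | RAPL_CORES
 *     TCCActivationTemp "100"
 *   </Plugin>
 */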
239 /*****************************
240 * MSR Manipulation helpers *
241 *****************************/
244 * Open a MSR device for reading
245 * Can change the scheduling affinity of the current process if multiple_read is
248 static int __attribute__((warn_unused_result))
249 open_msr(unsigned int cpu, _Bool multiple_read) {
254 * If we need to do multiple reads, let's migrate to the CPU
255 * Otherwise, we would lose time calling functions on another CPU
257 * If we are not yet initialized (cpu_affinity_setsize = 0),
258 * we need to skip this optimisation.
260 if (multiple_read && cpu_affinity_setsize) {
261 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
262 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
263 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) {
264 ERROR("turbostat plugin: Could not migrate to CPU %d", cpu);
269 ssnprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu);
270 fd = open(pathname, O_RDONLY);
272 ERROR("turbostat plugin: failed to open %s", pathname);
279 * Read a single MSR from an open file descriptor
281 static int __attribute__((warn_unused_result))
282 read_msr(int fd, off_t offset, unsigned long long *msr) {
285 retval = pread(fd, msr, sizeof *msr, offset);
287 if (retval != sizeof *msr) {
288 ERROR("turbostat plugin: MSR offset 0x%llx read failed",
289 (unsigned long long)offset);
296 * Open a MSR device for reading, read the value asked for and close it.
297 * This call will not affect the scheduling affinity of this thread.
299 static ssize_t __attribute__((warn_unused_result))
300 get_msr(unsigned int cpu, off_t offset, unsigned long long *msr) {
304 fd = open_msr(cpu, 0);
307 retval = read_msr(fd, offset, msr);
312 /********************************
313 * Raw data acquisition (1 CPU) *
314 ********************************/
317 * Read all the data available for a single CPU
319 * Core data is shared for all threads in one core: extracted only for the first
321 * Package data is shared for all cores in one package: extracted only for the
322 * first thread of the first core
324 * Side effect: migrates to the targeted CPU
326 static int __attribute__((warn_unused_result))
327 get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) {
328 unsigned int cpu = t->cpu_id;
329 unsigned long long msr;
333 msr_fd = open_msr(cpu, 1);
337 #define READ_MSR(msr, dst) \
339 if (read_msr(msr_fd, msr, dst)) { \
340 ERROR("turbostat plugin: Unable to read " #msr); \
346 READ_MSR(MSR_IA32_TSC, &t->tsc);
348 READ_MSR(MSR_IA32_APERF, &t->aperf);
349 READ_MSR(MSR_IA32_MPERF, &t->mperf);
352 READ_MSR(MSR_SMI_COUNT, &msr);
353 t->smi_count = msr & 0xFFFFFFFF;
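/* MSR_SMI_COUNT is a 32-bit counter, hence keeping only the low word. */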
356 /* collect core counters only for 1st thread in core */
357 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) {
362 if (do_core_cstate & (1 << 3))
363 READ_MSR(MSR_CORE_C3_RESIDENCY, &c->c3);
364 if (do_core_cstate & (1 << 6))
365 READ_MSR(MSR_CORE_C6_RESIDENCY, &c->c6);
366 if (do_core_cstate & (1 << 7))
367 READ_MSR(MSR_CORE_C7_RESIDENCY, &c->c7);
370 READ_MSR(MSR_IA32_THERM_STATUS, &msr);
371 c->core_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
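/*
 * Illustrative reading: bits 22:16 of IA32_THERM_STATUS hold the offset
 * below the TCC activation point, so with tcc_activation_temp = 100 and
 * a readout of 38 the reported core temperature is 100 - 38 = 62 C.
 */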
374 /* collect package counters only for 1st core in package */
375 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
380 if (do_pkg_cstate & (1 << 2))
381 READ_MSR(MSR_PKG_C2_RESIDENCY, &p->pc2);
382 if (do_pkg_cstate & (1 << 3))
383 READ_MSR(MSR_PKG_C3_RESIDENCY, &p->pc3);
384 if (do_pkg_cstate & (1 << 6))
385 READ_MSR(MSR_PKG_C6_RESIDENCY, &p->pc6);
386 if (do_pkg_cstate & (1 << 7))
387 READ_MSR(MSR_PKG_C7_RESIDENCY, &p->pc7);
388 if (do_pkg_cstate & (1 << 8))
389 READ_MSR(MSR_PKG_C8_RESIDENCY, &p->pc8);
390 if (do_pkg_cstate & (1 << 9))
391 READ_MSR(MSR_PKG_C9_RESIDENCY, &p->pc9);
392 if (do_pkg_cstate & (1 << 10))
393 READ_MSR(MSR_PKG_C10_RESIDENCY, &p->pc10);
395 if (do_rapl & RAPL_PKG) {
396 READ_MSR(MSR_PKG_ENERGY_STATUS, &msr);
397 p->energy_pkg = msr & 0xFFFFFFFF;
399 if (do_rapl & RAPL_CORES) {
400 READ_MSR(MSR_PP0_ENERGY_STATUS, &msr);
401 p->energy_cores = msr & 0xFFFFFFFF;
403 if (do_rapl & RAPL_DRAM) {
404 READ_MSR(MSR_DRAM_ENERGY_STATUS, &msr);
405 p->energy_dram = msr & 0xFFFFFFFF;
407 if (do_rapl & RAPL_GFX) {
408 READ_MSR(MSR_PP1_ENERGY_STATUS, &msr);
409 p->energy_gfx = msr & 0xFFFFFFFF;
412 READ_MSR(MSR_IA32_PACKAGE_THERM_STATUS, &msr);
413 p->pkg_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
421 /**********************************
422 * Evaluating the changes (1 CPU) *
423 **********************************/
426 * Extract the evolution old->new in delta at a package level
427 * (some fields, e.g. temperature, are copied from new rather than differenced)
429 static inline void delta_package(struct pkg_data *delta,
430 const struct pkg_data *new,
431 const struct pkg_data *old) {
432 delta->pc2 = new->pc2 - old->pc2;
433 delta->pc3 = new->pc3 - old->pc3;
434 delta->pc6 = new->pc6 - old->pc6;
435 delta->pc7 = new->pc7 - old->pc7;
436 delta->pc8 = new->pc8 - old->pc8;
437 delta->pc9 = new->pc9 - old->pc9;
438 delta->pc10 = new->pc10 - old->pc10;
439 delta->pkg_temp_c = new->pkg_temp_c;
441 delta->energy_pkg = new->energy_pkg - old->energy_pkg;
442 delta->energy_cores = new->energy_cores - old->energy_cores;
443 delta->energy_gfx = new->energy_gfx - old->energy_gfx;
444 delta->energy_dram = new->energy_dram - old->energy_dram;
448 * Extract the evolution old->new in delta at a core level
449 * (some fields, e.g. temperature, are copied from new rather than differenced)
451 static inline void delta_core(struct core_data *delta,
452 const struct core_data *new,
453 const struct core_data *old) {
454 delta->c3 = new->c3 - old->c3;
455 delta->c6 = new->c6 - old->c6;
456 delta->c7 = new->c7 - old->c7;
457 delta->core_temp_c = new->core_temp_c;
461 * Extract the evolution old->new in delta at a thread level
462 * core_delta is required for c1 estimation (tsc - c0 - all core cstates)
464 static inline int __attribute__((warn_unused_result))
465 delta_thread(struct thread_data *delta, const struct thread_data *new,
466 const struct thread_data *old, const struct core_data *cdelta) {
467 delta->tsc = new->tsc - old->tsc;
469 /* check for TSC < 1 Mcycles over interval */
470 if (delta->tsc < (1000 * 1000)) {
471 WARNING("turbostat plugin: Insanely slow TSC rate, TSC stops "
472 "in idle? You can disable all c-states by booting with"
473 " 'idle=poll' or just the deep ones with"
474 " 'processor.max_cstate=1'");
478 delta->c1 = new->c1 - old->c1;
480 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
481 delta->aperf = new->aperf - old->aperf;
482 delta->mperf = new->mperf - old->mperf;
484 if (!aperf_mperf_unstable) {
485 WARNING("turbostat plugin: APERF or MPERF went "
486 "backwards. Frequency results do not cover "
487 "the entire interval. Fix this by running "
488 "Linux-2.6.30 or later.");
490 aperf_mperf_unstable = 1;
495 * As counter collection is not atomic,
496 * it is possible for mperf's non-halted cycles + idle states
497 * to exceed TSC's all cycles: show c1 = 0% in that case.
499 if ((delta->mperf + cdelta->c3 + cdelta->c6 + cdelta->c7) > delta->tsc)
502 /* normal case, derive c1 */
504 delta->tsc - delta->mperf - cdelta->c3 - cdelta->c6 - cdelta->c7;
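/*
 * Illustrative numbers: over a 1 s interval on a 2 GHz TSC, delta->tsc is
 * 2e9; with delta->mperf = 0.5e9 (25% C0) and c3+c6+c7 = 1.2e9 (60%), the
 * residual C1 estimate is 0.3e9 cycles, i.e. 15% of the interval.
 */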
507 if (delta->mperf == 0) {
508 WARNING("turbostat plugin: cpu%d MPERF 0!", old->cpu_id);
509 delta->mperf = 1; /* divide by 0 protection */
513 delta->smi_count = new->smi_count - old->smi_count;
518 /**********************************
519 * Submitting the results (1 CPU) *
520 **********************************/
523 * Submit one gauge value
525 static void turbostat_submit(const char *plugin_instance, const char *type,
526 const char *type_instance, gauge_t value) {
527 value_list_t vl = VALUE_LIST_INIT;
529 vl.values = &(value_t){.gauge = value};
531 sstrncpy(vl.plugin, PLUGIN_NAME, sizeof(vl.plugin));
532 if (plugin_instance != NULL)
533 sstrncpy(vl.plugin_instance, plugin_instance, sizeof(vl.plugin_instance));
534 sstrncpy(vl.type, type, sizeof(vl.type));
535 if (type_instance != NULL)
536 sstrncpy(vl.type_instance, type_instance, sizeof(vl.type_instance));
538 plugin_dispatch_values(&vl);
542 * Submit all the data for a single CPU
544 * Core data is shared for all threads in one core: submitted only for the first
546 * Package data is shared for all cores in one package: submitted only for the
547 * first thread of the first core
549 static int submit_counters(struct thread_data *t, struct core_data *c,
550 struct pkg_data *p) {
551 char name[DATA_MAX_NAME_LEN];
552 double interval_float;
554 interval_float = CDTIME_T_TO_DOUBLE(time_delta);
556 DEBUG("turbostat plugin: submit stats for cpu: %d, core: %d, pkg: %d",
557 t->cpu_id, c->core_id, p->package_id);
559 ssnprintf(name, sizeof(name), "cpu%02d", t->cpu_id);
561 if (!aperf_mperf_unstable)
562 turbostat_submit(name, "percent", "c0", 100.0 * t->mperf / t->tsc);
563 if (!aperf_mperf_unstable)
564 turbostat_submit(name, "percent", "c1", 100.0 * t->c1 / t->tsc);
566 turbostat_submit(name, "frequency", "average",
567 1.0 / 1000000 * t->aperf / interval_float);
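/*
 * submit_counters() runs on the delta counters, so t->aperf is the APERF
 * increment over the interval: dividing by interval_float gives cycles per
 * second, and the 1/1000000 factor converts that to MHz (e.g. 1.2e9 cycles
 * over 1 s is submitted as 1200).
 */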
569 if ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc)))
570 turbostat_submit(name, "frequency", "busy", 1.0 * t->tsc / 1000000 *
571 t->aperf / t->mperf /
574 /* Sanity check (should stay stable) */
575 turbostat_submit(name, "gauge", "TSC",
576 1.0 * t->tsc / 1000000 / interval_float);
580 turbostat_submit(name, "count", NULL, t->smi_count);
582 /* submit per-core data only for 1st thread in core */
583 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
586 /* If not using logical core numbering, set core id */
588 ssnprintf(name, sizeof(name), "core%02d", c->core_id);
591 if (do_core_cstate & (1 << 3))
592 turbostat_submit(name, "percent", "c3", 100.0 * c->c3 / t->tsc);
593 if (do_core_cstate & (1 << 6))
594 turbostat_submit(name, "percent", "c6", 100.0 * c->c6 / t->tsc);
595 if (do_core_cstate & (1 << 7))
596 turbostat_submit(name, "percent", "c7", 100.0 * c->c7 / t->tsc);
599 turbostat_submit(name, "temperature", NULL, c->core_temp_c);
601 /* submit per-package data only for 1st core in package */
602 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
605 ssnprintf(name, sizeof(name), "pkg%02d", p->package_id);
608 turbostat_submit(name, "temperature", NULL, p->pkg_temp_c);
610 if (do_pkg_cstate & (1 << 2))
611 turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2 / t->tsc);
612 if (do_pkg_cstate & (1 << 3))
613 turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3 / t->tsc);
614 if (do_pkg_cstate & (1 << 6))
615 turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6 / t->tsc);
616 if (do_pkg_cstate & (1 << 7))
617 turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7 / t->tsc);
618 if (do_pkg_cstate & (1 << 8))
619 turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8 / t->tsc);
620 if (do_pkg_cstate & (1 << 9))
621 turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9 / t->tsc);
622 if (do_pkg_cstate & (1 << 10))
623 turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10 / t->tsc);
626 if (do_rapl & RAPL_PKG)
627 turbostat_submit(name, "power", "pkg",
628 p->energy_pkg * rapl_energy_units / interval_float);
629 if (do_rapl & RAPL_CORES)
630 turbostat_submit(name, "power", "cores",
631 p->energy_cores * rapl_energy_units / interval_float);
632 if (do_rapl & RAPL_GFX)
633 turbostat_submit(name, "power", "GFX",
634 p->energy_gfx * rapl_energy_units / interval_float);
635 if (do_rapl & RAPL_DRAM)
636 turbostat_submit(name, "power", "DRAM",
637 p->energy_dram * rapl_energy_units / interval_float);
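/*
 * Illustrative RAPL arithmetic: with rapl_energy_units = 1/65536 J and an
 * energy_pkg delta of 6553600 counts over a 10 s interval, the submitted
 * package power is 6553600 / 65536 / 10 = 10 W.
 */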
643 /**********************************
644 * Looping function over all CPUs *
645 **********************************/
648 * Check if a given cpu id is in our compiled list of existing CPUs
650 static int cpu_is_not_present(unsigned int cpu) {
651 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
655 * Loop on all CPUs in topological order
657 * Skip non-present cpus
658 * Return the error code at the first error or 0
660 static int __attribute__((warn_unused_result))
661 for_all_cpus(int(func)(struct thread_data *, struct core_data *,
663 struct thread_data *thread_base, struct core_data *core_base,
664 struct pkg_data *pkg_base) {
667 for (unsigned int pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
668 for (unsigned int core_no = 0; core_no < topology.num_cores; ++core_no) {
669 for (unsigned int thread_no = 0; thread_no < topology.num_threads;
671 struct thread_data *t;
675 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
677 if (cpu_is_not_present(t->cpu_id))
680 c = GET_CORE(core_base, core_no, pkg_no);
681 p = GET_PKG(pkg_base, pkg_no);
683 retval = func(t, c, p);
693 * Dedicated loop: Extract the data evolution for all CPUs
695 * Skip non-present cpus
696 * Return the error code at the first error or 0
698 * Core data is shared for all threads in one core: extracted only for the first
700 * Package data is shared for all cores in one package: extracted only for the
701 * first thread of the first core
703 static int __attribute__((warn_unused_result))
704 for_all_cpus_delta(const struct thread_data *thread_new_base,
705 const struct core_data *core_new_base,
706 const struct pkg_data *pkg_new_base,
707 const struct thread_data *thread_old_base,
708 const struct core_data *core_old_base,
709 const struct pkg_data *pkg_old_base) {
712 for (unsigned int pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
713 for (unsigned int core_no = 0; core_no < topology.num_cores; ++core_no) {
714 for (unsigned int thread_no = 0; thread_no < topology.num_threads;
716 struct thread_data *t_delta;
717 const struct thread_data *t_old, *t_new;
718 struct core_data *c_delta;
720 /* Get correct pointers for threads */
721 t_delta = GET_THREAD(thread_delta, thread_no, core_no, pkg_no);
722 t_new = GET_THREAD(thread_new_base, thread_no, core_no, pkg_no);
723 t_old = GET_THREAD(thread_old_base, thread_no, core_no, pkg_no);
725 /* Skip threads that disappeared */
726 if (cpu_is_not_present(t_delta->cpu_id))
729 /* c_delta is always required for delta_thread */
730 c_delta = GET_CORE(core_delta, core_no, pkg_no);
732 /* calculate core delta only for 1st thread in core */
733 if (t_new->flags & CPU_IS_FIRST_THREAD_IN_CORE) {
734 const struct core_data *c_old, *c_new;
736 c_new = GET_CORE(core_new_base, core_no, pkg_no);
737 c_old = GET_CORE(core_old_base, core_no, pkg_no);
739 delta_core(c_delta, c_new, c_old);
742 /* Always calculate thread delta */
743 retval = delta_thread(t_delta, t_new, t_old, c_delta);
747 /* calculate package delta only for 1st core in package */
748 if (t_new->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) {
749 struct pkg_data *p_delta;
750 const struct pkg_data *p_old, *p_new;
752 p_delta = GET_PKG(package_delta, pkg_no);
753 p_new = GET_PKG(pkg_new_base, pkg_no);
754 p_old = GET_PKG(pkg_old_base, pkg_no);
756 delta_package(p_delta, p_new, p_old);
769 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
770 * the Thermal Control Circuit (TCC) activates.
771 * This is usually equal to tjMax.
773 * Older processors do not have this MSR, so in that case we guess,
774 * but also allow configuration override with "TCCActivationTemp".
776 * Several MSR temperature values are in units of degrees-C
777 * below this value, including the Digital Thermal Sensor (DTS),
778 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
780 static int __attribute__((warn_unused_result))
781 set_temperature_target(struct thread_data *t, struct core_data *c,
782 struct pkg_data *p) {
783 unsigned long long msr;
784 unsigned int target_c_local;
786 /* tcc_activation_temp is used only for dts or ptm */
787 if (!(do_dts || do_ptm))
790 /* this is a per-package concept */
791 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) ||
792 !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
795 if (tcc_activation_temp != 0) {
796 p->tcc_activation_temp = tcc_activation_temp;
800 if (get_msr(t->cpu_id, MSR_IA32_TEMPERATURE_TARGET, &msr))
803 target_c_local = (msr >> 16) & 0xFF;
808 p->tcc_activation_temp = target_c_local;
813 p->tcc_activation_temp = TJMAX_DEFAULT;
814 WARNING("turbostat plugin: cpu%d: Guessing tjMax %d C,"
815 " Please use TCCActivationTemp to specify it.",
816 t->cpu_id, p->tcc_activation_temp);
822 * Identify the functionality of the CPU
824 static int __attribute__((warn_unused_result)) probe_cpu(void) {
825 unsigned int eax, ebx, ecx, edx, max_level;
826 unsigned int fms, family, model;
829 * - EAX: Maximum Input Value for Basic CPUID Information
830 * - EBX: "Genu" (0x756e6547)
831 * - EDX: "ineI" (0x49656e69)
832 * - ECX: "ntel" (0x6c65746e)
834 max_level = ebx = ecx = edx = 0;
835 __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
836 if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
837 ERROR("turbostat plugin: Unsupported CPU (not Intel)");
842 * - EAX: Version Information: Type, Family, Model, and Stepping ID
845 * + 12-13: Processor type
846 * + 16-19: Extended Model ID
847 * + 20-27: Extended Family ID
848 * - EDX: Feature Information:
849 * + 5: Support for MSR read/write operations
851 fms = ebx = ecx = edx = 0;
852 __get_cpuid(1, &fms, &ebx, &ecx, &edx);
853 family = (fms >> 8) & 0xf;
854 model = (fms >> 4) & 0xf;
856 family += (fms >> 20) & 0xf;
857 if (family == 6 || family == 0xf)
858 model += ((fms >> 16) & 0xf) << 4;
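/*
 * Example decode (illustrative): fms = 0x000306C3 yields family 6, base
 * model 0xC and extended model 3, hence model 0x3C (a Haswell client part).
 */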
859 if (!(edx & (1 << 5))) {
860 ERROR("turbostat plugin: Unsupported CPU (no MSR support)");
867 * + 0: Digital temperature sensor is supported if set
868 * + 6: Package thermal management is supported if set
870 * + 0: Hardware Coordination Feedback Capability (Presence of IA32_MPERF and
872 * + 3: The processor supports performance-energy bias preference if set.
873 * It also implies the presence of a new architectural MSR called
874 * IA32_ENERGY_PERF_BIAS
876 * This check is valid for both Intel and AMD
878 eax = ebx = ecx = edx = 0;
879 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
880 do_dts = eax & (1 << 0);
881 do_ptm = eax & (1 << 6);
882 if (!(ecx & (1 << 0))) {
883 ERROR("turbostat plugin: Unsupported CPU (No APERF)");
888 * Enable or disable C states depending on the model and family
896 do_pkg_cstate = (1 << 2) | (1 << 4) | (1 << 6);
902 do_core_cstate = (1 << 1) | (1 << 6);
903 do_pkg_cstate = (1 << 6);
906 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
907 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper
909 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
910 case 0x2E: /* Nehalem-EX Xeon - Beckton */
912 do_core_cstate = (1 << 3) | (1 << 6);
913 do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
916 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
917 case 0x2C: /* Westmere EP - Gulftown */
918 case 0x2F: /* Westmere-EX Xeon - Eagleton */
920 do_core_cstate = (1 << 3) | (1 << 6);
921 do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
925 case 0x2D: /* SNB Xeon */
927 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
928 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
932 case 0x3E: /* IVB Xeon */
934 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
935 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
942 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
943 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
947 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
948 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) |
949 (1 << 9) | (1 << 10);
953 case 0x56: /* BDX-DE */
955 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
956 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
960 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
961 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) |
962 (1 << 9) | (1 << 10);
977 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
981 case 0x56: /* BDX-DE */
982 do_rapl = RAPL_PKG | RAPL_DRAM;
984 case 0x2D: /* SNB Xeon */
985 case 0x3E: /* IVB Xeon */
986 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM;
990 do_rapl = RAPL_PKG | RAPL_CORES;
996 ERROR("turbostat plugin: Unsupported CPU (family: %#x, "
1002 /* Override detected values with configuration */
1003 if (apply_config_core_cstate)
1004 do_core_cstate = config_core_cstate;
1005 if (apply_config_pkg_cstate)
1006 do_pkg_cstate = config_pkg_cstate;
1007 if (apply_config_smi)
1008 do_smi = config_smi;
1009 if (apply_config_dts)
1010 do_dts = config_dts;
1011 if (apply_config_ptm)
1012 do_ptm = config_ptm;
1013 if (apply_config_rapl)
1014 do_rapl = config_rapl;
1017 unsigned long long msr;
1018 if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
1022 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
1024 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
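/*
 * Bits 12:8 of MSR_RAPL_POWER_UNIT encode the Energy Status Unit: a
 * typical value of 16 gives 1 / 2^16 J, i.e. roughly 15.3 microjoules per
 * counter increment (the microjoule-based formula above is used on the
 * model families that report the unit that way).
 */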
1030 /********************
1031 * Topology Probing *
1032 ********************/
1035 * Read a single int from a file.
1037 static int __attribute__((format(printf, 1, 2)))
1038 parse_int_file(const char *fmt, ...) {
1040 char path[PATH_MAX];
1043 va_start(args, fmt);
1044 len = vsnprintf(path, sizeof(path), fmt, args);
1046 if (len < 0 || len >= PATH_MAX) {
1047 ERROR("turbostat plugin: path truncated: '%s'", path);
1052 if (parse_value_file(path, &v, DS_TYPE_DERIVE) != 0) {
1053 ERROR("turbostat plugin: Parsing \"%s\" failed.", path);
1057 return (int)v.derive;
1060 static int get_threads_on_core(unsigned int cpu) {
1067 ssnprintf(path, sizeof(path),
1068 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1069 filep = fopen(path, "r");
1071 ERROR("turbostat plugin: Failed to open '%s'", path);
1076 * if a pair of numbers with a character between: 2 siblings (e.g. 1-2, or 1,4)
1077 * otherwise 1 sibling (self).
1079 matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
1090 * run func(cpu) on every cpu in /proc/stat
1091 * return max_cpu number
1093 static int __attribute__((warn_unused_result))
1094 for_all_proc_cpus(int(func)(unsigned int)) {
1096 unsigned int cpu_num;
1099 fp = fopen("/proc/stat", "r");
1101 ERROR("turbostat plugin: Failed to open /proc/stat");
1105 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1107 ERROR("turbostat plugin: Failed to parse /proc/stat");
1114 fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
1118 retval = func(cpu_num);
1129 * Update the stored topology.max_cpu_id
1131 static int update_max_cpu_id(unsigned int cpu) {
1132 if (topology.max_cpu_id < cpu)
1133 topology.max_cpu_id = cpu;
1137 static int mark_cpu_present(unsigned int cpu) {
1138 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
1142 static int __attribute__((warn_unused_result))
1143 allocate_cpu_set(cpu_set_t **set, size_t *size) {
1144 *set = CPU_ALLOC(topology.max_cpu_id + 1);
1146 ERROR("turbostat plugin: Unable to allocate CPU state");
1149 *size = CPU_ALLOC_SIZE(topology.max_cpu_id + 1);
1150 CPU_ZERO_S(*size, *set);
1155 * Build a local representation of the cpu distribution
1157 static int __attribute__((warn_unused_result)) topology_probe(void) {
1159 unsigned int max_package_id, max_core_id, max_threads;
1160 max_package_id = max_core_id = max_threads = 0;
1162 /* Clean topology */
1163 free(topology.cpus);
1164 memset(&topology, 0, sizeof(topology));
1166 ret = for_all_proc_cpus(update_max_cpu_id);
1171 calloc(1, (topology.max_cpu_id + 1) * sizeof(struct cpu_topology));
1172 if (topology.cpus == NULL) {
1173 ERROR("turbostat plugin: Unable to allocate memory for CPU topology");
1177 ret = allocate_cpu_set(&cpu_present_set, &cpu_present_setsize);
1180 ret = allocate_cpu_set(&cpu_affinity_set, &cpu_affinity_setsize);
1183 ret = allocate_cpu_set(&cpu_saved_affinity_set, &cpu_saved_affinity_setsize);
1187 ret = for_all_proc_cpus(mark_cpu_present);
1193 * find max_core_id, max_package_id
1195 for (unsigned int i = 0; i <= topology.max_cpu_id; ++i) {
1196 unsigned int num_threads;
1197 struct cpu_topology *cpu = &topology.cpus[i];
1199 if (cpu_is_not_present(i)) {
1200 WARNING("turbostat plugin: cpu%d NOT PRESENT", i);
1204 ret = parse_int_file(
1205 "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", i);
1209 cpu->package_id = (unsigned int)ret;
1210 if (cpu->package_id > max_package_id)
1211 max_package_id = cpu->package_id;
1213 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", i);
1217 cpu->core_id = (unsigned int)ret;
1218 if (cpu->core_id > max_core_id)
1219 max_core_id = cpu->core_id;
1220 ret = parse_int_file(
1221 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", i);
1224 else if ((unsigned int)ret == i)
1225 cpu->first_core_in_package = 1;
1227 ret = get_threads_on_core(i);
1231 num_threads = (unsigned int)ret;
1232 if (num_threads > max_threads)
1233 max_threads = num_threads;
1234 ret = parse_int_file(
1235 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", i);
1238 else if ((unsigned int)ret == i)
1239 cpu->first_thread_in_core = 1;
1241 DEBUG("turbostat plugin: cpu %d pkg %d core %d\n", i, cpu->package_id,
1244 /* Num is max + 1 (need to count 0) */
1245 topology.num_packages = max_package_id + 1;
1246 topology.num_cores = max_core_id + 1;
1247 topology.num_threads = max_threads;
1251 free(topology.cpus);
1255 /************************
1256 * Main alloc/init/free *
1257 ************************/
1259 static int allocate_counters(struct thread_data **threads,
1260 struct core_data **cores,
1261 struct pkg_data **packages) {
1262 unsigned int total_threads, total_cores;
1264 if ((topology.num_threads == 0) || (topology.num_cores == 0) ||
1265 (topology.num_packages == 0)) {
1267 "turbostat plugin: Invalid topology: %u threads, %u cores, %u packages",
1268 topology.num_threads, topology.num_cores, topology.num_packages);
1273 topology.num_threads * topology.num_cores * topology.num_packages;
1274 *threads = calloc(total_threads, sizeof(struct thread_data));
1275 if (*threads == NULL) {
1276 ERROR("turbostat plugin: calloc failed");
1280 for (unsigned int i = 0; i < total_threads; ++i)
1281 (*threads)[i].cpu_id = topology.max_cpu_id + 1;
1283 total_cores = topology.num_cores * topology.num_packages;
1284 *cores = calloc(total_cores, sizeof(struct core_data));
1285 if (*cores == NULL) {
1286 ERROR("turbostat plugin: calloc failed");
1291 *packages = calloc(topology.num_packages, sizeof(struct pkg_data));
1292 if (*packages == NULL) {
1293 ERROR("turbostat plugin: calloc failed");
1302 static void init_counter(struct thread_data *thread_base,
1303 struct core_data *core_base, struct pkg_data *pkg_base,
1304 unsigned int cpu_id) {
1305 struct thread_data *t;
1306 struct core_data *c;
1308 struct cpu_topology *cpu = &topology.cpus[cpu_id];
1310 t = GET_THREAD(thread_base, !(cpu->first_thread_in_core), cpu->core_id,
1312 c = GET_CORE(core_base, cpu->core_id, cpu->package_id);
1313 p = GET_PKG(pkg_base, cpu->package_id);
1316 if (cpu->first_thread_in_core)
1317 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
1318 if (cpu->first_core_in_package)
1319 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
1321 c->core_id = cpu->core_id;
1322 p->package_id = cpu->package_id;
1325 static void initialize_counters(void) {
1326 for (unsigned int cpu_id = 0; cpu_id <= topology.max_cpu_id; ++cpu_id) {
1327 if (cpu_is_not_present(cpu_id))
1329 init_counter(EVEN_COUNTERS, cpu_id);
1330 init_counter(ODD_COUNTERS, cpu_id);
1331 init_counter(DELTA_COUNTERS, cpu_id);
1335 static void free_all_buffers(void) {
1339 CPU_FREE(cpu_present_set);
1340 cpu_present_set = NULL;
1341 cpu_present_setsize = 0;
1343 CPU_FREE(cpu_affinity_set);
1344 cpu_affinity_set = NULL;
1345 cpu_affinity_setsize = 0;
1347 CPU_FREE(cpu_saved_affinity_set);
1348 cpu_saved_affinity_set = NULL;
1349 cpu_saved_affinity_setsize = 0;
1357 package_even = NULL;
1369 free(package_delta);
1371 thread_delta = NULL;
1373 package_delta = NULL;
1376 /**********************
1377 * Collectd functions *
1378 **********************/
1380 #define DO_OR_GOTO_ERR(something) \
1382 ret = (something); \
1387 static int setup_all_buffers(void) {
1390 DO_OR_GOTO_ERR(topology_probe());
1391 DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
1392 DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
1393 DO_OR_GOTO_ERR(allocate_counters(&thread_delta, &core_delta, &package_delta));
1394 initialize_counters();
1395 DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
1396 DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, ODD_COUNTERS));
1405 static int turbostat_read(void) {
1409 if ((ret = setup_all_buffers()) < 0)
1413 if (for_all_proc_cpus(cpu_is_not_present)) {
1415 if ((ret = setup_all_buffers()) < 0)
1417 if (for_all_proc_cpus(cpu_is_not_present)) {
1418 ERROR("turbostat plugin: CPU appeared just after "
1424 /* Saving the scheduling affinity, as it will be modified by get_counters */
1425 if (sched_getaffinity(0, cpu_saved_affinity_setsize,
1426 cpu_saved_affinity_set) != 0) {
1427 ERROR("turbostat plugin: Unable to save the CPU affinity");
1432 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
1434 time_even = cdtime();
1442 if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
1444 time_odd = cdtime();
1446 time_delta = time_odd - time_even;
1447 if ((ret = for_all_cpus_delta(ODD_COUNTERS, EVEN_COUNTERS)) < 0)
1449 if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
1452 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
1454 time_even = cdtime();
1456 time_delta = time_even - time_odd;
1457 if ((ret = for_all_cpus_delta(EVEN_COUNTERS, ODD_COUNTERS)) < 0)
1459 if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
1465 * Let's restore the affinity
1466 * This might fail if the number of CPUs changed, but we can't do anything in
1469 (void)sched_setaffinity(0, cpu_saved_affinity_setsize,
1470 cpu_saved_affinity_set);
1474 static int check_permissions(void) {
1476 if (getuid() == 0) {
1477 /* We have everything we need */
1479 #if !defined(HAVE_SYS_CAPABILITY_H) && !defined(CAP_SYS_RAWIO)
1481 ERROR("turbostat plugin: Initialization failed: this plugin "
1482 "requires collectd to run as root");
1485 #else /* HAVE_SYS_CAPABILITY_H && CAP_SYS_RAWIO */
1490 if (check_capability(CAP_SYS_RAWIO) != 0) {
1491 WARNING("turbostat plugin: Collectd doesn't have the "
1492 "CAP_SYS_RAWIO capability. If you don't want to run "
1493 "collectd as root, try running \"setcap "
1494 "cap_sys_rawio=ep\" on collectd binary");
1498 if (euidaccess("/dev/cpu/0/msr", R_OK)) {
1499 WARNING("turbostat plugin: Collectd cannot open "
1500 "/dev/cpu/0/msr. If you don't want to run collectd as "
1501 "root, you need to change the ownership (chown) and "
1502 "permissions on /dev/cpu/*/msr to allow such access");
1507 ERROR("turbostat plugin: Initialization failed: this plugin "
1508 "requires collectd to either to run as root or give "
1509 "collectd a special capability (CAP_SYS_RAWIO) and read "
1510 "access to /dev/cpu/*/msr (see previous warnings)");
1512 #endif /* HAVE_SYS_CAPABILITY_H && CAP_SYS_RAWIO */
1515 static int turbostat_init(void) {
1519 if (stat("/dev/cpu/0/msr", &sb)) {
1520 ERROR("turbostat plugin: Initialization failed: /dev/cpu/0/msr "
1521 "does not exist while the CPU supports MSR. You may be "
1522 "missing the corresponding kernel module, please try '# "
1527 DO_OR_GOTO_ERR(check_permissions());
1529 DO_OR_GOTO_ERR(probe_cpu());
1531 DO_OR_GOTO_ERR(setup_all_buffers());
1533 plugin_register_read(PLUGIN_NAME, turbostat_read);
1541 static int turbostat_config(const char *key, const char *value) {
1542 long unsigned int tmp_val;
1545 if (strcasecmp("CoreCstates", key) == 0) {
1546 tmp_val = strtoul(value, &end, 0);
1547 if (*end != '\0' || tmp_val > UINT_MAX) {
1548 ERROR("turbostat plugin: Invalid CoreCstates '%s'", value);
1551 config_core_cstate = (unsigned int)tmp_val;
1552 apply_config_core_cstate = 1;
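/*
 * The value is a raw bitmask, e.g. "200" ((1<<3)|(1<<6)|(1<<7)) keeps only
 * the C3, C6 and C7 core counters; "0xc8" is accepted as well since
 * strtoul() is called with base 0.
 */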
1553 } else if (strcasecmp("PackageCstates", key) == 0) {
1554 tmp_val = strtoul(value, &end, 0);
1555 if (*end != '\0' || tmp_val > UINT_MAX) {
1556 ERROR("turbostat plugin: Invalid PackageCstates '%s'", value);
1559 config_pkg_cstate = (unsigned int)tmp_val;
1560 apply_config_pkg_cstate = 1;
1561 } else if (strcasecmp("SystemManagementInterrupt", key) == 0) {
1562 config_smi = IS_TRUE(value);
1563 apply_config_smi = 1;
1564 } else if (strcasecmp("DigitalTemperatureSensor", key) == 0) {
1565 config_dts = IS_TRUE(value);
1566 apply_config_dts = 1;
1567 } else if (strcasecmp("PackageThermalManagement", key) == 0) {
1568 config_ptm = IS_TRUE(value);
1569 apply_config_ptm = 1;
1570 } else if (strcasecmp("LogicalCoreNames", key) == 0) {
1571 config_lcn = IS_TRUE(value);
1572 } else if (strcasecmp("RunningAveragePowerLimit", key) == 0) {
1573 tmp_val = strtoul(value, &end, 0);
1574 if (*end != '\0' || tmp_val > UINT_MAX) {
1575 ERROR("turbostat plugin: Invalid RunningAveragePowerLimit '%s'", value);
1578 config_rapl = (unsigned int)tmp_val;
1579 apply_config_rapl = 1;
1580 } else if (strcasecmp("TCCActivationTemp", key) == 0) {
1581 tmp_val = strtoul(value, &end, 0);
1582 if (*end != '\0' || tmp_val > UINT_MAX) {
1583 ERROR("turbostat plugin: Invalid TCCActivationTemp '%s'", value);
1586 tcc_activation_temp = (unsigned int)tmp_val;
1588 ERROR("turbostat plugin: Invalid configuration option '%s'", key);
1594 void module_register(void) {
1595 plugin_register_init(PLUGIN_NAME, turbostat_init);
1596 plugin_register_config(PLUGIN_NAME, turbostat_config, config_keys,