2 * turbostat -- Log CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors for collectd.
5 * Based on the 'turbostat' tool of the Linux kernel, found at
6 * linux/tools/power/x86/turbostat/turbostat.c:
8 * Copyright (c) 2013 Intel Corporation.
9 * Len Brown <len.brown@intel.com>
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms and conditions of the GNU General Public License,
13 * version 2, as published by the Free Software Foundation.
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24 * Ported to collectd by Vincent Brillault <git@lerya.net>
28 * _GNU_SOURCE is required because of the following functions:
41 #include "utils_time.h"
43 #include <asm/msr-index.h>
45 #ifdef HAVE_SYS_CAPABILITY_H
46 #include <sys/capability.h>
47 #endif /* HAVE_SYS_CAPABILITY_H */
49 #define PLUGIN_NAME "turbostat"
52 * This tool uses the Model-Specific Registers (MSRs) present on Intel processors.
53 * The general description of each of these registers, depending on the architecture,
54 * can be found in the Intel® 64 and IA-32 Architectures Software Developer's Manual,
55 * Volume 3 Chapter 35.
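 *
 * As an illustrative sketch (not part of this plugin), such a register can be
 * read from user space through the msr kernel module by pread()ing the MSR
 * index as the file offset on /dev/cpu/<N>/msr:
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   // Hypothetical helper: read IA32_TIME_STAMP_COUNTER (0x10) on CPU 0.
 *   static int example_read_tsc(uint64_t *out)
 *   {
 *       int fd = open("/dev/cpu/0/msr", O_RDONLY);
 *       if (fd < 0)
 *           return -1;
 *       ssize_t ret = pread(fd, out, sizeof(*out), 0x10);
 *       close(fd);
 *       return (ret == sizeof(*out)) ? 0 : -1;
 *   }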
59 * If set, aperf_mperf_unstable disables a/mperf based stats.
60 * This includes: C0 & C1 states, frequency
62 * This value is automatically set if mperf or aperf go backward
64 static _Bool aperf_mperf_unstable;
67 * Bitmask of the list of core C states supported by the processor.
68 * Currently supported C-states (by this plugin): 3, 6, 7
70 static unsigned int do_core_cstate;
71 static unsigned int config_core_cstate;
72 static _Bool apply_config_core_cstate;
75 * Bitmask of the list of package C states supported by the processor.
76 * Currently supported C-states (by this plugin): 2, 3, 6, 7, 8, 9, 10
78 static unsigned int do_pkg_cstate;
79 static unsigned int config_pkg_cstate;
80 static _Bool apply_config_pkg_cstate;
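/*
 * Illustrative note: bit N of these masks corresponds to C-state N, so a
 * "CoreCstates" or "PackageCstates" configuration value is simply the sum of
 * the desired bits, e.g.:
 *   core C3 + C6 + C7    -> (1 << 3) | (1 << 6) | (1 << 7) = 200 (0xC8)
 *   package C2 + C6 + C7 -> (1 << 2) | (1 << 6) | (1 << 7) = 196 (0xC4)
 * e.g. CoreCstates "200" (or "0xC8") restricts the plugin to core C3/C6/C7.
 */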
83 * Boolean indicating if the processor supports 'I/O System-Management Interrupt counter'
86 static _Bool config_smi;
87 static _Bool apply_config_smi;
90 * Boolean indicating if the processor supports 'Digital temperature sensor'
91 * This feature enables the monitoring of the temperature of each core
93 * This feature has two limitations:
94 * - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong
95 * - Temperatures above the tcc_activation_temp are not recorded
98 static _Bool config_dts;
99 static _Bool apply_config_dts;
102 * Boolean indicating if the processor supports 'Package thermal management'
103 * This feature allows the monitoring of the temperature of each package
105 * This feature has two limitations:
106 * - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong
107 * - Temperatures above the tcc_activation_temp are not recorded
110 static _Bool config_ptm;
111 static _Bool apply_config_ptm;
114 * Thermal Control Circuit Activation Temperature as configured by the user.
115 * This overrides the automated detection via MSR_IA32_TEMPERATURE_TARGET
116 * and should only be used if the automated detection fails.
118 static unsigned int tcc_activation_temp;
120 static unsigned int do_rapl;
121 static unsigned int config_rapl;
122 static _Bool apply_config_rapl;
123 static double rapl_energy_units;
125 #define RAPL_PKG (1 << 0)
126 /* 0x610 MSR_PKG_POWER_LIMIT */
127 /* 0x611 MSR_PKG_ENERGY_STATUS */
128 #define RAPL_DRAM (1 << 1)
129 /* 0x618 MSR_DRAM_POWER_LIMIT */
130 /* 0x619 MSR_DRAM_ENERGY_STATUS */
131 /* 0x61c MSR_DRAM_POWER_INFO */
132 #define RAPL_CORES (1 << 2)
133 /* 0x638 MSR_PP0_POWER_LIMIT */
134 /* 0x639 MSR_PP0_ENERGY_STATUS */
136 #define RAPL_GFX (1 << 3)
137 /* 0x640 MSR_PP1_POWER_LIMIT */
138 /* 0x641 MSR_PP1_ENERGY_STATUS */
139 /* 0x642 MSR_PP1_POLICY */
140 #define TJMAX_DEFAULT 100
142 static cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_saved_affinity_set;
143 static size_t cpu_present_setsize, cpu_affinity_setsize, cpu_saved_affinity_setsize;
145 static struct thread_data {
146 unsigned long long tsc;
147 unsigned long long aperf;
148 unsigned long long mperf;
149 unsigned long long c1;
150 unsigned int smi_count;
153 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2
154 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
155 } *thread_delta, *thread_even, *thread_odd;
157 static struct core_data {
158 unsigned long long c3;
159 unsigned long long c6;
160 unsigned long long c7;
161 unsigned int core_temp_c;
162 unsigned int core_id;
163 } *core_delta, *core_even, *core_odd;
165 static struct pkg_data {
166 unsigned long long pc2;
167 unsigned long long pc3;
168 unsigned long long pc6;
169 unsigned long long pc7;
170 unsigned long long pc8;
171 unsigned long long pc9;
172 unsigned long long pc10;
173 unsigned int package_id;
174 uint32_t energy_pkg; /* MSR_PKG_ENERGY_STATUS */
175 uint32_t energy_dram; /* MSR_DRAM_ENERGY_STATUS */
176 uint32_t energy_cores; /* MSR_PP0_ENERGY_STATUS */
177 uint32_t energy_gfx; /* MSR_PP1_ENERGY_STATUS */
178 unsigned int tcc_activation_temp;
179 unsigned int pkg_temp_c;
180 } *package_delta, *package_even, *package_odd;
182 #define DELTA_COUNTERS thread_delta, core_delta, package_delta
183 #define ODD_COUNTERS thread_odd, core_odd, package_odd
184 #define EVEN_COUNTERS thread_even, core_even, package_even
185 static _Bool is_even = 1;
187 static _Bool allocated = 0;
188 static _Bool initialized = 0;
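/*
 * Counters are kept in two full sets ("even" and "odd"). Each read interval
 * fills one of the two sets, alternating via is_even, and the difference
 * between the two most recent snapshots is stored in the "delta" set before
 * being submitted (see turbostat_read below).
 */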
190 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
192 (pkg_no) * topology.num_cores * topology.num_threads + \
193 (core_no) * topology.num_threads + \
195 #define GET_CORE(core_base, core_no, pkg_no) \
197 (pkg_no) * topology.num_cores + \
199 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
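/*
 * Indexing sketch (illustrative, hypothetical topology of 2 packages x
 * 4 cores x 2 threads): the flat per-thread array then holds 2*4*2 = 16
 * entries, and for instance:
 *   GET_THREAD(base, 1, 2, 1) == base + 1*(4*2) + 2*2 + 1 == base + 13  (thread 1, core 2, pkg 1)
 *   GET_CORE(base, 2, 1)      == base + 1*4 + 2           == base + 6
 *   GET_PKG(base, 1)          == base + 1
 */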
201 struct cpu_topology {
202 unsigned int package_id;
203 unsigned int core_id;
204 _Bool first_core_in_package;
205 _Bool first_thread_in_core;
208 static struct topology {
209 unsigned int max_cpu_id;
210 unsigned int num_packages;
211 unsigned int num_cores;
212 unsigned int num_threads;
213 struct cpu_topology *cpus;
216 static cdtime_t time_even, time_odd, time_delta;
218 static const char *config_keys[] =
222 "SystemManagementInterrupt",
223 "DigitalTemperatureSensor",
224 "PackageThermalManagement",
226 "RunningAveragePowerLimit",
228 static const int config_keys_num = STATIC_ARRAY_SIZE (config_keys);
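/*
 * Example configuration (illustrative; every option merely overrides what
 * probe_cpu detects automatically):
 *
 *   <Plugin turbostat>
 *     CoreCstates "200"                 # (1<<3)|(1<<6)|(1<<7): core C3/C6/C7
 *     PackageCstates "204"              # (1<<2)|(1<<3)|(1<<6)|(1<<7)
 *     SystemManagementInterrupt "true"
 *     DigitalTemperatureSensor "true"
 *     PackageThermalManagement "true"
 *     RunningAveragePowerLimit "7"      # RAPL_PKG | RAPL_DRAM | RAPL_CORES
 *     TCCActivationTemp "100"
 *   </Plugin>
 */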
230 /*****************************
231 * MSR Manipulation helpers *
232 *****************************/
235 * Open an MSR device for reading
236 * Can change the scheduling affinity of the current process if multiple_read is 1
238 static int __attribute__((warn_unused_result))
239 open_msr(unsigned int cpu, _Bool multiple_read)
245 * If we need to do multiple reads, let's migrate to the target CPU
246 * Otherwise, we would lose time calling functions on another CPU
248 * If we are not yet initialized (cpu_affinity_setsize = 0),
249 * we need to skip this optimisation.
251 if (multiple_read && cpu_affinity_setsize) {
252 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
253 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
254 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) {
255 ERROR("turbostat plugin: Could not migrate to CPU %d", cpu);
260 ssnprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu);
261 fd = open(pathname, O_RDONLY);
263 ERROR("turbostat plugin: failed to open %s", pathname);
270 * Read a single MSR from an open file descriptor
272 static int __attribute__((warn_unused_result))
273 read_msr(int fd, off_t offset, unsigned long long *msr)
277 retval = pread(fd, msr, sizeof *msr, offset);
279 if (retval != sizeof *msr) {
280 ERROR("turbostat plugin: MSR offset 0x%llx read failed",
281 (unsigned long long)offset);
288 * Open an MSR device for reading, read the requested value and close it.
289 * This call will not affect the scheduling affinity of this thread.
291 static ssize_t __attribute__((warn_unused_result))
292 get_msr(unsigned int cpu, off_t offset, unsigned long long *msr)
297 fd = open_msr(cpu, 0);
300 retval = read_msr(fd, offset, msr);
306 /********************************
307 * Raw data acquisition (1 CPU) *
308 ********************************/
311 * Read all data available for a single CPU
313 * Core data is shared by all threads in one core: extracted only for the first thread
314 * Package data is shared by all cores in one package: extracted only for the first thread of the first core
316 * Side effect: migrates to the targeted CPU
318 static int __attribute__((warn_unused_result))
319 get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
321 unsigned int cpu = t->cpu_id;
322 unsigned long long msr;
326 msr_fd = open_msr(cpu, 1);
330 #define READ_MSR(msr, dst) \
332 if (read_msr(msr_fd, msr, dst)) { \
333 ERROR("turbostat plugin: Unable to read " #msr); \
339 READ_MSR(MSR_IA32_TSC, &t->tsc);
341 READ_MSR(MSR_IA32_APERF, &t->aperf);
342 READ_MSR(MSR_IA32_MPERF, &t->mperf);
345 READ_MSR(MSR_SMI_COUNT, &msr);
346 t->smi_count = msr & 0xFFFFFFFF;
349 /* collect core counters only for 1st thread in core */
350 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) {
355 if (do_core_cstate & (1 << 3))
356 READ_MSR(MSR_CORE_C3_RESIDENCY, &c->c3);
357 if (do_core_cstate & (1 << 6))
358 READ_MSR(MSR_CORE_C6_RESIDENCY, &c->c6);
359 if (do_core_cstate & (1 << 7))
360 READ_MSR(MSR_CORE_C7_RESIDENCY, &c->c7);
363 READ_MSR(MSR_IA32_THERM_STATUS, &msr);
364 c->core_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
367 /* collect package counters only for 1st core in package */
368 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
373 if (do_pkg_cstate & (1 << 2))
374 READ_MSR(MSR_PKG_C2_RESIDENCY, &p->pc2);
375 if (do_pkg_cstate & (1 << 3))
376 READ_MSR(MSR_PKG_C3_RESIDENCY, &p->pc3);
377 if (do_pkg_cstate & (1 << 6))
378 READ_MSR(MSR_PKG_C6_RESIDENCY, &p->pc6);
379 if (do_pkg_cstate & (1 << 7))
380 READ_MSR(MSR_PKG_C7_RESIDENCY, &p->pc7);
381 if (do_pkg_cstate & (1 << 8))
382 READ_MSR(MSR_PKG_C8_RESIDENCY, &p->pc8);
383 if (do_pkg_cstate & (1 << 9))
384 READ_MSR(MSR_PKG_C9_RESIDENCY, &p->pc9);
385 if (do_pkg_cstate & (1 << 10))
386 READ_MSR(MSR_PKG_C10_RESIDENCY, &p->pc10);
388 if (do_rapl & RAPL_PKG) {
389 READ_MSR(MSR_PKG_ENERGY_STATUS, &msr);
390 p->energy_pkg = msr & 0xFFFFFFFF;
392 if (do_rapl & RAPL_CORES) {
393 READ_MSR(MSR_PP0_ENERGY_STATUS, &msr);
394 p->energy_cores = msr & 0xFFFFFFFF;
396 if (do_rapl & RAPL_DRAM) {
397 READ_MSR(MSR_DRAM_ENERGY_STATUS, &msr);
398 p->energy_dram = msr & 0xFFFFFFFF;
400 if (do_rapl & RAPL_GFX) {
401 READ_MSR(MSR_PP1_ENERGY_STATUS, &msr);
402 p->energy_gfx = msr & 0xFFFFFFFF;
405 READ_MSR(MSR_IA32_PACKAGE_THERM_STATUS, &msr);
406 p->pkg_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
415 /**********************************
416 * Evaluating the changes (1 CPU) *
417 **********************************/
420 * Extract the evolution old->new in delta at a package level
421 * (some fields are not new - old deltas, e.g. temperature)
424 delta_package(struct pkg_data *delta, const struct pkg_data *new, const struct pkg_data *old)
426 delta->pc2 = new->pc2 - old->pc2;
427 delta->pc3 = new->pc3 - old->pc3;
428 delta->pc6 = new->pc6 - old->pc6;
429 delta->pc7 = new->pc7 - old->pc7;
430 delta->pc8 = new->pc8 - old->pc8;
431 delta->pc9 = new->pc9 - old->pc9;
432 delta->pc10 = new->pc10 - old->pc10;
433 delta->pkg_temp_c = new->pkg_temp_c;
435 delta->energy_pkg = new->energy_pkg - old->energy_pkg;
436 delta->energy_cores = new->energy_cores - old->energy_cores;
437 delta->energy_gfx = new->energy_gfx - old->energy_gfx;
438 delta->energy_dram = new->energy_dram - old->energy_dram;
442 * Extract the evolution old->new in delta at a core level
443 * (some fields are not new - old deltas, e.g. temperature)
446 delta_core(struct core_data *delta, const struct core_data *new, const struct core_data *old)
448 delta->c3 = new->c3 - old->c3;
449 delta->c6 = new->c6 - old->c6;
450 delta->c7 = new->c7 - old->c7;
451 delta->core_temp_c = new->core_temp_c;
455 * Extract the evolution old->new in delta at a thread level
456 * core_delta is required for c1 estimation (tsc - c0 - all core cstates)
458 static inline int __attribute__((warn_unused_result))
459 delta_thread(struct thread_data *delta, const struct thread_data *new, const struct thread_data *old,
460 const struct core_data *core_delta)
462 delta->tsc = new->tsc - old->tsc;
464 /* check for TSC < 1 Mcycles over interval */
465 if (delta->tsc < (1000 * 1000)) {
466 WARNING("turbostat plugin: Insanely slow TSC rate, TSC stops "
467 "in idle? You can disable all c-states by booting with"
468 " 'idle=poll' or just the deep ones with"
469 " 'processor.max_cstate=1'");
473 delta->c1 = new->c1 - old->c1;
475 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
476 delta->aperf = new->aperf - old->aperf;
477 delta->mperf = new->mperf - old->mperf;
479 if (!aperf_mperf_unstable) {
480 WARNING("turbostat plugin: APERF or MPERF went "
481 "backwards. Frequency results do not cover "
482 "the entire interval. Fix this by running "
483 "Linux-2.6.30 or later.");
485 aperf_mperf_unstable = 1;
490 * As counter collection is not atomic,
491 * it is possible for mperf's non-halted cycles + idle states
492 * to exceed TSC's all cycles: show c1 = 0% in that case.
494 if ((delta->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > delta->tsc)
497 /* normal case, derive c1 */
498 delta->c1 = delta->tsc - delta->mperf - core_delta->c3
499 - core_delta->c6 - core_delta->c7;
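/*
 * Worked example (illustrative numbers): with delta->tsc = 10000000,
 * delta->mperf = 3000000 and core C3+C6+C7 deltas summing to 5000000, the
 * derived C1 residency is 10000000 - 3000000 - 5000000 = 2000000 cycles,
 * i.e. 20% of the interval once submitted as 100 * c1 / tsc.
 */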
502 if (delta->mperf == 0) {
503 WARNING("turbostat plugin: cpu%d MPERF 0!", old->cpu_id);
504 delta->mperf = 1; /* divide by 0 protection */
508 delta->smi_count = new->smi_count - old->smi_count;
513 /**********************************
514 * Submitting the results (1 CPU) *
515 **********************************/
518 * Submit one gauge value
521 turbostat_submit (const char *plugin_instance,
522 const char *type, const char *type_instance,
525 value_list_t vl = VALUE_LIST_INIT;
531 sstrncpy (vl.host, hostname_g, sizeof (vl.host));
532 sstrncpy (vl.plugin, PLUGIN_NAME, sizeof (vl.plugin));
533 if (plugin_instance != NULL)
534 sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance));
535 sstrncpy (vl.type, type, sizeof (vl.type));
536 if (type_instance != NULL)
537 sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance));
539 plugin_dispatch_values (&vl);
543 * Submit all data for a single CPU
545 * Core data is shared by all threads in one core: submitted only for the first thread
546 * Package data is shared by all cores in one package: submitted only for the first thread of the first core
549 submit_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
551 char name[DATA_MAX_NAME_LEN];
552 double interval_float;
554 interval_float = CDTIME_T_TO_DOUBLE(time_delta);
556 ssnprintf(name, sizeof(name), "cpu%02d", t->cpu_id);
558 if (!aperf_mperf_unstable)
559 turbostat_submit(name, "percent", "c0", 100.0 * t->mperf/t->tsc);
560 if (!aperf_mperf_unstable)
561 turbostat_submit(name, "percent", "c1", 100.0 * t->c1/t->tsc);
563 turbostat_submit(name, "frequency", "average", 1.0 / 1000000 * t->aperf / interval_float);
565 if ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc)))
566 turbostat_submit(name, "frequency", "busy", 1.0 * t->tsc / 1000000 * t->aperf / t->mperf / interval_float);
568 /* Sanity check (should stay stable) */
569 turbostat_submit(name, "gauge", "TSC", 1.0 * t->tsc / 1000000 / interval_float);
573 turbostat_submit(name, "count", NULL, t->smi_count);
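/*
 * Worked example for the frequency values above (illustrative numbers):
 * over a 10 s interval with aperf = 1.2e10, mperf = 1.0e10 and tsc = 3.0e10,
 *   average = aperf / 1e6 / interval                   = 1200 MHz
 *   busy    = (tsc / 1e6) * (aperf / mperf) / interval = 3600 MHz
 * "average" is averaged over the whole interval (idle included), while
 * "busy" estimates the clock while the CPU was actually executing.
 */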
575 /* submit per-core data only for 1st thread in core */
576 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
579 ssnprintf(name, sizeof(name), "core%02d", c->core_id);
581 if (do_core_cstate & (1 << 3))
582 turbostat_submit(name, "percent", "c3", 100.0 * c->c3/t->tsc);
583 if (do_core_cstate & (1 << 6))
584 turbostat_submit(name, "percent", "c6", 100.0 * c->c6/t->tsc);
585 if (do_core_cstate & (1 << 7))
586 turbostat_submit(name, "percent", "c7", 100.0 * c->c7/t->tsc);
589 turbostat_submit(name, "temperature", NULL, c->core_temp_c);
591 /* submit per-package data only for 1st core in package */
592 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
595 ssnprintf(name, sizeof(name), "pkg%02d", p->package_id);
598 turbostat_submit(name, "temperature", NULL, p->pkg_temp_c);
600 if (do_pkg_cstate & (1 << 2))
601 turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2/t->tsc);
602 if (do_pkg_cstate & (1 << 3))
603 turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3/t->tsc);
604 if (do_pkg_cstate & (1 << 6))
605 turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6/t->tsc);
606 if (do_pkg_cstate & (1 << 7))
607 turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7/t->tsc);
608 if (do_pkg_cstate & (1 << 8))
609 turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8/t->tsc);
610 if (do_pkg_cstate & (1 << 9))
611 turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9/t->tsc);
612 if (do_pkg_cstate & (1 << 10))
613 turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10/t->tsc);
616 if (do_rapl & RAPL_PKG)
617 turbostat_submit(name, "power", "pkg", p->energy_pkg * rapl_energy_units / interval_float);
618 if (do_rapl & RAPL_CORES)
619 turbostat_submit(name, "power", "cores", p->energy_cores * rapl_energy_units / interval_float);
620 if (do_rapl & RAPL_GFX)
621 turbostat_submit(name, "power", "GFX", p->energy_gfx * rapl_energy_units / interval_float);
622 if (do_rapl & RAPL_DRAM)
623 turbostat_submit(name, "power", "DRAM", p->energy_dram * rapl_energy_units / interval_float);
630 /**********************************
631 * Looping function over all CPUs *
632 **********************************/
635 * Check if a given cpu id is in our compiled list of existing CPUs
638 cpu_is_not_present(unsigned int cpu)
640 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
644 * Loop on all CPUs in topological order
646 * Skip non-present CPUs
647 * Return the first error encountered, or 0 on success
649 static int __attribute__((warn_unused_result))
650 for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
651 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
654 unsigned int pkg_no, core_no, thread_no;
656 for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
657 for (core_no = 0; core_no < topology.num_cores; ++core_no) {
658 for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
659 struct thread_data *t;
663 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
665 if (cpu_is_not_present(t->cpu_id))
668 c = GET_CORE(core_base, core_no, pkg_no);
669 p = GET_PKG(pkg_base, pkg_no);
671 retval = func(t, c, p);
681 * Dedicated loop: Extract the evolution (deltas) of all counters for all CPUs
683 * Skip non-present CPUs
684 * Return the first error encountered, or 0 on success
686 * Core data is shared by all threads in one core: extracted only for the first thread
687 * Package data is shared by all cores in one package: extracted only for the first thread of the first core
689 static int __attribute__((warn_unused_result))
690 for_all_cpus_delta(const struct thread_data *thread_new_base, const struct core_data *core_new_base, const struct pkg_data *pkg_new_base,
691 const struct thread_data *thread_old_base, const struct core_data *core_old_base, const struct pkg_data *pkg_old_base)
694 unsigned int pkg_no, core_no, thread_no;
696 for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
697 for (core_no = 0; core_no < topology.num_cores; ++core_no) {
698 for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
699 struct thread_data *t_delta;
700 const struct thread_data *t_old, *t_new;
701 struct core_data *c_delta;
703 /* Get correct pointers for threads */
704 t_delta = GET_THREAD(thread_delta, thread_no, core_no, pkg_no);
705 t_new = GET_THREAD(thread_new_base, thread_no, core_no, pkg_no);
706 t_old = GET_THREAD(thread_old_base, thread_no, core_no, pkg_no);
708 /* Skip threads that disappeared */
709 if (cpu_is_not_present(t_delta->cpu_id))
712 /* c_delta is always required for delta_thread */
713 c_delta = GET_CORE(core_delta, core_no, pkg_no);
715 /* calculate core delta only for 1st thread in core */
716 if (t_new->flags & CPU_IS_FIRST_THREAD_IN_CORE) {
717 const struct core_data *c_old, *c_new;
719 c_new = GET_CORE(core_new_base, core_no, pkg_no);
720 c_old = GET_CORE(core_old_base, core_no, pkg_no);
722 delta_core(c_delta, c_new, c_old);
725 /* Always calculate thread delta */
726 retval = delta_thread(t_delta, t_new, t_old, c_delta);
730 /* calculate package delta only for 1st core in package */
731 if (t_new->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) {
732 struct pkg_data *p_delta;
733 const struct pkg_data *p_old, *p_new;
735 p_delta = GET_PKG(package_delta, pkg_no);
736 p_new = GET_PKG(pkg_new_base, pkg_no);
737 p_old = GET_PKG(pkg_old_base, pkg_no);
739 delta_package(p_delta, p_new, p_old);
753 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature at which
754 * the Thermal Control Circuit (TCC) activates.
755 * This is usually equal to tjMax.
757 * Older processors do not have this MSR, so there we guess,
758 * but also allow configuration override with "TCCActivationTemp".
760 * Several MSR temperature values are in units of degrees-C
761 * below this value, including the Digital Thermal Sensor (DTS),
762 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
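 *
 * Worked example (illustrative): with tcc_activation_temp = 100 C and a DTS
 * readout of 38 in bits 22:16 of MSR_IA32_THERM_STATUS, the reported core
 * temperature is 100 - 38 = 62 C (see get_counters above).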
764 static int __attribute__((warn_unused_result))
765 set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
767 unsigned long long msr;
768 unsigned int target_c_local;
770 /* tcc_activation_temp is used only for dts or ptm */
771 if (!(do_dts || do_ptm))
774 /* this is a per-package concept */
775 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
778 if (tcc_activation_temp != 0) {
779 p->tcc_activation_temp = tcc_activation_temp;
783 if (get_msr(t->cpu_id, MSR_IA32_TEMPERATURE_TARGET, &msr))
786 target_c_local = (msr >> 16) & 0xFF;
791 p->tcc_activation_temp = target_c_local;
796 p->tcc_activation_temp = TJMAX_DEFAULT;
797 WARNING("turbostat plugin: cpu%d: Guessing tjMax %d C."
798 " Please use TCCActivationTemp to specify it.",
799 t->cpu_id, p->tcc_activation_temp);
805 * Identify the functionality of the CPU
807 static int __attribute__((warn_unused_result))
810 unsigned int eax, ebx, ecx, edx, max_level;
811 unsigned int fms, family, model;
814 * - EAX: Maximum Input Value for Basic CPUID Information
815 * - EBX: "Genu" (0x756e6547)
816 * - EDX: "ineI" (0x49656e69)
817 * - ECX: "ntel" (0x6c65746e)
819 max_level = ebx = ecx = edx = 0;
820 __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
821 if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
822 ERROR("turbostat plugin: Unsupported CPU (not Intel)");
827 * - EAX: Version Information: Type, Family, Model, and Stepping ID
830 * + 12-13: Processor type
831 * + 16-19: Extended Model ID
832 * + 20-27: Extended Family ID
833 * - EDX: Feature Information:
834 * + 5: Support for MSR read/write operations
836 fms = ebx = ecx = edx = 0;
837 __get_cpuid(1, &fms, &ebx, &ecx, &edx);
838 family = (fms >> 8) & 0xf;
839 model = (fms >> 4) & 0xf;
841 family += (fms >> 20) & 0xf;
842 if (family == 6 || family == 0xf)
843 model += ((fms >> 16) & 0xf) << 4;
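/*
 * Worked example (illustrative): fms = 0x000306C3 decodes to family = 0x6
 * and model = 0xC + (0x3 << 4) = 0x3C, i.e. a Haswell client part.
 */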
844 if (!(edx & (1 << 5))) {
845 ERROR("turbostat plugin: Unsupported CPU (no MSR support)");
852 * + 0: Digital temperature sensor is supported if set
853 * + 6: Package thermal management is supported if set
855 * + 0: Hardware Coordination Feedback Capability (Presence of IA32_MPERF and IA32_APERF).
856 * + 3: The processor supports performance-energy bias preference if set.
857 * It also implies the presence of a new architectural MSR called IA32_ENERGY_PERF_BIAS
859 * This check is valid for both Intel and AMD
861 eax = ebx = ecx = edx = 0;
862 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
863 do_dts = eax & (1 << 0);
864 do_ptm = eax & (1 << 6);
865 if (!(ecx & (1 << 0))) {
866 ERROR("turbostat plugin: Unsupported CPU (No APERF)");
871 * Enable or disable C states depending on the model and family
879 do_pkg_cstate = (1 << 2) | (1 << 4) | (1 << 6);
885 do_core_cstate = (1 << 1) | (1 << 6);
886 do_pkg_cstate = (1 << 6);
889 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
890 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
891 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
892 case 0x2E: /* Nehalem-EX Xeon - Beckton */
894 do_core_cstate = (1 << 3) | (1 << 6);
895 do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
898 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
899 case 0x2C: /* Westmere EP - Gulftown */
900 case 0x2F: /* Westmere-EX Xeon - Eagleton */
902 do_core_cstate = (1 << 3) | (1 << 6);
903 do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
907 case 0x2D: /* SNB Xeon */
909 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
910 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
914 case 0x3E: /* IVB Xeon */
916 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
917 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
924 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
925 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
929 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
930 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
934 case 0x56: /* BDX-DE */
936 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
937 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
941 do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
942 do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
957 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
961 case 0x56: /* BDX-DE */
962 do_rapl = RAPL_PKG | RAPL_DRAM ;
964 case 0x2D: /* SNB Xeon */
965 case 0x3E: /* IVB Xeon */
966 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM;
970 do_rapl = RAPL_PKG | RAPL_CORES;
976 ERROR("turbostat plugin: Unsupported CPU (family: %#x, "
977 "model: %#x)", family, model);
981 /* Override detected values with configuration */
982 if (apply_config_core_cstate)
983 do_core_cstate = config_core_cstate;
984 if (apply_config_pkg_cstate)
985 do_pkg_cstate = config_pkg_cstate;
986 if (apply_config_smi)
988 if (apply_config_dts)
990 if (apply_config_ptm)
992 if (apply_config_rapl)
993 do_rapl = config_rapl;
996 unsigned long long msr;
997 if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
1001 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
1003 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
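/*
 * Worked example (illustrative): MSR_RAPL_POWER_UNIT bits 12:8 commonly read
 * 0x10, giving rapl_energy_units = 1 / 2^16 J (~15.3 uJ). A package energy
 * counter delta of 6553600 over a 10 s interval is then submitted as
 * 6553600 / 65536 / 10 = 10 W (see submit_counters above).
 */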
1010 /********************
1011 * Topology Probing *
1012 ********************/
1015 * Read a single int from a file.
1017 static int __attribute__ ((format(printf,1,2)))
1018 parse_int_file(const char *fmt, ...)
1021 char path[PATH_MAX];
1025 va_start(args, fmt);
1026 len = vsnprintf(path, sizeof(path), fmt, args);
1028 if (len < 0 || len >= PATH_MAX) {
1029 ERROR("turbostat plugin: path truncated: '%s'", path);
1033 filep = fopen(path, "r");
1035 ERROR("turbostat plugin: Failed to open '%s'", path);
1038 if (fscanf(filep, "%d", &value) != 1) {
1039 ERROR("turbostat plugin: Failed to parse number from '%s'", path);
1047 get_threads_on_core(unsigned int cpu)
1055 ssnprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1056 filep = fopen(path, "r");
1058 ERROR("turbostat plugin: Failed to open '%s'", path);
1063 * if a pair of numbers with a character between them: 2 siblings (e.g. 1-2, or 1,4)
1064 * otherwise 1 sibling (self).
1066 matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
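/* For example (illustrative): "3" yields 1 match (no sibling), while "3,7"
 * or "2-3" yield 3 matches (2 threads on the core). */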
1077 * run func(cpu) on every cpu in /proc/stat
1078 * return max_cpu number
1080 static int __attribute__((warn_unused_result))
1081 for_all_proc_cpus(int (func)(unsigned int))
1084 unsigned int cpu_num;
1087 fp = fopen("/proc/stat", "r");
1089 ERROR("turbostat plugin: Failed to open /proc/stat");
1093 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1095 ERROR("turbostat plugin: Failed to parse /proc/stat");
1101 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
1105 retval = func(cpu_num);
1116 * Update the stored topology.max_cpu_id
1119 update_max_cpu_id(unsigned int cpu)
1121 if (topology.max_cpu_id < cpu)
1122 topology.max_cpu_id = cpu;
1127 mark_cpu_present(unsigned int cpu)
1129 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
1133 static int __attribute__((warn_unused_result))
1134 allocate_cpu_set(cpu_set_t ** set, size_t * size) {
1135 *set = CPU_ALLOC(topology.max_cpu_id + 1);
1137 ERROR("turbostat plugin: Unable to allocate CPU state");
1140 *size = CPU_ALLOC_SIZE(topology.max_cpu_id + 1);
1141 CPU_ZERO_S(*size, *set);
1146 * Build a local representation of the cpu distribution
1148 static int __attribute__((warn_unused_result))
1153 unsigned int max_package_id, max_core_id, max_threads;
1154 max_package_id = max_core_id = max_threads = 0;
1156 /* Clean topology */
1157 free(topology.cpus);
1158 memset(&topology, 0, sizeof(topology));
1160 ret = for_all_proc_cpus(update_max_cpu_id);
1164 topology.cpus = calloc(1, (topology.max_cpu_id + 1) * sizeof(struct cpu_topology));
1165 if (topology.cpus == NULL) {
1166 ERROR("turbostat plugin: Unable to allocate memory for CPU topology");
1170 ret = allocate_cpu_set(&cpu_present_set, &cpu_present_setsize);
1173 ret = allocate_cpu_set(&cpu_affinity_set, &cpu_affinity_setsize);
1176 ret = allocate_cpu_set(&cpu_saved_affinity_set, &cpu_saved_affinity_setsize);
1180 ret = for_all_proc_cpus(mark_cpu_present);
1186 * find max_core_id, max_package_id
1188 for (i = 0; i <= topology.max_cpu_id; ++i) {
1189 unsigned int num_threads;
1190 struct cpu_topology *cpu = &topology.cpus[i];
1192 if (cpu_is_not_present(i)) {
1193 WARNING("turbostat plugin: cpu%d NOT PRESENT", i);
1197 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", i);
1201 cpu->package_id = (unsigned int) ret;
1202 if (cpu->package_id > max_package_id)
1203 max_package_id = cpu->package_id;
1205 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", i);
1209 cpu->core_id = (unsigned int) ret;
1210 if (cpu->core_id > max_core_id)
1211 max_core_id = cpu->core_id;
1212 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", i);
1215 else if ((unsigned int) ret == i)
1216 cpu->first_core_in_package = 1;
1218 ret = get_threads_on_core(i);
1222 num_threads = (unsigned int) ret;
1223 if (num_threads > max_threads)
1224 max_threads = num_threads;
1225 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", i);
1228 else if ((unsigned int) ret == i)
1229 cpu->first_thread_in_core = 1;
1231 DEBUG("turbostat plugin: cpu %d pkg %d core %d\n",
1232 i, cpu->package_id, cpu->core_id);
1234 /* Num is max + 1 (need to count 0) */
1235 topology.num_packages = max_package_id + 1;
1236 topology.num_cores = max_core_id + 1;
1237 topology.num_threads = max_threads;
1241 free(topology.cpus);
1246 /************************
1247 * Main alloc/init/free *
1248 ************************/
1251 allocate_counters(struct thread_data **threads, struct core_data **cores, struct pkg_data **packages)
1254 unsigned int total_threads, total_cores;
1256 if ((topology.num_threads == 0)
1257 || (topology.num_cores == 0)
1258 || (topology.num_packages == 0))
1260 ERROR ("turbostat plugin: Invalid topology: %u threads, %u cores, %u packages",
1261 topology.num_threads, topology.num_cores, topology.num_packages);
1265 total_threads = topology.num_threads * topology.num_cores * topology.num_packages;
1266 *threads = calloc(total_threads, sizeof(struct thread_data));
1267 if (*threads == NULL)
1269 ERROR ("turbostat plugin: calloc failed");
1273 for (i = 0; i < total_threads; ++i)
1274 (*threads)[i].cpu_id = topology.max_cpu_id + 1;
1276 total_cores = topology.num_cores * topology.num_packages;
1277 *cores = calloc(total_cores, sizeof(struct core_data));
1280 ERROR ("turbostat plugin: calloc failed");
1285 *packages = calloc(topology.num_packages, sizeof(struct pkg_data));
1286 if (*packages == NULL)
1288 ERROR ("turbostat plugin: calloc failed");
1298 init_counter(struct thread_data *thread_base, struct core_data *core_base,
1299 struct pkg_data *pkg_base, unsigned int cpu_id)
1301 struct thread_data *t;
1302 struct core_data *c;
1304 struct cpu_topology *cpu = &topology.cpus[cpu_id];
1306 t = GET_THREAD(thread_base, !(cpu->first_thread_in_core), cpu->core_id, cpu->package_id);
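/*
 * The first thread of a core gets per-core slot 0, any sibling gets slot 1;
 * this scheme assumes at most two hardware threads per core.
 */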
1307 c = GET_CORE(core_base, cpu->core_id, cpu->package_id);
1308 p = GET_PKG(pkg_base, cpu->package_id);
1311 if (cpu->first_thread_in_core)
1312 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
1313 if (cpu->first_core_in_package)
1314 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
1316 c->core_id = cpu->core_id;
1317 p->package_id = cpu->package_id;
1321 initialize_counters(void)
1323 unsigned int cpu_id;
1325 for (cpu_id = 0; cpu_id <= topology.max_cpu_id; ++cpu_id) {
1326 if (cpu_is_not_present(cpu_id))
1328 init_counter(EVEN_COUNTERS, cpu_id);
1329 init_counter(ODD_COUNTERS, cpu_id);
1330 init_counter(DELTA_COUNTERS, cpu_id);
1337 free_all_buffers(void)
1342 CPU_FREE(cpu_present_set);
1343 cpu_present_set = NULL;
1344 cpu_present_setsize = 0;
1346 CPU_FREE(cpu_affinity_set);
1347 cpu_affinity_set = NULL;
1348 cpu_affinity_setsize = 0;
1350 CPU_FREE(cpu_saved_affinity_set);
1351 cpu_saved_affinity_set = NULL;
1352 cpu_saved_affinity_setsize = 0;
1360 package_even = NULL;
1372 free(package_delta);
1374 thread_delta = NULL;
1376 package_delta = NULL;
1380 /**********************
1381 * Collectd functions *
1382 **********************/
1384 #define DO_OR_GOTO_ERR(something) \
1386 ret = (something); \
1391 static int setup_all_buffers(void)
1395 DO_OR_GOTO_ERR(topology_probe());
1396 DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
1397 DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
1398 DO_OR_GOTO_ERR(allocate_counters(&thread_delta, &core_delta, &package_delta));
1399 initialize_counters();
1400 DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
1401 DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, ODD_COUNTERS));
1411 turbostat_read(void)
1416 if ((ret = setup_all_buffers()) < 0)
1420 if (for_all_proc_cpus(cpu_is_not_present)) {
1422 if ((ret = setup_all_buffers()) < 0)
1424 if (for_all_proc_cpus(cpu_is_not_present)) {
1425 ERROR("turbostat plugin: CPU appeared just after "
1431 /* Saving the scheduling affinity, as it will be modified by get_counters */
1432 if (sched_getaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set) != 0) {
1433 ERROR("turbostat plugin: Unable to save the CPU affinity");
1438 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
1440 time_even = cdtime();
1448 if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
1450 time_odd = cdtime();
1452 time_delta = time_odd - time_even;
1453 if ((ret = for_all_cpus_delta(ODD_COUNTERS, EVEN_COUNTERS)) < 0)
1455 if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
1458 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
1460 time_even = cdtime();
1462 time_delta = time_even - time_odd;
1463 if ((ret = for_all_cpus_delta(EVEN_COUNTERS, ODD_COUNTERS)) < 0)
1465 if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
1471 * Let's restore the affinity
1472 * This might fail if the number of CPUs changed, but we can't do anything in that case.
1474 (void)sched_setaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set);
1479 check_permissions(void)
1481 #ifdef HAVE_SYS_CAPABILITY_H
1482 struct __user_cap_header_struct cap_header_data;
1483 cap_user_header_t cap_header = &cap_header_data;
1484 struct __user_cap_data_struct cap_data_data;
1485 cap_user_data_t cap_data = &cap_data_data;
1487 #endif /* HAVE_SYS_CAPABILITY_H */
1489 if (getuid() == 0) {
1490 /* We have everything we need */
1492 #ifndef HAVE_SYS_CAPABILITY_H
1494 ERROR("turbostat plugin: Initialization failed: this plugin "
1495 "requires collectd to run as root");
1498 #else /* HAVE_SYS_CAPABILITY_H */
1501 /* check for CAP_SYS_RAWIO */
1502 cap_header->pid = getpid();
1503 cap_header->version = _LINUX_CAPABILITY_VERSION;
1504 if (capget(cap_header, cap_data) < 0) {
1505 ERROR("turbostat plugin: capget failed");
1509 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
1510 WARNING("turbostat plugin: Collectd doesn't have the "
1511 "CAP_SYS_RAWIO capability. If you don't want to run "
1512 "collectd as root, try running \"setcap "
1513 "cap_sys_rawio=ep\" on collectd binary");
1517 if (euidaccess("/dev/cpu/0/msr", R_OK)) {
1518 WARNING("turbostat plugin: Collectd cannot open"
1519 "/dev/cpu/0/msr. If you don't want to run collectd as "
1520 "root, you need to change the ownership (chown) and "
1521 "permissions on /dev/cpu/*/msr to allow such access");
1526 ERROR("turbostat plugin: Initialization failed: this plugin "
1527 "requires collectd to either to run as root or give "
1528 "collectd a special capability (CAP_SYS_RAWIO) and read "
1529 "access to /dev/cpu/*/msr (see previous warnings)");
1531 #endif /* HAVE_SYS_CAPABILITY_H */
1535 turbostat_init(void)
1540 if (stat("/dev/cpu/0/msr", &sb)) {
1541 ERROR("turbostat plugin: Initialization failed: /dev/cpu/0/msr"
1542 " does not exist while the CPU supports MSR. You may be "
1543 "missing the corresponding kernel module, please try '# "
1548 DO_OR_GOTO_ERR(check_permissions());
1550 DO_OR_GOTO_ERR(probe_cpu());
1552 DO_OR_GOTO_ERR(setup_all_buffers());
1554 plugin_register_read(PLUGIN_NAME, turbostat_read);
1563 turbostat_config(const char *key, const char *value)
1565 long unsigned int tmp_val;
1568 if (strcasecmp("CoreCstates", key) == 0) {
1569 tmp_val = strtoul(value, &end, 0);
1570 if (*end != '\0' || tmp_val > UINT_MAX) {
1571 ERROR("turbostat plugin: Invalid CoreCstates '%s'",
1575 config_core_cstate = (unsigned int) tmp_val;
1576 apply_config_core_cstate = 1;
1577 } else if (strcasecmp("PackageCstates", key) == 0) {
1578 tmp_val = strtoul(value, &end, 0);
1579 if (*end != '\0' || tmp_val > UINT_MAX) {
1580 ERROR("turbostat plugin: Invalid PackageCstates '%s'",
1584 config_pkg_cstate = (unsigned int) tmp_val;
1585 apply_config_pkg_cstate = 1;
1586 } else if (strcasecmp("SystemManagementInterrupt", key) == 0) {
1587 config_smi = IS_TRUE(value);
1588 apply_config_smi = 1;
1589 } else if (strcasecmp("DigitalTemperatureSensor", key) == 0) {
1590 config_dts = IS_TRUE(value);
1591 apply_config_dts = 1;
1592 } else if (strcasecmp("PackageThermalManagement", key) == 0) {
1593 config_ptm = IS_TRUE(value);
1594 apply_config_ptm = 1;
1595 } else if (strcasecmp("RunningAveragePowerLimit", key) == 0) {
1596 tmp_val = strtoul(value, &end, 0);
1597 if (*end != '\0' || tmp_val > UINT_MAX) {
1598 ERROR("turbostat plugin: Invalid RunningAveragePowerLimit '%s'",
1602 config_rapl = (unsigned int) tmp_val;
1603 apply_config_rapl = 1;
1604 } else if (strcasecmp("TCCActivationTemp", key) == 0) {
1605 tmp_val = strtoul(value, &end, 0);
1606 if (*end != '\0' || tmp_val > UINT_MAX) {
1607 ERROR("turbostat plugin: Invalid TCCActivationTemp '%s'",
1611 tcc_activation_temp = (unsigned int) tmp_val;
1613 ERROR("turbostat plugin: Invalid configuration option '%s'",
1620 void module_register(void)
1622 plugin_register_init(PLUGIN_NAME, turbostat_init);
1623 plugin_register_config(PLUGIN_NAME, turbostat_config, config_keys, config_keys_num);