X-Git-Url: https://git.octo.it/?a=blobdiff_plain;f=src%2Fturbostat.c;h=80650dff38539c781bf106a2cdcc462ceaaf3300;hb=363bd0300297144a29b58cc813b23d9353525178;hp=5424a30587721b75327869037d81482561f2b1be;hpb=a1f3e47ec460b5a77b46f15ddfda918ac1b50e80;p=collectd.git diff --git a/src/turbostat.c b/src/turbostat.c index 5424a305..80650dff 100644 --- a/src/turbostat.c +++ b/src/turbostat.c @@ -24,94 +24,125 @@ * Ported to collectd by Vincent Brillault */ +/* + * _GNU_SOURCE is required because of the following functions: + * - CPU_ISSET_S + * - CPU_ZERO_S + * - CPU_SET_S + * - CPU_FREE + * - CPU_ALLOC + * - CPU_ALLOC_SIZE + */ #define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include "collectd.h" #include "common.h" #include "plugin.h" +#include "utils_time.h" + +#include +#include +#ifdef HAVE_SYS_CAPABILITY_H +#include +#endif /* HAVE_SYS_CAPABILITY_H */ #define PLUGIN_NAME "turbostat" -static const char *proc_stat = "/proc/stat"; -static unsigned int interval_sec = 5; /* set with -i interval_sec */ -static unsigned int skip_c0; -static unsigned int skip_c1; -static unsigned int do_nhm_cstates; -static unsigned int do_snb_cstates; -static unsigned int do_c8_c9_c10; -static unsigned int do_slm_cstates; -static unsigned int has_aperf; -static unsigned int has_epb; -static unsigned int units = 1000000000; /* Ghz etc */ -static unsigned int genuine_intel; -static unsigned int has_invariant_tsc; -static unsigned int do_nehalem_platform_info; -static int do_smi; -static unsigned int show_pkg; -static unsigned int show_core; -static unsigned int show_cpu; -static unsigned int do_rapl; -static unsigned int do_dts; -static unsigned int do_ptm; +/* + * This tool uses the Model-Specific Registers (MSRs) present on Intel processors. + * The general description each of these registers, depending on the architecture, + * can be found in the Intel® 64 and IA-32 Architectures Software Developer Manual, + * Volume 3 Chapter 35. + */ + +/* + * If set, aperf_mperf_unstable disables a/mperf based stats. + * This includes: C0 & C1 states, frequency + * + * This value is automatically set if mperf or aperf go backward + */ +static _Bool aperf_mperf_unstable; + +/* + * Bitmask of the list of core C states supported by the processor. + * Currently supported C-states (by this plugin): 3, 6, 7 + */ +static unsigned int do_core_cstate; +static unsigned int config_core_cstate; +static _Bool apply_config_core_cstate; + +/* + * Bitmask of the list of pacages C states supported by the processor. 
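+ * Bit n of this mask corresponds to package C-state n, e.g. bit 2 covers
+ * PC2 (MSR_PKG_C2_RESIDENCY), as used in get_counters() below.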
+ * Currently supported C-states (by this plugin): 2, 3, 6, 7, 8, 9, 10 + */ +static unsigned int do_pkg_cstate; +static unsigned int config_pkg_cstate; +static _Bool apply_config_pkg_cstate; + +/* + * Boolean indicating if the processor supports 'I/O System-Management Interrupt counter' + */ +static _Bool do_smi; +static _Bool config_smi; +static _Bool apply_config_smi; + +/* + * Boolean indicating if the processor supports 'Digital temperature sensor' + * This feature enables the monitoring of the temperature of each core + * + * This feature has two limitations: + * - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong + * - Temperatures above the tcc_activation_temp are not recorded + */ +static _Bool do_dts; +static _Bool config_dts; +static _Bool apply_config_dts; + +/* + * Boolean indicating if the processor supports 'Package thermal management' + * This feature allows the monitoring of the temperature of each package + * + * This feature has two limitations: + * - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong + * - Temperatures above the tcc_activation_temp are not recorded + */ +static _Bool do_ptm; +static _Bool config_ptm; +static _Bool apply_config_ptm; + +/* + * Thermal Control Circuit Activation Temperature as configured by the user. + * This override the automated detection via MSR_IA32_TEMPERATURE_TARGET + * and should only be used if the automated detection fails. + */ static unsigned int tcc_activation_temp; -static unsigned int tcc_activation_temp_override; -static double rapl_power_units, rapl_energy_units, rapl_time_units; -static double rapl_joule_counter_range; + +static unsigned int do_rapl; +static unsigned int config_rapl; +static _Bool apply_config_rapl; +static double rapl_energy_units; #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ /* 0x611 MSR_PKG_ENERGY_STATUS */ -#define RAPL_PKG_PERF_STATUS (1 << 1) - /* 0x613 MSR_PKG_PERF_STATUS */ -#define RAPL_PKG_POWER_INFO (1 << 2) - /* 0x614 MSR_PKG_POWER_INFO */ - -#define RAPL_DRAM (1 << 3) +#define RAPL_DRAM (1 << 1) /* 0x618 MSR_DRAM_POWER_LIMIT */ /* 0x619 MSR_DRAM_ENERGY_STATUS */ /* 0x61c MSR_DRAM_POWER_INFO */ -#define RAPL_DRAM_PERF_STATUS (1 << 4) - /* 0x61b MSR_DRAM_PERF_STATUS */ - -#define RAPL_CORES (1 << 5) +#define RAPL_CORES (1 << 2) /* 0x638 MSR_PP0_POWER_LIMIT */ /* 0x639 MSR_PP0_ENERGY_STATUS */ -#define RAPL_CORE_POLICY (1 << 6) - /* 0x63a MSR_PP0_POLICY */ - -#define RAPL_GFX (1 << 7) +#define RAPL_GFX (1 << 3) /* 0x640 MSR_PP1_POWER_LIMIT */ /* 0x641 MSR_PP1_ENERGY_STATUS */ /* 0x642 MSR_PP1_POLICY */ #define TJMAX_DEFAULT 100 -int aperf_mperf_unstable; -int backwards_count; -char *progname; - -cpu_set_t *cpu_present_set, *cpu_affinity_set; -size_t cpu_present_setsize, cpu_affinity_setsize; +static cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_saved_affinity_set; +static size_t cpu_present_setsize, cpu_affinity_setsize, cpu_saved_affinity_setsize; -struct thread_data { +static struct thread_data { unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; @@ -121,17 +152,17 @@ struct thread_data { unsigned int flags; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 -} *thread_even, *thread_odd; +} *thread_delta, *thread_even, *thread_odd; -struct core_data { +static struct core_data { unsigned long long c3; unsigned long long c6; unsigned long long c7; unsigned int core_temp_c; unsigned int core_id; -} *core_even, *core_odd; +} *core_delta, *core_even, 
*core_odd; -struct pkg_data { +static struct pkg_data { unsigned long long pc2; unsigned long long pc3; unsigned long long pc6; @@ -140,16 +171,15 @@ struct pkg_data { unsigned long long pc9; unsigned long long pc10; unsigned int package_id; - unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ - unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ - unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */ - unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */ - unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ - unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ + uint32_t energy_pkg; /* MSR_PKG_ENERGY_STATUS */ + uint32_t energy_dram; /* MSR_DRAM_ENERGY_STATUS */ + uint32_t energy_cores; /* MSR_PP0_ENERGY_STATUS */ + uint32_t energy_gfx; /* MSR_PP1_ENERGY_STATUS */ + unsigned int tcc_activation_temp; unsigned int pkg_temp_c; +} *package_delta, *package_even, *package_odd; -} *package_even, *package_odd; - +#define DELTA_COUNTERS thread_delta, core_delta, package_delta #define ODD_COUNTERS thread_odd, core_odd, package_odd #define EVEN_COUNTERS thread_even, core_even, package_even static _Bool is_even = 1; @@ -158,547 +188,487 @@ static _Bool allocated = 0; static _Bool initialized = 0; #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \ - (thread_base + (pkg_no) * topo.num_cores_per_pkg * \ - topo.num_threads_per_core + \ - (core_no) * topo.num_threads_per_core + (thread_no)) + (thread_base + \ + (pkg_no) * topology.num_cores * topology.num_threads + \ + (core_no) * topology.num_threads + \ + (thread_no)) #define GET_CORE(core_base, core_no, pkg_no) \ - (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no)) + (core_base + \ + (pkg_no) * topology.num_cores + \ + (core_no)) #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) -struct topo_params { - int num_packages; - int num_cpus; - int num_cores; - int max_cpu_num; - int num_cores_per_pkg; - int num_threads_per_core; -} topo; - -struct timeval tv_even, tv_odd, tv_delta; - -enum return_values { - OK = 0, - ERR_CPU_MIGRATE, - ERR_MSR_IA32_APERF, - ERR_MSR_IA32_MPERF, - ERR_MSR_SMI_COUNT, - ERR_MSR_CORE_C3_RESIDENCY, - ERR_MSR_CORE_C6_RESIDENCY, - ERR_MSR_CORE_C7_RESIDENCY, - ERR_MSR_IA32_THERM_STATUS, - ERR_MSR_PKG_C3_RESIDENCY, - ERR_MSR_PKG_C6_RESIDENCY, - ERR_MSR_PKG_C2_RESIDENCY, - ERR_MSR_PKG_C7_RESIDENCY, - ERR_MSR_PKG_C8_RESIDENCY, - ERR_MSR_PKG_C9_RESIDENCY, - ERR_MSR_PKG_C10_RESIDENCY, - ERR_MSR_PKG_ENERGY_STATUS, - ERR_MSR_PP0_ENERGY_STATUS, - ERR_MSR_DRAM_ENERGY_STATUS, - ERR_MSR_PP1_ENERGY_STATUS, - ERR_MSR_PKG_PERF_STATUS, - ERR_MSR_DRAM_PERF_STATUS, - ERR_MSR_IA32_PACKAGE_THERM_STATUS, - ERR_CPU_NOT_PRESENT, - ERR_NO_MSR, - ERR_CANT_OPEN_FILE, - ERR_CANT_READ_NUMBER, - ERR_CANT_READ_PROC_STAT, - ERR_NO_INVARIANT_TSC, - ERR_NO_APERF, - ERR_CALLOC, - ERR_CPU_ALLOC, - ERR_NOT_ROOT, +struct cpu_topology { + unsigned int package_id; + unsigned int core_id; + _Bool first_core_in_package; + _Bool first_thread_in_core; }; -static int setup_all_buffers(void); +static struct topology { + unsigned int max_cpu_id; + unsigned int num_packages; + unsigned int num_cores; + unsigned int num_threads; + struct cpu_topology *cpus; +} topology; -static int -cpu_is_not_present(int cpu) +static cdtime_t time_even, time_odd, time_delta; + +static const char *config_keys[] = { - return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); -} + "CoreCstates", + "PackageCstates", + "SystemManagementInterrupt", + "DigitalTemperatureSensor", + "PackageThermalManagement", + "TCCActivationTemp", + 
"RunningAveragePowerLimit", +}; +static const int config_keys_num = STATIC_ARRAY_SIZE (config_keys); + +/***************************** + * MSR Manipulation helpers * + *****************************/ + /* - * run func(thread, core, package) in topology order - * skip non-present cpus + * Open a MSR device for reading + * Can change the scheduling affinity of the current process if multiple_read is 1 */ - static int __attribute__((warn_unused_result)) -for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), - struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) +open_msr(unsigned int cpu, _Bool multiple_read) { - int retval, pkg_no, core_no, thread_no; + char pathname[32]; + int fd; - for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { - for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { - for (thread_no = 0; thread_no < - topo.num_threads_per_core; ++thread_no) { - struct thread_data *t; - struct core_data *c; - struct pkg_data *p; + /* + * If we need to do multiple read, let's migrate to the CPU + * Otherwise, we would lose time calling functions on another CPU + * + * If we are not yet initialized (cpu_affinity_setsize = 0), + * we need to skip this optimisation. + */ + if (multiple_read && cpu_affinity_setsize) { + CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); + CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); + if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) { + ERROR("turbostat plugin: Could not migrate to CPU %d", cpu); + return -1; + } + } - t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); + ssnprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu); + fd = open(pathname, O_RDONLY); + if (fd < 0) { + ERROR("turbostat plugin: failed to open %s", pathname); + return -1; + } + return fd; +} - if (cpu_is_not_present(t->cpu_id)) - continue; +/* + * Read a single MSR from an open file descriptor + */ +static int __attribute__((warn_unused_result)) +read_msr(int fd, off_t offset, unsigned long long *msr) +{ + ssize_t retval; - c = GET_CORE(core_base, core_no, pkg_no); - p = GET_PKG(pkg_base, pkg_no); + retval = pread(fd, msr, sizeof *msr, offset); - retval = func(t, c, p); - if (retval) - return retval; - } - } + if (retval != sizeof *msr) { + ERROR("turbostat plugin: MSR offset 0x%llx read failed", + (unsigned long long)offset); + return -1; } return 0; } -static int __attribute__((warn_unused_result)) -cpu_migrate(int cpu) +/* + * Open a MSR device for reading, read the value asked for and close it. + * This call will not affect the scheduling affinity of this thread. 
+ */ +static ssize_t __attribute__((warn_unused_result)) +get_msr(unsigned int cpu, off_t offset, unsigned long long *msr) { - CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); - CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); - if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) - return -ERR_CPU_MIGRATE; - else - return 0; + ssize_t retval; + int fd; + + fd = open_msr(cpu, 0); + if (fd < 0) + return fd; + retval = read_msr(fd, offset, msr); + close(fd); + return retval; } + +/******************************** + * Raw data acquisition (1 CPU) * + ********************************/ + +/* + * Read every data avalaible for a single CPU + * + * Core data is shared for all threads in one core: extracted only for the first thread + * Package data is shared for all core in one package: extracted only for the first thread of the first core + * + * Side effect: migrates to the targeted CPU + */ static int __attribute__((warn_unused_result)) -get_msr(int cpu, off_t offset, unsigned long long *msr) +get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { - ssize_t retval; - char pathname[32]; - int fd; + unsigned int cpu = t->cpu_id; + unsigned long long msr; + int msr_fd; + int retval = 0; + + msr_fd = open_msr(cpu, 1); + if (msr_fd < 0) + return msr_fd; + +#define READ_MSR(msr, dst) \ +do { \ + if (read_msr(msr_fd, msr, dst)) { \ + ERROR("turbostat plugin: Unable to read " #msr); \ + retval = -1; \ + goto out; \ + } \ +} while (0) - ssnprintf(pathname, 32, "/dev/cpu/%d/msr", cpu); - fd = open(pathname, O_RDONLY); - if (fd < 0) - return -1; + READ_MSR(MSR_IA32_TSC, &t->tsc); - retval = pread(fd, msr, sizeof *msr, offset); - close(fd); + READ_MSR(MSR_IA32_APERF, &t->aperf); + READ_MSR(MSR_IA32_MPERF, &t->mperf); - if (retval != sizeof *msr) { - ERROR ("%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset); - return -1; + if (do_smi) { + READ_MSR(MSR_SMI_COUNT, &msr); + t->smi_count = msr & 0xFFFFFFFF; } - return 0; -} + /* collect core counters only for 1st thread in core */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) { + retval = 0; + goto out; + } -#define DELTA_WRAP32(new, old) \ - if (new > old) { \ - old = new - old; \ - } else { \ - old = 0x100000000 + new - old; \ + if (do_core_cstate & (1 << 3)) + READ_MSR(MSR_CORE_C3_RESIDENCY, &c->c3); + if (do_core_cstate & (1 << 6)) + READ_MSR(MSR_CORE_C6_RESIDENCY, &c->c6); + if (do_core_cstate & (1 << 7)) + READ_MSR(MSR_CORE_C7_RESIDENCY, &c->c7); + + if (do_dts) { + READ_MSR(MSR_IA32_THERM_STATUS, &msr); + c->core_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F); } -static void -delta_package(struct pkg_data *new, struct pkg_data *old) + /* collect package counters only for 1st core in package */ + if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { + retval = 0; + goto out; + } + + if (do_pkg_cstate & (1 << 2)) + READ_MSR(MSR_PKG_C2_RESIDENCY, &p->pc2); + if (do_pkg_cstate & (1 << 3)) + READ_MSR(MSR_PKG_C3_RESIDENCY, &p->pc3); + if (do_pkg_cstate & (1 << 6)) + READ_MSR(MSR_PKG_C6_RESIDENCY, &p->pc6); + if (do_pkg_cstate & (1 << 7)) + READ_MSR(MSR_PKG_C7_RESIDENCY, &p->pc7); + if (do_pkg_cstate & (1 << 8)) + READ_MSR(MSR_PKG_C8_RESIDENCY, &p->pc8); + if (do_pkg_cstate & (1 << 9)) + READ_MSR(MSR_PKG_C9_RESIDENCY, &p->pc9); + if (do_pkg_cstate & (1 << 10)) + READ_MSR(MSR_PKG_C10_RESIDENCY, &p->pc10); + + if (do_rapl & RAPL_PKG) { + READ_MSR(MSR_PKG_ENERGY_STATUS, &msr); + p->energy_pkg = msr & 0xFFFFFFFF; + } + if (do_rapl & RAPL_CORES) { + READ_MSR(MSR_PP0_ENERGY_STATUS, 
&msr); + p->energy_cores = msr & 0xFFFFFFFF; + } + if (do_rapl & RAPL_DRAM) { + READ_MSR(MSR_DRAM_ENERGY_STATUS, &msr); + p->energy_dram = msr & 0xFFFFFFFF; + } + if (do_rapl & RAPL_GFX) { + READ_MSR(MSR_PP1_ENERGY_STATUS, &msr); + p->energy_gfx = msr & 0xFFFFFFFF; + } + if (do_ptm) { + READ_MSR(MSR_IA32_PACKAGE_THERM_STATUS, &msr); + p->pkg_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F); + } + +out: + close(msr_fd); + return retval; +} + + +/********************************** + * Evaluating the changes (1 CPU) * + **********************************/ + +/* + * Extract the evolution old->new in delta at a package level + * (some are not new-delta, e.g. temperature) + */ +static inline void +delta_package(struct pkg_data *delta, const struct pkg_data *new, const struct pkg_data *old) { - old->pc2 = new->pc2 - old->pc2; - old->pc3 = new->pc3 - old->pc3; - old->pc6 = new->pc6 - old->pc6; - old->pc7 = new->pc7 - old->pc7; - old->pc8 = new->pc8 - old->pc8; - old->pc9 = new->pc9 - old->pc9; - old->pc10 = new->pc10 - old->pc10; - old->pkg_temp_c = new->pkg_temp_c; - - DELTA_WRAP32(new->energy_pkg, old->energy_pkg); - DELTA_WRAP32(new->energy_cores, old->energy_cores); - DELTA_WRAP32(new->energy_gfx, old->energy_gfx); - DELTA_WRAP32(new->energy_dram, old->energy_dram); - DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status); - DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status); + delta->pc2 = new->pc2 - old->pc2; + delta->pc3 = new->pc3 - old->pc3; + delta->pc6 = new->pc6 - old->pc6; + delta->pc7 = new->pc7 - old->pc7; + delta->pc8 = new->pc8 - old->pc8; + delta->pc9 = new->pc9 - old->pc9; + delta->pc10 = new->pc10 - old->pc10; + delta->pkg_temp_c = new->pkg_temp_c; + + delta->energy_pkg = new->energy_pkg - old->energy_pkg; + delta->energy_cores = new->energy_cores - old->energy_cores; + delta->energy_gfx = new->energy_gfx - old->energy_gfx; + delta->energy_dram = new->energy_dram - old->energy_dram; } -static void -delta_core(struct core_data *new, struct core_data *old) +/* + * Extract the evolution old->new in delta at a core level + * (some are not new-delta, e.g. temperature) + */ +static inline void +delta_core(struct core_data *delta, const struct core_data *new, const struct core_data *old) { - old->c3 = new->c3 - old->c3; - old->c6 = new->c6 - old->c6; - old->c7 = new->c7 - old->c7; - old->core_temp_c = new->core_temp_c; + delta->c3 = new->c3 - old->c3; + delta->c6 = new->c6 - old->c6; + delta->c7 = new->c7 - old->c7; + delta->core_temp_c = new->core_temp_c; } /* - * old = new - old + * Extract the evolution old->new in delta at a package level + * core_delta is required for c1 estimation (tsc - c0 - all core cstates) */ -static int __attribute__((warn_unused_result)) -delta_thread(struct thread_data *new, struct thread_data *old, - struct core_data *core_delta) +static inline int __attribute__((warn_unused_result)) +delta_thread(struct thread_data *delta, const struct thread_data *new, const struct thread_data *old, + const struct core_data *core_delta) { - old->tsc = new->tsc - old->tsc; + delta->tsc = new->tsc - old->tsc; /* check for TSC < 1 Mcycles over interval */ - if (old->tsc < (1000 * 1000)) { - WARNING("Insanely slow TSC rate, TSC stops in idle?\n" - "You can disable all c-states by booting with \"idle=poll\"\n" - "or just the deep ones with \"processor.max_cstate=1\""); + if (delta->tsc < (1000 * 1000)) { + WARNING("turbostat plugin: Insanely slow TSC rate, TSC stops " + "in idle? 
You can disable all c-states by booting with" + " 'idle=poll' or just the deep ones with" + " 'processor.max_cstate=1'"); return -1; } - old->c1 = new->c1 - old->c1; + delta->c1 = new->c1 - old->c1; if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { - old->aperf = new->aperf - old->aperf; - old->mperf = new->mperf - old->mperf; + delta->aperf = new->aperf - old->aperf; + delta->mperf = new->mperf - old->mperf; } else { - if (!aperf_mperf_unstable) { - WARNING("%s: APERF or MPERF went backwards *\n", progname); - WARNING("* Frequency results do not cover entire interval *\n"); - WARNING("* fix this by running Linux-2.6.30 or later *\n"); + WARNING("turbostat plugin: APERF or MPERF went " + "backwards. Frequency results do not cover " + "the entire interval. Fix this by running " + "Linux-2.6.30 or later."); aperf_mperf_unstable = 1; } - /* - * mperf delta is likely a huge "positive" number - * can not use it for calculating c0 time - */ - skip_c0 = 1; - skip_c1 = 1; } - /* * As counter collection is not atomic, * it is possible for mperf's non-halted cycles + idle states * to exceed TSC's all cycles: show c1 = 0% in that case. */ - if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) - old->c1 = 0; + if ((delta->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > delta->tsc) + delta->c1 = 0; else { /* normal case, derive c1 */ - old->c1 = old->tsc - old->mperf - core_delta->c3 + delta->c1 = delta->tsc - delta->mperf - core_delta->c3 - core_delta->c6 - core_delta->c7; } - if (old->mperf == 0) { - WARNING("cpu%d MPERF 0!\n", old->cpu_id); - old->mperf = 1; /* divide by 0 protection */ + if (delta->mperf == 0) { + WARNING("turbostat plugin: cpu%d MPERF 0!", old->cpu_id); + delta->mperf = 1; /* divide by 0 protection */ } if (do_smi) - old->smi_count = new->smi_count - old->smi_count; + delta->smi_count = new->smi_count - old->smi_count; return 0; } -static int __attribute__((warn_unused_result)) -delta_cpu(struct thread_data *t, struct core_data *c, - struct pkg_data *p, struct thread_data *t2, - struct core_data *c2, struct pkg_data *p2) -{ - int ret; - - /* calculate core delta only for 1st thread in core */ - if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE) - delta_core(c, c2); - - /* always calculate thread delta */ - ret = delta_thread(t, t2, c2); /* c2 is core delta */ - if (ret != 0) - return ret; - - /* calculate package delta only for 1st core in package */ - if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) - delta_package(p, p2); - - return 0; -} +/********************************** + * Submitting the results (1 CPU) * + **********************************/ -static unsigned long long -rdtsc(void) +/* + * Submit one gauge value + */ +static void +turbostat_submit (const char *plugin_instance, + const char *type, const char *type_instance, + gauge_t value) { - unsigned int low, high; + value_list_t vl = VALUE_LIST_INIT; + value_t v; - asm volatile("rdtsc" : "=a" (low), "=d" (high)); + v.gauge = value; + vl.values = &v; + vl.values_len = 1; + sstrncpy (vl.host, hostname_g, sizeof (vl.host)); + sstrncpy (vl.plugin, PLUGIN_NAME, sizeof (vl.plugin)); + if (plugin_instance != NULL) + sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance)); + sstrncpy (vl.type, type, sizeof (vl.type)); + if (type_instance != NULL) + sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance)); - return low | ((unsigned long long)high) << 32; + plugin_dispatch_values (&vl); } - /* - * get_counters(...) 
- * migrate to cpu - * acquire and record local counters for that cpu + * Submit every data for a single CPU + * + * Core data is shared for all threads in one core: submitted only for the first thread + * Package data is shared for all core in one package: submitted only for the first thread of the first core */ -static int __attribute__((warn_unused_result)) -get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) +static int +submit_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { - int cpu = t->cpu_id; - unsigned long long msr; + char name[DATA_MAX_NAME_LEN]; + double interval_float; - if (cpu_migrate(cpu)) { - WARNING("Could not migrate to CPU %d\n", cpu); - return -ERR_CPU_MIGRATE; - } + interval_float = CDTIME_T_TO_DOUBLE(time_delta); - t->tsc = rdtsc(); /* we are running on local CPU of interest */ + ssnprintf(name, sizeof(name), "cpu%02d", t->cpu_id); - if (has_aperf) { - if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) - return -ERR_MSR_IA32_APERF; - if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) - return -ERR_MSR_IA32_MPERF; - } + if (!aperf_mperf_unstable) + turbostat_submit(name, "percent", "c0", 100.0 * t->mperf/t->tsc); + if (!aperf_mperf_unstable) + turbostat_submit(name, "percent", "c1", 100.0 * t->c1/t->tsc); - if (do_smi) { - if (get_msr(cpu, MSR_SMI_COUNT, &msr)) - return -ERR_MSR_SMI_COUNT; - t->smi_count = msr & 0xFFFFFFFF; - } + turbostat_submit(name, "frequency", "average", 1.0 / 1000000 * t->aperf / interval_float); - /* collect core counters only for 1st thread in core */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) - return 0; + if ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc))) + turbostat_submit(name, "frequency", "busy", 1.0 * t->tsc / 1000000 * t->aperf / t->mperf / interval_float); - if (do_nhm_cstates && !do_slm_cstates) { - if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) - return -ERR_MSR_CORE_C3_RESIDENCY; - } + /* Sanity check (should stay stable) */ + turbostat_submit(name, "gauge", "TSC", 1.0 * t->tsc / 1000000 / interval_float); - if (do_nhm_cstates) { - if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) - return -ERR_MSR_CORE_C6_RESIDENCY; - } + /* SMI */ + if (do_smi) + turbostat_submit(name, "count", NULL, t->smi_count); + + /* submit per-core data only for 1st thread in core */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + goto done; - if (do_snb_cstates) - if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) - return -ERR_MSR_CORE_C7_RESIDENCY; + ssnprintf(name, sizeof(name), "core%02d", c->core_id); - if (do_dts) { - if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) - return -ERR_MSR_IA32_THERM_STATUS; - c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); - } + if (do_core_cstate & (1 << 3)) + turbostat_submit(name, "percent", "c3", 100.0 * c->c3/t->tsc); + if (do_core_cstate & (1 << 6)) + turbostat_submit(name, "percent", "c6", 100.0 * c->c6/t->tsc); + if (do_core_cstate & (1 << 7)) + turbostat_submit(name, "percent", "c7", 100.0 * c->c7/t->tsc); + if (do_dts) + turbostat_submit(name, "temperature", NULL, c->core_temp_c); - /* collect package counters only for 1st core in package */ + /* submit per-package data only for 1st core in package */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) - return 0; + goto done; - if (do_nhm_cstates && !do_slm_cstates) { - if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) - return -ERR_MSR_PKG_C3_RESIDENCY; - if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) - return -ERR_MSR_PKG_C6_RESIDENCY; - } - if (do_snb_cstates) { - if 
(get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2)) - return -ERR_MSR_PKG_C2_RESIDENCY; - if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) - return -ERR_MSR_PKG_C7_RESIDENCY; - } - if (do_c8_c9_c10) { - if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) - return -ERR_MSR_PKG_C8_RESIDENCY; - if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) - return -ERR_MSR_PKG_C9_RESIDENCY; - if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) - return -ERR_MSR_PKG_C10_RESIDENCY; - } - if (do_rapl & RAPL_PKG) { - if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) - return -ERR_MSR_PKG_ENERGY_STATUS; - p->energy_pkg = msr & 0xFFFFFFFF; - } - if (do_rapl & RAPL_CORES) { - if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr)) - return MSR_PP0_ENERGY_STATUS; - p->energy_cores = msr & 0xFFFFFFFF; - } - if (do_rapl & RAPL_DRAM) { - if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr)) - return -ERR_MSR_DRAM_ENERGY_STATUS; - p->energy_dram = msr & 0xFFFFFFFF; - } - if (do_rapl & RAPL_GFX) { - if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr)) - return -ERR_MSR_PP1_ENERGY_STATUS; - p->energy_gfx = msr & 0xFFFFFFFF; - } - if (do_rapl & RAPL_PKG_PERF_STATUS) { - if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr)) - return -ERR_MSR_PKG_PERF_STATUS; - p->rapl_pkg_perf_status = msr & 0xFFFFFFFF; - } - if (do_rapl & RAPL_DRAM_PERF_STATUS) { - if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr)) - return -ERR_MSR_DRAM_PERF_STATUS; - p->rapl_dram_perf_status = msr & 0xFFFFFFFF; - } - if (do_ptm) { - if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) - return -ERR_MSR_IA32_PACKAGE_THERM_STATUS; - p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); + ssnprintf(name, sizeof(name), "pkg%02d", p->package_id); + + if (do_ptm) + turbostat_submit(name, "temperature", NULL, p->pkg_temp_c); + + if (do_pkg_cstate & (1 << 2)) + turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2/t->tsc); + if (do_pkg_cstate & (1 << 3)) + turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3/t->tsc); + if (do_pkg_cstate & (1 << 6)) + turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6/t->tsc); + if (do_pkg_cstate & (1 << 7)) + turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7/t->tsc); + if (do_pkg_cstate & (1 << 8)) + turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8/t->tsc); + if (do_pkg_cstate & (1 << 9)) + turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9/t->tsc); + if (do_pkg_cstate & (1 << 10)) + turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10/t->tsc); + + if (do_rapl) { + if (do_rapl & RAPL_PKG) + turbostat_submit(name, "power", "pkg", p->energy_pkg * rapl_energy_units / interval_float); + if (do_rapl & RAPL_CORES) + turbostat_submit(name, "power", "cores", p->energy_cores * rapl_energy_units / interval_float); + if (do_rapl & RAPL_GFX) + turbostat_submit(name, "power", "GFX", p->energy_gfx * rapl_energy_units / interval_float); + if (do_rapl & RAPL_DRAM) + turbostat_submit(name, "power", "DRAM", p->energy_dram * rapl_energy_units / interval_float); } +done: return 0; } -static void -free_all_buffers(void) -{ - allocated = 0; - initialized = 0; - CPU_FREE(cpu_present_set); - cpu_present_set = NULL; - cpu_present_set = 0; - - CPU_FREE(cpu_affinity_set); - cpu_affinity_set = NULL; - cpu_affinity_setsize = 0; - - free(thread_even); - free(core_even); - free(package_even); - - thread_even = NULL; - core_even = NULL; - package_even = NULL; - - free(thread_odd); - free(core_odd); - free(package_odd); - - thread_odd = NULL; - core_odd = NULL; - package_odd = NULL; -} - -/* - * Parse a file containing a single int. 
- */ -static int __attribute__ ((format(printf,1,2))) -parse_int_file(const char *fmt, ...) -{ - va_list args; - char path[PATH_MAX]; - FILE *filep; - int value; - - va_start(args, fmt); - vsnprintf(path, sizeof(path), fmt, args); - va_end(args); - filep = fopen(path, "r"); - if (!filep) { - ERROR("%s: open failed", path); - return -ERR_CANT_OPEN_FILE; - } - if (fscanf(filep, "%d", &value) != 1) { - ERROR("%s: failed to parse number from file", path); - return -ERR_CANT_READ_NUMBER; - } - fclose(filep); - return value; -} - -/* - * cpu_is_first_sibling_in_core(cpu) - * return 1 if given CPU is 1st HT sibling in the core - */ -static int -cpu_is_first_sibling_in_core(int cpu) -{ - return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); -} +/********************************** + * Looping function over all CPUs * + **********************************/ /* - * cpu_is_first_core_in_package(cpu) - * return 1 if given CPU is 1st core in package + * Check if a given cpu id is in our compiled list of existing CPUs */ static int -cpu_is_first_core_in_package(int cpu) -{ - return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu); -} - -static int -get_physical_package_id(int cpu) -{ - return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); -} - -static int -get_core_id(int cpu) +cpu_is_not_present(unsigned int cpu) { - return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); -} - -static int -get_num_ht_siblings(int cpu) -{ - char path[80]; - FILE *filep; - int sib1, sib2; - int matches; - char character; - - ssnprintf(path, 80, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); - filep = fopen(path, "r"); - if (!filep) { - ERROR("%s: open failed", path); - return -ERR_CANT_OPEN_FILE; - } - /* - * file format: - * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4) - * otherwinse 1 sibling (self). 
- */ - matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2); - - fclose(filep); - - if (matches == 3) - return 2; - else - return 1; + return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); } /* - * run func(thread, core, package) in topology order - * skip non-present cpus + * Loop on all CPUs in topological order + * + * Skip non-present cpus + * Return the error code at the first error or 0 */ - - static int __attribute__((warn_unused_result)) -for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, - struct pkg_data *, struct thread_data *, struct core_data *, - struct pkg_data *), struct thread_data *thread_base, - struct core_data *core_base, struct pkg_data *pkg_base, - struct thread_data *thread_base2, struct core_data *core_base2, - struct pkg_data *pkg_base2) +for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), + struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) { - int retval, pkg_no, core_no, thread_no; + int retval; + unsigned int pkg_no, core_no, thread_no; - for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { - for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { - for (thread_no = 0; thread_no < - topo.num_threads_per_core; ++thread_no) { - struct thread_data *t, *t2; - struct core_data *c, *c2; - struct pkg_data *p, *p2; + for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) { + for (core_no = 0; core_no < topology.num_cores; ++core_no) { + for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) { + struct thread_data *t; + struct core_data *c; + struct pkg_data *p; t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); if (cpu_is_not_present(t->cpu_id)) continue; - t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no); - c = GET_CORE(core_base, core_no, pkg_no); - c2 = GET_CORE(core_base2, core_no, pkg_no); - p = GET_PKG(pkg_base, pkg_no); - p2 = GET_PKG(pkg_base2, pkg_no); - retval = func(t, c, p, t2, c2, p2); + retval = func(t, c, p); if (retval) return retval; } @@ -708,790 +678,867 @@ for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, } /* - * run func(cpu) on every cpu in /proc/stat - * return max_cpu number + * Dedicated loop: Extract every data evolution for all CPU + * + * Skip non-present cpus + * Return the error code at the first error or 0 + * + * Core data is shared for all threads in one core: extracted only for the first thread + * Package data is shared for all core in one package: extracted only for the first thread of the first core */ static int __attribute__((warn_unused_result)) -for_all_proc_cpus(int (func)(int)) +for_all_cpus_delta(const struct thread_data *thread_new_base, const struct core_data *core_new_base, const struct pkg_data *pkg_new_base, + const struct thread_data *thread_old_base, const struct core_data *core_old_base, const struct pkg_data *pkg_old_base) { - FILE *fp; - int cpu_num; int retval; + unsigned int pkg_no, core_no, thread_no; + + for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) { + for (core_no = 0; core_no < topology.num_cores; ++core_no) { + for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) { + struct thread_data *t_delta; + const struct thread_data *t_old, *t_new; + struct core_data *c_delta; + + /* Get correct pointers for threads */ + t_delta = GET_THREAD(thread_delta, thread_no, core_no, pkg_no); + t_new = GET_THREAD(thread_new_base, thread_no, core_no, pkg_no); + t_old = GET_THREAD(thread_old_base, thread_no, core_no, pkg_no); + + /* 
Skip threads that disappeared */ + if (cpu_is_not_present(t_delta->cpu_id)) + continue; - fp = fopen(proc_stat, "r"); - if (!fp) { - ERROR("%s: open failed", proc_stat); - return -ERR_CANT_OPEN_FILE; - } + /* c_delta is always required for delta_thread */ + c_delta = GET_CORE(core_delta, core_no, pkg_no); - retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); - if (retval != 0) { - ERROR("%s: failed to parse format", proc_stat); - return -ERR_CANT_READ_PROC_STAT; - } + /* calculate core delta only for 1st thread in core */ + if (t_new->flags & CPU_IS_FIRST_THREAD_IN_CORE) { + const struct core_data *c_old, *c_new; - while (1) { - retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num); - if (retval != 1) - break; + c_new = GET_CORE(core_new_base, core_no, pkg_no); + c_old = GET_CORE(core_old_base, core_no, pkg_no); - retval = func(cpu_num); - if (retval) { - fclose(fp); - return(retval); - } - } - fclose(fp); - return 0; -} + delta_core(c_delta, c_new, c_old); + } -/* - * count_cpus() - * remember the last one seen, it will be the max - */ -static int -count_cpus(int cpu) -{ - if (topo.max_cpu_num < cpu) - topo.max_cpu_num = cpu; + /* Always calculate thread delta */ + retval = delta_thread(t_delta, t_new, t_old, c_delta); + if (retval) + return retval; - topo.num_cpus += 1; - return 0; -} -static int -mark_cpu_present(int cpu) -{ - CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); - return 0; -} + /* calculate package delta only for 1st core in package */ + if (t_new->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) { + struct pkg_data *p_delta; + const struct pkg_data *p_old, *p_new; + p_delta = GET_PKG(package_delta, pkg_no); + p_new = GET_PKG(pkg_new_base, pkg_no); + p_old = GET_PKG(pkg_old_base, pkg_no); -static void -turbostat_submit (const char *plugin_instance, - const char *type, const char *type_instance, - gauge_t value) -{ - value_list_t vl = VALUE_LIST_INIT; - value_t v; + delta_package(p_delta, p_new, p_old); + } + } + } + } + return 0; +} - v.gauge = value; - vl.values = &v; - vl.values_len = 1; - sstrncpy (vl.host, hostname_g, sizeof (vl.host)); - sstrncpy (vl.plugin, PLUGIN_NAME, sizeof (vl.plugin)); - if (plugin_instance != NULL) - sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance)); - sstrncpy (vl.type, type, sizeof (vl.type)); - if (type_instance != NULL) - sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance)); - plugin_dispatch_values (&vl); -} +/*************** + * CPU Probing * + ***************/ /* - * column formatting convention & formats - * package: "pk" 2 columns %2d - * core: "cor" 3 columns %3d - * CPU: "CPU" 3 columns %3d - * Pkg_W: %6.2 - * Cor_W: %6.2 - * GFX_W: %5.2 - * RAM_W: %5.2 - * GHz: "GHz" 3 columns %3.2 - * TSC: "TSC" 3 columns %3.2 - * SMI: "SMI" 4 columns %4d - * percentage " %pc3" %6.2 - * Perf Status percentage: %5.2 - * "CTMP" 4 columns %4d + * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where + * the Thermal Control Circuit (TCC) activates. + * This is usually equal to tjMax. + * + * Older processors do not have this MSR, so there we guess, + * but also allow conficuration over-ride with "TCCActivationTemp". + * + * Several MSR temperature values are in units of degrees-C + * below this value, including the Digital Thermal Sensor (DTS), + * Package Thermal Management Sensor (PTM), and thermal event thresholds. 
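+ *
+ * For example, get_counters() above derives each temperature reading as
+ *   temp_c = tcc_activation_temp - ((MSR_IA32_THERM_STATUS >> 16) & 0x7F)
+ * (and likewise from MSR_IA32_PACKAGE_THERM_STATUS for the package sensor).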
*/ -#define NAME_LEN 12 -static int -submit_counters(struct thread_data *t, struct core_data *c, - struct pkg_data *p) +static int __attribute__((warn_unused_result)) +set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) { - char name[NAME_LEN]; - double interval_float; + unsigned long long msr; + unsigned int target_c_local; - interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; + /* tcc_activation_temp is used only for dts or ptm */ + if (!(do_dts || do_ptm)) + return 0; - snprintf(name, NAME_LEN, "cpu%02d", t->cpu_id); + /* this is a per-package concept */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + return 0; - if (do_nhm_cstates) { - if (!skip_c0) - turbostat_submit(name, "percent", "c0", 100.0 * t->mperf/t->tsc); - if (!skip_c1) - turbostat_submit(name, "percent", "c1", 100.0 * t->c1/t->tsc); + if (tcc_activation_temp != 0) { + p->tcc_activation_temp = tcc_activation_temp; + return 0; } - /* GHz */ - if (has_aperf && ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc)))) - turbostat_submit(NULL, "frequency", name, 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); - - /* SMI */ - if (do_smi) - turbostat_submit(NULL, "current", name, t->smi_count); - - /* print per-core data only for 1st thread in core */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) - goto done; - - snprintf(name, NAME_LEN, "core%02d", c->core_id); - - if (do_nhm_cstates && !do_slm_cstates) - turbostat_submit(name, "percent", "c3", 100.0 * c->c3/t->tsc); - if (do_nhm_cstates) - turbostat_submit(name, "percent", "c6", 100.0 * c->c6/t->tsc); - if (do_snb_cstates) - turbostat_submit(name, "percent", "c7", 100.0 * c->c7/t->tsc); + if (get_msr(t->cpu_id, MSR_IA32_TEMPERATURE_TARGET, &msr)) + goto guess; - if (do_dts) - turbostat_submit(NULL, "temperature", name, c->core_temp_c); + target_c_local = (msr >> 16) & 0xFF; - /* print per-package data only for 1st core in package */ - if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) - goto done; + if (!target_c_local) + goto guess; - snprintf(name, NAME_LEN, "pkg%02d", p->package_id); + p->tcc_activation_temp = target_c_local; - if (do_ptm) - turbostat_submit(NULL, "temperature", name, p->pkg_temp_c); + return 0; - if (do_snb_cstates) - turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2/t->tsc); - if (do_nhm_cstates && !do_slm_cstates) - turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3/t->tsc); - if (do_nhm_cstates && !do_slm_cstates) - turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6/t->tsc); - if (do_snb_cstates) - turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7/t->tsc); - if (do_c8_c9_c10) { - turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8/t->tsc); - turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9/t->tsc); - turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10/t->tsc); - } +guess: + p->tcc_activation_temp = TJMAX_DEFAULT; + WARNING("turbostat plugin: cpu%d: Guessing tjMax %d C," + " Please use TCCActivationTemp to specify it.", + t->cpu_id, p->tcc_activation_temp); - if (do_rapl) { - if (do_rapl & RAPL_PKG) - turbostat_submit(name, "power", "Pkg_W", p->energy_pkg * rapl_energy_units / interval_float); - if (do_rapl & RAPL_CORES) - turbostat_submit(name, "power", "Cor_W", p->energy_cores * rapl_energy_units / interval_float); - if (do_rapl & RAPL_GFX) - turbostat_submit(name, "power", "GFX_W", p->energy_gfx * rapl_energy_units / interval_float); - if (do_rapl & RAPL_DRAM) - turbostat_submit(name, 
"power", "RAM_W", p->energy_dram * rapl_energy_units / interval_float); - } -done: return 0; } -static int -turbostat_read(user_data_t * not_used) +/* + * Identify the functionality of the CPU + */ +static int __attribute__((warn_unused_result)) +probe_cpu() { - int ret; + unsigned int eax, ebx, ecx, edx, max_level; + unsigned int fms, family, model; - if (!allocated) { - if ((ret = setup_all_buffers()) < 0) - return ret; + /* CPUID(0): + * - EAX: Maximum Input Value for Basic CPUID Information + * - EBX: "Genu" (0x756e6547) + * - EDX: "ineI" (0x49656e69) + * - ECX: "ntel" (0x6c65746e) + */ + max_level = ebx = ecx = edx = 0; + __get_cpuid(0, &max_level, &ebx, &ecx, &edx); + if (ebx != 0x756e6547 && edx != 0x49656e69 && ecx != 0x6c65746e) { + ERROR("turbostat plugin: Unsupported CPU (not Intel)"); + return -1; } - if (for_all_proc_cpus(cpu_is_not_present)) { - free_all_buffers(); - if ((ret = setup_all_buffers()) < 0) - return ret; - if (for_all_proc_cpus(cpu_is_not_present)) - return -ERR_CPU_NOT_PRESENT; + /* CPUID(1): + * - EAX: Version Information: Type, Family, Model, and Stepping ID + * + 4-7: Model ID + * + 8-11: Family ID + * + 12-13: Processor type + * + 16-19: Extended Model ID + * + 20-27: Extended Family ID + * - EDX: Feature Information: + * + 5: Support for MSR read/write operations + */ + fms = ebx = ecx = edx = 0; + __get_cpuid(1, &fms, &ebx, &ecx, &edx); + family = (fms >> 8) & 0xf; + model = (fms >> 4) & 0xf; + if (family == 0xf) + family += (fms >> 20) & 0xf; + if (family == 6 || family == 0xf) + model += ((fms >> 16) & 0xf) << 4; + if (!(edx & (1 << 5))) { + ERROR("turbostat plugin: Unsupported CPU (no MSR support)"); + return -1; } - if (!initialized) { - if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0) - return ret; - gettimeofday(&tv_even, (struct timezone *)NULL); - is_even = 1; - initialized = 1; - return 0; + /* + * CPUID(6): + * - EAX: + * + 0: Digital temperature sensor is supported if set + * + 6: Package thermal management is supported if set + * - ECX: + * + 0: Hardware Coordination Feedback Capability (Presence of IA32_MPERF and IA32_APERF). + * + 3: The processor supports performance-energy bias preference if set. 
+ * It also implies the presence of a new architectural MSR called IA32_ENERGY_PERF_BIAS + * + * This check is valid for both Intel and AMD + */ + eax = ebx = ecx = edx = 0; + __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); + do_dts = eax & (1 << 0); + do_ptm = eax & (1 << 6); + if (!(ecx & (1 << 0))) { + ERROR("turbostat plugin: Unsupported CPU (No APERF)"); + return -1; } - if (is_even) { - if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0) - return ret; - gettimeofday(&tv_odd, (struct timezone *)NULL); - is_even = 0; - timersub(&tv_odd, &tv_even, &tv_delta); - if ((ret = for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) < 0) - return ret; - if ((ret = for_all_cpus(submit_counters, EVEN_COUNTERS)) < 0) - return ret; + /* + * Enable or disable C states depending on the model and family + */ + if (family == 6) { + switch (model) { + /* Atom (partial) */ + case 0x27: + do_smi = 0; + do_core_cstate = 0; + do_pkg_cstate = (1 << 2) | (1 << 4) | (1 << 6); + break; + /* Silvermont */ + case 0x37: /* BYT */ + case 0x4D: /* AVN */ + do_smi = 1; + do_core_cstate = (1 << 1) | (1 << 6); + do_pkg_cstate = (1 << 6); + break; + /* Nehalem */ + case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ + case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ + case 0x1F: /* Core i7 and i5 Processor - Nehalem */ + case 0x2E: /* Nehalem-EX Xeon - Beckton */ + do_smi = 1; + do_core_cstate = (1 << 3) | (1 << 6); + do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7); + break; + /* Westmere */ + case 0x25: /* Westmere Client - Clarkdale, Arrandale */ + case 0x2C: /* Westmere EP - Gulftown */ + case 0x2F: /* Westmere-EX Xeon - Eagleton */ + do_smi = 1; + do_core_cstate = (1 << 3) | (1 << 6); + do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7); + break; + /* Sandy Bridge */ + case 0x2A: /* SNB */ + case 0x2D: /* SNB Xeon */ + do_smi = 1; + do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7); + do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7); + break; + /* Ivy Bridge */ + case 0x3A: /* IVB */ + case 0x3E: /* IVB Xeon */ + do_smi = 1; + do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7); + do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7); + break; + /* Haswell Bridge */ + case 0x3C: /* HSW */ + case 0x3F: /* HSW */ + case 0x46: /* HSW */ + do_smi = 1; + do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7); + do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7); + break; + case 0x45: /* HSW */ + do_smi = 1; + do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7); + do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10); + break; + /* Broadwel */ + case 0x4F: /* BDW */ + case 0x56: /* BDX-DE */ + do_smi = 1; + do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7); + do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7); + break; + case 0x3D: /* BDW */ + do_smi = 1; + do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7); + do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10); + break; + default: + do_smi = 0; + do_core_cstate = 0; + do_pkg_cstate = 0; + break; + } + switch (model) { + case 0x2A: /* SNB */ + case 0x3A: /* IVB */ + case 0x3C: /* HSW */ + case 0x45: /* HSW */ + case 0x46: /* HSW */ + case 0x3D: /* BDW */ + do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX; + break; + case 0x3F: /* HSX */ + case 0x4F: /* BDX */ + case 0x56: /* BDX-DE */ + do_rapl = RAPL_PKG | RAPL_DRAM ; + break; + case 0x2D: /* SNB Xeon */ + case 0x3E: /* IVB Xeon */ + do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM; + break; + case 
0x37: /* BYT */ + case 0x4D: /* AVN */ + do_rapl = RAPL_PKG | RAPL_CORES; + break; + default: + do_rapl = 0; + } } else { - if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0) - return ret; - gettimeofday(&tv_even, (struct timezone *)NULL); - is_even = 1; - timersub(&tv_even, &tv_odd, &tv_delta); - if ((ret = for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) < 0) - return ret; - if ((ret = for_all_cpus(submit_counters, ODD_COUNTERS)) < 0) - return ret; + ERROR("turbostat plugin: Unsupported CPU (family: %#x, " + "model: %#x)", family, model); + return -1; } - return 0; -} -static int __attribute__((warn_unused_result)) -check_dev_msr() -{ - struct stat sb; + /* Override detected values with configuration */ + if (apply_config_core_cstate) + do_core_cstate = config_core_cstate; + if (apply_config_pkg_cstate) + do_pkg_cstate = config_pkg_cstate; + if (apply_config_smi) + do_smi = config_smi; + if (apply_config_dts) + do_dts = config_dts; + if (apply_config_ptm) + do_ptm = config_ptm; + if (apply_config_rapl) + do_rapl = config_rapl; - if (stat("/dev/cpu/0/msr", &sb)) { - ERROR("no /dev/cpu/0/msr\n" - "Try \"# modprobe msr\""); - return -ERR_NO_MSR; + if (do_rapl) { + unsigned long long msr; + if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr)) + return 0; + + if (model == 0x37) + rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; + else + rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); } - return 0; -} -static int __attribute__((warn_unused_result)) -check_super_user() -{ - if (getuid() != 0) { - ERROR("must be root"); - return -ERR_NOT_ROOT; - } return 0; } -#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ -#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ +/******************** + * Topology Probing * + ********************/ -static double -get_tdp(unsigned int model) +/* + * Read a single int from a file. + */ +static int __attribute__ ((format(printf,1,2))) +parse_int_file(const char *fmt, ...) 
{ - unsigned long long msr; + va_list args; + char path[PATH_MAX]; + FILE *filep; + int len, value; - if (do_rapl & RAPL_PKG_POWER_INFO) - if (!get_msr(0, MSR_PKG_POWER_INFO, &msr)) - return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; + va_start(args, fmt); + len = vsnprintf(path, sizeof(path), fmt, args); + va_end(args); + if (len < 0 || len >= PATH_MAX) { + ERROR("turbostat plugin: path truncated: '%s'", path); + return -1; + } - switch (model) { - case 0x37: - case 0x4D: - return 30.0; - default: - return 135.0; + filep = fopen(path, "r"); + if (!filep) { + ERROR("turbostat plugin: Failed to open '%s'", path); + return -1; } + if (fscanf(filep, "%d", &value) != 1) { + ERROR("turbostat plugin: Failed to parse number from '%s'", path); + return -1; + } + fclose(filep); + return value; } - -/* - * rapl_probe() - * - * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units - */ -static void -rapl_probe(unsigned int family, unsigned int model) +static int +get_threads_on_core(unsigned int cpu) { - unsigned long long msr; - unsigned int time_unit; - double tdp; - - if (!genuine_intel) - return; - - if (family != 6) - return; - - switch (model) { - case 0x2A: - case 0x3A: - case 0x3C: /* HSW */ - case 0x45: /* HSW */ - case 0x46: /* HSW */ - do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; - break; - case 0x3F: /* HSX */ - do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; - break; - case 0x2D: - case 0x3E: - do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; - break; - case 0x37: /* BYT */ - case 0x4D: /* AVN */ - do_rapl = RAPL_PKG | RAPL_CORES ; - break; - default: - return; + char path[80]; + FILE *filep; + int sib1, sib2; + int matches; + char character; + + ssnprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); + filep = fopen(path, "r"); + if (!filep) { + ERROR("turbostat plugin: Failed to open '%s'", path); + return -1; } + /* + * file format: + * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4) + * otherwinse 1 sibling (self). + */ + matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2); - /* units on package 0, verify later other packages match */ - if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr)) - return; + fclose(filep); - rapl_power_units = 1.0 / (1 << (msr & 0xF)); - if (model == 0x37) - rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; + if (matches == 3) + return 2; else - rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); + return 1; +} - time_unit = msr >> 16 & 0xF; - if (time_unit == 0) - time_unit = 0xA; +/* + * run func(cpu) on every cpu in /proc/stat + * return max_cpu number + */ +static int __attribute__((warn_unused_result)) +for_all_proc_cpus(int (func)(unsigned int)) +{ + FILE *fp; + unsigned int cpu_num; + int retval; - rapl_time_units = 1.0 / (1 << (time_unit)); + fp = fopen("/proc/stat", "r"); + if (!fp) { + ERROR("turbostat plugin: Failed to open /proc/stat"); + return -1; + } - tdp = get_tdp(model); + retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); + if (retval != 0) { + ERROR("turbostat plugin: Failed to parse /proc/stat"); + fclose(fp); + return -1; + } - rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; -// if (verbose) -// fprintf(stderr, "RAPL: %.0f sec. 
Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); + while (1) { + retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num); + if (retval != 1) + break; - return; + retval = func(cpu_num); + if (retval) { + fclose(fp); + return(retval); + } + } + fclose(fp); + return 0; } +/* + * Update the stored topology.max_cpu_id + */ static int -is_snb(unsigned int family, unsigned int model) +update_max_cpu_id(unsigned int cpu) { - if (!genuine_intel) - return 0; - - switch (model) { - case 0x2A: - case 0x2D: - case 0x3A: /* IVB */ - case 0x3E: /* IVB Xeon */ - case 0x3C: /* HSW */ - case 0x3F: /* HSW */ - case 0x45: /* HSW */ - case 0x46: /* HSW */ - return 1; - } + if (topology.max_cpu_id < cpu) + topology.max_cpu_id = cpu; return 0; } static int -has_c8_c9_c10(unsigned int family, unsigned int model) +mark_cpu_present(unsigned int cpu) { - if (!genuine_intel) - return 0; - - switch (model) { - case 0x45: - return 1; - } + CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); return 0; } - -static int -is_slm(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - switch (model) { - case 0x37: /* BYT */ - case 0x4D: /* AVN */ - return 1; +static int __attribute__((warn_unused_result)) +allocate_cpu_set(cpu_set_t ** set, size_t * size) { + *set = CPU_ALLOC(topology.max_cpu_id + 1); + if (*set == NULL) { + ERROR("turbostat plugin: Unable to allocate CPU state"); + return -1; } + *size = CPU_ALLOC_SIZE(topology.max_cpu_id + 1); + CPU_ZERO_S(*size, *set); return 0; } /* - * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where - * the Thermal Control Circuit (TCC) activates. - * This is usually equal to tjMax. - * - * Older processors do not have this MSR, so there we guess, - * but also allow cmdline over-ride with -T. - * - * Several MSR temperature values are in units of degrees-C - * below this value, including the Digital Thermal Sensor (DTS), - * Package Thermal Management Sensor (PTM), and thermal event thresholds. 
+ * Build a local representation of the cpu distribution */ static int __attribute__((warn_unused_result)) -set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) +topology_probe() { - unsigned long long msr; - unsigned int target_c_local; - int cpu; - - /* tcc_activation_temp is used only for dts or ptm */ - if (!(do_dts || do_ptm)) - return 0; + unsigned int i; + int ret; + unsigned int max_package_id, max_core_id, max_threads; + max_package_id = max_core_id = max_threads = 0; - /* this is a per-package concept */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) - return 0; + /* Clean topology */ + free(topology.cpus); + memset(&topology, 0, sizeof(topology)); - cpu = t->cpu_id; - if (cpu_migrate(cpu)) { - ERROR("Could not migrate to CPU %d\n", cpu); - return -ERR_CPU_MIGRATE; - } + ret = for_all_proc_cpus(update_max_cpu_id); + if (ret != 0) + goto err; - if (tcc_activation_temp_override != 0) { - tcc_activation_temp = tcc_activation_temp_override; - ERROR("cpu%d: Using cmdline TCC Target (%d C)\n", - cpu, tcc_activation_temp); - return 0; + topology.cpus = calloc(1, (topology.max_cpu_id + 1) * sizeof(struct cpu_topology)); + if (topology.cpus == NULL) { + ERROR("turbostat plugin: Unable to allocate memory for CPU topology"); + return -1; } - /* Temperature Target MSR is Nehalem and newer only */ - if (!do_nehalem_platform_info) - goto guess; + ret = allocate_cpu_set(&cpu_present_set, &cpu_present_setsize); + if (ret != 0) + goto err; + ret = allocate_cpu_set(&cpu_affinity_set, &cpu_affinity_setsize); + if (ret != 0) + goto err; + ret = allocate_cpu_set(&cpu_saved_affinity_set, &cpu_saved_affinity_setsize); + if (ret != 0) + goto err; - if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr)) - goto guess; + ret = for_all_proc_cpus(mark_cpu_present); + if (ret != 0) + goto err; - target_c_local = (msr >> 16) & 0x7F; + /* + * For online cpus + * find max_core_id, max_package_id + */ + for (i = 0; i <= topology.max_cpu_id; ++i) { + unsigned int num_threads; + struct cpu_topology *cpu = &topology.cpus[i]; - if (target_c_local < 85 || target_c_local > 127) - goto guess; + if (cpu_is_not_present(i)) { + WARNING("turbostat plugin: cpu%d NOT PRESENT", i); + continue; + } - tcc_activation_temp = target_c_local; + ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", i); + if (ret < 0) + goto err; + else + cpu->package_id = (unsigned int) ret; + if (cpu->package_id > max_package_id) + max_package_id = cpu->package_id; + + ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", i); + if (ret < 0) + goto err; + else + cpu->core_id = (unsigned int) ret; + if (cpu->core_id > max_core_id) + max_core_id = cpu->core_id; + ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", i); + if (ret < 0) + goto err; + else if ((unsigned int) ret == i) + cpu->first_core_in_package = 1; + + ret = get_threads_on_core(i); + if (ret < 0) + goto err; + else + num_threads = (unsigned int) ret; + if (num_threads > max_threads) + max_threads = num_threads; + ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", i); + if (ret < 0) + goto err; + else if ((unsigned int) ret == i) + cpu->first_thread_in_core = 1; + + DEBUG("turbostat plugin: cpu %d pkg %d core %d\n", + i, cpu->package_id, cpu->core_id); + } + /* Num is max + 1 (need to count 0) */ + topology.num_packages = max_package_id + 1; + topology.num_cores = max_core_id + 1; + topology.num_threads = 
 	return 0;
+err:
+	free(topology.cpus);
+	return ret;
+}
 
-guess:
-	tcc_activation_temp = TJMAX_DEFAULT;
-	WARNING("cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
-		cpu, tcc_activation_temp);
-	return 0;
-}
+/************************
+ * Main alloc/init/free *
+ ************************/
 
-static int __attribute__((warn_unused_result))
-check_cpuid()
+static int
+allocate_counters(struct thread_data **threads, struct core_data **cores, struct pkg_data **packages)
 {
-	unsigned int eax, ebx, ecx, edx, max_level;
-	unsigned int fms, family, model;
+	unsigned int i;
+	unsigned int total_threads, total_cores;
 
-	eax = ebx = ecx = edx = 0;
+	total_threads = topology.num_threads * topology.num_cores * topology.num_packages;
+	*threads = calloc(total_threads, sizeof(struct thread_data));
+	if (*threads == NULL)
+		goto err;
 
-	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+	for (i = 0; i < total_threads; ++i)
+		(*threads)[i].cpu_id = topology.max_cpu_id + 1;
 
-	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
-		genuine_intel = 1;
+	total_cores = topology.num_cores * topology.num_packages;
+	*cores = calloc(total_cores, sizeof(struct core_data));
+	if (*cores == NULL)
+		goto err_clean_threads;
 
-	fms = 0;
-	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
-	family = (fms >> 8) & 0xf;
-	model = (fms >> 4) & 0xf;
-	if (family == 6 || family == 0xf)
-		model += ((fms >> 16) & 0xf) << 4;
+	*packages = calloc(topology.num_packages, sizeof(struct pkg_data));
+	if (*packages == NULL)
+		goto err_clean_cores;
 
-	if (!(edx & (1 << 5))) {
-		ERROR("CPUID: no MSR");
-		return -ERR_NO_MSR;
-	}
+	return 0;
 
-	/*
-	 * check max extended function levels of CPUID.
-	 * This is needed to check for invariant TSC.
-	 * This check is valid for both Intel and AMD.
-	 */
-	ebx = ecx = edx = 0;
-	__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
+err_clean_cores:
+	free(*cores);
+err_clean_threads:
+	free(*threads);
+err:
+	ERROR("turbostat plugin: Failed to allocate memory for counters");
+	return -1;
+}
 
-	if (max_level < 0x80000007) {
-		ERROR("CPUID: no invariant TSC (max_level 0x%x)", max_level);
-		return -ERR_NO_INVARIANT_TSC;
-	}
+static void
+init_counter(struct thread_data *thread_base, struct core_data *core_base,
+	struct pkg_data *pkg_base, unsigned int cpu_id)
+{
+	struct thread_data *t;
+	struct core_data *c;
+	struct pkg_data *p;
+	struct cpu_topology *cpu = &topology.cpus[cpu_id];
 
-	/*
-	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
-	 * this check is valid for both Intel and AMD
-	 */
-	__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
-	has_invariant_tsc = edx & (1 << 8);
+	t = GET_THREAD(thread_base, !(cpu->first_thread_in_core), cpu->core_id, cpu->package_id);
+	c = GET_CORE(core_base, cpu->core_id, cpu->package_id);
+	p = GET_PKG(pkg_base, cpu->package_id);
 
-	if (!has_invariant_tsc) {
-		ERROR("No invariant TSC");
-		return -ERR_NO_INVARIANT_TSC;
-	}
+	t->cpu_id = cpu_id;
+	if (cpu->first_thread_in_core)
+		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
+	if (cpu->first_core_in_package)
+		t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
 
-	/*
-	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
-	 * this check is valid for both Intel and AMD
-	 */
+	c->core_id = cpu->core_id;
+	p->package_id = cpu->package_id;
+}
 
-	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
-	has_aperf = ecx & (1 << 0);
-	do_dts = eax & (1 << 0);
-	do_ptm = eax & (1 << 6);
-	has_epb = ecx & (1 << 3);
+static void
+initialize_counters(void)
+{
+	unsigned int cpu_id;
 
-	if (!has_aperf) {
-		ERROR("No APERF");
-		return -ERR_NO_APERF;
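+	/*
+	 * Walk every possible CPU id, skip the ones that are not present,
+	 * and create the matching entries in the even, odd and delta
+	 * counter sets.
+	 */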
+	for (cpu_id = 0; cpu_id <= topology.max_cpu_id; ++cpu_id) {
+		if (cpu_is_not_present(cpu_id))
+			continue;
+		init_counter(EVEN_COUNTERS, cpu_id);
+		init_counter(ODD_COUNTERS, cpu_id);
+		init_counter(DELTA_COUNTERS, cpu_id);
 	}
-
-	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
-	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
-	do_smi = do_nhm_cstates;
-	do_snb_cstates = is_snb(family, model);
-	do_c8_c9_c10 = has_c8_c9_c10(family, model);
-	do_slm_cstates = is_slm(family, model);
-
-	rapl_probe(family, model);
-
-	return 0;
 }
 
-static int __attribute__((warn_unused_result))
-topology_probe()
+static void
+free_all_buffers(void)
 {
-	int i;
-	int ret;
-	int max_core_id = 0;
-	int max_package_id = 0;
-	int max_siblings = 0;
-	struct cpu_topology {
-		int core_id;
-		int physical_package_id;
-	} *cpus;
-
-	/* Initialize num_cpus, max_cpu_num */
-	topo.num_cpus = 0;
-	topo.max_cpu_num = 0;
-	ret = for_all_proc_cpus(count_cpus);
-	if (ret < 0)
-		return ret;
-	if (topo.num_cpus > 1)
-		show_cpu = 1;
-
-	DEBUG("num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
-
-	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
-	if (cpus == NULL) {
-		ERROR("calloc cpus");
-		return -ERR_CALLOC;
-	}
+	allocated = 0;
+	initialized = 0;
 
-	/*
-	 * Allocate and initialize cpu_present_set
-	 */
-	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
-	if (cpu_present_set == NULL) {
-		free(cpus);
-		ERROR("CPU_ALLOC");
-		return -ERR_CPU_ALLOC;
-	}
-	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
-	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
-	ret = for_all_proc_cpus(mark_cpu_present);
-	if (ret < 0) {
-		free(cpus);
-		return ret;
-	}
+	CPU_FREE(cpu_present_set);
+	cpu_present_set = NULL;
+	cpu_present_setsize = 0;
 
-	/*
-	 * Allocate and initialize cpu_affinity_set
-	 */
-	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
-	if (cpu_affinity_set == NULL) {
-		free(cpus);
-		ERROR("CPU_ALLOC");
-		return -ERR_CPU_ALLOC;
-	}
-	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
-	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
+	CPU_FREE(cpu_affinity_set);
+	cpu_affinity_set = NULL;
+	cpu_affinity_setsize = 0;
+	CPU_FREE(cpu_saved_affinity_set);
+	cpu_saved_affinity_set = NULL;
+	cpu_saved_affinity_setsize = 0;
 
-	/*
-	 * For online cpus
-	 * find max_core_id, max_package_id
-	 */
-	for (i = 0; i <= topo.max_cpu_num; ++i) {
-		int siblings;
+	free(thread_even);
+	free(core_even);
+	free(package_even);
 
-		if (cpu_is_not_present(i)) {
-			//if (verbose > 1)
-				fprintf(stderr, "cpu%d NOT PRESENT\n", i);
-			continue;
-		}
-		cpus[i].core_id = get_core_id(i);
-		if (cpus[i].core_id < 0)
-			return cpus[i].core_id;
-		if (cpus[i].core_id > max_core_id)
-			max_core_id = cpus[i].core_id;
-
-		cpus[i].physical_package_id = get_physical_package_id(i);
-		if (cpus[i].physical_package_id < 0)
-			return cpus[i].physical_package_id;
-		if (cpus[i].physical_package_id > max_package_id)
-			max_package_id = cpus[i].physical_package_id;
-
-		siblings = get_num_ht_siblings(i);
-		if (siblings < 0)
-			return siblings;
-		if (siblings > max_siblings)
-			max_siblings = siblings;
-		DEBUG("cpu %d pkg %d core %d\n",
-			i, cpus[i].physical_package_id, cpus[i].core_id);
-	}
-	topo.num_cores_per_pkg = max_core_id + 1;
-	DEBUG("max_core_id %d, sizing for %d cores per package\n",
-		max_core_id, topo.num_cores_per_pkg);
-	if (topo.num_cores_per_pkg > 1)
-		show_core = 1;
-
-	topo.num_packages = max_package_id + 1;
-	DEBUG("max_package_id %d, sizing for %d packages\n",
packages\n", - max_package_id, topo.num_packages); - if (topo.num_packages > 1) - show_pkg = 1; - - topo.num_threads_per_core = max_siblings; - DEBUG("max_siblings %d\n", max_siblings); - - free(cpus); - return 0; -} + thread_even = NULL; + core_even = NULL; + package_even = NULL; -static int -allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p) -{ - int i; + free(thread_odd); + free(core_odd); + free(package_odd); + + thread_odd = NULL; + core_odd = NULL; + package_odd = NULL; - *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg * - topo.num_packages, sizeof(struct thread_data)); - if (*t == NULL) - goto error; + free(thread_delta); + free(core_delta); + free(package_delta); + + thread_delta = NULL; + core_delta = NULL; + package_delta = NULL; +} - for (i = 0; i < topo.num_threads_per_core * - topo.num_cores_per_pkg * topo.num_packages; i++) - (*t)[i].cpu_id = -1; - *c = calloc(topo.num_cores_per_pkg * topo.num_packages, - sizeof(struct core_data)); - if (*c == NULL) - goto error; +/********************** + * Collectd functions * + **********************/ - for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++) - (*c)[i].core_id = -1; +#define DO_OR_GOTO_ERR(something) \ +do { \ + ret = (something); \ + if (ret < 0) \ + goto err; \ +} while (0) - *p = calloc(topo.num_packages, sizeof(struct pkg_data)); - if (*p == NULL) - goto error; +static int setup_all_buffers(void) +{ + int ret; - for (i = 0; i < topo.num_packages; i++) - (*p)[i].package_id = i; + DO_OR_GOTO_ERR(topology_probe()); + DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even)); + DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd)); + DO_OR_GOTO_ERR(allocate_counters(&thread_delta, &core_delta, &package_delta)); + initialize_counters(); + DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS)); + DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, ODD_COUNTERS)); + allocated = 1; return 0; -error: - ERROR("calloc counters"); - return -ERR_CALLOC; +err: + free_all_buffers(); + return ret; } -/* - * init_counter() - * - * set cpu_id, core_num, pkg_num - * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE - * - * increment topo.num_cores when 1st core in pkg seen - */ + static int -init_counter(struct thread_data *thread_base, struct core_data *core_base, - struct pkg_data *pkg_base, int thread_num, int core_num, - int pkg_num, int cpu_id) +turbostat_read(void) { int ret; - struct thread_data *t; - struct core_data *c; - struct pkg_data *p; - t = GET_THREAD(thread_base, thread_num, core_num, pkg_num); - c = GET_CORE(core_base, core_num, pkg_num); - p = GET_PKG(pkg_base, pkg_num); + if (!allocated) { + if ((ret = setup_all_buffers()) < 0) + return ret; + } - t->cpu_id = cpu_id; - if (thread_num == 0) { - t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; - if ((ret = cpu_is_first_core_in_package(cpu_id)) < 0) { + if (for_all_proc_cpus(cpu_is_not_present)) { + free_all_buffers(); + if ((ret = setup_all_buffers()) < 0) return ret; - } else if (ret != 0) { - t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; + if (for_all_proc_cpus(cpu_is_not_present)) { + ERROR("turbostat plugin: CPU appeared just after " + "initialization"); + return -1; } } - c->core_id = core_num; - p->package_id = pkg_num; + /* Saving the scheduling affinity, as it will be modified by get_counters */ + if (sched_getaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set) != 0) { + ERROR("turbostat plugin: Unable to save the CPU affinity"); + return -1; + } + + if 
+	if (!initialized) {
+		if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
+			goto out;
+		time_even = cdtime();
+		is_even = 1;
+		initialized = 1;
+		ret = 0;
+		goto out;
+	}
 
-	return 0;
+	if (is_even) {
+		if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
+			goto out;
+		time_odd = cdtime();
+		is_even = 0;
+		time_delta = time_odd - time_even;
+		if ((ret = for_all_cpus_delta(ODD_COUNTERS, EVEN_COUNTERS)) < 0)
+			goto out;
+		if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
+			goto out;
+	} else {
+		if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
+			goto out;
+		time_even = cdtime();
+		is_even = 1;
+		time_delta = time_even - time_odd;
+		if ((ret = for_all_cpus_delta(EVEN_COUNTERS, ODD_COUNTERS)) < 0)
+			goto out;
+		if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
+			goto out;
+	}
+	ret = 0;
+out:
+	/*
+	 * Let's restore the affinity.
+	 * This might fail if the number of CPUs changed, but we cannot do
+	 * anything about it in that case.
+	 */
+	(void)sched_setaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set);
+	return ret;
 }
 
-
 static int
-initialize_counters(int cpu_id)
+check_permissions(void)
 {
-	int my_thread_id, my_core_id, my_package_id;
-	int ret;
-
-	my_package_id = get_physical_package_id(cpu_id);
-	if (my_package_id < 0)
-		return my_package_id;
-	my_core_id = get_core_id(cpu_id);
-	if (my_core_id < 0)
-		return my_core_id;
-
-	if ((ret = cpu_is_first_sibling_in_core(cpu_id)) < 0) {
-		return ret;
-	} else if (ret != 0) {
-		my_thread_id = 0;
-		topo.num_cores++;
+#ifdef HAVE_SYS_CAPABILITY_H
+	struct __user_cap_header_struct cap_header_data;
+	cap_user_header_t cap_header = &cap_header_data;
+	struct __user_cap_data_struct cap_data_data;
+	cap_user_data_t cap_data = &cap_data_data;
+	int ret = 0;
+#endif /* HAVE_SYS_CAPABILITY_H */
+
+	if (getuid() == 0) {
+		/* We have everything we need */
+		return 0;
+#ifndef HAVE_SYS_CAPABILITY_H
 	} else {
-		my_thread_id = 1;
+		ERROR("turbostat plugin: Initialization failed: this plugin "
+		      "requires collectd to run as root");
+		return -1;
+	}
+#else /* HAVE_SYS_CAPABILITY_H */
 	}
 
-	ret = init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
-	if (ret < 0)
-		return ret;
-	ret = init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
-	if (ret < 0)
-		return ret;
-	return 0;
-}
-
-#define DO_OR_GOTO_ERR(something) \
-do { \
-	ret = (something); \
-	if (ret < 0) \
-		goto err; \
-} while (0)
+	/* check for CAP_SYS_RAWIO */
+	cap_header->pid = getpid();
+	cap_header->version = _LINUX_CAPABILITY_VERSION;
+	if (capget(cap_header, cap_data) < 0) {
+		ERROR("turbostat plugin: capget failed");
+		return -1;
+	}
 
-static int setup_all_buffers(void)
-{
-	int ret;
+	if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
+		WARNING("turbostat plugin: Collectd doesn't have the "
+			"CAP_SYS_RAWIO capability. If you don't want to run "
+			"collectd as root, try running \"setcap "
+			"cap_sys_rawio=ep\" on the collectd binary");
+		ret = -1;
+	}
 
-	DO_OR_GOTO_ERR(topology_probe());
-	DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
-	DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
-	DO_OR_GOTO_ERR(for_all_proc_cpus(initialize_counters));
+	if (euidaccess("/dev/cpu/0/msr", R_OK)) {
+		WARNING("turbostat plugin: Collectd cannot open "
+			"/dev/cpu/0/msr. If you don't want to run collectd as "
+			"root, you need to change the ownership (chown) and "
+			"permissions on /dev/cpu/*/msr to allow such access");
+		ret = -1;
+	}
 
-	allocated = 1;
-	return 0;
-err:
-	free_all_buffers();
+	if (ret != 0)
+		ERROR("turbostat plugin: Initialization failed: this plugin "
+		      "requires collectd to either run as root or give "
+		      "collectd a special capability (CAP_SYS_RAWIO) and read "
+		      "access to /dev/cpu/*/msr (see previous warnings)");
 	return ret;
+#endif /* HAVE_SYS_CAPABILITY_H */
 }
 
 static int
 turbostat_init(void)
 {
+	struct stat sb;
 	int ret;
-	struct timespec ts;
 
-	DO_OR_GOTO_ERR(check_cpuid());
-	DO_OR_GOTO_ERR(check_dev_msr());
-	DO_OR_GOTO_ERR(check_super_user());
-	DO_OR_GOTO_ERR(setup_all_buffers());
-	DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
+	if (stat("/dev/cpu/0/msr", &sb)) {
+		ERROR("turbostat plugin: Initialization failed: /dev/cpu/0/msr"
+		      " does not exist while the CPU supports MSR. You may be "
+		      "missing the corresponding kernel module, please try '# "
+		      "modprobe msr'");
+		return -1;
+	}
+
+	DO_OR_GOTO_ERR(check_permissions());
+
+	DO_OR_GOTO_ERR(probe_cpu());
 
-	ts.tv_sec = interval_sec;
-	ts.tv_nsec = 0;
+	DO_OR_GOTO_ERR(setup_all_buffers());
 
-	plugin_register_complex_read(NULL, PLUGIN_NAME, turbostat_read, &ts, NULL);
+	plugin_register_read(PLUGIN_NAME, turbostat_read);
 
 	return 0;
 err:
@@ -1499,23 +1546,64 @@ err:
 	return ret;
 }
 
-static const char *config_keys[] =
-{
-	"Interval",
-};
-static int config_keys_num = STATIC_ARRAY_SIZE (config_keys);
-
 static int
 turbostat_config(const char *key, const char *value)
 {
-	if (strcasecmp("Interval", key) == 0)
-		interval_sec = atoi(value);
-	else
+	long unsigned int tmp_val;
+	char *end;
+
+	if (strcasecmp("CoreCstates", key) == 0) {
+		tmp_val = strtoul(value, &end, 0);
+		if (*end != '\0' || tmp_val > UINT_MAX) {
+			ERROR("turbostat plugin: Invalid CoreCstates '%s'",
+			      value);
+			return -1;
+		}
+		config_core_cstate = (unsigned int) tmp_val;
+		apply_config_core_cstate = 1;
+	} else if (strcasecmp("PackageCstates", key) == 0) {
+		tmp_val = strtoul(value, &end, 0);
+		if (*end != '\0' || tmp_val > UINT_MAX) {
+			ERROR("turbostat plugin: Invalid PackageCstates '%s'",
+			      value);
+			return -1;
+		}
+		config_pkg_cstate = (unsigned int) tmp_val;
+		apply_config_pkg_cstate = 1;
+	} else if (strcasecmp("SystemManagementInterrupt", key) == 0) {
+		config_smi = IS_TRUE(value);
+		apply_config_smi = 1;
+	} else if (strcasecmp("DigitalTemperatureSensor", key) == 0) {
+		config_dts = IS_TRUE(value);
+		apply_config_dts = 1;
+	} else if (strcasecmp("PackageThermalManagement", key) == 0) {
+		config_ptm = IS_TRUE(value);
+		apply_config_ptm = 1;
+	} else if (strcasecmp("RunningAveragePowerLimit", key) == 0) {
+		tmp_val = strtoul(value, &end, 0);
+		if (*end != '\0' || tmp_val > UINT_MAX) {
+			ERROR("turbostat plugin: Invalid RunningAveragePowerLimit '%s'",
+			      value);
+			return -1;
+		}
+		config_rapl = (unsigned int) tmp_val;
+		apply_config_rapl = 1;
+	} else if (strcasecmp("TCCActivationTemp", key) == 0) {
+		tmp_val = strtoul(value, &end, 0);
+		if (*end != '\0' || tmp_val > UINT_MAX) {
+			ERROR("turbostat plugin: Invalid TCCActivationTemp '%s'",
+			      value);
+			return -1;
+		}
+		tcc_activation_temp = (unsigned int) tmp_val;
+	} else {
+		ERROR("turbostat plugin: Invalid configuration option '%s'",
+		      key);
 		return -1;
+	}
 
 	return 0;
 }
 
-void module_register(void);
 void module_register(void)
 {
 	plugin_register_init(PLUGIN_NAME, turbostat_init);