Turbostat: Refactor topology probing
[collectd.git] / src / turbostat.c
1 /*
2  * turbostat -- Log CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors for collectd.
4  *
5  * Based on the 'turbostat' tool of the Linux kernel, found at
6  * linux/tools/power/x86/turbostat/turbostat.c:
7  * ----
8  * Copyright (c) 2013 Intel Corporation.
9  * Len Brown <len.brown@intel.com>
10  *
11  * This program is free software; you can redistribute it and/or modify it
12  * under the terms and conditions of the GNU General Public License,
13  * version 2, as published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  * more details.
19  *
20  * You should have received a copy of the GNU General Public License along with
21  * this program; if not, write to the Free Software Foundation, Inc.,
22  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23  * ----
24  * Ported to collectd by Vincent Brillault <git@lerya.net>
25  */
26
27 /*
28  * _GNU_SOURCE is required because of the following functions:
29  * - CPU_ISSET_S
30  * - CPU_ZERO_S
31  * - CPU_SET_S
32  * - CPU_FREE
33  * - CPU_ALLOC
34  * - CPU_ALLOC_SIZE
35  */
36 #define _GNU_SOURCE
37
38 #include <asm/msr-index.h>
39 #include <stdarg.h>
40 #include <stdio.h>
41 #include <err.h>
42 #include <unistd.h>
43 #include <sys/types.h>
44 #include <sys/wait.h>
45 #include <sys/stat.h>
46 #include <sys/resource.h>
47 #include <fcntl.h>
48 #include <signal.h>
49 #include <sys/time.h>
50 #include <stdlib.h>
51 #include <dirent.h>
52 #include <string.h>
53 #include <ctype.h>
54 #include <sched.h>
55 #include <cpuid.h>
#include <assert.h> /* assert() is used in topology_probe() */
56
57 #include "collectd.h"
58 #include "common.h"
59 #include "plugin.h"
60
61 #define PLUGIN_NAME "turbostat"
62
63 /*
64  * This tool uses the Model-Specific Registers (MSRs) present on Intel processors.
65  * The general description of each of these registers, depending on the architecture,
66  * can be found in the Intel® 64 and IA-32 Architectures Software Developer's Manual,
67  * Volume 3, Chapter 35.
68  */
69
70 /*
71  * If set, aperf_mperf_unstable disables a/mperf based stats.
72  * This includes: C0 & C1 states, frequency
73  *
74  * This value is automatically set if mperf or aperf go backward
75  */
76 static _Bool aperf_mperf_unstable;
77
78 /*
79  * Bitmask of the list of core C states supported by the processor.
80  * Currently supported C-states (by this plugin): 3, 6, 7
81  */
82 static unsigned int do_core_cstate;
83
84 /*
85  * Bitmask of the list of package C-states supported by the processor.
86  * Currently supported C-states (by this plugin): 2, 3, 6, 7, 8, 9, 10
87  */
88 static unsigned int do_pkg_cstate;
89
90 /*
91  * Boolean indicating if the processor supports 'Digital temperature sensor'
92  * This feature enables the monitoring of the temperature of each core
93  *
94  * This feature has two limitations:
95  *  - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong
96  *  - Temperatures above the tcc_activation_temp are not recorded
97  */
98 static _Bool do_dts;
99
100 /*
101  * Boolean indicating if the processor supports 'Package thermal management'
102  * This feature allows the monitoring of the temperature of each package
103  *
104  * This feature has two limitations:
105  *  - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong
106  *  - Temperatures above the tcc_activation_temp are not recorded
107  */
108 static _Bool do_ptm;
109
110 /*
111  * Thermal Control Circuit Activation Temperature as configured by the user.
112  * This overrides the automated detection via MSR_IA32_TEMPERATURE_TARGET
113  * and should only be used if the automated detection fails.
114  */
115 static unsigned int tcc_activation_temp;
116
117 static unsigned int do_rapl;
118 static double rapl_energy_units;
119
120 #define RAPL_PKG                (1 << 0)
121                                         /* 0x610 MSR_PKG_POWER_LIMIT */
122                                         /* 0x611 MSR_PKG_ENERGY_STATUS */
123 #define RAPL_PKG_PERF_STATUS    (1 << 1)
124                                         /* 0x613 MSR_PKG_PERF_STATUS */
125 #define RAPL_PKG_POWER_INFO     (1 << 2)
126                                         /* 0x614 MSR_PKG_POWER_INFO */
127
128 #define RAPL_DRAM               (1 << 3)
129                                         /* 0x618 MSR_DRAM_POWER_LIMIT */
130                                         /* 0x619 MSR_DRAM_ENERGY_STATUS */
131                                         /* 0x61c MSR_DRAM_POWER_INFO */
132 #define RAPL_DRAM_PERF_STATUS   (1 << 4)
133                                         /* 0x61b MSR_DRAM_PERF_STATUS */
134
135 #define RAPL_CORES              (1 << 5)
136                                         /* 0x638 MSR_PP0_POWER_LIMIT */
137                                         /* 0x639 MSR_PP0_ENERGY_STATUS */
138 #define RAPL_CORE_POLICY        (1 << 6)
139                                         /* 0x63a MSR_PP0_POLICY */
140
141
142 #define RAPL_GFX                (1 << 7)
143                                         /* 0x640 MSR_PP1_POWER_LIMIT */
144                                         /* 0x641 MSR_PP1_ENERGY_STATUS */
145                                         /* 0x642 MSR_PP1_POLICY */
146 #define TJMAX_DEFAULT   100
147
148 cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_saved_affinity_set;
149 size_t cpu_present_setsize, cpu_affinity_setsize, cpu_saved_affinity_setsize;
150
151 struct thread_data {
152         unsigned long long tsc;
153         unsigned long long aperf;
154         unsigned long long mperf;
155         unsigned long long c1;
156         unsigned int smi_count;
157         unsigned int cpu_id;
158         unsigned int flags;
159 #define CPU_IS_FIRST_THREAD_IN_CORE     0x2
160 #define CPU_IS_FIRST_CORE_IN_PACKAGE    0x4
161 } *thread_delta, *thread_even, *thread_odd;
162
163 struct core_data {
164         unsigned long long c3;
165         unsigned long long c6;
166         unsigned long long c7;
167         unsigned int core_temp_c;
168         unsigned int core_id;
169 } *core_delta, *core_even, *core_odd;
170
171 struct pkg_data {
172         unsigned long long pc2;
173         unsigned long long pc3;
174         unsigned long long pc6;
175         unsigned long long pc7;
176         unsigned long long pc8;
177         unsigned long long pc9;
178         unsigned long long pc10;
179         unsigned int package_id;
180         unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
181         unsigned int energy_dram;       /* MSR_DRAM_ENERGY_STATUS */
182         unsigned int energy_cores;      /* MSR_PP0_ENERGY_STATUS */
183         unsigned int energy_gfx;        /* MSR_PP1_ENERGY_STATUS */
184         unsigned int rapl_pkg_perf_status;      /* MSR_PKG_PERF_STATUS */
185         unsigned int rapl_dram_perf_status;     /* MSR_DRAM_PERF_STATUS */
186         unsigned int tcc_activation_temp;
187         unsigned int pkg_temp_c;
188 } *package_delta, *package_even, *package_odd;
189
190 #define DELTA_COUNTERS thread_delta, core_delta, package_delta
191 #define ODD_COUNTERS thread_odd, core_odd, package_odd
192 #define EVEN_COUNTERS thread_even, core_even, package_even
193 static _Bool is_even = 1;
194
195 static _Bool allocated = 0;
196 static _Bool initialized = 0;
197
198 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
199         (thread_base + \
200                 (pkg_no) * topology.num_cores * topology.num_threads + \
201                 (core_no) * topology.num_threads + \
202                 (thread_no))
203 #define GET_CORE(core_base, core_no, pkg_no) \
204         (core_base + \
205                 (pkg_no) * topology.num_cores + \
206                 (core_no))
207 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
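/*
 * Counters are stored in flat arrays indexed by (package, core, thread).
 * Example: with 2 packages x 2 cores x 2 threads, the thread data of
 * package 1 / core 0 / thread 1 lives at index 1*2*2 + 0*2 + 1 = 5.
 */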
208
209 struct cpu_topology {
210         int package_id;
211         int core_id;
212         _Bool first_core_in_package;
213         _Bool first_thread_in_core;
214 };
215
216 struct topology {
217         int max_cpu_id;
218         int num_packages;
219         int num_cores;
220         int num_threads;
221         struct cpu_topology *cpus;
222 } topology;
223
224 struct timeval tv_even, tv_odd, tv_delta;
225
226 enum return_values {
227         OK = 0,
228         ERR_CPU_MIGRATE,
229         ERR_CPU_SAVE_SCHED_AFFINITY,
230         ERR_MSR_IA32_APERF,
231         ERR_MSR_IA32_MPERF,
232         ERR_MSR_SMI_COUNT,
233         ERR_MSR_CORE_C3_RESIDENCY,
234         ERR_MSR_CORE_C6_RESIDENCY,
235         ERR_MSR_CORE_C7_RESIDENCY,
236         ERR_MSR_IA32_THERM_STATUS,
237         ERR_MSR_PKG_C3_RESIDENCY,
238         ERR_MSR_PKG_C6_RESIDENCY,
239         ERR_MSR_PKG_C2_RESIDENCY,
240         ERR_MSR_PKG_C7_RESIDENCY,
241         ERR_MSR_PKG_C8_RESIDENCY,
242         ERR_MSR_PKG_C9_RESIDENCY,
243         ERR_MSR_PKG_C10_RESIDENCY,
244         ERR_MSR_PKG_ENERGY_STATUS,
245         ERR_MSR_PKG_POWER_INFO,
246         ERR_MSR_PP0_ENERGY_STATUS,
247         ERR_MSR_DRAM_ENERGY_STATUS,
248         ERR_MSR_PP1_ENERGY_STATUS,
249         ERR_MSR_PKG_PERF_STATUS,
250         ERR_MSR_DRAM_PERF_STATUS,
251         ERR_MSR_IA32_PACKAGE_THERM_STATUS,
252         ERR_MSR_IA32_TSC,
253         ERR_CPU_NOT_PRESENT,
254         ERR_NO_MSR,
255         ERR_CANT_OPEN_MSR,
256         ERR_CANT_OPEN_FILE,
257         ERR_CANT_READ_NUMBER,
258         ERR_CANT_READ_PROC_STAT,
259         ERR_NO_INVARIANT_TSC,
260         ERR_NO_APERF,
261         ERR_CALLOC,
262         ERR_CPU_ALLOC,
263         ERR_NOT_ROOT,
264         UNSUPPORTED_CPU,
265 };
266
267
268 /*****************************
269  *  MSR Manipulation helpers *
270  *****************************/
271
272 /*
273  * Open an MSR device for reading
274  * May change the scheduling affinity of the current process if multiple_read is 1
275  */
276 static int __attribute__((warn_unused_result))
277 open_msr(int cpu, _Bool multiple_read)
278 {
279         char pathname[32];
280         int fd;
281
282         /*
283          * If we need to do multiple reads, let's migrate to the CPU:
284          * otherwise each call would pay the cost of running on another CPU.
285          *
286          * If we are not yet initialized (cpu_affinity_setsize = 0),
287          * we need to skip this optimisation.
288          */
289         if (multiple_read && cpu_affinity_setsize) {
290                 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
291                 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
292                 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) {
293                         ERROR("Could not migrate to CPU %d", cpu);
294                         return -ERR_CPU_MIGRATE;
295                 }
296         }
297
298         ssnprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu);
299         fd = open(pathname, O_RDONLY);
300         if (fd < 0)
301                 return -ERR_CANT_OPEN_MSR;
302         return fd;
303 }
304
305 /*
306  * Read a single MSR from an open file descriptor
307  */
308 static int __attribute__((warn_unused_result))
309 read_msr(int fd, off_t offset, unsigned long long *msr)
310 {
311         ssize_t retval;
312
313         retval = pread(fd, msr, sizeof *msr, offset);
314
315         if (retval != sizeof *msr) {
316                 ERROR("MSR offset 0x%llx read failed", (unsigned long long)offset);
317                 return -1;
318         }
319         return 0;
320 }
321
322 /*
323  * Open an MSR device, read the requested value and close the device.
324  * This call will not affect the scheduling affinity of this thread.
325  */
326 static int __attribute__((warn_unused_result))
327 get_msr(int cpu, off_t offset, unsigned long long *msr)
328 {
329         ssize_t retval;
330         int fd;
331
332         fd = open_msr(cpu, 0);
333         if (fd < 0)
334                 return fd;
335         retval = read_msr(fd, offset, msr);
336         close(fd);
337         return retval;
338 }
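/*
 * Typical usage (an illustrative sketch, not called as-is at this point):
 *
 *   unsigned long long tsc;
 *   if (get_msr(0, MSR_IA32_TSC, &tsc) != 0)
 *           ERROR("Could not read the TSC of cpu 0");
 */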
339
340
341 /********************************
342  * Raw data acquisition (1 CPU) *
343  ********************************/
344
345 /*
346  * Read all data available for a single CPU
347  *
348  * Core data is shared by all threads of a core: extracted only for the first thread
349  * Package data is shared by all cores of a package: extracted only for the first thread of the first core
350  *
351  * Side effect: migrates to the targeted CPU
352  */
353 static int __attribute__((warn_unused_result))
354 get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
355 {
356         int cpu = t->cpu_id;
357         unsigned long long msr;
358         int msr_fd;
359         int retval = 0;
360
361         msr_fd = open_msr(cpu, 1);
362         if (msr_fd < 0)
363                 return msr_fd;
364
365 #define READ_MSR(msr, dst)                      \
366 do {                                            \
367         if (read_msr(msr_fd, msr, dst)) {       \
368                 retval = -ERR_##msr;            \
369                 goto out;                       \
370         }                                       \
371 } while (0)
372
373         READ_MSR(MSR_IA32_TSC, &t->tsc);
374
375         READ_MSR(MSR_IA32_APERF, &t->aperf);
376         READ_MSR(MSR_IA32_MPERF, &t->mperf);
377
378         READ_MSR(MSR_SMI_COUNT, &msr);
379         t->smi_count = msr & 0xFFFFFFFF;
380
381         /* collect core counters only for 1st thread in core */
382         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) {
383                 retval = 0;
384                 goto out;
385         }
386
387         if (do_core_cstate & (1 << 3))
388                 READ_MSR(MSR_CORE_C3_RESIDENCY, &c->c3);
389         if (do_core_cstate & (1 << 6))
390                 READ_MSR(MSR_CORE_C6_RESIDENCY, &c->c6);
391         if (do_core_cstate & (1 << 7))
392                 READ_MSR(MSR_CORE_C7_RESIDENCY, &c->c7);
393
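	/*
	 * IA32_THERM_STATUS bits 22:16 hold the Digital Readout: the number of
	 * degrees C below the TCC activation temperature. E.g. with
	 * tcc_activation_temp = 100 and a readout of 30, the core is at 70 C.
	 */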
394         if (do_dts) {
395                 READ_MSR(MSR_IA32_THERM_STATUS, &msr);
396                 c->core_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
397         }
398
399         /* collect package counters only for 1st core in package */
400         if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
401                 retval = 0;
402                 goto out;
403         }
404
405         if (do_pkg_cstate & (1 << 2))
406                 READ_MSR(MSR_PKG_C2_RESIDENCY, &p->pc2);
407         if (do_pkg_cstate & (1 << 3))
408                 READ_MSR(MSR_PKG_C3_RESIDENCY, &p->pc3);
409         if (do_pkg_cstate & (1 << 6))
410                 READ_MSR(MSR_PKG_C6_RESIDENCY, &p->pc6);
411         if (do_pkg_cstate & (1 << 7))
412                 READ_MSR(MSR_PKG_C7_RESIDENCY, &p->pc7);
413         if (do_pkg_cstate & (1 << 8))
414                 READ_MSR(MSR_PKG_C8_RESIDENCY, &p->pc8);
415         if (do_pkg_cstate & (1 << 9))
416                 READ_MSR(MSR_PKG_C9_RESIDENCY, &p->pc9);
417         if (do_pkg_cstate & (1 << 10))
418                 READ_MSR(MSR_PKG_C10_RESIDENCY, &p->pc10);
419
420         if (do_rapl & RAPL_PKG) {
421                 READ_MSR(MSR_PKG_ENERGY_STATUS, &msr);
422                 p->energy_pkg = msr & 0xFFFFFFFF;
423         }
424         if (do_rapl & RAPL_CORES) {
425                 READ_MSR(MSR_PP0_ENERGY_STATUS, &msr);
426                 p->energy_cores = msr & 0xFFFFFFFF;
427         }
428         if (do_rapl & RAPL_DRAM) {
429                 READ_MSR(MSR_DRAM_ENERGY_STATUS, &msr);
430                 p->energy_dram = msr & 0xFFFFFFFF;
431         }
432         if (do_rapl & RAPL_GFX) {
433                 READ_MSR(MSR_PP1_ENERGY_STATUS, &msr);
434                 p->energy_gfx = msr & 0xFFFFFFFF;
435         }
436         if (do_rapl & RAPL_PKG_PERF_STATUS) {
437                 READ_MSR(MSR_PKG_PERF_STATUS, &msr);
438                 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
439         }
440         if (do_rapl & RAPL_DRAM_PERF_STATUS) {
441                 READ_MSR(MSR_DRAM_PERF_STATUS, &msr);
442                 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
443         }
444         if (do_ptm) {
445                 READ_MSR(MSR_IA32_PACKAGE_THERM_STATUS, &msr);
446                 p->pkg_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
447         }
448
449 out:
450         close(msr_fd);
451         return retval;
452 }
453
454
455 /**********************************
456  * Evaluating the changes (1 CPU) *
457  **********************************/
458
459 /*
460  * Compute delta = new - old on 32-bit wrapping (modular) counters
461  */
462 #define DELTA_WRAP32(delta, new, old)                   \
463         if (new > old) {                                \
464                 delta = new - old;                      \
465         } else {                                        \
466                 delta = 0x100000000 + new - old;        \
467         }
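/*
 * Example: new = 0x00000010, old = 0xFFFFFFF0 (the 32-bit counter wrapped):
 * delta = 0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20.
 */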
468
469 /*
470  * Compute the delta old->new at the package level
471  * (some fields, e.g. temperature, are copied as-is rather than differenced)
472  */
473 static inline void
474 delta_package(struct pkg_data *delta, const struct pkg_data *new, const struct pkg_data *old)
475 {
476         delta->pc2 = new->pc2 - old->pc2;
477         delta->pc3 = new->pc3 - old->pc3;
478         delta->pc6 = new->pc6 - old->pc6;
479         delta->pc7 = new->pc7 - old->pc7;
480         delta->pc8 = new->pc8 - old->pc8;
481         delta->pc9 = new->pc9 - old->pc9;
482         delta->pc10 = new->pc10 - old->pc10;
483         delta->pkg_temp_c = new->pkg_temp_c;
484
485         DELTA_WRAP32(delta->energy_pkg, new->energy_pkg, old->energy_pkg);
486         DELTA_WRAP32(delta->energy_cores, new->energy_cores, old->energy_cores);
487         DELTA_WRAP32(delta->energy_gfx, new->energy_gfx, old->energy_gfx);
488         DELTA_WRAP32(delta->energy_dram, new->energy_dram, old->energy_dram);
489         DELTA_WRAP32(delta->rapl_pkg_perf_status, new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
490         DELTA_WRAP32(delta->rapl_dram_perf_status, new->rapl_dram_perf_status, old->rapl_dram_perf_status);
491 }
492
493 /*
494  * Compute the delta old->new at the core level
495  * (some fields, e.g. temperature, are copied as-is rather than differenced)
496  */
497 static inline void
498 delta_core(struct core_data *delta, const struct core_data *new, const struct core_data *old)
499 {
500         delta->c3 = new->c3 - old->c3;
501         delta->c6 = new->c6 - old->c6;
502         delta->c7 = new->c7 - old->c7;
503         delta->core_temp_c = new->core_temp_c;
504 }
505
506 /*
507  * Compute the delta old->new at the thread level
508  * core_delta is required for c1 estimation (tsc - c0 - all core cstates)
509  */
510 static inline int __attribute__((warn_unused_result))
511 delta_thread(struct thread_data *delta, const struct thread_data *new, const struct thread_data *old,
512         const struct core_data *core_delta)
513 {
514         delta->tsc = new->tsc - old->tsc;
515
516         /* check for TSC < 1 Mcycles over interval */
517         if (delta->tsc < (1000 * 1000)) {
518                 WARNING("Insanely slow TSC rate, TSC stops in idle? ");
519                 WARNING("You can disable all c-states by booting with \"idle=poll\" ");
520                 WARNING("or just the deep ones with \"processor.max_cstate=1\"");
521                 return -1;
522         }
523
524         delta->c1 = new->c1 - old->c1;
525
526         if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
527                 delta->aperf = new->aperf - old->aperf;
528                 delta->mperf = new->mperf - old->mperf;
529         } else {
530                 if (!aperf_mperf_unstable) {
531                         WARNING("APERF or MPERF went backwards.");
532                         WARNING("Frequency results do not cover the entire interval.");
533                         WARNING("Fix this by running Linux 2.6.30 or later.");
534
535                         aperf_mperf_unstable = 1;
536                 }
537         }
538
539         /*
540          * As counter collection is not atomic,
541          * it is possible for mperf's non-halted cycles + idle states
542          * to exceed TSC's all cycles: show c1 = 0% in that case.
543          */
544         if ((delta->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > delta->tsc)
545                 delta->c1 = 0;
546         else {
547                 /* normal case, derive c1 */
548                 delta->c1 = delta->tsc - delta->mperf - core_delta->c3
549                         - core_delta->c6 - core_delta->c7;
550         }
551
552         if (delta->mperf == 0) {
553                 WARNING("cpu%d MPERF 0!", old->cpu_id);
554                 delta->mperf = 1;       /* divide by 0 protection */
555         }
556
557         delta->smi_count = new->smi_count - old->smi_count;
558
559         return 0;
560 }
561
562 /**********************************
563  * Submitting the results (1 CPU) *
564  **********************************/
565
566 /*
567  * Submit one gauge value
568  */
569 static void
570 turbostat_submit (const char *plugin_instance,
571         const char *type, const char *type_instance,
572         gauge_t value)
573 {
574         value_list_t vl = VALUE_LIST_INIT;
575         value_t v;
576
577         v.gauge = value;
578         vl.values = &v;
579         vl.values_len = 1;
580         sstrncpy (vl.host, hostname_g, sizeof (vl.host));
581         sstrncpy (vl.plugin, PLUGIN_NAME, sizeof (vl.plugin));
582         if (plugin_instance != NULL)
583                 sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance));
584         sstrncpy (vl.type, type, sizeof (vl.type));
585         if (type_instance != NULL)
586                 sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance));
587
588         plugin_dispatch_values (&vl);
589 }
590
591 /*
592  * Submit all data for a single CPU
593  *
594  * Core data is shared by all threads of a core: submitted only for the first thread
595  * Package data is shared by all cores of a package: submitted only for the first thread of the first core
596  */
597 static int
598 submit_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
599 {
600         char name[12];
601         double interval_float;
602
603         interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
604
605         ssnprintf(name, sizeof(name), "cpu%02d", t->cpu_id);
606
607         if (!aperf_mperf_unstable)
608                 turbostat_submit(name, "percent", "c0", 100.0 * t->mperf/t->tsc);
609         if (!aperf_mperf_unstable)
610                 turbostat_submit(name, "percent", "c1", 100.0 * t->c1/t->tsc);
611
612         /*
	 * Average busy frequency in GHz: tsc/interval gives the TSC rate;
	 * scaling by aperf/mperf yields the average frequency while not idle (C0).
	 */
613         if ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc)))
614                 turbostat_submit(NULL, "frequency", name, 1.0 * t->tsc / 1000000000 * t->aperf / t->mperf / interval_float);
615
616         /* SMI */
617         turbostat_submit(NULL, "current", name, t->smi_count);
618
619         /* submit per-core data only for 1st thread in core */
620         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
621                 goto done;
622
623         ssnprintf(name, sizeof(name), "core%02d", c->core_id);
624
625         if (do_core_cstate & (1 << 3))
626                 turbostat_submit(name, "percent", "c3", 100.0 * c->c3/t->tsc);
627         if (do_core_cstate & (1 << 6))
628                 turbostat_submit(name, "percent", "c6", 100.0 * c->c6/t->tsc);
629         if (do_core_cstate & (1 << 7))
630                 turbostat_submit(name, "percent", "c7", 100.0 * c->c7/t->tsc);
631
632         if (do_dts)
633                 turbostat_submit(NULL, "temperature", name, c->core_temp_c);
634
635         /* submit per-package data only for 1st core in package */
636         if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
637                 goto done;
638
639         ssnprintf(name, sizeof(name), "pkg%02d", p->package_id);
640
641         if (do_ptm)
642                 turbostat_submit(NULL, "temperature", name, p->pkg_temp_c);
643
644         if (do_pkg_cstate & (1 << 2))
645                 turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2/t->tsc);
646         if (do_pkg_cstate & (1 << 3))
647                 turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3/t->tsc);
648         if (do_pkg_cstate & (1 << 6))
649                 turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6/t->tsc);
650         if (do_pkg_cstate & (1 << 7))
651                 turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7/t->tsc);
652         if (do_pkg_cstate & (1 << 8))
653                 turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8/t->tsc);
654         if (do_pkg_cstate & (1 << 9))
655                 turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9/t->tsc);
656         if (do_pkg_cstate & (1 << 10))
657                 turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10/t->tsc);
658
659         if (do_rapl) {
660                 if (do_rapl & RAPL_PKG)
661                         turbostat_submit(name, "power", "Pkg_W", p->energy_pkg * rapl_energy_units / interval_float);
662                 if (do_rapl & RAPL_CORES)
663                         turbostat_submit(name, "power", "Cor_W", p->energy_cores * rapl_energy_units / interval_float);
664                 if (do_rapl & RAPL_GFX)
665                         turbostat_submit(name, "power", "GFX_W", p->energy_gfx * rapl_energy_units / interval_float);
666                 if (do_rapl & RAPL_DRAM)
667                         turbostat_submit(name, "power", "RAM_W", p->energy_dram * rapl_energy_units / interval_float);
668         }
669 done:
670         return 0;
671 }
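/*
 * With collectd's naming schema, the values dispatched above appear as e.g.
 * "<host>/turbostat-cpu00/percent-c0" or "<host>/turbostat-pkg00/power-Pkg_W"
 * (an illustration of the identifier layout, not an exhaustive list).
 */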
672
673
674 /**********************************
675  * Looping function over all CPUs *
676  **********************************/
677
678 /*
679  * Check whether a given cpu id is in our set of present CPUs
680  */
681 static int
682 cpu_is_not_present(int cpu)
683 {
684         return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
685 }
686
687 /*
688  * Loop on all CPUs in topological order
689  *
690  * Skip non-present cpus
691  * Return the first error code encountered, or 0
692  */
693 static int __attribute__((warn_unused_result))
694 for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
695         struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
696 {
697         int retval, pkg_no, core_no, thread_no;
698
699         for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
700                 for (core_no = 0; core_no < topology.num_cores; ++core_no) {
701                         for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
702                                 struct thread_data *t;
703                                 struct core_data *c;
704                                 struct pkg_data *p;
705
706                                 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
707
708                                 if (cpu_is_not_present(t->cpu_id))
709                                         continue;
710
711                                 c = GET_CORE(core_base, core_no, pkg_no);
712                                 p = GET_PKG(pkg_base, pkg_no);
713
714                                 retval = func(t, c, p);
715                                 if (retval)
716                                         return retval;
717                         }
718                 }
719         }
720         return 0;
721 }
722
723 /*
724  * Dedicated loop: compute all data deltas for all CPUs
725  *
726  * Skip non-present cpus
727  * Return the first error code encountered, or 0
728  *
729  * Core data is shared by all threads of a core: computed only for the first thread
730  * Package data is shared by all cores of a package: computed only for the first thread of the first core
731  */
732 static int __attribute__((warn_unused_result))
733 for_all_cpus_delta(const struct thread_data *thread_new_base, const struct core_data *core_new_base, const struct pkg_data *pkg_new_base,
734                    const struct thread_data *thread_old_base, const struct core_data *core_old_base, const struct pkg_data *pkg_old_base)
735 {
736         int retval, pkg_no, core_no, thread_no;
737
738         for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
739                 for (core_no = 0; core_no < topology.num_cores; ++core_no) {
740                         for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
741                                 struct thread_data *t_delta;
742                                 const struct thread_data *t_old, *t_new;
743                                 struct core_data *c_delta;
744
745                                 /* Get correct pointers for threads */
746                                 t_delta = GET_THREAD(thread_delta, thread_no, core_no, pkg_no);
747                                 t_new = GET_THREAD(thread_new_base, thread_no, core_no, pkg_no);
748                                 t_old = GET_THREAD(thread_old_base, thread_no, core_no, pkg_no);
749
750                                 /* Skip threads that disappeared */
751                                 if (cpu_is_not_present(t_delta->cpu_id))
752                                         continue;
753
754                                 /* c_delta is always required for delta_thread */
755                                 c_delta = GET_CORE(core_delta, core_no, pkg_no);
756
757                                 /* calculate core delta only for 1st thread in core */
758                                 if (t_new->flags & CPU_IS_FIRST_THREAD_IN_CORE) {
759                                         const struct core_data *c_old, *c_new;
760
761                                         c_new = GET_CORE(core_new_base, core_no, pkg_no);
762                                         c_old = GET_CORE(core_old_base, core_no, pkg_no);
763
764                                         delta_core(c_delta, c_new, c_old);
765                                 }
766
767                                 /* Always calculate thread delta */
768                                 retval = delta_thread(t_delta, t_new, t_old, c_delta);
769                                 if (retval)
770                                         return retval;
771
772                                 /* calculate package delta only for 1st core in package */
773                                 if (t_new->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) {
774                                         struct pkg_data *p_delta;
775                                         const struct pkg_data *p_old, *p_new;
776
777                                         p_delta = GET_PKG(package_delta, pkg_no);
778                                         p_new = GET_PKG(pkg_new_base, pkg_no);
779                                         p_old = GET_PKG(pkg_old_base, pkg_no);
780
781                                         delta_package(p_delta, p_new, p_old);
782                                 }
783                         }
784                 }
785         }
786         return 0;
787 }
788
789
790 static void
791 free_all_buffers(void)
792 {
793         allocated = 0;
794         initialized = 0;
795
796         CPU_FREE(cpu_present_set);
797         cpu_present_set = NULL;
798         cpu_present_setsize = 0;
799
800         CPU_FREE(cpu_affinity_set);
801         cpu_affinity_set = NULL;
802         cpu_affinity_setsize = 0;
803
804         CPU_FREE(cpu_saved_affinity_set);
805         cpu_saved_affinity_set = NULL;
806         cpu_saved_affinity_setsize = 0;
807
808         free(thread_even);
809         free(core_even);
810         free(package_even);
811
812         thread_even = NULL;
813         core_even = NULL;
814         package_even = NULL;
815
816         free(thread_odd);
817         free(core_odd);
818         free(package_odd);
819
820         thread_odd = NULL;
821         core_odd = NULL;
822         package_odd = NULL;
823
824         free(thread_delta);
825         free(core_delta);
826         free(package_delta);
827
828         thread_delta = NULL;
829         core_delta = NULL;
830         package_delta = NULL;
831 }
832
833
834 /****************
835  * File helpers *
836  ****************/
837
838 /*
839  * Read a single int from a file.
840  */
841 static int __attribute__ ((format(printf,1,2)))
842 parse_int_file(const char *fmt, ...)
843 {
844         va_list args;
845         char path[PATH_MAX];
846         FILE *filep;
847         int value;
848
849         va_start(args, fmt);
850         vsnprintf(path, sizeof(path), fmt, args);
851         va_end(args);
852         filep = fopen(path, "r");
853         if (!filep) {
854                 ERROR("%s: open failed", path);
855                 return -ERR_CANT_OPEN_FILE;
856         }
857         if (fscanf(filep, "%d", &value) != 1) {
858                 ERROR("%s: failed to parse number from file", path);
                fclose(filep);
859                 return -ERR_CANT_READ_NUMBER;
860         }
861         fclose(filep);
862         return value;
863 }
864
865 static int
866 get_threads_on_core(int cpu)
867 {
868         char path[80];
869         FILE *filep;
870         int sib1, sib2;
871         int matches;
872         char character;
873
874         ssnprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
875         filep = fopen(path, "r");
876         if (!filep) {
877                 ERROR("%s: open failed", path);
878                 return -ERR_CANT_OPEN_FILE;
879         }
880         /*
881          * file format:
882          * if a pair of numbers with a character between them: 2 siblings (e.g. 1-2 or 1,4)
883          * otherwise 1 sibling (self).
884          */
885         matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
886
887         fclose(filep);
888
889         if (matches == 3)
890                 return 2;
891         else
892                 return 1;
893 }
894
895 /*
896  * run func(cpu) on every cpu in /proc/stat
897  * return 0, or the first error returned by func
898  */
899 static int __attribute__((warn_unused_result))
900 for_all_proc_cpus(int (func)(int))
901 {
902         FILE *fp;
903         int cpu_num;
904         int retval;
905
906         fp = fopen("/proc/stat", "r");
907         if (!fp) {
908                 ERROR("Failed to open /proc/stat");
909                 return -ERR_CANT_OPEN_FILE;
910         }
911
912         retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
913         if (retval != 0) {
914                 ERROR("Failed to parse /proc/stat");
915                 fclose(fp);
916                 return -ERR_CANT_READ_PROC_STAT;
917         }
918
919         while (1) {
920                 retval = fscanf(fp, "cpu%d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
921                 if (retval != 1)
922                         break;
923
924                 retval = func(cpu_num);
925                 if (retval) {
926                         fclose(fp);
927                         return(retval);
928                 }
929         }
930         fclose(fp);
931         return 0;
932 }
933
934 /*
935  * Update the stored topology.max_cpu_id
936  */
937 static int
938 update_max_cpu_id(int cpu)
939 {
940         if (topology.max_cpu_id < cpu)
941                 topology.max_cpu_id = cpu;
942         return 0;
943 }
944
945 static int
946 mark_cpu_present(int cpu)
947 {
948         CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
949         return 0;
950 }
951
952
953 static int setup_all_buffers(void);
954
955 static int
956 turbostat_read(user_data_t * not_used)
957 {
958         int ret;
959
960         if (!allocated) {
961                 if ((ret = setup_all_buffers()) < 0)
962                         return ret;
963         }
964
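	/*
	 * If any cpu listed in /proc/stat is missing from our present set,
	 * the topology changed (e.g. cpu hotplug): rebuild all the buffers.
	 */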
965         if (for_all_proc_cpus(cpu_is_not_present)) {
966                 free_all_buffers();
967                 if ((ret = setup_all_buffers()) < 0)
968                         return ret;
969                 if (for_all_proc_cpus(cpu_is_not_present))
970                         return -ERR_CPU_NOT_PRESENT;
971         }
972
973         /* Saving the scheduling affinity, as it will be modified by get_counters */
974         if (sched_getaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set) != 0)
975                 return -ERR_CPU_SAVE_SCHED_AFFINITY;
976
977         if (!initialized) {
978                 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
979                         goto out;
980                 gettimeofday(&tv_even, (struct timezone *)NULL);
981                 is_even = 1;
982                 initialized = 1;
983                 ret = 0;
984                 goto out;
985         }
986
987         if (is_even) {
988                 if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
989                         goto out;
990                 gettimeofday(&tv_odd, (struct timezone *)NULL);
991                 is_even = 0;
992                 timersub(&tv_odd, &tv_even, &tv_delta);
993                 if ((ret = for_all_cpus_delta(ODD_COUNTERS, EVEN_COUNTERS)) < 0)
994                         goto out;
995                 if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
996                         goto out;
997         } else {
998                 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
999                         goto out;
1000                 gettimeofday(&tv_even, (struct timezone *)NULL);
1001                 is_even = 1;
1002                 timersub(&tv_even, &tv_odd, &tv_delta);
1003                 if ((ret = for_all_cpus_delta(EVEN_COUNTERS, ODD_COUNTERS)) < 0)
1004                         goto out;
1005                 if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
1006                         goto out;
1007         }
1008         ret = 0;
1009 out:
1010         /*
1011          * Let's restore the affinity
1012          * This might fail if the number of CPUs changed, but we can't do anything about it in that case.
1013          */
1014         (void)sched_setaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set);
1015         return ret;
1016 }
1017
1018 static int __attribute__((warn_unused_result))
1019 check_dev_msr()
1020 {
1021         struct stat sb;
1022
1023         if (stat("/dev/cpu/0/msr", &sb)) {
1024                 ERROR("no /dev/cpu/0/msr, try \"# modprobe msr\"");
1025                 return -ERR_NO_MSR;
1026         }
1027         return 0;
1028 }
1029
1030 static int __attribute__((warn_unused_result))
1031 check_super_user()
1032 {
1033         if (getuid() != 0) {
1034                 ERROR("must be root");
1035                 return -ERR_NOT_ROOT;
1036         }
1037         return 0;
1038 }
1039
1040 /*
1041  * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
1042  * the Thermal Control Circuit (TCC) activates.
1043  * This is usually equal to tjMax.
1044  *
1045  * Older processors do not have this MSR, so there we guess,
1046  * but also allow a configuration override with "TCCActivationTemp".
1047  *
1048  * Several MSR temperature values are in units of degrees-C
1049  * below this value, including the Digital Thermal Sensor (DTS),
1050  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
1051  */
1052 static int __attribute__((warn_unused_result))
1053 set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1054 {
1055         unsigned long long msr;
1056         unsigned int target_c_local;
1057
1058         /* tcc_activation_temp is used only for dts or ptm */
1059         if (!(do_dts || do_ptm))
1060                 return 0;
1061
1062         /* this is a per-package concept */
1063         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1064                 return 0;
1065
1066         if (tcc_activation_temp != 0) {
1067                 p->tcc_activation_temp = tcc_activation_temp;
1068                 return 0;
1069         }
1070
1071         if (get_msr(t->cpu_id, MSR_IA32_TEMPERATURE_TARGET, &msr))
1072                 goto guess;
1073
1074         target_c_local = (msr >> 16) & 0xFF;
1075
1076         if (!target_c_local)
1077                 goto guess;
1078
1079         p->tcc_activation_temp = target_c_local;
1080
1081         return 0;
1082
1083 guess:
1084         p->tcc_activation_temp = TJMAX_DEFAULT;
1085         WARNING("cpu%d: Guessing tjMax %d C, Please use TCCActivationTemp to specify",
1086                 t->cpu_id, p->tcc_activation_temp);
1087
1088         return 0;
1089 }
1090
1091 /*
1092  * Identify the functionality of the CPU
1093  */
1094 static int __attribute__((warn_unused_result))
1095 probe_cpu()
1096 {
1097         unsigned int eax, ebx, ecx, edx, max_level;
1098         unsigned int fms, family, model;
1099
1100         /* CPUID(0):
1101          * - EAX: Maximum Input Value for Basic CPUID Information
1102          * - EBX: "Genu" (0x756e6547)
1103          * - EDX: "ineI" (0x49656e69)
1104          * - ECX: "ntel" (0x6c65746e)
1105          */
1106         max_level = ebx = ecx = edx = 0;
1107         __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
1108         if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
1109                 ERROR("Unsupported CPU");
1110                 return -UNSUPPORTED_CPU;
1111         }
1112
1113         /* CPUID(1):
1114          * - EAX: Version Information: Type, Family, Model, and Stepping ID
1115          *  + 4-7:   Model ID
1116          *  + 8-11:  Family ID
1117          *  + 12-13: Processor type
1118          *  + 16-19: Extended Model ID
1119          *  + 20-27: Extended Family ID
1120          * - EDX: Feature Information:
1121          *  + 5: Support for MSR read/write operations
1122          */
1123         fms = ebx = ecx = edx = 0;
1124         __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1125         family = (fms >> 8) & 0xf;
1126         model = (fms >> 4) & 0xf;
1127         if (family == 0xf)
1128                 family += (fms >> 20) & 0xf;
1129         if (family == 6 || family == 0xf)
1130                 model += ((fms >> 16) & 0xf) << 4;
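	/*
	 * Worked example: fms = 0x000306C3 gives family = 0x6 and model = 0xC;
	 * since family == 6, model += (0x3 << 4), i.e. model = 0x3C (Haswell).
	 */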
1131         if (!(edx & (1 << 5))) {
1132                 ERROR("CPUID: no MSR");
1133                 return -ERR_NO_MSR;
1134         }
1135
1136         /*
1137          * CPUID(0x80000000):
1138          * - EAX: Maximum Input Value for Extended Function CPUID Information
1139          *
1140  * This allows us to verify whether CPUID(0x80000007) can be called
1141          *
1142          * This check is valid for both Intel and AMD.
1143          */
1144         max_level = ebx = ecx = edx = 0;
1145         __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
1146         if (max_level < 0x80000007) {
1147                 ERROR("CPUID: no invariant TSC (max_level 0x%x)", max_level);
1148                 return -ERR_NO_INVARIANT_TSC;
1149         }
1150
1151         /*
1152          * CPUID(0x80000007):
1153          * - EDX:
1154          *  + 8: Invariant TSC available if set
1155          *
1156          * This check is valid for both Intel and AMD
1157          */
1158         eax = ebx = ecx = edx = 0;
1159         __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
1160         if (!(edx & (1 << 8))) {
1161                 ERROR("No invariant TSC");
1162                 return -ERR_NO_INVARIANT_TSC;
1163         }
1164
1165         /*
1166          * CPUID(6):
1167          * - EAX:
1168          *  + 0: Digital temperature sensor is supported if set
1169          *  + 6: Package thermal management is supported if set
1170          * - ECX:
1171          *  + 0: Hardware Coordination Feedback Capability (Presence of IA32_MPERF and IA32_APERF).
1172          *  + 3: The processor supports performance-energy bias preference if set.
1173          *       It also implies the presence of a new architectural MSR called IA32_ENERGY_PERF_BIAS
1174          *
1175          * This check is valid for both Intel and AMD
1176          */
1177         eax = ebx = ecx = edx = 0;
1178         __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
1179         do_dts = eax & (1 << 0);
1180         do_ptm = eax & (1 << 6);
1181         if (!(ecx & (1 << 0))) {
1182                 ERROR("No APERF");
1183                 return -ERR_NO_APERF;
1184         }
1185
1186         /*
1187          * Enable or disable C states depending on the model and family
1188          */
1189         if (family == 6) {
1190                 switch (model) {
1191                 /* Atom (partial) */
1192                 case 0x27:
1193                         do_core_cstate = 0;
1194                         do_pkg_cstate = (1 << 2) | (1 << 4) | (1 << 6);
1195                         break;
1196                 /* Silvermont */
1197                 case 0x37: /* BYT */
1198                 case 0x4A:
1199                 case 0x4D: /* AVN */
1200                 case 0x5A:
1201                 case 0x5D:
1202                         do_core_cstate = (1 << 1) | (1 << 6);
1203                         do_pkg_cstate = (1 << 6);
1204                         break;
1205                 /* Nehalem */
1206                 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
1207                 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
1208                 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
1209                 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1210                         do_core_cstate = (1 << 3) | (1 << 6);
1211                         do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1212                         break;
1213                 /* Westmere */
1214                 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
1215                 case 0x2C: /* Westmere EP - Gulftown */
1216                 case 0x2F: /* Westmere-EX Xeon - Eagleton */
1217                         do_core_cstate = (1 << 3) | (1 << 6);
1218                         do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1219                         break;
1220                 /* Sandy Bridge */
1221                 case 0x2A: /* SNB */
1222                 case 0x2D: /* SNB Xeon */
1223                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1224                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
1225                         break;
1226                 /* Ivy Bridge */
1227                 case 0x3A: /* IVB */
1228                 case 0x3E: /* IVB Xeon */
1229                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1230                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
1231                         break;
1232                 /* Haswell */
1233                 case 0x3C: /* HSW */
1234                 case 0x3F: /* HSW */
1235                 case 0x46: /* HSW */
1236                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1237                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
1238                         break;
1239                 case 0x45: /* HSW */
1240                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1241                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
1242                         break;
1243                 /* Broadwell */
1244                 case 0x4F: /* BDW */
1245                 case 0x56: /* BDX-DE */
1246                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1247                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
1248                         break;
1249                 case 0x3D: /* BDW */
1250                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1251                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
1252                         break;
1253                 default:
1254                         ERROR("Unsupported CPU");
1255                 }
1256                 switch (model) {
1257                 case 0x2A:
1258                 case 0x3A:
1259                 case 0x3C:
1260                 case 0x45:
1261                 case 0x46:
1262                         do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_PKG_POWER_INFO | RAPL_GFX;
1263                         break;
1264                 case 0x3F:
1265                         do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM | RAPL_DRAM_PERF_STATUS;
1266                         break;
1267                 case 0x2D:
1268                 case 0x3E:
1269                         do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_PKG_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM | RAPL_DRAM_PERF_STATUS;
1270                         break;
1271                 case 0x37:
1272                 case 0x4D:
1273                         do_rapl = RAPL_PKG | RAPL_CORES;
1274                         break;
1275                 default:
1276                         do_rapl = 0;
1277                 }
1278         } else {
1279                 ERROR("Unsupported CPU");
1280                 return -UNSUPPORTED_CPU;
1281         }
1282
1283         if (do_rapl) {
1284                 unsigned long long msr;
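		/*
		 * MSR_RAPL_POWER_UNIT bits 12:8 hold the Energy Status Unit (ESU):
		 * energy is counted in multiples of 1/2^ESU Joules, e.g. ESU = 16
		 * gives 1/65536 J (~15.3 uJ) per count. Silvermont (model 0x37)
		 * instead counts in multiples of 2^ESU microjoules.
		 */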
1285                 if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
1286                         return 0;
1287
1288                 if (model == 0x37)
1289                         rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
1290                 else
1291                         rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
1292         }
1293
1294         return 0;
1295 }
1296
1297 static int __attribute__((warn_unused_result))
1298 allocate_cpu_set(cpu_set_t ** set, size_t * size) {
1299         *set = CPU_ALLOC(topology.max_cpu_id + 1);
1300         if (*set == NULL) {
1301                 ERROR("Unable to allocate CPU state");
1302                 return -ERR_CPU_ALLOC;
1303         }
1304         *size = CPU_ALLOC_SIZE(topology.max_cpu_id + 1);
1305         CPU_ZERO_S(*size, *set);
1306         return 0;
1307 }
1308
1309 /*
1310  * Build a local representation of the cpu distribution
1311  */
1312 static int __attribute__((warn_unused_result))
1313 topology_probe()
1314 {
1315         int i;
1316         int ret;
1317         int max_package_id, max_core_id, max_thread_id;
1318         max_package_id = max_core_id = max_thread_id = 0;
1319
1320         /* Clean topology */
1321         free(topology.cpus);
1322         memset(&topology, 0, sizeof(topology));
1323
1324         /* Can't fail: update_max_cpu_id always returns 0 (but /proc/stat must be readable) */
1325         assert(for_all_proc_cpus(update_max_cpu_id) == 0);
1326
1327         topology.cpus = calloc(1, (topology.max_cpu_id  + 1) * sizeof(struct cpu_topology));
1328         if (topology.cpus == NULL) {
1329                 ERROR("Unable to allocate memory for cpu topology");
1330                 return -ERR_CALLOC;
1331         }
1332
1333         ret = allocate_cpu_set(&cpu_present_set, &cpu_present_setsize);
1334         if (ret != 0)
1335                 goto err;
1336         ret = allocate_cpu_set(&cpu_affinity_set, &cpu_affinity_setsize);
1337         if (ret != 0)
1338                 goto err;
1339         ret = allocate_cpu_set(&cpu_saved_affinity_set, &cpu_saved_affinity_setsize);
1340         if (ret != 0)
1341                 goto err;
1342
1343         /* Can't fail: mark_cpu_present always returns 0 (but /proc/stat must be readable) */
1344         assert(for_all_proc_cpus(mark_cpu_present) == 0);
1345
1346         /*
1347          * For all online cpus:
1348          * find max_package_id, max_core_id and the max thread count
1349          */
1350         for (i = 0; i <= topology.max_cpu_id; ++i) {
1351                 int num_threads;
1352                 struct cpu_topology *cpu = &topology.cpus[i];
1353
1354                 if (cpu_is_not_present(i)) {
1355                         WARNING("cpu%d NOT PRESENT", i);
1356                         continue;
1357                 }
1358
1359                 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", i);
1360                 if (ret < 0)
1361                         goto err;
1362                 else
1363                         cpu->package_id = ret;
1364                 if (cpu->package_id > max_package_id)
1365                         max_package_id = cpu->package_id;
1366
1367                 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", i);
1368                 if (ret < 0)
1369                         goto err;
1370                 else
1371                         cpu->core_id = ret;
1372                 if (cpu->core_id > max_core_id)
1373                         max_core_id = cpu->core_id;
1374                 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", i);
1375                 if (ret < 0)
1376                         goto err;
1377                 else if (ret == i) /* first listed sibling == self: first core in package */
1378                         cpu->first_core_in_package = 1;
1379
1380                 ret = get_threads_on_core(i);
1381                 if (ret < 0)
1382                         goto err;
1383                 else
1384                         num_threads = ret;
1385                 if (num_threads > max_thread_id)
1386                         max_thread_id = num_threads;
1387                 if (num_threads > 1) {
1388                         ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", i);
1389                         if (ret < 0)
1390                                 goto err;
1391                         else if (ret == i) /* first listed sibling == self: first thread in core */
1392                                 cpu->first_thread_in_core = 1;
1393                 } else {
                        /* A lone thread is trivially the first thread of its core */
                        cpu->first_thread_in_core = 1;
                }
1394                 DEBUG("cpu %d pkg %d core %d\n",
1395                         i, cpu->package_id, cpu->core_id);
1396         }
1397         /* Num is max + 1 (need to count 0) */
1398         topology.num_packages = max_package_id + 1;
1399         topology.num_cores = max_core_id + 1;
1400         topology.num_threads = max_thread_id + 1;
1401
1402         return 0;
1403 err:
1404         free(topology.cpus);
1405         return ret;
1406 }
1407
1408 static int
1409 allocate_counters(struct thread_data **threads, struct core_data **cores, struct pkg_data **packages)
1410 {
1411         int i;
1412         int total_threads, total_cores;
1413
1414         total_threads = topology.num_threads * topology.num_cores * topology.num_packages;
1415         *threads = calloc(total_threads, sizeof(struct thread_data));
1416         if (*threads == NULL)
1417                 goto err;
1418
1419         for (i = 0; i < total_threads; ++i)
1420                 (*threads)[i].cpu_id = -1;
1421
1422         total_cores = topology.num_cores * topology.num_packages;
1423         *cores = calloc(total_cores, sizeof(struct core_data));
1424         if (*cores == NULL)
1425                 goto err_clean_threads;
1426
1427         for (i = 0; i < total_cores; ++i)
1428                 (*cores)[i].core_id = -1;
1429
1430         *packages = calloc(topology.num_packages, sizeof(struct pkg_data));
1431         if (*packages == NULL)
1432                 goto err_clean_cores;
1433
1434         for (i = 0; i < topology.num_packages; i++)
1435                 (*packages)[i].package_id = i;
1436
1437         return 0;
1438
1439 err_clean_cores:
1440         free(*cores);
1441 err_clean_threads:
1442         free(*threads);
1443 err:
1444         ERROR("calloc counters");
1445         return -ERR_CALLOC;
1446 }
1447
1448 /*
1449  * init_counter()
1450  *
1451  * set cpu_id, core_id, package_id
1452  * set the FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE flags
1453  */
1456 static int
1457 init_counter(struct thread_data *thread_base, struct core_data *core_base,
1458         struct pkg_data *pkg_base, int cpu_id)
1459 {
1460         struct thread_data *t;
1461         struct core_data *c;
1462         struct pkg_data *p;
1463         struct cpu_topology *cpu = &topology.cpus[cpu_id];
1464
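	/*
	 * Thread slot 0 is reserved for the first thread of the core;
	 * any other hyper-threaded sibling lands in slot 1.
	 */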
1465         t = GET_THREAD(thread_base, !(cpu->first_thread_in_core), cpu->core_id, cpu->package_id);
1466         c = GET_CORE(core_base, cpu->core_id, cpu->package_id);
1467         p = GET_PKG(pkg_base, cpu->package_id);
1468
1469         t->cpu_id = cpu_id;
1470         if (cpu->first_thread_in_core)
1471                 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
1472         if (cpu->first_core_in_package)
1473                 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
1474
1475         c->core_id = cpu->core_id;
1476         p->package_id = cpu->package_id;
1477
1478         return 0;
1479 }
1480
1481
1482 static int
1483 initialize_counters(void)
1484 {
1485         int ret;
1486         int cpu_id;
1487
1488         for (cpu_id = 0; cpu_id <= topology.max_cpu_id; ++cpu_id) {
1489                 if (cpu_is_not_present(cpu_id)) {
1490                         continue;
1491                 }
1492
1493                 ret = init_counter(EVEN_COUNTERS, cpu_id);
1494                 if (ret < 0)
1495                         return ret;
1496                 ret = init_counter(ODD_COUNTERS, cpu_id);
1497                 if (ret < 0)
1498                         return ret;
1499                 ret = init_counter(DELTA_COUNTERS, cpu_id);
1500                 if (ret < 0)
1501                         return ret;
1502         }
1503         return 0;
1504 }
1505
1506 #define DO_OR_GOTO_ERR(something) \
1507 do {                         \
1508         ret = (something);     \
1509         if (ret < 0)         \
1510                 goto err;    \
1511 } while (0)
1512
1513 static int setup_all_buffers(void)
1514 {
1515         int ret;
1516
1517         DO_OR_GOTO_ERR(topology_probe());
1518         DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
1519         DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
1520         DO_OR_GOTO_ERR(allocate_counters(&thread_delta, &core_delta, &package_delta));
1521         DO_OR_GOTO_ERR(initialize_counters());
1522         DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
1523         DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, ODD_COUNTERS));
1524
1525         allocated = 1;
1526         return 0;
1527 err:
1528         free_all_buffers();
1529         return ret;
1530 }
1531
1532 static int
1533 turbostat_init(void)
1534 {
1535         int ret;
1536
1537         DO_OR_GOTO_ERR(check_super_user());
1538         DO_OR_GOTO_ERR(probe_cpu());
1539         DO_OR_GOTO_ERR(check_dev_msr());
1540         DO_OR_GOTO_ERR(setup_all_buffers());
1541
1542         plugin_register_complex_read(NULL, PLUGIN_NAME, turbostat_read, NULL, NULL);
1543
1544         return 0;
1545 err:
1546         free_all_buffers();
1547         return ret;
1548 }
1549
1550 static const char *config_keys[] =
1551 {
1552         "TCCActivationTemp",
1553 };
1554 static const int config_keys_num = STATIC_ARRAY_SIZE (config_keys);
1555
1556 static int
1557 turbostat_config(const char *key, const char *value)
1558 {
1559         long unsigned int tmp_val;
1560         char *end;
1561
1562         if (strcasecmp("TCCActivationTemp", key) == 0) {
1563                 tmp_val = strtoul(value, &end, 0);
1564                 if (*end != '\0' || tmp_val > UINT_MAX)
1565                         return -1;
1566                 tcc_activation_temp = (unsigned int) tmp_val;
1567         } else {
1568                 return -1;
1569         }
1570         return 0;
1571 }
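/*
 * Example collectd.conf snippet (a sketch; TCCActivationTemp is the only
 * supported option and is normally auto-detected via MSR_IA32_TEMPERATURE_TARGET):
 *
 *   LoadPlugin turbostat
 *   <Plugin turbostat>
 *     TCCActivationTemp "100"
 *   </Plugin>
 */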
1572
1573 void module_register(void);
1574 void module_register(void)
1575 {
1576         plugin_register_init(PLUGIN_NAME, turbostat_init);
1577         plugin_register_config(PLUGIN_NAME, turbostat_config, config_keys, config_keys_num);
1578 }