1 /*
2  * turbostat -- Log CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors for collectd.
4  *
5  * Based on the 'turbostat' tool of the Linux kernel, found at
6  * linux/tools/power/x86/turbostat/turbostat.c:
7  * ----
8  * Copyright (c) 2013 Intel Corporation.
9  * Len Brown <len.brown@intel.com>
10  *
11  * This program is free software; you can redistribute it and/or modify it
12  * under the terms and conditions of the GNU General Public License,
13  * version 2, as published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  * more details.
19  *
20  * You should have received a copy of the GNU General Public License along with
21  * this program; if not, write to the Free Software Foundation, Inc.,
22  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23  * ----
24  * Ported to collectd by Vincent Brillault <git@lerya.net>
25  */
26
27 /*
28  * _GNU_SOURCE is required because of the following functions:
29  * - CPU_ISSET_S
30  * - CPU_ZERO_S
31  * - CPU_SET_S
32  * - CPU_FREE
33  * - CPU_ALLOC
34  * - CPU_ALLOC_SIZE
35  */
36 #define _GNU_SOURCE
37
38 #include <asm/msr-index.h>
39 #include <stdarg.h>
40 #include <stdio.h>
41 #include <err.h>
42 #include <unistd.h>
43 #include <sys/types.h>
44 #include <sys/wait.h>
45 #include <sys/stat.h>
46 #include <sys/resource.h>
47 #include <fcntl.h>
48 #include <signal.h>
49 #include <sys/time.h>
50 #include <stdlib.h>
51 #include <dirent.h>
52 #include <string.h>
53 #include <ctype.h>
54 #include <sched.h>
55 #include <cpuid.h>
56
57 #include "collectd.h"
58 #include "common.h"
59 #include "plugin.h"
60
61 #define PLUGIN_NAME "turbostat"
62
63 /*
64  * This tool uses the Model-Specific Registers (MSRs) present on Intel processors.
65  * The general description of each of these registers, depending on the architecture,
66  * can be found in the Intel® 64 and IA-32 Architectures Software Developer's Manual,
67  * Volume 3 Chapter 35.
68  */
69
70 /*
71  * If set, aperf_mperf_unstable disables a/mperf based stats.
72  * This includes: C0 & C1 states, frequency
73  *
74  * This value is automatically set if mperf or aperf go backward
75  */
76 static _Bool aperf_mperf_unstable;
77
78 /*
79  * Bitmask of the list of core C states supported by the processor.
80  * Currently supported C-states (by this plugin): 3, 6, 7
81  */
82 static unsigned int do_core_cstate;
83
84 /*
85  * Bitmask of the list of package C states supported by the processor.
86  * Currently supported C-states (by this plugin): 2, 3, 6, 7, 8, 9, 10
87  */
88 static unsigned int do_pkg_cstate;
89
90 /*
91  * Boolean indicating if the processor supports 'Digital temperature sensor'
92  * This feature enables the monitoring of the temperature of each core
93  *
94  * This feature has two limitations:
95  *  - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong
96  *  - Temperatures above the tcc_activation_temp are not recorded
97  */
98 static _Bool do_dts;
99
100 /*
101  * Boolean indicating if the processor supports 'Package thermal management'
102  * This feature allows the monitoring of the temperature of each package
103  *
104  * This feature has two limitations:
105  *  - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong
106  *  - Temperatures above the tcc_activation_temp are not recorded
107  */
108 static _Bool do_ptm;
109
110 /*
111  * Thermal Control Circuit Activation Temperature as configured by the user.
112  * This overrides the automated detection via MSR_IA32_TEMPERATURE_TARGET
113  * and should only be used if the automated detection fails.
114  */
115 static unsigned int tcc_activation_temp;
116
117 static unsigned int do_rapl;
118 static double rapl_energy_units;
119
120 #define RAPL_PKG                (1 << 0)
121                                         /* 0x610 MSR_PKG_POWER_LIMIT */
122                                         /* 0x611 MSR_PKG_ENERGY_STATUS */
123 #define RAPL_PKG_PERF_STATUS    (1 << 1)
124                                         /* 0x613 MSR_PKG_PERF_STATUS */
125 #define RAPL_PKG_POWER_INFO     (1 << 2)
126                                         /* 0x614 MSR_PKG_POWER_INFO */
127
128 #define RAPL_DRAM               (1 << 3)
129                                         /* 0x618 MSR_DRAM_POWER_LIMIT */
130                                         /* 0x619 MSR_DRAM_ENERGY_STATUS */
131                                         /* 0x61c MSR_DRAM_POWER_INFO */
132 #define RAPL_DRAM_PERF_STATUS   (1 << 4)
133                                         /* 0x61b MSR_DRAM_PERF_STATUS */
134
135 #define RAPL_CORES              (1 << 5)
136                                         /* 0x638 MSR_PP0_POWER_LIMIT */
137                                         /* 0x639 MSR_PP0_ENERGY_STATUS */
138 #define RAPL_CORE_POLICY        (1 << 6)
139                                         /* 0x63a MSR_PP0_POLICY */
140
141
142 #define RAPL_GFX                (1 << 7)
143                                         /* 0x640 MSR_PP1_POWER_LIMIT */
144                                         /* 0x641 MSR_PP1_ENERGY_STATUS */
145                                         /* 0x642 MSR_PP1_POLICY */
146 #define TJMAX_DEFAULT   100
147
148 cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_saved_affinity_set;
149 size_t cpu_present_setsize, cpu_affinity_setsize, cpu_saved_affinity_setsize;
150
151 struct thread_data {
152         unsigned long long tsc;
153         unsigned long long aperf;
154         unsigned long long mperf;
155         unsigned long long c1;
156         unsigned int smi_count;
157         unsigned int cpu_id;
158         unsigned int flags;
159 #define CPU_IS_FIRST_THREAD_IN_CORE     0x2
160 #define CPU_IS_FIRST_CORE_IN_PACKAGE    0x4
161 } *thread_delta, *thread_even, *thread_odd;
162
163 struct core_data {
164         unsigned long long c3;
165         unsigned long long c6;
166         unsigned long long c7;
167         unsigned int core_temp_c;
168         unsigned int core_id;
169 } *core_delta, *core_even, *core_odd;
170
171 struct pkg_data {
172         unsigned long long pc2;
173         unsigned long long pc3;
174         unsigned long long pc6;
175         unsigned long long pc7;
176         unsigned long long pc8;
177         unsigned long long pc9;
178         unsigned long long pc10;
179         unsigned int package_id;
180         unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
181         unsigned int energy_dram;       /* MSR_DRAM_ENERGY_STATUS */
182         unsigned int energy_cores;      /* MSR_PP0_ENERGY_STATUS */
183         unsigned int energy_gfx;        /* MSR_PP1_ENERGY_STATUS */
184         unsigned int rapl_pkg_perf_status;      /* MSR_PKG_PERF_STATUS */
185         unsigned int rapl_dram_perf_status;     /* MSR_DRAM_PERF_STATUS */
186         unsigned int tcc_activation_temp;
187         unsigned int pkg_temp_c;
188 } *package_delta, *package_even, *package_odd;
189
190 #define DELTA_COUNTERS thread_delta, core_delta, package_delta
191 #define ODD_COUNTERS thread_odd, core_odd, package_odd
192 #define EVEN_COUNTERS thread_even, core_even, package_even
193 static _Bool is_even = 1;
194
195 static _Bool allocated = 0;
196 static _Bool initialized = 0;
197
198 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
199         (thread_base + \
200                 (pkg_no) * topology.num_cores * topology.num_threads + \
201                 (core_no) * topology.num_threads + \
202                 (thread_no))
203 #define GET_CORE(core_base, core_no, pkg_no) \
204         (core_base + \
205                 (pkg_no) * topology.num_cores + \
206                 (core_no))
207 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
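/*
 * Illustration only (hypothetical sizes): with 2 packages, 4 cores per
 * package and 2 threads per core, the counters of package 1, core 2,
 * thread 0 live at flat index 1*4*2 + 2*2 + 0 = 12, i.e.
 *
 *     struct thread_data *t = GET_THREAD(thread_even, 0, 2, 1);
 *
 * The real dimensions are filled in by topology_probe() below.
 */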
208
209 struct cpu_topology {
210         int package_id;
211         int core_id;
212         _Bool first_core_in_package;
213         _Bool first_thread_in_core;
214 };
215
216 struct topology {
217         int max_cpu_id;
218         int num_packages;
219         int num_cores;
220         int num_threads;
221         struct cpu_topology *cpus;
222 } topology;
223
224 struct timeval tv_even, tv_odd, tv_delta;
225
226 enum return_values {
227         OK = 0,
228         ERR_CPU_MIGRATE,
229         ERR_CPU_SAVE_SCHED_AFFINITY,
230         ERR_MSR_IA32_APERF,
231         ERR_MSR_IA32_MPERF,
232         ERR_MSR_SMI_COUNT,
233         ERR_MSR_CORE_C3_RESIDENCY,
234         ERR_MSR_CORE_C6_RESIDENCY,
235         ERR_MSR_CORE_C7_RESIDENCY,
236         ERR_MSR_IA32_THERM_STATUS,
237         ERR_MSR_PKG_C3_RESIDENCY,
238         ERR_MSR_PKG_C6_RESIDENCY,
239         ERR_MSR_PKG_C2_RESIDENCY,
240         ERR_MSR_PKG_C7_RESIDENCY,
241         ERR_MSR_PKG_C8_RESIDENCY,
242         ERR_MSR_PKG_C9_RESIDENCY,
243         ERR_MSR_PKG_C10_RESIDENCY,
244         ERR_MSR_PKG_ENERGY_STATUS,
245         ERR_MSR_PKG_POWER_INFO,
246         ERR_MSR_PP0_ENERGY_STATUS,
247         ERR_MSR_DRAM_ENERGY_STATUS,
248         ERR_MSR_PP1_ENERGY_STATUS,
249         ERR_MSR_PKG_PERF_STATUS,
250         ERR_MSR_DRAM_PERF_STATUS,
251         ERR_MSR_IA32_PACKAGE_THERM_STATUS,
252         ERR_MSR_IA32_TSC,
253         ERR_CPU_NOT_PRESENT,
254         ERR_NO_MSR,
255         ERR_CANT_OPEN_MSR,
256         ERR_CANT_OPEN_FILE,
257         ERR_CANT_READ_NUMBER,
258         ERR_CANT_READ_PROC_STAT,
259         ERR_NO_INVARIANT_TSC,
260         ERR_NO_APERF,
261         ERR_CALLOC,
262         ERR_CPU_ALLOC,
263         ERR_NOT_ROOT,
264         UNSUPPORTED_CPU,
265         ERR_PATH_TOO_LONG,
266 };
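/*
 * Most helpers below return 0 (or a valid file descriptor) on success and the
 * negated enum value (e.g. -ERR_CANT_OPEN_MSR) on failure, so an error can be
 * propagated unchanged up to turbostat_read().
 */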
267
268
269 /*****************************
270  *  MSR Manipulation helpers *
271  *****************************/
272
273 /*
274  * Open an MSR device for reading
275  * Can change the scheduling affinity of the current process if multiple_read is 1
276  */
277 static int __attribute__((warn_unused_result))
278 open_msr(int cpu, _Bool multiple_read)
279 {
280         char pathname[32];
281         int fd;
282
283         /*
284          * If we need to do multiple reads, let's migrate to that CPU
285          * Otherwise, we would lose time calling functions on another CPU
286          *
287          * If we are not yet initialized (cpu_affinity_setsize = 0),
288          * we need to skip this optimisation.
289          */
290         if (multiple_read && cpu_affinity_setsize) {
291                 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
292                 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
293                 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) {
294                         ERROR("Turbostat plugin: Could not migrate to CPU %d", cpu);
295                         return -ERR_CPU_MIGRATE;
296                 }
297         }
298
299         ssnprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu);
300         fd = open(pathname, O_RDONLY);
301         if (fd < 0)
302                 return -ERR_CANT_OPEN_MSR;
303         return fd;
304 }
305
306 /*
307  * Read a single MSR from an open file descriptor
308  */
309 static int __attribute__((warn_unused_result))
310 read_msr(int fd, off_t offset, unsigned long long *msr)
311 {
312         ssize_t retval;
313
314         retval = pread(fd, msr, sizeof *msr, offset);
315
316         if (retval != sizeof *msr) {
317                 ERROR("Turbostat plugin: MSR offset 0x%llx read failed",
318                       (unsigned long long)offset);
319                 return -1;
320         }
321         return 0;
322 }
323
324 /*
325  * Open an MSR device for reading, read the requested value and close it.
326  * This call will not affect the scheduling affinity of this thread.
327  */
328 static int __attribute__((warn_unused_result))
329 get_msr(int cpu, off_t offset, unsigned long long *msr)
330 {
331         ssize_t retval;
332         int fd;
333
334         fd = open_msr(cpu, 0);
335         if (fd < 0)
336                 return fd;
337         retval = read_msr(fd, offset, msr);
338         close(fd);
339         return retval;
340 }
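/*
 * Usage sketch (illustration only, error handling elided): reading the
 * Time Stamp Counter of cpu 0 through the helper above would be
 *
 *     unsigned long long tsc;
 *     if (get_msr(0, MSR_IA32_TSC, &tsc) == 0)
 *             INFO("cpu0 TSC: %llu", tsc);
 */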
341
342
343 /********************************
344  * Raw data acquisition (1 CPU) *
345  ********************************/
346
347 /*
348  * Read all available data for a single CPU
349  *
350  * Core data is shared by all threads in one core: extracted only for the first thread
351  * Package data is shared by all cores in one package: extracted only for the first thread of the first core
352  *
353  * Side effect: migrates to the targeted CPU
354  */
355 static int __attribute__((warn_unused_result))
356 get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
357 {
358         int cpu = t->cpu_id;
359         unsigned long long msr;
360         int msr_fd;
361         int retval = 0;
362
363         msr_fd = open_msr(cpu, 1);
364         if (msr_fd < 0)
365                 return msr_fd;
366
367 #define READ_MSR(msr, dst)                      \
368 do {                                            \
369         if (read_msr(msr_fd, msr, dst)) {       \
370                 retval = -ERR_##msr;            \
371                 goto out;                       \
372         }                                       \
373 } while (0)
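/*
 * READ_MSR relies on enum return_values providing an ERR_ counterpart for
 * every MSR_ constant used here (token pasting), e.g. a failed
 * READ_MSR(MSR_IA32_TSC, ...) sets retval to -ERR_MSR_IA32_TSC.
 */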
374
375         READ_MSR(MSR_IA32_TSC, &t->tsc);
376
377         READ_MSR(MSR_IA32_APERF, &t->aperf);
378         READ_MSR(MSR_IA32_MPERF, &t->mperf);
379
380         READ_MSR(MSR_SMI_COUNT, &msr);
381         t->smi_count = msr & 0xFFFFFFFF;
382
383         /* collect core counters only for 1st thread in core */
384         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) {
385                 retval = 0;
386                 goto out;
387         }
388
389         if (do_core_cstate & (1 << 3))
390                 READ_MSR(MSR_CORE_C3_RESIDENCY, &c->c3);
391         if (do_core_cstate & (1 << 6))
392                 READ_MSR(MSR_CORE_C6_RESIDENCY, &c->c6);
393         if (do_core_cstate & (1 << 7))
394                 READ_MSR(MSR_CORE_C7_RESIDENCY, &c->c7);
395
396         if (do_dts) {
397                 READ_MSR(MSR_IA32_THERM_STATUS, &msr);
398                 c->core_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
399         }
400
401         /* collect package counters only for 1st core in package */
402         if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
403                 retval = 0;
404                 goto out;
405         }
406
407         if (do_pkg_cstate & (1 << 2))
408                 READ_MSR(MSR_PKG_C2_RESIDENCY, &p->pc2);
409         if (do_pkg_cstate & (1 << 3))
410                 READ_MSR(MSR_PKG_C3_RESIDENCY, &p->pc3);
411         if (do_pkg_cstate & (1 << 6))
412                 READ_MSR(MSR_PKG_C6_RESIDENCY, &p->pc6);
413         if (do_pkg_cstate & (1 << 7))
414                 READ_MSR(MSR_PKG_C7_RESIDENCY, &p->pc7);
415         if (do_pkg_cstate & (1 << 8))
416                 READ_MSR(MSR_PKG_C8_RESIDENCY, &p->pc8);
417         if (do_pkg_cstate & (1 << 9))
418                 READ_MSR(MSR_PKG_C9_RESIDENCY, &p->pc9);
419         if (do_pkg_cstate & (1 << 10))
420                 READ_MSR(MSR_PKG_C10_RESIDENCY, &p->pc10);
421
422         if (do_rapl & RAPL_PKG) {
423                 READ_MSR(MSR_PKG_ENERGY_STATUS, &msr);
424                 p->energy_pkg = msr & 0xFFFFFFFF;
425         }
426         if (do_rapl & RAPL_CORES) {
427                 READ_MSR(MSR_PP0_ENERGY_STATUS, &msr);
428                 p->energy_cores = msr & 0xFFFFFFFF;
429         }
430         if (do_rapl & RAPL_DRAM) {
431                 READ_MSR(MSR_DRAM_ENERGY_STATUS, &msr);
432                 p->energy_dram = msr & 0xFFFFFFFF;
433         }
434         if (do_rapl & RAPL_GFX) {
435                 READ_MSR(MSR_PP1_ENERGY_STATUS, &msr);
436                 p->energy_gfx = msr & 0xFFFFFFFF;
437         }
438         if (do_rapl & RAPL_PKG_PERF_STATUS) {
439                 READ_MSR(MSR_PKG_PERF_STATUS, &msr);
440                 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
441         }
442         if (do_rapl & RAPL_DRAM_PERF_STATUS) {
443                 READ_MSR(MSR_DRAM_PERF_STATUS, &msr);
444                 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
445         }
446         if (do_ptm) {
447                 READ_MSR(MSR_IA32_PACKAGE_THERM_STATUS, &msr);
448                 p->pkg_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
449         }
450
451 out:
452         close(msr_fd);
453         return retval;
454 }
455
456
457 /**********************************
458  * Evaluating the changes (1 CPU) *
459  **********************************/
460
461 /*
462  * Compute delta = new - old on 32-bit wrapping (cyclic) counters
463  */
464 #define DELTA_WRAP32(delta, new, old)                   \
465         if (new > old) {                                \
466                 delta = new - old;                      \
467         } else {                                        \
468                 delta = 0x100000000 + new - old;        \
469         }
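/*
 * Worked example (illustration only): the RAPL energy counters are 32-bit
 * and wrap, so with old = 0xFFFFFFF0 and new = 0x00000010 the macro yields
 * delta = 0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20 instead of an underflow.
 */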
470
471 /*
472  * Compute the delta between old and new at the package level
473  * (some fields are copied rather than diffed, e.g. temperature)
474  */
475 static inline void
476 delta_package(struct pkg_data *delta, const struct pkg_data *new, const struct pkg_data *old)
477 {
478         delta->pc2 = new->pc2 - old->pc2;
479         delta->pc3 = new->pc3 - old->pc3;
480         delta->pc6 = new->pc6 - old->pc6;
481         delta->pc7 = new->pc7 - old->pc7;
482         delta->pc8 = new->pc8 - old->pc8;
483         delta->pc9 = new->pc9 - old->pc9;
484         delta->pc10 = new->pc10 - old->pc10;
485         delta->pkg_temp_c = new->pkg_temp_c;
486
487         DELTA_WRAP32(delta->energy_pkg, new->energy_pkg, old->energy_pkg);
488         DELTA_WRAP32(delta->energy_cores, new->energy_cores, old->energy_cores);
489         DELTA_WRAP32(delta->energy_gfx, new->energy_gfx, old->energy_gfx);
490         DELTA_WRAP32(delta->energy_dram, new->energy_dram, old->energy_dram);
491         DELTA_WRAP32(delta->rapl_pkg_perf_status, new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
492         DELTA_WRAP32(delta->rapl_dram_perf_status, new->rapl_dram_perf_status, old->rapl_dram_perf_status);
493 }
494
495 /*
496  * Compute the delta between old and new at the core level
497  * (some fields are copied rather than diffed, e.g. temperature)
498  */
499 static inline void
500 delta_core(struct core_data *delta, const struct core_data *new, const struct core_data *old)
501 {
502         delta->c3 = new->c3 - old->c3;
503         delta->c6 = new->c6 - old->c6;
504         delta->c7 = new->c7 - old->c7;
505         delta->core_temp_c = new->core_temp_c;
506 }
507
508 /*
509  * Compute the delta between old and new at the thread level
510  * core_delta is required for the c1 estimation (tsc - c0 - all core C-states)
511  */
512 static inline int __attribute__((warn_unused_result))
513 delta_thread(struct thread_data *delta, const struct thread_data *new, const struct thread_data *old,
514         const struct core_data *core_delta)
515 {
516         delta->tsc = new->tsc - old->tsc;
517
518         /* check for TSC < 1 Mcycles over interval */
519         if (delta->tsc < (1000 * 1000)) {
520                 WARNING("Turbostat plugin: Insanely slow TSC rate, TSC stops "
521                         "in idle? You can disable all c-states by booting with"
522                         " 'idle=poll' or just the deep ones with"
523                         " 'processor.max_cstate=1'");
524                 return -1;
525         }
526
527         delta->c1 = new->c1 - old->c1;
528
529         if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
530                 delta->aperf = new->aperf - old->aperf;
531                 delta->mperf = new->mperf - old->mperf;
532         } else {
533                 if (!aperf_mperf_unstable) {
534                         WARNING("Turbostat plugin: APERF or MPERF went "
535                                 "backwards. Frequency results do not cover "
536                                 "the entire interval. Fix this by running "
537                                 "Linux-2.6.30 or later.");
538
539                         aperf_mperf_unstable = 1;
540                 }
541         }
542
543         /*
544          * As counter collection is not atomic,
545          * it is possible for mperf's non-halted cycles + idle states
546          * to exceed TSC's all cycles: show c1 = 0% in that case.
547          */
548         if ((delta->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > delta->tsc)
549                 delta->c1 = 0;
550         else {
551                 /* normal case, derive c1 */
552                 delta->c1 = delta->tsc - delta->mperf - core_delta->c3
553                         - core_delta->c6 - core_delta->c7;
554         }
555
556         if (delta->mperf == 0) {
557                 WARNING("Turbostat plugin: cpu%d MPERF 0!", old->cpu_id);
558                 delta->mperf = 1;       /* divide by 0 protection */
559         }
560
561         delta->smi_count = new->smi_count - old->smi_count;
562
563         return 0;
564 }
565
566 /**********************************
567  * Submitting the results (1 CPU) *
568  **********************************/
569
570 /*
571  * Submit one gauge value
572  */
573 static void
574 turbostat_submit (const char *plugin_instance,
575         const char *type, const char *type_instance,
576         gauge_t value)
577 {
578         value_list_t vl = VALUE_LIST_INIT;
579         value_t v;
580
581         v.gauge = value;
582         vl.values = &v;
583         vl.values_len = 1;
584         sstrncpy (vl.host, hostname_g, sizeof (vl.host));
585         sstrncpy (vl.plugin, PLUGIN_NAME, sizeof (vl.plugin));
586         if (plugin_instance != NULL)
587                 sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance));
588         sstrncpy (vl.type, type, sizeof (vl.type));
589         if (type_instance != NULL)
590                 sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance));
591
592         plugin_dispatch_values (&vl);
593 }
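/*
 * Example (illustration only): reporting that cpu 3 spent 42.5% of the
 * interval in C0 would be
 *
 *     turbostat_submit("cpu03", "percent", "c0", 42.5);
 *
 * which is dispatched as plugin "turbostat", plugin instance "cpu03",
 * type "percent", type instance "c0".
 */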
594
595 /*
596  * Submit all data for a single CPU
597  *
598  * Core data is shared by all threads in one core: submitted only for the first thread
599  * Package data is shared by all cores in one package: submitted only for the first thread of the first core
600  */
601 static int
602 submit_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
603 {
604         char name[12];
605         double interval_float;
606
607         interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
608
609         ssnprintf(name, sizeof(name), "cpu%02d", t->cpu_id);
610
611         if (!aperf_mperf_unstable)
612                 turbostat_submit(name, "percent", "c0", 100.0 * t->mperf/t->tsc);
613         if (!aperf_mperf_unstable)
614                 turbostat_submit(name, "percent", "c1", 100.0 * t->c1/t->tsc);
615
616         /* GHz */
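        /*
         * Average frequency of the non-idle time, derived as
         * (tsc / interval) * (aperf / mperf) / 1e9 GHz: APERF/MPERF is the
         * ratio of actual to reference cycles while the CPU was not halted.
         */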
617         if ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc)))
618                 turbostat_submit(NULL, "frequency", name, 1.0 * t->tsc / 1000000000 * t->aperf / t->mperf / interval_float);
619
620         /* SMI */
621         turbostat_submit(NULL, "current", name, t->smi_count);
622
623         /* submit per-core data only for 1st thread in core */
624         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
625                 goto done;
626
627         ssnprintf(name, sizeof(name), "core%02d", c->core_id);
628
629         if (do_core_cstate & (1 << 3))
630                 turbostat_submit(name, "percent", "c3", 100.0 * c->c3/t->tsc);
631         if (do_core_cstate & (1 << 6))
632                 turbostat_submit(name, "percent", "c6", 100.0 * c->c6/t->tsc);
633         if (do_core_cstate & (1 << 7))
634                 turbostat_submit(name, "percent", "c7", 100.0 * c->c7/t->tsc);
635
636         if (do_dts)
637                 turbostat_submit(NULL, "temperature", name, c->core_temp_c);
638
639         /* submit per-package data only for 1st core in package */
640         if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
641                 goto done;
642
643         ssnprintf(name, sizeof(name), "pkg%02d", p->package_id);
644
645         if (do_ptm)
646                 turbostat_submit(NULL, "temperature", name, p->pkg_temp_c);
647
648         if (do_pkg_cstate & (1 << 2))
649                 turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2/t->tsc);
650         if (do_pkg_cstate & (1 << 3))
651                 turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3/t->tsc);
652         if (do_pkg_cstate & (1 << 6))
653                 turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6/t->tsc);
654         if (do_pkg_cstate & (1 << 7))
655                 turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7/t->tsc);
656         if (do_pkg_cstate & (1 << 8))
657                 turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8/t->tsc);
658         if (do_pkg_cstate & (1 << 9))
659                 turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9/t->tsc);
660         if (do_pkg_cstate & (1 << 10))
661                 turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10/t->tsc);
662
663         if (do_rapl) {
664                 if (do_rapl & RAPL_PKG)
665                         turbostat_submit(name, "power", "Pkg_W", p->energy_pkg * rapl_energy_units / interval_float);
666                 if (do_rapl & RAPL_CORES)
667                         turbostat_submit(name, "power", "Cor_W", p->energy_cores * rapl_energy_units / interval_float);
668                 if (do_rapl & RAPL_GFX)
669                         turbostat_submit(name, "power", "GFX_W", p->energy_gfx * rapl_energy_units / interval_float);
670                 if (do_rapl & RAPL_DRAM)
671                         turbostat_submit(name, "power", "RAM_W", p->energy_dram * rapl_energy_units / interval_float);
672         }
673 done:
674         return 0;
675 }
676
677
678 /**********************************
679  * Looping function over all CPUs *
680  **********************************/
681
682 /*
683  * Check if a given cpu id is in our compiled list of existing CPUs
684  */
685 static int
686 cpu_is_not_present(int cpu)
687 {
688         return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
689 }
690
691 /*
692  * Loop on all CPUs in topological order
693  *
694  * Skip non-present cpus
695  * Return the error code at the first error or 0
696  */
697 static int __attribute__((warn_unused_result))
698 for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
699         struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
700 {
701         int retval, pkg_no, core_no, thread_no;
702
703         for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
704                 for (core_no = 0; core_no < topology.num_cores; ++core_no) {
705                         for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
706                                 struct thread_data *t;
707                                 struct core_data *c;
708                                 struct pkg_data *p;
709
710                                 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
711
712                                 if (cpu_is_not_present(t->cpu_id))
713                                         continue;
714
715                                 c = GET_CORE(core_base, core_no, pkg_no);
716                                 p = GET_PKG(pkg_base, pkg_no);
717
718                                 retval = func(t, c, p);
719                                 if (retval)
720                                         return retval;
721                         }
722                 }
723         }
724         return 0;
725 }
726
727 /*
728  * Dedicated loop: extract the evolution of all counters for every CPU
729  *
730  * Skip non-present cpus
731  * Return the error code at the first error or 0
732  *
733  * Core data is shared by all threads in one core: extracted only for the first thread
734  * Package data is shared by all cores in one package: extracted only for the first thread of the first core
735  */
736 static int __attribute__((warn_unused_result))
737 for_all_cpus_delta(const struct thread_data *thread_new_base, const struct core_data *core_new_base, const struct pkg_data *pkg_new_base,
738                    const struct thread_data *thread_old_base, const struct core_data *core_old_base, const struct pkg_data *pkg_old_base)
739 {
740         int retval, pkg_no, core_no, thread_no;
741
742         for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
743                 for (core_no = 0; core_no < topology.num_cores; ++core_no) {
744                         for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
745                                 struct thread_data *t_delta;
746                                 const struct thread_data *t_old, *t_new;
747                                 struct core_data *c_delta;
748
749                                 /* Get correct pointers for threads */
750                                 t_delta = GET_THREAD(thread_delta, thread_no, core_no, pkg_no);
751                                 t_new = GET_THREAD(thread_new_base, thread_no, core_no, pkg_no);
752                                 t_old = GET_THREAD(thread_old_base, thread_no, core_no, pkg_no);
753
754                                 /* Skip threads that disappeared */
755                                 if (cpu_is_not_present(t_delta->cpu_id))
756                                         continue;
757
758                                 /* c_delta is always required for delta_thread */
759                                 c_delta = GET_CORE(core_delta, core_no, pkg_no);
760
761                                 /* calculate core delta only for 1st thread in core */
762                                 if (t_new->flags & CPU_IS_FIRST_THREAD_IN_CORE) {
763                                         const struct core_data *c_old, *c_new;
764
765                                         c_new = GET_CORE(core_new_base, core_no, pkg_no);
766                                         c_old = GET_CORE(core_old_base, core_no, pkg_no);
767
768                                         delta_core(c_delta, c_new, c_old);
769                                 }
770
771                                 /* Always calculate thread delta */
772                                 retval = delta_thread(t_delta, t_new, t_old, c_delta);
773                                 if (retval)
774                                         return retval;
775
776                                 /* calculate package delta only for 1st core in package */
777                                 if (t_new->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) {
778                                         struct pkg_data *p_delta;
779                                         const struct pkg_data *p_old, *p_new;
780
781                                         p_delta = GET_PKG(package_delta, pkg_no);
782                                         p_new = GET_PKG(pkg_new_base, pkg_no);
783                                         p_old = GET_PKG(pkg_old_base, pkg_no);
784
785                                         delta_package(p_delta, p_new, p_old);
786                                 }
787                         }
788                 }
789         }
790         return 0;
791 }
792
793
794 /***************
795  * CPU Probing *
796  ***************/
797
798 /*
799  * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
800  * the Thermal Control Circuit (TCC) activates.
801  * This is usually equal to tjMax.
802  *
803  * Older processors do not have this MSR, so we have to guess,
804  * but also allow a configuration override via "TCCActivationTemp".
805  *
806  * Several MSR temperature values are in units of degrees-C
807  * below this value, including the Digital Thermal Sensor (DTS),
808  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
809  */
810 static int __attribute__((warn_unused_result))
811 set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
812 {
813         unsigned long long msr;
814         unsigned int target_c_local;
815
816         /* tcc_activation_temp is used only for dts or ptm */
817         if (!(do_dts || do_ptm))
818                 return 0;
819
820         /* this is a per-package concept */
821         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
822                 return 0;
823
824         if (tcc_activation_temp != 0) {
825                 p->tcc_activation_temp = tcc_activation_temp;
826                 return 0;
827         }
828
829         if (get_msr(t->cpu_id, MSR_IA32_TEMPERATURE_TARGET, &msr))
830                 goto guess;
831
832         target_c_local = (msr >> 16) & 0xFF;
833
834         if (!target_c_local)
835                 goto guess;
836
837         p->tcc_activation_temp = target_c_local;
838
839         return 0;
840
841 guess:
842         p->tcc_activation_temp = TJMAX_DEFAULT;
843         WARNING("Turbostat plugin: cpu%d: Guessing tjMax %d C,"
844                 " Please use TCCActivationTemp to specify it.",
845                 t->cpu_id, p->tcc_activation_temp);
846
847         return 0;
848 }
849
850 /*
851  * Identify the functionality of the CPU
852  */
853 static int __attribute__((warn_unused_result))
854 probe_cpu()
855 {
856         unsigned int eax, ebx, ecx, edx, max_level;
857         unsigned int fms, family, model;
858
859         /* CPUID(0):
860          * - EAX: Maximum Input Value for Basic CPUID Information
861          * - EBX: "Genu" (0x756e6547)
862          * - EDX: "ineI" (0x49656e69)
863          * - ECX: "ntel" (0x6c65746e)
864          */
865         max_level = ebx = ecx = edx = 0;
866         __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
867         if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
868                 ERROR("Turbostat plugin: Unsupported CPU (not Intel)");
869                 return -UNSUPPORTED_CPU;
870         }
871
872         /* CPUID(1):
873          * - EAX: Version Information: Type, Family, Model, and Stepping ID
874          *  + 4-7:   Model ID
875          *  + 8-11:  Family ID
876          *  + 12-13: Processor type
877          *  + 16-19: Extended Model ID
878          *  + 20-27: Extended Family ID
879          * - EDX: Feature Information:
880          *  + 5: Support for MSR read/write operations
881          */
882         fms = ebx = ecx = edx = 0;
883         __get_cpuid(1, &fms, &ebx, &ecx, &edx);
884         family = (fms >> 8) & 0xf;
885         model = (fms >> 4) & 0xf;
886         if (family == 0xf)
887                 family += (fms >> 20) & 0xff;
888         if (family == 6 || family == 0xf)
889                 model += ((fms >> 16) & 0xf) << 4;
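        /*
         * Worked example (illustration only): a Haswell desktop CPU reports
         * fms = 0x000306C3, i.e. family = 6, model = 0xC, extended model = 3,
         * giving the combined model 0x3C used in the switches below.
         */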
890         if (!(edx & (1 << 5))) {
891                 ERROR("Turbostat plugin: Unsupported CPU (no MSR support)");
892                 return -ERR_NO_MSR;
893         }
894
895         /*
896          * CPUID(0x80000000):
897          * - EAX: Maximum Input Value for Extended Function CPUID Information
898          *
899          * This allows us to verify if the CPUID(0x80000007) can be called
900          *
901          * This check is valid for both Intel and AMD.
902          */
903         max_level = ebx = ecx = edx = 0;
904         __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
905         if (max_level < 0x80000007) {
906                 ERROR("Turbostat plugin: Unsupported CPU (no invariant TSC, "
907                       " Maximum Extended Function: 0x%x)", max_level);
908                 return -ERR_NO_INVARIANT_TSC;
909         }
910
911         /*
912          * CPUID(0x80000007):
913          * - EDX:
914          *  + 8: Invariant TSC available if set
915          *
916          * This check is valid for both Intel and AMD
917          */
918         eax = ebx = ecx = edx = 0;
919         __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
920         if (!(edx & (1 << 8))) {
921                 ERROR("Turbostat plugin: Unsupported CPU (No invariant TSC)");
922                 return -ERR_NO_INVARIANT_TSC;
923         }
924
925         /*
926          * CPUID(6):
927          * - EAX:
928          *  + 0: Digital temperature sensor is supported if set
929          *  + 6: Package thermal management is supported if set
930          * - ECX:
931          *  + 0: Hardware Coordination Feedback Capability (Presence of IA32_MPERF and IA32_APERF).
932          *  + 3: The processor supports performance-energy bias preference if set.
933          *       It also implies the presence of a new architectural MSR called IA32_ENERGY_PERF_BIAS
934          *
935          * This check is valid for both Intel and AMD
936          */
937         eax = ebx = ecx = edx = 0;
938         __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
939         do_dts = eax & (1 << 0);
940         do_ptm = eax & (1 << 6);
941         if (!(ecx & (1 << 0))) {
942                 ERROR("Turbostat plugin: Unsupported CPU (No APERF)");
943                 return -ERR_NO_APERF;
944         }
945
946         /*
947          * Enable or disable C states depending on the model and family
948          */
949         if (family == 6) {
950                 switch (model) {
951                 /* Atom (partial) */
952                 case 0x27:
953                         do_core_cstate = 0;
954                         do_pkg_cstate = (1 << 2) | (1 << 4) | (1 << 6);
955                         break;
956                 /* Silvermont */
957                 case 0x37: /* BYT */
958                 case 0x4A:
959                 case 0x4D: /* AVN */
960                 case 0x5A:
961                 case 0x5D:
962                         do_core_cstate = (1 << 1) | (1 << 6);
963                         do_pkg_cstate = (1 << 6);
964                         break;
965                 /* Nehalem */
966                 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
967                 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
968                 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
969                 case 0x2E: /* Nehalem-EX Xeon - Beckton */
970                         do_core_cstate = (1 << 3) | (1 << 6);
971                         do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
972                         break;
973                 /* Westmere */
974                 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
975                 case 0x2C: /* Westmere EP - Gulftown */
976                 case 0x2F: /* Westmere-EX Xeon - Eagleton */
977                         do_core_cstate = (1 << 3) | (1 << 6);
978                         do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
979                         break;
980                 /* Sandy Bridge */
981                 case 0x2A: /* SNB */
982                 case 0x2D: /* SNB Xeon */
983                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
984                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
985                         break;
986                 /* Ivy Bridge */
987                 case 0x3A: /* IVB */
988                 case 0x3E: /* IVB Xeon */
989                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
990                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
991                         break;
992                 /* Haswell */
993                 case 0x3C: /* HSW */
994                 case 0x3F: /* HSW */
995                 case 0x46: /* HSW */
996                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
997                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
998                         break;
999                 case 0x45: /* HSW */
1000                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1001                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
1002                         break;
1003                 /* Broadwell */
1004                 case 0x4F: /* BDW */
1005                 case 0x56: /* BDX-DE */
1006                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1007                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
1008                         break;
1009                 case 0x3D: /* BDW */
1010                         do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
1011                         do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
1012                         break;
1013                 default:
1014                         ERROR("Turbostat plugin: Unsupported CPU (family: %#x,"
1015                               " model: %#x)", family, model);
                        return -UNSUPPORTED_CPU;
1016                 }
1017                 switch (model) {
1018                 case 0x2A:
1019                 case 0x3A:
1020                 case 0x3C:
1021                 case 0x45:
1022                 case 0x46:
1023                         do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_PKG_POWER_INFO | RAPL_GFX;
1024                         break;
1025                 case 0x3F:
1026                         do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM | RAPL_DRAM_PERF_STATUS;
1027                         break;
1028                 case 0x2D:
1029                 case 0x3E:
1030                         do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_PKG_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM | RAPL_DRAM_PERF_STATUS;
1031                         break;
1032                 case 0x37:
1033                 case 0x4D:
1034                         do_rapl = RAPL_PKG | RAPL_CORES;
1035                         break;
1036                 default:
1037                         do_rapl = 0;
1038                 }
1039         } else {
1040                 ERROR("Turbostat plugin: Unsupported CPU (family: %#x, "
1041                       "model: %#x)", family, model);
1042                 return -UNSUPPORTED_CPU;
1043         }
1044
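        /*
         * MSR_RAPL_POWER_UNIT bits 12:8 encode the Energy Status Unit (ESU):
         * one counter increment is 1/2^ESU joule on most models (e.g. ESU = 16
         * gives ~15.3 uJ), while Silvermont (model 0x37) apparently encodes the
         * unit as 2^ESU microjoules, hence the two formulas below.
         */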
1045         if (do_rapl) {
1046                 unsigned long long msr;
1047                 if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
1048                         return 0;
1049
1050                 if (model == 0x37)
1051                         rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
1052                 else
1053                         rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
1054         }
1055
1056         return 0;
1057 }
1058
1059
1060 /********************
1061  * Topology Probing *
1062  ********************/
1063
1064 /*
1065  * Read a single int from a file.
1066  */
1067 static int __attribute__ ((format(printf,1,2)))
1068 parse_int_file(const char *fmt, ...)
1069 {
1070         va_list args;
1071         char path[PATH_MAX];
1072         FILE *filep;
1073         int len, value;
1074
1075         va_start(args, fmt);
1076         len = vsnprintf(path, sizeof(path), fmt, args);
1077         va_end(args);
1078         if (len < 0 || len >= PATH_MAX) {
1079                 ERROR("Turbostat plugin: path truncated: '%s'", path);
1080                 return -ERR_PATH_TOO_LONG;
1081         }
1082
1083         filep = fopen(path, "r");
1084         if (!filep) {
1085                 ERROR("Turbostat plugin: Failed to open '%s'", path);
1086                 return -ERR_CANT_OPEN_FILE;
1087         }
1088         if (fscanf(filep, "%d", &value) != 1) {
1089                 ERROR("Turbostat plugin: Failed to parse number from '%s'", path);
                fclose(filep);
1090                 return -ERR_CANT_READ_NUMBER;
1091         }
1092         fclose(filep);
1093         return value;
1094 }
1095
1096 static int
1097 get_threads_on_core(int cpu)
1098 {
1099         char path[80];
1100         FILE *filep;
1101         int sib1, sib2;
1102         int matches;
1103         char character;
1104
1105         ssnprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1106         filep = fopen(path, "r");
1107         if (!filep) {
1108                 ERROR("Turbostat plugin: Failed to open '%s'", path);
1109                 return -ERR_CANT_OPEN_FILE;
1110         }
1111         /*
1112          * file format:
1113          * a pair of numbers separated by a single character means 2 siblings (e.g. "1-2" or "1,4"),
1114          * otherwise 1 sibling (the cpu itself).
1115          */
1116         matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
1117
1118         fclose(filep);
1119
1120         if (matches == 3)
1121                 return 2;
1122         else
1123                 return 1;
1124 }
1125
1126 /*
1127  * run func(cpu) on every cpu in /proc/stat
1128  * return max_cpu number
1129  */
1130 static int __attribute__((warn_unused_result))
1131 for_all_proc_cpus(int (func)(int))
1132 {
1133         FILE *fp;
1134         int cpu_num;
1135         int retval;
1136
1137         fp = fopen("/proc/stat", "r");
1138         if (!fp) {
1139                 ERROR("Turbostat plugin: Failed to open /proc/stat");
1140                 return -ERR_CANT_OPEN_FILE;
1141         }
1142
1143         retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1144         if (retval != 0) {
1145                 ERROR("Turbostat plugin: Failed to parse /proc/stat");
1146                 fclose(fp);
1147                 return -ERR_CANT_READ_PROC_STAT;
1148         }
1149
1150         while (1) {
1151                 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
1152                 if (retval != 1)
1153                         break;
1154
1155                 retval = func(cpu_num);
1156                 if (retval) {
1157                         fclose(fp);
1158                         return(retval);
1159                 }
1160         }
1161         fclose(fp);
1162         return 0;
1163 }
1164
1165 /*
1166  * Update the stored topology.max_cpu_id
1167  */
1168 static int
1169 update_max_cpu_id(int cpu)
1170 {
1171         if (topology.max_cpu_id < cpu)
1172                 topology.max_cpu_id = cpu;
1173         return 0;
1174 }
1175
1176 static int
1177 mark_cpu_present(int cpu)
1178 {
1179         CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
1180         return 0;
1181 }
1182
1183 static int __attribute__((warn_unused_result))
1184 allocate_cpu_set(cpu_set_t ** set, size_t * size) {
1185         *set = CPU_ALLOC(topology.max_cpu_id  + 1);
1186         if (*set == NULL) {
1187                 ERROR("Turbostat plugin: Unable to allocate CPU state");
1188                 return -ERR_CPU_ALLOC;
1189         }
1190         *size = CPU_ALLOC_SIZE(topology.max_cpu_id  + 1);
1191         CPU_ZERO_S(*size, *set);
1192         return 0;
1193 }
1194
1195 /*
1196  * Build a local representation of the cpu distribution
1197  */
1198 static int __attribute__((warn_unused_result))
1199 topology_probe()
1200 {
1201         int i;
1202         int ret;
1203         int max_package_id, max_core_id, max_thread_id;
1204         max_package_id = max_core_id = max_thread_id = 0;
1205
1206         /* Clean topology */
1207         free(topology.cpus);
1208         memset(&topology, 0, sizeof(topology));
1209
1210         ret = for_all_proc_cpus(update_max_cpu_id);
1211         if (ret != 0)
1212                 goto err;
1213
1214         topology.cpus = calloc(1, (topology.max_cpu_id  + 1) * sizeof(struct cpu_topology));
1215         if (topology.cpus == NULL) {
1216                 ERROR("Turbostat plugin: Unable to allocate memory for CPU topology");
1217                 return -ERR_CALLOC;
1218         }
1219
1220         ret = allocate_cpu_set(&cpu_present_set, &cpu_present_setsize);
1221         if (ret != 0)
1222                 goto err;
1223         ret = allocate_cpu_set(&cpu_affinity_set, &cpu_affinity_setsize);
1224         if (ret != 0)
1225                 goto err;
1226         ret = allocate_cpu_set(&cpu_saved_affinity_set, &cpu_saved_affinity_setsize);
1227         if (ret != 0)
1228                 goto err;
1229
1230         ret = for_all_proc_cpus(mark_cpu_present);
1231         if (ret != 0)
1232                 goto err;
1233
1234         /*
1235          * For online cpus
1236          * find max_core_id, max_package_id
1237          */
1238         for (i = 0; i <= topology.max_cpu_id; ++i) {
1239                 int num_threads;
1240                 struct cpu_topology *cpu = &topology.cpus[i];
1241
1242                 if (cpu_is_not_present(i)) {
1243                         WARNING("Turbostat plugin: cpu%d NOT PRESENT", i);
1244                         continue;
1245                 }
1246
1247                 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", i);
1248                 if (ret < 0)
1249                         goto err;
1250                 else
1251                         cpu->package_id = ret;
1252                 if (cpu->package_id > max_package_id)
1253                         max_package_id = cpu->package_id;
1254
1255                 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", i);
1256                 if (ret < 0)
1257                         goto err;
1258                 else
1259                         cpu->core_id = ret;
1260                 if (cpu->core_id > max_core_id)
1261                         max_core_id = cpu->core_id;
1262                 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", i);
1263                 if (ret < 0)
1264                         goto err;
1265                 else if (ret == i)
1266                         cpu->first_core_in_package = 1;
1267
1268                 ret = get_threads_on_core(i);
1269                 if (ret < 0)
1270                         goto err;
1271                 else
1272                         num_threads = ret;
1273                 if (num_threads > max_thread_id)
1274                         max_thread_id = num_threads;
1275                 ret = parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", i);
1276                 if (ret < 0)
1277                         goto err;
1278                 else if (ret == i)
1279                         cpu->first_thread_in_core = 1;
1280
1281                 DEBUG("Turbostat plugin: cpu %d pkg %d core %d\n",
1282                         i, cpu->package_id, cpu->core_id);
1283         }
1284         /* Num is max + 1 (need to count 0) */
1285         topology.num_packages = max_package_id + 1;
1286         topology.num_cores = max_core_id + 1;
1287         topology.num_threads = max_thread_id + 1;
1288
1289         return 0;
1290 err:
1291         free(topology.cpus);
        topology.cpus = NULL;
1292         return ret;
1293 }
1294
1295
1296 /************************
1297  * Main alloc/init/free *
1298  ************************/
1299
1300 static int
1301 allocate_counters(struct thread_data **threads, struct core_data **cores, struct pkg_data **packages)
1302 {
1303         int i;
1304         int total_threads, total_cores;
1305
1306         total_threads = topology.num_threads * topology.num_cores * topology.num_packages;
1307         *threads = calloc(total_threads, sizeof(struct thread_data));
1308         if (*threads == NULL)
1309                 goto err;
1310
1311         for (i = 0; i < total_threads; ++i)
1312                 (*threads)[i].cpu_id = -1;
1313
1314         total_cores = topology.num_cores * topology.num_packages;
1315         *cores = calloc(total_cores, sizeof(struct core_data));
1316         if (*cores == NULL)
1317                 goto err_clean_threads;
1318
1319         for (i = 0; i < total_cores; ++i)
1320                 (*cores)[i].core_id = -1;
1321
1322         *packages = calloc(topology.num_packages, sizeof(struct pkg_data));
1323         if (*packages == NULL)
1324                 goto err_clean_cores;
1325
1326         for (i = 0; i < topology.num_packages; i++)
1327                 (*packages)[i].package_id = i;
1328
1329         return 0;
1330
1331 err_clean_cores:
1332         free(*cores);
1333 err_clean_threads:
1334         free(*threads);
1335 err:
1336         ERROR("Turbostat plugin: Failed to allocate memory for counters");
1337         return -ERR_CALLOC;
1338 }
1339
1340 static int
1341 init_counter(struct thread_data *thread_base, struct core_data *core_base,
1342         struct pkg_data *pkg_base, int cpu_id)
1343 {
1344         struct thread_data *t;
1345         struct core_data *c;
1346         struct pkg_data *p;
1347         struct cpu_topology *cpu = &topology.cpus[cpu_id];
1348
1349         t = GET_THREAD(thread_base, !(cpu->first_thread_in_core), cpu->core_id, cpu->package_id);
1350         c = GET_CORE(core_base, cpu->core_id, cpu->package_id);
1351         p = GET_PKG(pkg_base, cpu->package_id);
1352
1353         t->cpu_id = cpu_id;
1354         if (cpu->first_thread_in_core)
1355                 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
1356         if (cpu->first_core_in_package)
1357                 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
1358
1359         c->core_id = cpu->core_id;
1360         p->package_id = cpu->package_id;
1361
1362         return 0;
1363 }
1364
1365 static int
1366 initialize_counters(void)
1367 {
1368         int ret;
1369         int cpu_id;
1370
1371         for (cpu_id = 0; cpu_id <= topology.max_cpu_id; ++cpu_id) {
1372                 if (cpu_is_not_present(cpu_id)) {
1373                         continue;
1374                 }
1375
1376                 ret = init_counter(EVEN_COUNTERS, cpu_id);
1377                 if (ret < 0)
1378                         return ret;
1379                 ret = init_counter(ODD_COUNTERS, cpu_id);
1380                 if (ret < 0)
1381                         return ret;
1382                 ret = init_counter(DELTA_COUNTERS, cpu_id);
1383                 if (ret < 0)
1384                         return ret;
1385         }
1386         return 0;
1387 }
1388
1389
1390
1391 static void
1392 free_all_buffers(void)
1393 {
1394         allocated = 0;
1395         initialized = 0;
1396
1397         CPU_FREE(cpu_present_set);
1398         cpu_present_set = NULL;
1399         cpu_present_setsize = 0;
1400
1401         CPU_FREE(cpu_affinity_set);
1402         cpu_affinity_set = NULL;
1403         cpu_affinity_setsize = 0;
1404
1405         CPU_FREE(cpu_saved_affinity_set);
1406         cpu_saved_affinity_set = NULL;
1407         cpu_saved_affinity_setsize = 0;
1408
1409         free(thread_even);
1410         free(core_even);
1411         free(package_even);
1412
1413         thread_even = NULL;
1414         core_even = NULL;
1415         package_even = NULL;
1416
1417         free(thread_odd);
1418         free(core_odd);
1419         free(package_odd);
1420
1421         thread_odd = NULL;
1422         core_odd = NULL;
1423         package_odd = NULL;
1424
1425         free(thread_delta);
1426         free(core_delta);
1427         free(package_delta);
1428
1429         thread_delta = NULL;
1430         core_delta = NULL;
1431         package_delta = NULL;
1432 }
1433
1434
1435 /**********************
1436  * Collectd functions *
1437  **********************/
1438
1439 #define DO_OR_GOTO_ERR(something) \
1440 do {                              \
1441         ret = (something);        \
1442         if (ret < 0)              \
1443                 goto err;         \
1444 } while (0)
1445
1446 static int setup_all_buffers(void)
1447 {
1448         int ret;
1449
1450         DO_OR_GOTO_ERR(topology_probe());
1451         DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
1452         DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
1453         DO_OR_GOTO_ERR(allocate_counters(&thread_delta, &core_delta, &package_delta));
1454         DO_OR_GOTO_ERR(initialize_counters());
1455         DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
1456         DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, ODD_COUNTERS));
1457
1458         allocated = 1;
1459         return 0;
1460 err:
1461         free_all_buffers();
1462         return ret;
1463 }
1464
1465 static int
1466 turbostat_read(void)
1467 {
1468         int ret;
1469
1470         if (!allocated) {
1471                 if ((ret = setup_all_buffers()) < 0)
1472                         return ret;
1473         }
1474
1475         if (for_all_proc_cpus(cpu_is_not_present)) {
1476                 free_all_buffers();
1477                 if ((ret = setup_all_buffers()) < 0)
1478                         return ret;
1479                 if (for_all_proc_cpus(cpu_is_not_present))
1480                         return -ERR_CPU_NOT_PRESENT;
1481         }
1482
1483         /* Saving the scheduling affinity, as it will be modified by get_counters */
1484         if (sched_getaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set) != 0)
1485                 return -ERR_CPU_SAVE_SCHED_AFFINITY;
1486
1487         if (!initialized) {
1488                 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
1489                         goto out;
1490                 gettimeofday(&tv_even, (struct timezone *)NULL);
1491                 is_even = 1;
1492                 initialized = 1;
1493                 ret = 0;
1494                 goto out;
1495         }
1496
1497         if (is_even) {
1498                 if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
1499                         goto out;
1500                 gettimeofday(&tv_odd, (struct timezone *)NULL);
1501                 is_even = 0;
1502                 timersub(&tv_odd, &tv_even, &tv_delta);
1503                 if ((ret = for_all_cpus_delta(ODD_COUNTERS, EVEN_COUNTERS)) < 0)
1504                         goto out;
1505                 if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
1506                         goto out;
1507         } else {
1508                 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
1509                         goto out;
1510                 gettimeofday(&tv_even, (struct timezone *)NULL);
1511                 is_even = 1;
1512                 timersub(&tv_even, &tv_odd, &tv_delta);
1513                 if ((ret = for_all_cpus_delta(EVEN_COUNTERS, ODD_COUNTERS)) < 0)
1514                         goto out;
1515                 if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
1516                         goto out;
1517         }
1518         ret = 0;
1519 out:
1520         /*
1521          * Let's restore the affinity
1522          * This might fail if the number of CPUs changed, but we can't do anything about it in that case.
1523          */
1524         (void)sched_setaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set);
1525         return ret;
1526 }
1527
1528 static int
1529 turbostat_init(void)
1530 {
1531         struct stat sb;
1532         int ret;
1533
1534         if (getuid() != 0) {
1535                 ERROR("Turbostat plugin: Initialization failed: this plugin "
1536                       "requires collectd to run as root in order to read "
1537                       "special CPU registers");
1538                 return -ERR_NOT_ROOT;
1539         }
1540
1541         DO_OR_GOTO_ERR(probe_cpu());
1542
1543         if (stat("/dev/cpu/0/msr", &sb)) {
1544                 ERROR("Turbostat plugin: Initialization failed: /dev/cpu/0/msr"
1545                       " does not exist even though the CPU supports MSRs. You may be "
1546                       "missing the corresponding kernel module, please try '# "
1547                       "modprobe msr'");
1548                 return -ERR_NO_MSR;
1549         }
1550
1551         DO_OR_GOTO_ERR(setup_all_buffers());
1552
1553         plugin_register_read(PLUGIN_NAME, turbostat_read);
1554
1555         return 0;
1556 err:
1557         free_all_buffers();
1558         return ret;
1559 }
1560
1561 static const char *config_keys[] =
1562 {
1563         "TCCActivationTemp",
1564 };
1565 static const int config_keys_num = STATIC_ARRAY_SIZE (config_keys);
1566
1567 static int
1568 turbostat_config(const char *key, const char *value)
1569 {
1570         long unsigned int tmp_val;
1571         char *end;
1572
1573         if (strcasecmp("TCCActivationTemp", key) == 0) {
1574                 tmp_val = strtoul(value, &end, 0);
1575                 if (*end != '\0' || tmp_val > UINT_MAX)
1576                         return -1;
1577                 tcc_activation_temp = (unsigned int) tmp_val;
1578         } else {
1579                 return -1;
1580         }
1581         return 0;
1582 }
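/*
 * Example collectd configuration (illustration only):
 *
 *     LoadPlugin turbostat
 *     <Plugin turbostat>
 *         TCCActivationTemp "100"
 *     </Plugin>
 */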
1583
1584 void module_register(void)
1585 {
1586         plugin_register_init(PLUGIN_NAME, turbostat_init);
1587         plugin_register_config(PLUGIN_NAME, turbostat_config, config_keys, config_keys_num);
1588 }