Turbostat: remove unused show_* variables
[collectd.git] / src / turbostat.c
1 /*
2  * turbostat -- Log CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors for collectd.
4  *
5  * Based on the 'turbostat' tool of the Linux kernel, found at
6  * linux/tools/power/x86/turbostat/turbostat.c:
7  * ----
8  * Copyright (c) 2013 Intel Corporation.
9  * Len Brown <len.brown@intel.com>
10  *
11  * This program is free software; you can redistribute it and/or modify it
12  * under the terms and conditions of the GNU General Public License,
13  * version 2, as published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  * more details.
19  *
20  * You should have received a copy of the GNU General Public License along with
21  * this program; if not, write to the Free Software Foundation, Inc.,
22  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23  * ----
24  * Ported to collectd by Vincent Brillault <git@lerya.net>
25  */
26
27 /*
28  * _GNU_SOURCE is required because of the following functions:
29  * - CPU_ISSET_S
30  * - CPU_ZERO_S
31  * - CPU_SET_S
32  * - CPU_FREE
33  * - CPU_ALLOC
34  * - CPU_ALLOC_SIZE
35  */
36 #define _GNU_SOURCE
37
38 #include <asm/msr-index.h>
39 #include <stdarg.h>
40 #include <stdio.h>
41 #include <err.h>
42 #include <unistd.h>
43 #include <sys/types.h>
44 #include <sys/wait.h>
45 #include <sys/stat.h>
46 #include <sys/resource.h>
47 #include <fcntl.h>
48 #include <signal.h>
49 #include <sys/time.h>
50 #include <stdlib.h>
51 #include <dirent.h>
52 #include <string.h>
53 #include <ctype.h>
54 #include <sched.h>
55 #include <cpuid.h>
56
57 #include "collectd.h"
58 #include "common.h"
59 #include "plugin.h"
60
61 #define PLUGIN_NAME "turbostat"
62
63 static const char *proc_stat = "/proc/stat";
64 static unsigned int skip_c0;
65 static unsigned int skip_c1;
66 static unsigned int do_nhm_cstates;
67 static unsigned int do_snb_cstates;
68 static unsigned int do_c8_c9_c10;
69 static unsigned int do_slm_cstates;
70 static unsigned int has_aperf;
71 static unsigned int has_epb;
72 static unsigned int units = 1000000000; /* GHz etc */
73 static unsigned int genuine_intel;
74 static unsigned int has_invariant_tsc;
75 static unsigned int do_nehalem_platform_info;
76 static int do_smi;
77 static unsigned int do_rapl;
78 static unsigned int do_dts;
79 static unsigned int do_ptm;
80 static unsigned int tcc_activation_temp;
81 static unsigned int tcc_activation_temp_override;
82 static double rapl_power_units, rapl_energy_units, rapl_time_units;
83 static double rapl_joule_counter_range;
84
85 #define RAPL_PKG                (1 << 0)
86                                         /* 0x610 MSR_PKG_POWER_LIMIT */
87                                         /* 0x611 MSR_PKG_ENERGY_STATUS */
88 #define RAPL_PKG_PERF_STATUS    (1 << 1)
89                                         /* 0x613 MSR_PKG_PERF_STATUS */
90 #define RAPL_PKG_POWER_INFO     (1 << 2)
91                                         /* 0x614 MSR_PKG_POWER_INFO */
92
93 #define RAPL_DRAM               (1 << 3)
94                                         /* 0x618 MSR_DRAM_POWER_LIMIT */
95                                         /* 0x619 MSR_DRAM_ENERGY_STATUS */
96                                         /* 0x61c MSR_DRAM_POWER_INFO */
97 #define RAPL_DRAM_PERF_STATUS   (1 << 4)
98                                         /* 0x61b MSR_DRAM_PERF_STATUS */
99
100 #define RAPL_CORES              (1 << 5)
101                                         /* 0x638 MSR_PP0_POWER_LIMIT */
102                                         /* 0x639 MSR_PP0_ENERGY_STATUS */
103 #define RAPL_CORE_POLICY        (1 << 6)
104                                         /* 0x63a MSR_PP0_POLICY */
105
106
107 #define RAPL_GFX                (1 << 7)
108                                         /* 0x640 MSR_PP1_POWER_LIMIT */
109                                         /* 0x641 MSR_PP1_ENERGY_STATUS */
110                                         /* 0x642 MSR_PP1_POLICY */
111 #define TJMAX_DEFAULT   100
112
113 static int aperf_mperf_unstable;
116
117 cpu_set_t *cpu_present_set, *cpu_affinity_set;
118 size_t cpu_present_setsize, cpu_affinity_setsize;
119
120 struct thread_data {
121         unsigned long long tsc;
122         unsigned long long aperf;
123         unsigned long long mperf;
124         unsigned long long c1;
125         unsigned int smi_count;
126         unsigned int cpu_id;
127         unsigned int flags;
128 #define CPU_IS_FIRST_THREAD_IN_CORE     0x2
129 #define CPU_IS_FIRST_CORE_IN_PACKAGE    0x4
130 } *thread_even, *thread_odd;
131
132 struct core_data {
133         unsigned long long c3;
134         unsigned long long c6;
135         unsigned long long c7;
136         unsigned int core_temp_c;
137         unsigned int core_id;
138 } *core_even, *core_odd;
139
140 struct pkg_data {
141         unsigned long long pc2;
142         unsigned long long pc3;
143         unsigned long long pc6;
144         unsigned long long pc7;
145         unsigned long long pc8;
146         unsigned long long pc9;
147         unsigned long long pc10;
148         unsigned int package_id;
149         unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
150         unsigned int energy_dram;       /* MSR_DRAM_ENERGY_STATUS */
151         unsigned int energy_cores;      /* MSR_PP0_ENERGY_STATUS */
152         unsigned int energy_gfx;        /* MSR_PP1_ENERGY_STATUS */
153         unsigned int rapl_pkg_perf_status;      /* MSR_PKG_PERF_STATUS */
154         unsigned int rapl_dram_perf_status;     /* MSR_DRAM_PERF_STATUS */
155         unsigned int pkg_temp_c;
156
157 } *package_even, *package_odd;
158
159 #define ODD_COUNTERS thread_odd, core_odd, package_odd
160 #define EVEN_COUNTERS thread_even, core_even, package_even
161 static _Bool is_even = 1;
162
163 static _Bool allocated = 0;
164 static _Bool initialized = 0;
165
166 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
167         (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
168                 topo.num_threads_per_core + \
169                 (core_no) * topo.num_threads_per_core + (thread_no))
170 #define GET_CORE(core_base, core_no, pkg_no) \
171         (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
172 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
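/*
 * Counter arrays are flat and indexed package-major: for example, with
 * 2 packages, 4 cores per package and 2 threads per core,
 * GET_THREAD(base, 1, 2, 1) resolves to base + 1*4*2 + 2*2 + 1 = base + 13.
 */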
173
174 struct topo_params {
175         int num_packages;
176         int num_cpus;
177         int num_cores;
178         int max_cpu_num;
179         int num_cores_per_pkg;
180         int num_threads_per_core;
181 } topo;
182
183 struct timeval tv_even, tv_odd, tv_delta;
184
185 enum return_values {
186         OK = 0,
187         ERR_CPU_MIGRATE,
188         ERR_MSR_IA32_APERF,
189         ERR_MSR_IA32_MPERF,
190         ERR_MSR_SMI_COUNT,
191         ERR_MSR_CORE_C3_RESIDENCY,
192         ERR_MSR_CORE_C6_RESIDENCY,
193         ERR_MSR_CORE_C7_RESIDENCY,
194         ERR_MSR_IA32_THERM_STATUS,
195         ERR_MSR_PKG_C3_RESIDENCY,
196         ERR_MSR_PKG_C6_RESIDENCY,
197         ERR_MSR_PKG_C2_RESIDENCY,
198         ERR_MSR_PKG_C7_RESIDENCY,
199         ERR_MSR_PKG_C8_RESIDENCY,
200         ERR_MSR_PKG_C9_RESIDENCY,
201         ERR_MSR_PKG_C10_RESIDENCY,
202         ERR_MSR_PKG_ENERGY_STATUS,
203         ERR_MSR_PP0_ENERGY_STATUS,
204         ERR_MSR_DRAM_ENERGY_STATUS,
205         ERR_MSR_PP1_ENERGY_STATUS,
206         ERR_MSR_PKG_PERF_STATUS,
207         ERR_MSR_DRAM_PERF_STATUS,
208         ERR_MSR_IA32_PACKAGE_THERM_STATUS,
209         ERR_CPU_NOT_PRESENT,
210         ERR_NO_MSR,
211         ERR_CANT_OPEN_FILE,
212         ERR_CANT_READ_NUMBER,
213         ERR_CANT_READ_PROC_STAT,
214         ERR_NO_INVARIANT_TSC,
215         ERR_NO_APERF,
216         ERR_CALLOC,
217         ERR_CPU_ALLOC,
218         ERR_NOT_ROOT,
219 };
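/*
 * Fatal error paths below generally return the negated value of one of
 * these codes, so callers (and ultimately collectd) only need to test for
 * a negative result.
 */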
220
221 static int setup_all_buffers(void);
222
223 static int
224 cpu_is_not_present(int cpu)
225 {
226         return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
227 }
228 /*
229  * run func(thread, core, package) in topology order
230  * skip non-present cpus
231  */
232
233 static int __attribute__((warn_unused_result))
234 for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
235         struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
236 {
237         int retval, pkg_no, core_no, thread_no;
238
239         for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
240                 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
241                         for (thread_no = 0; thread_no <
242                                 topo.num_threads_per_core; ++thread_no) {
243                                 struct thread_data *t;
244                                 struct core_data *c;
245                                 struct pkg_data *p;
246
247                                 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
248
249                                 if (cpu_is_not_present(t->cpu_id))
250                                         continue;
251
252                                 c = GET_CORE(core_base, core_no, pkg_no);
253                                 p = GET_PKG(pkg_base, pkg_no);
254
255                                 retval = func(t, c, p);
256                                 if (retval)
257                                         return retval;
258                         }
259                 }
260         }
261         return 0;
262 }
263
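/*
 * Pin the current thread to the target CPU: rdtsc() has no CPU argument,
 * so the only way to sample a specific CPU's TSC is to run on that CPU.
 */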
264 static int __attribute__((warn_unused_result))
265 cpu_migrate(int cpu)
266 {
267         CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
268         CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
269         if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
270                 return -ERR_CPU_MIGRATE;
271         else
272                 return 0;
273 }
274
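/*
 * Minimal usage sketch (assumes the 'msr' kernel module is loaded so that
 * /dev/cpu/0/msr exists, and that the process runs as root):
 *
 *      unsigned long long aperf;
 *      if (get_msr(0, MSR_IA32_APERF, &aperf) == 0)
 *              DEBUG("cpu0 APERF = %llu", aperf);
 */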
275 static int __attribute__((warn_unused_result))
276 get_msr(int cpu, off_t offset, unsigned long long *msr)
277 {
278         ssize_t retval;
279         char pathname[32];
280         int fd;
281
282         ssnprintf(pathname, 32, "/dev/cpu/%d/msr", cpu);
283         fd = open(pathname, O_RDONLY);
284         if (fd < 0)
285                 return -1;
286
287         retval = pread(fd, msr, sizeof *msr, offset);
288         close(fd);
289
290         if (retval != sizeof *msr) {
291                 ERROR ("%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset);
292                 return -1;
293         }
294
295         return 0;
296 }
297
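/*
 * The RAPL energy and perf-status counters are only 32 bits wide and wrap;
 * DELTA_WRAP32 stores (new - old) into 'old', adding 2^32 when the counter
 * wrapped between samples (e.g. old = 0xFFFFFFF0, new = 0x10 gives 0x20).
 */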
298 #define DELTA_WRAP32(new, old)                  \
299         if (new > old) {                        \
300                 old = new - old;                \
301         } else {                                \
302                 old = 0x100000000 + new - old;  \
303         }
304
305 static void
306 delta_package(struct pkg_data *new, struct pkg_data *old)
307 {
308         old->pc2 = new->pc2 - old->pc2;
309         old->pc3 = new->pc3 - old->pc3;
310         old->pc6 = new->pc6 - old->pc6;
311         old->pc7 = new->pc7 - old->pc7;
312         old->pc8 = new->pc8 - old->pc8;
313         old->pc9 = new->pc9 - old->pc9;
314         old->pc10 = new->pc10 - old->pc10;
315         old->pkg_temp_c = new->pkg_temp_c;
316
317         DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
318         DELTA_WRAP32(new->energy_cores, old->energy_cores);
319         DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
320         DELTA_WRAP32(new->energy_dram, old->energy_dram);
321         DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
322         DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
323 }
324
325 static void
326 delta_core(struct core_data *new, struct core_data *old)
327 {
328         old->c3 = new->c3 - old->c3;
329         old->c6 = new->c6 - old->c6;
330         old->c7 = new->c7 - old->c7;
331         old->core_temp_c = new->core_temp_c;
332 }
333
334 /*
335  * old = new - old
336  */
337 static int __attribute__((warn_unused_result))
338 delta_thread(struct thread_data *new, struct thread_data *old,
339         struct core_data *core_delta)
340 {
341         old->tsc = new->tsc - old->tsc;
342
343         /* check for TSC < 1 Mcycles over interval */
344         if (old->tsc < (1000 * 1000)) {
345                 WARNING("Insanely slow TSC rate, TSC stops in idle?\n"
346                         "You can disable all c-states by booting with \"idle=poll\"\n"
347                         "or just the deep ones with \"processor.max_cstate=1\"");
348                 return -1;
349         }
350
351         old->c1 = new->c1 - old->c1;
352
353         if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
354                 old->aperf = new->aperf - old->aperf;
355                 old->mperf = new->mperf - old->mperf;
356         } else {
357
358                 if (!aperf_mperf_unstable) {
359                         WARNING("APERF or MPERF went backwards *\n");
360                         WARNING("* Frequency results do not cover entire interval *\n");
361                         WARNING("* fix this by running Linux-2.6.30 or later *\n");
362
363                         aperf_mperf_unstable = 1;
364                 }
365                 /*
366                  * mperf delta is likely a huge "positive" number
367                  * can not use it for calculating c0 time
368                  */
369                 skip_c0 = 1;
370                 skip_c1 = 1;
371         }
372
373
374         /*
375          * As counter collection is not atomic,
376          * it is possible for mperf's non-halted cycles + idle states
377          * to exceed TSC's all cycles: show c1 = 0% in that case.
378          */
379         if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
380                 old->c1 = 0;
381         else {
382                 /* normal case, derive c1 */
383                 old->c1 = old->tsc - old->mperf - core_delta->c3
384                         - core_delta->c6 - core_delta->c7;
385         }
386
387         if (old->mperf == 0) {
388                 WARNING("cpu%d MPERF 0!\n", old->cpu_id);
389                 old->mperf = 1; /* divide by 0 protection */
390         }
391
392         if (do_smi)
393                 old->smi_count = new->smi_count - old->smi_count;
394
395         return 0;
396 }
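/*
 * After delta_thread() the per-interval cycle budget is, by construction,
 *      tsc = mperf (C0) + c1 + c3 + c6 + c7
 * (c1 is clipped to 0 when non-atomic sampling makes the other counters
 * exceed tsc), so submit_counters() can report each state as a simple
 * percentage of tsc.
 */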
397
398 static int __attribute__((warn_unused_result))
399 delta_cpu(struct thread_data *t, struct core_data *c,
400         struct pkg_data *p, struct thread_data *t2,
401         struct core_data *c2, struct pkg_data *p2)
402 {
403         int ret;
404
405         /* calculate core delta only for 1st thread in core */
406         if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
407                 delta_core(c, c2);
408
409         /* always calculate thread delta */
410         ret = delta_thread(t, t2, c2);  /* c2 is core delta */
411         if (ret != 0)
412                 return ret;
413
414         /* calculate package delta only for 1st core in package */
415         if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
416                 delta_package(p, p2);
417
418         return 0;
419 }
420
421 static unsigned long long
422 rdtsc(void)
423 {
424         unsigned int low, high;
425
426         asm volatile("rdtsc" : "=a" (low), "=d" (high));
427
428         return low | ((unsigned long long)high) << 32;
429 }
430
431
432 /*
433  * get_counters(...)
434  * migrate to cpu
435  * acquire and record local counters for that cpu
436  */
437 static int __attribute__((warn_unused_result))
438 get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
439 {
440         int cpu = t->cpu_id;
441         unsigned long long msr;
442
443         if (cpu_migrate(cpu)) {
444                 WARNING("Could not migrate to CPU %d\n", cpu);
445                 return -ERR_CPU_MIGRATE;
446         }
447
448         t->tsc = rdtsc();       /* we are running on local CPU of interest */
449
450         if (has_aperf) {
451                 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
452                         return -ERR_MSR_IA32_APERF;
453                 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
454                         return -ERR_MSR_IA32_MPERF;
455         }
456
457         if (do_smi) {
458                 if (get_msr(cpu, MSR_SMI_COUNT, &msr))
459                         return -ERR_MSR_SMI_COUNT;
460                 t->smi_count = msr & 0xFFFFFFFF;
461         }
462
463         /* collect core counters only for 1st thread in core */
464         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
465                 return 0;
466
467         if (do_nhm_cstates && !do_slm_cstates) {
468                 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
469                         return -ERR_MSR_CORE_C3_RESIDENCY;
470         }
471
472         if (do_nhm_cstates) {
473                 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
474                         return -ERR_MSR_CORE_C6_RESIDENCY;
475         }
476
477         if (do_snb_cstates)
478                 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
479                         return -ERR_MSR_CORE_C7_RESIDENCY;
480
481         if (do_dts) {
482                 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
483                         return -ERR_MSR_IA32_THERM_STATUS;
484                 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
485         }
486
487
488         /* collect package counters only for 1st core in package */
489         if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
490                 return 0;
491
492         if (do_nhm_cstates && !do_slm_cstates) {
493                 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
494                         return -ERR_MSR_PKG_C3_RESIDENCY;
495                 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
496                         return -ERR_MSR_PKG_C6_RESIDENCY;
497         }
498         if (do_snb_cstates) {
499                 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
500                         return -ERR_MSR_PKG_C2_RESIDENCY;
501                 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
502                         return -ERR_MSR_PKG_C7_RESIDENCY;
503         }
504         if (do_c8_c9_c10) {
505                 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
506                         return -ERR_MSR_PKG_C8_RESIDENCY;
507                 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
508                         return -ERR_MSR_PKG_C9_RESIDENCY;
509                 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
510                         return -ERR_MSR_PKG_C10_RESIDENCY;
511         }
512         if (do_rapl & RAPL_PKG) {
513                 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
514                         return -ERR_MSR_PKG_ENERGY_STATUS;
515                 p->energy_pkg = msr & 0xFFFFFFFF;
516         }
517         if (do_rapl & RAPL_CORES) {
518                 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
519                         return -ERR_MSR_PP0_ENERGY_STATUS;
520                 p->energy_cores = msr & 0xFFFFFFFF;
521         }
522         if (do_rapl & RAPL_DRAM) {
523                 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
524                         return -ERR_MSR_DRAM_ENERGY_STATUS;
525                 p->energy_dram = msr & 0xFFFFFFFF;
526         }
527         if (do_rapl & RAPL_GFX) {
528                 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
529                         return -ERR_MSR_PP1_ENERGY_STATUS;
530                 p->energy_gfx = msr & 0xFFFFFFFF;
531         }
532         if (do_rapl & RAPL_PKG_PERF_STATUS) {
533                 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
534                         return -ERR_MSR_PKG_PERF_STATUS;
535                 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
536         }
537         if (do_rapl & RAPL_DRAM_PERF_STATUS) {
538                 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
539                         return -ERR_MSR_DRAM_PERF_STATUS;
540                 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
541         }
542         if (do_ptm) {
543                 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
544                         return -ERR_MSR_IA32_PACKAGE_THERM_STATUS;
545                 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
546         }
547         return 0;
548 }
549
550 static void
551 free_all_buffers(void)
552 {
553         allocated = 0;
554         initialized = 0;
555
556         CPU_FREE(cpu_present_set);
557         cpu_present_set = NULL;
558         cpu_present_setsize = 0;
559
560         CPU_FREE(cpu_affinity_set);
561         cpu_affinity_set = NULL;
562         cpu_affinity_setsize = 0;
563
564         free(thread_even);
565         free(core_even);
566         free(package_even);
567
568         thread_even = NULL;
569         core_even = NULL;
570         package_even = NULL;
571
572         free(thread_odd);
573         free(core_odd);
574         free(package_odd);
575
576         thread_odd = NULL;
577         core_odd = NULL;
578         package_odd = NULL;
579 }
580
581 /*
582  * Parse a file containing a single int.
583  */
584 static int __attribute__ ((format(printf,1,2)))
585 parse_int_file(const char *fmt, ...)
586 {
587         va_list args;
588         char path[PATH_MAX];
589         FILE *filep;
590         int value;
591
592         va_start(args, fmt);
593         vsnprintf(path, sizeof(path), fmt, args);
594         va_end(args);
595         filep = fopen(path, "r");
596         if (!filep) {
597                 ERROR("%s: open failed", path);
598                 return -ERR_CANT_OPEN_FILE;
599         }
600         if (fscanf(filep, "%d", &value) != 1) {
601                 ERROR("%s: failed to parse number from file", path);
                fclose(filep);
602                 return -ERR_CANT_READ_NUMBER;
603         }
604         fclose(filep);
605         return value;
606 }
607
608 /*
609  * cpu_is_first_sibling_in_core(cpu)
610  * return 1 if given CPU is 1st HT sibling in the core
611  */
612 static int
613 cpu_is_first_sibling_in_core(int cpu)
614 {
615         return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
616 }
617
618 /*
619  * cpu_is_first_core_in_package(cpu)
620  * return 1 if given CPU is 1st core in package
621  */
622 static int
623 cpu_is_first_core_in_package(int cpu)
624 {
625         return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
626 }
627
628 static int
629 get_physical_package_id(int cpu)
630 {
631         return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
632 }
633
634 static int
635 get_core_id(int cpu)
636 {
637         return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
638 }
639
640 static int
641 get_num_ht_siblings(int cpu)
642 {
643         char path[80];
644         FILE *filep;
645         int sib1, sib2;
646         int matches;
647         char character;
648
649         ssnprintf(path, 80, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
650         filep = fopen(path, "r");
651         if (!filep) {
652                 ERROR("%s: open failed", path);
653                 return -ERR_CANT_OPEN_FILE;
654         }
655         /*
656          * file format:
657          * a pair of numbers separated by a character means 2 siblings (e.g. 1-2, or 1,4),
658          * otherwise 1 sibling (self).
659          */
660         matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
661
662         fclose(filep);
663
664         if (matches == 3)
665                 return 2;
666         else
667                 return 1;
668 }
669
670 /*
671  * run func(thread, core, package) in topology order
672  * skip non-present cpus
673  */
674
675
676 static int __attribute__((warn_unused_result))
677 for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
678         struct pkg_data *, struct thread_data *, struct core_data *,
679         struct pkg_data *), struct thread_data *thread_base,
680         struct core_data *core_base, struct pkg_data *pkg_base,
681         struct thread_data *thread_base2, struct core_data *core_base2,
682         struct pkg_data *pkg_base2)
683 {
684         int retval, pkg_no, core_no, thread_no;
685
686         for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
687                 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
688                         for (thread_no = 0; thread_no <
689                                 topo.num_threads_per_core; ++thread_no) {
690                                 struct thread_data *t, *t2;
691                                 struct core_data *c, *c2;
692                                 struct pkg_data *p, *p2;
693
694                                 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
695
696                                 if (cpu_is_not_present(t->cpu_id))
697                                         continue;
698
699                                 t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
700
701                                 c = GET_CORE(core_base, core_no, pkg_no);
702                                 c2 = GET_CORE(core_base2, core_no, pkg_no);
703
704                                 p = GET_PKG(pkg_base, pkg_no);
705                                 p2 = GET_PKG(pkg_base2, pkg_no);
706
707                                 retval = func(t, c, p, t2, c2, p2);
708                                 if (retval)
709                                         return retval;
710                         }
711                 }
712         }
713         return 0;
714 }
715
716 /*
717  * run func(cpu) on every cpu in /proc/stat
718  * return max_cpu number
719  */
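/*
 * /proc/stat begins with an aggregate "cpu" line followed by one
 * "cpuN ..." line per online CPU; only the CPU number is parsed here,
 * the per-state jiffies columns are skipped with %*d.
 */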
720 static int __attribute__((warn_unused_result))
721 for_all_proc_cpus(int (func)(int))
722 {
723         FILE *fp;
724         int cpu_num;
725         int retval;
726
727         fp = fopen(proc_stat, "r");
728         if (!fp) {
729                 ERROR("%s: open failed", proc_stat);
730                 return -ERR_CANT_OPEN_FILE;
731         }
732
733         retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
734         if (retval != 0) {
735                 ERROR("%s: failed to parse format", proc_stat);
                fclose(fp);
736                 return -ERR_CANT_READ_PROC_STAT;
737         }
738
739         while (1) {
740                 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
741                 if (retval != 1)
742                         break;
743
744                 retval = func(cpu_num);
745                 if (retval) {
746                         fclose(fp);
747                         return(retval);
748                 }
749         }
750         fclose(fp);
751         return 0;
752 }
753
754 /*
755  * count_cpus()
756  * remember the last one seen, it will be the max
757  */
758 static int
759 count_cpus(int cpu)
760 {
761         if (topo.max_cpu_num < cpu)
762                 topo.max_cpu_num = cpu;
763
764         topo.num_cpus += 1;
765         return 0;
766 }
767 static int
768 mark_cpu_present(int cpu)
769 {
770         CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
771         return 0;
772 }
773
774
775 static void
776 turbostat_submit (const char *plugin_instance,
777         const char *type, const char *type_instance,
778         gauge_t value)
779 {
780         value_list_t vl = VALUE_LIST_INIT;
781         value_t v;
782
783         v.gauge = value;
784         vl.values = &v;
785         vl.values_len = 1;
786         sstrncpy (vl.host, hostname_g, sizeof (vl.host));
787         sstrncpy (vl.plugin, PLUGIN_NAME, sizeof (vl.plugin));
788         if (plugin_instance != NULL)
789                 sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance));
790         sstrncpy (vl.type, type, sizeof (vl.type));
791         if (type_instance != NULL)
792                 sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance));
793
794         plugin_dispatch_values (&vl);
795 }
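/*
 * Values submitted here show up under collectd identifiers such as
 *   <host>/turbostat-cpu00/percent-c0
 *   <host>/turbostat-pkg00/power-Pkg_W
 * following the plugin[-instance]/type[-instance] naming scheme, with the
 * instances passed in by the callers below.
 */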
796
797 /*
798  * column formatting convention & formats
799  * package: "pk" 2 columns %2d
800  * core: "cor" 3 columns %3d
801  * CPU: "CPU" 3 columns %3d
802  * Pkg_W: %6.2
803  * Cor_W: %6.2
804  * GFX_W: %5.2
805  * RAM_W: %5.2
806  * GHz: "GHz" 3 columns %3.2
807  * TSC: "TSC" 3 columns %3.2
808  * SMI: "SMI" 4 columns %4d
809  * percentage " %pc3" %6.2
810  * Perf Status percentage: %5.2
811  * "CTMP" 4 columns %4d
812  */
813 #define NAME_LEN 12
814 static int
815 submit_counters(struct thread_data *t, struct core_data *c,
816         struct pkg_data *p)
817 {
818         char name[NAME_LEN];
819         double interval_float;
820
821         interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
822
823         snprintf(name, NAME_LEN, "cpu%02d", t->cpu_id);
824
825         if (do_nhm_cstates) {
826                 if (!skip_c0)
827                         turbostat_submit(name, "percent", "c0", 100.0 * t->mperf/t->tsc);
828                 if (!skip_c1)
829                         turbostat_submit(name, "percent", "c1", 100.0 * t->c1/t->tsc);
830         }
831
832         /* GHz */
833         if (has_aperf && ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc))))
834                 turbostat_submit(NULL, "frequency", name, 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
835
836         /* SMI */
837         if (do_smi)
838                 turbostat_submit(NULL, "current", name, t->smi_count);
839
840         /* print per-core data only for 1st thread in core */
841         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
842                 goto done;
843
844         snprintf(name, NAME_LEN, "core%02d", c->core_id);
845
846         if (do_nhm_cstates && !do_slm_cstates)
847                 turbostat_submit(name, "percent", "c3", 100.0 * c->c3/t->tsc);
848         if (do_nhm_cstates)
849                 turbostat_submit(name, "percent", "c6", 100.0 * c->c6/t->tsc);
850         if (do_snb_cstates)
851                 turbostat_submit(name, "percent", "c7", 100.0 * c->c7/t->tsc);
852
853         if (do_dts)
854                 turbostat_submit(NULL, "temperature", name, c->core_temp_c);
855
856         /* print per-package data only for 1st core in package */
857         if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
858                 goto done;
859
860         snprintf(name, NAME_LEN, "pkg%02d", p->package_id);
861
862         if (do_ptm)
863                 turbostat_submit(NULL, "temperature", name, p->pkg_temp_c);
864
865         if (do_snb_cstates)
866                 turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2/t->tsc);
867         if (do_nhm_cstates && !do_slm_cstates)
868                 turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3/t->tsc);
869         if (do_nhm_cstates && !do_slm_cstates)
870                 turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6/t->tsc);
871         if (do_snb_cstates)
872                 turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7/t->tsc);
873         if (do_c8_c9_c10) {
874                 turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8/t->tsc);
875                 turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9/t->tsc);
876                 turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10/t->tsc);
877         }
878
879         if (do_rapl) {
880                 if (do_rapl & RAPL_PKG)
881                         turbostat_submit(name, "power", "Pkg_W", p->energy_pkg * rapl_energy_units / interval_float);
882                 if (do_rapl & RAPL_CORES)
883                         turbostat_submit(name, "power", "Cor_W", p->energy_cores * rapl_energy_units / interval_float);
884                 if (do_rapl & RAPL_GFX)
885                         turbostat_submit(name, "power", "GFX_W", p->energy_gfx * rapl_energy_units / interval_float);
886                 if (do_rapl & RAPL_DRAM)
887                         turbostat_submit(name, "power", "RAM_W", p->energy_dram * rapl_energy_units / interval_float);
888         }
889 done:
890         return 0;
891 }
892
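/*
 * The read callback double-buffers: counters alternate between the EVEN
 * and ODD sets, the freshly sampled set is diffed against the previous one
 * (the delta overwrites the older set), and that delta is then submitted.
 */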
893 static int
894 turbostat_read(user_data_t * not_used)
895 {
896         int ret;
897
898         if (!allocated) {
899                 if ((ret = setup_all_buffers()) < 0)
900                         return ret;
901         }
902
903         if (for_all_proc_cpus(cpu_is_not_present)) {
904                 free_all_buffers();
905                 if ((ret = setup_all_buffers()) < 0)
906                         return ret;
907                 if (for_all_proc_cpus(cpu_is_not_present))
908                         return -ERR_CPU_NOT_PRESENT;
909         }
910
911         if (!initialized) {
912                 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
913                         return ret;
914                 gettimeofday(&tv_even, (struct timezone *)NULL);
915                 is_even = 1;
916                 initialized = 1;
917                 return 0;
918         }
919
920         if (is_even) {
921                 if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
922                         return ret;
923                 gettimeofday(&tv_odd, (struct timezone *)NULL);
924                 is_even = 0;
925                 timersub(&tv_odd, &tv_even, &tv_delta);
926                 if ((ret = for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) < 0)
927                         return ret;
928                 if ((ret = for_all_cpus(submit_counters, EVEN_COUNTERS)) < 0)
929                         return ret;
930         } else {
931                 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
932                         return ret;
933                 gettimeofday(&tv_even, (struct timezone *)NULL);
934                 is_even = 1;
935                 timersub(&tv_even, &tv_odd, &tv_delta);
936                 if ((ret = for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) < 0)
937                         return ret;
938                 if ((ret = for_all_cpus(submit_counters, ODD_COUNTERS)) < 0)
939                         return ret;
940         }
941         return 0;
942 }
943
944 static int __attribute__((warn_unused_result))
945 check_dev_msr()
946 {
947         struct stat sb;
948
949         if (stat("/dev/cpu/0/msr", &sb)) {
950                 ERROR("no /dev/cpu/0/msr\n"
951                         "Try \"# modprobe msr\"");
952                 return -ERR_NO_MSR;
953         }
954         return 0;
955 }
956
957 static int __attribute__((warn_unused_result))
958 check_super_user()
959 {
960         if (getuid() != 0) {
961                 ERROR("must be root");
962                 return -ERR_NOT_ROOT;
963         }
964         return 0;
965 }
966
967
968 #define RAPL_POWER_GRANULARITY  0x7FFF  /* 15 bit power granularity */
969 #define RAPL_TIME_GRANULARITY   0x3F /* 6 bit time granularity */
970
971 static double
972 get_tdp(unsigned int model)
973 {
974         unsigned long long msr;
975
976         if (do_rapl & RAPL_PKG_POWER_INFO)
977                 if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
978                         return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
979
980         switch (model) {
981         case 0x37:
982         case 0x4D:
983                 return 30.0;
984         default:
985                 return 135.0;
986         }
987 }
988
989
990 /*
991  * rapl_probe()
992  *
993  * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
994  */
995 static void
996 rapl_probe(unsigned int family, unsigned int model)
997 {
998         unsigned long long msr;
999         unsigned int time_unit;
1000         double tdp;
1001
1002         if (!genuine_intel)
1003                 return;
1004
1005         if (family != 6)
1006                 return;
1007
1008         switch (model) {
1009         case 0x2A:
1010         case 0x3A:
1011         case 0x3C:      /* HSW */
1012         case 0x45:      /* HSW */
1013         case 0x46:      /* HSW */
1014                 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
1015                 break;
1016         case 0x3F:      /* HSX */
1017                 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
1018                 break;
1019         case 0x2D:
1020         case 0x3E:
1021                 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
1022                 break;
1023         case 0x37:      /* BYT */
1024         case 0x4D:      /* AVN */
1025                 do_rapl = RAPL_PKG | RAPL_CORES ;
1026                 break;
1027         default:
1028                 return;
1029         }
1030
1031         /* units on package 0, verify later other packages match */
1032         if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
1033                 return;
1034
1035         rapl_power_units = 1.0 / (1 << (msr & 0xF));
1036         if (model == 0x37)
1037                 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
1038         else
1039                 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
1040
1041         time_unit = msr >> 16 & 0xF;
1042         if (time_unit == 0)
1043                 time_unit = 0xA;
1044
1045         rapl_time_units = 1.0 / (1 << (time_unit));
1046
1047         tdp = get_tdp(model);
1048
1049         rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
1050         DEBUG("RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n",
1051                 rapl_joule_counter_range, tdp);
1052
1053         return;
1054 }
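/*
 * Example: a common MSR_RAPL_POWER_UNIT energy-unit exponent of 16 yields
 * rapl_energy_units = 1/2^16 J (about 15.3 uJ), so the 32-bit energy
 * counters wrap after roughly 65536 J; rapl_joule_counter_range expresses
 * that wrap interval in seconds when drawing the package TDP.
 */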
1055
1056 static int
1057 is_snb(unsigned int family, unsigned int model)
1058 {
1059         if (!genuine_intel)
1060                 return 0;
1061
1062         switch (model) {
1063         case 0x2A:
1064         case 0x2D:
1065         case 0x3A:      /* IVB */
1066         case 0x3E:      /* IVB Xeon */
1067         case 0x3C:      /* HSW */
1068         case 0x3F:      /* HSW */
1069         case 0x45:      /* HSW */
1070         case 0x46:      /* HSW */
1071                 return 1;
1072         }
1073         return 0;
1074 }
1075
1076 static int
1077 has_c8_c9_c10(unsigned int family, unsigned int model)
1078 {
1079         if (!genuine_intel)
1080                 return 0;
1081
1082         switch (model) {
1083         case 0x45:
1084                 return 1;
1085         }
1086         return 0;
1087 }
1088
1089
1090 static int
1091 is_slm(unsigned int family, unsigned int model)
1092 {
1093         if (!genuine_intel)
1094                 return 0;
1095         switch (model) {
1096         case 0x37:      /* BYT */
1097         case 0x4D:      /* AVN */
1098                 return 1;
1099         }
1100         return 0;
1101 }
1102
1103 /*
1104  * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
1105  * the Thermal Control Circuit (TCC) activates.
1106  * This is usually equal to tjMax.
1107  *
1108  * Older processors do not have this MSR, so there we guess,
1109  * but also allow an explicit override via tcc_activation_temp_override.
1110  *
1111  * Several MSR temperature values are in units of degrees-C
1112  * below this value, including the Digital Thermal Sensor (DTS),
1113  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
1114  */
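/*
 * Example: with tjMax = 100 C and a digital readout of 30 in bits 22:16 of
 * IA32_THERM_STATUS, the reported core temperature is 100 - 30 = 70 C
 * (the tcc_activation_temp - ((msr >> 16) & 0x7F) computation in
 * get_counters()).
 */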
1115 static int __attribute__((warn_unused_result))
1116 set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1117 {
1118         unsigned long long msr;
1119         unsigned int target_c_local;
1120         int cpu;
1121
1122         /* tcc_activation_temp is used only for dts or ptm */
1123         if (!(do_dts || do_ptm))
1124                 return 0;
1125
1126         /* this is a per-package concept */
1127         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1128                 return 0;
1129
1130         cpu = t->cpu_id;
1131         if (cpu_migrate(cpu)) {
1132                 ERROR("Could not migrate to CPU %d\n", cpu);
1133                 return -ERR_CPU_MIGRATE;
1134         }
1135
1136         if (tcc_activation_temp_override != 0) {
1137                 tcc_activation_temp = tcc_activation_temp_override;
1138                 NOTICE("cpu%d: Using configured TCC Target (%d C)\n",
1139                         cpu, tcc_activation_temp);
1140                 return 0;
1141         }
1142
1143         /* Temperature Target MSR is Nehalem and newer only */
1144         if (!do_nehalem_platform_info)
1145                 goto guess;
1146
1147         if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
1148                 goto guess;
1149
1150         target_c_local = (msr >> 16) & 0x7F;
1151
1152         if (target_c_local < 85 || target_c_local > 127)
1153                 goto guess;
1154
1155         tcc_activation_temp = target_c_local;
1156
1157         return 0;
1158
1159 guess:
1160         tcc_activation_temp = TJMAX_DEFAULT;
1161         WARNING("cpu%d: Guessing tjMax %d C, reported temperatures may be offset\n",
1162                 cpu, tcc_activation_temp);
1163
1164         return 0;
1165 }
1166
1167 static int __attribute__((warn_unused_result))
1168 check_cpuid()
1169 {
1170         unsigned int eax, ebx, ecx, edx, max_level;
1171         unsigned int fms, family, model;
1172
1173         eax = ebx = ecx = edx = 0;
1174
1175         __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
1176
1177         if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
1178                 genuine_intel = 1;
1179
1180         fms = 0;
1181         __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1182         family = (fms >> 8) & 0xf;
1183         model = (fms >> 4) & 0xf;
1184         if (family == 6 || family == 0xf)
1185                 model += ((fms >> 16) & 0xf) << 4;
1186
1187         if (!(edx & (1 << 5))) {
1188                 ERROR("CPUID: no MSR");
1189                 return -ERR_NO_MSR;
1190         }
1191
1192         /*
1193          * check max extended function levels of CPUID.
1194          * This is needed to check for invariant TSC.
1195          * This check is valid for both Intel and AMD.
1196          */
1197         ebx = ecx = edx = 0;
1198         __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
1199
1200         if (max_level < 0x80000007) {
1201                 ERROR("CPUID: no invariant TSC (max_level 0x%x)", max_level);
1202                 return -ERR_NO_INVARIANT_TSC;
1203         }
1204
1205         /*
1206          * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
1207          * this check is valid for both Intel and AMD
1208          */
1209         __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
1210         has_invariant_tsc = edx & (1 << 8);
1211
1212         if (!has_invariant_tsc) {
1213                 ERROR("No invariant TSC");
1214                 return -ERR_NO_INVARIANT_TSC;
1215         }
1216
1217         /*
1218          * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
1219          * this check is valid for both Intel and AMD
1220          */
1221
1222         __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
1223         has_aperf = ecx & (1 << 0);
1224         do_dts = eax & (1 << 0);
1225         do_ptm = eax & (1 << 6);
1226         has_epb = ecx & (1 << 3);
1227
1228         if (!has_aperf) {
1229                 ERROR("No APERF");
1230                 return -ERR_NO_APERF;
1231         }
1232
1233         do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
1234         do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
1235         do_smi = do_nhm_cstates;
1236         do_snb_cstates = is_snb(family, model);
1237         do_c8_c9_c10 = has_c8_c9_c10(family, model);
1238         do_slm_cstates = is_slm(family, model);
1239
1240         rapl_probe(family, model);
1241
1242         return 0;
1243 }
1244
1245
1246
1247 static int __attribute__((warn_unused_result))
1248 topology_probe()
1249 {
1250         int i;
1251         int ret;
1252         int max_core_id = 0;
1253         int max_package_id = 0;
1254         int max_siblings = 0;
1255         struct cpu_topology {
1256                 int core_id;
1257                 int physical_package_id;
1258         } *cpus;
1259
1260         /* Initialize num_cpus, max_cpu_num */
1261         topo.num_cpus = 0;
1262         topo.max_cpu_num = 0;
1263         ret = for_all_proc_cpus(count_cpus);
1264         if (ret < 0)
1265                 return ret;
1266
1267         DEBUG("num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
1268
1269         cpus = calloc(1, (topo.max_cpu_num  + 1) * sizeof(struct cpu_topology));
1270         if (cpus == NULL) {
1271                 ERROR("calloc cpus");
1272                 return -ERR_CALLOC;
1273         }
1274
1275         /*
1276          * Allocate and initialize cpu_present_set
1277          */
1278         cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
1279         if (cpu_present_set == NULL) {
1280                 free(cpus);
1281                 ERROR("CPU_ALLOC");
1282                 return -ERR_CPU_ALLOC;
1283         }
1284         cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
1285         CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
1286         ret = for_all_proc_cpus(mark_cpu_present);
1287         if (ret < 0) {
1288                 free(cpus);
1289                 return ret;
1290         }
1291
1292         /*
1293          * Allocate and initialize cpu_affinity_set
1294          */
1295         cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
1296         if (cpu_affinity_set == NULL) {
1297                 free(cpus);
1298                 ERROR("CPU_ALLOC");
1299                 return -ERR_CPU_ALLOC;
1300         }
1301         cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
1302         CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
1303
1304
1305         /*
1306          * For online cpus
1307          * find max_core_id, max_package_id
1308          */
1309         for (i = 0; i <= topo.max_cpu_num; ++i) {
1310                 int siblings;
1311
1312                 if (cpu_is_not_present(i)) {
1313                         DEBUG("cpu%d NOT PRESENT\n", i);
1315                         continue;
1316                 }
1317                 cpus[i].core_id = get_core_id(i);
1318                 if (cpus[i].core_id < 0)
1319                         return cpus[i].core_id;
1320                 if (cpus[i].core_id > max_core_id)
1321                         max_core_id = cpus[i].core_id;
1322
1323                 cpus[i].physical_package_id = get_physical_package_id(i);
1324                 if (cpus[i].physical_package_id < 0)
1325                         return cpus[i].physical_package_id;
1326                 if (cpus[i].physical_package_id > max_package_id)
1327                         max_package_id = cpus[i].physical_package_id;
1328
1329                 siblings = get_num_ht_siblings(i);
1330                 if (siblings < 0)
1331                         return siblings;
1332                 if (siblings > max_siblings)
1333                         max_siblings = siblings;
1334                 DEBUG("cpu %d pkg %d core %d\n",
1335                         i, cpus[i].physical_package_id, cpus[i].core_id);
1336         }
1337         topo.num_cores_per_pkg = max_core_id + 1;
1338         DEBUG("max_core_id %d, sizing for %d cores per package\n",
1339                 max_core_id, topo.num_cores_per_pkg);
1340
1341         topo.num_packages = max_package_id + 1;
1342         DEBUG("max_package_id %d, sizing for %d packages\n",
1343                 max_package_id, topo.num_packages);
1344
1345         topo.num_threads_per_core = max_siblings;
1346         DEBUG("max_siblings %d\n", max_siblings);
1347
1348         free(cpus);
1349         return 0;
1350 }
1351
1352 static int
1353 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
1354 {
1355         int i;
1356
1357         *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
1358                 topo.num_packages, sizeof(struct thread_data));
1359         if (*t == NULL)
1360                 goto error;
1361
1362         for (i = 0; i < topo.num_threads_per_core *
1363                 topo.num_cores_per_pkg * topo.num_packages; i++)
1364                 (*t)[i].cpu_id = -1;
1365
1366         *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
1367                 sizeof(struct core_data));
1368         if (*c == NULL)
1369                 goto error;
1370
1371         for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
1372                 (*c)[i].core_id = -1;
1373
1374         *p = calloc(topo.num_packages, sizeof(struct pkg_data));
1375         if (*p == NULL)
1376                 goto error;
1377
1378         for (i = 0; i < topo.num_packages; i++)
1379                 (*p)[i].package_id = i;
1380
1381         return 0;
1382 error:
1383         ERROR("calloc counters");
1384         return -ERR_CALLOC;
1385 }
1386 /*
1387  * init_counter()
1388  *
1389  * set cpu_id, core_num, pkg_num
1390  * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
1391  *
1392  * increment topo.num_cores when 1st core in pkg seen
1393  */
1394 static int
1395 init_counter(struct thread_data *thread_base, struct core_data *core_base,
1396         struct pkg_data *pkg_base, int thread_num, int core_num,
1397         int pkg_num, int cpu_id)
1398 {
1399         int ret;
1400         struct thread_data *t;
1401         struct core_data *c;
1402         struct pkg_data *p;
1403
1404         t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
1405         c = GET_CORE(core_base, core_num, pkg_num);
1406         p = GET_PKG(pkg_base, pkg_num);
1407
1408         t->cpu_id = cpu_id;
1409         if (thread_num == 0) {
1410                 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
1411                 if ((ret = cpu_is_first_core_in_package(cpu_id)) < 0) {
1412                         return ret;
1413                 } else if (ret != 0) {
1414                         t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
1415                 }
1416         }
1417
1418         c->core_id = core_num;
1419         p->package_id = pkg_num;
1420
1421         return 0;
1422 }
1423
1424
1425 static int
1426 initialize_counters(int cpu_id)
1427 {
1428         int my_thread_id, my_core_id, my_package_id;
1429         int ret;
1430
1431         my_package_id = get_physical_package_id(cpu_id);
1432         if (my_package_id < 0)
1433                 return my_package_id;
1434         my_core_id = get_core_id(cpu_id);
1435         if (my_core_id < 0)
1436                 return my_core_id;
1437
1438         if ((ret = cpu_is_first_sibling_in_core(cpu_id)) < 0) {
1439                 return ret;
1440         } else if (ret != 0) {
1441                 my_thread_id = 0;
1442                 topo.num_cores++;
1443         } else {
1444                 my_thread_id = 1;
1445         }
1446
1447         ret = init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
1448         if (ret < 0)
1449                 return ret;
1450         ret = init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
1451         if (ret < 0)
1452                 return ret;
1453         return 0;
1454 }
1455
1456 #define DO_OR_GOTO_ERR(something) \
1457 do {                         \
1458         ret = (something);     \
1459         if (ret < 0)         \
1460                 goto err;    \
1461 } while (0)
1462
1463 static int setup_all_buffers(void)
1464 {
1465         int ret;
1466
1467         DO_OR_GOTO_ERR(topology_probe());
1468         DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
1469         DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
1470         DO_OR_GOTO_ERR(for_all_proc_cpus(initialize_counters));
1471
1472         allocated = 1;
1473         return 0;
1474 err:
1475         free_all_buffers();
1476         return ret;
1477 }
1478
1479 static int
1480 turbostat_init(void)
1481 {
1482         int ret;
1483
1484         DO_OR_GOTO_ERR(check_cpuid());
1485         DO_OR_GOTO_ERR(check_dev_msr());
1486         DO_OR_GOTO_ERR(check_super_user());
1487         DO_OR_GOTO_ERR(setup_all_buffers());
1488         DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
1489
1490         plugin_register_complex_read(NULL, PLUGIN_NAME, turbostat_read, NULL, NULL);
1491
1492         return 0;
1493 err:
1494         free_all_buffers();
1495         return ret;
1496 }
1497
1498 void module_register(void);
1499 void module_register(void)
1500 {
1501         plugin_register_init(PLUGIN_NAME, turbostat_init);
1502 }