Turbostat: remove 'units' variable, only used once
[collectd.git] / src / turbostat.c
1 /*
2  * turbostat -- Log CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors for collectd.
4  *
5  * Based on the 'turbostat' tool of the Linux kernel, found at
6  * linux/tools/power/x86/turbostat/turbostat.c:
7  * ----
8  * Copyright (c) 2013 Intel Corporation.
9  * Len Brown <len.brown@intel.com>
10  *
11  * This program is free software; you can redistribute it and/or modify it
12  * under the terms and conditions of the GNU General Public License,
13  * version 2, as published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  * more details.
19  *
20  * You should have received a copy of the GNU General Public License along with
21  * this program; if not, write to the Free Software Foundation, Inc.,
22  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23  * ----
24  * Ported to collectd by Vincent Brillault <git@lerya.net>
25  */
26
27 /*
28  * _GNU_SOURCE is required because of the following functions:
29  * - CPU_ISSET_S
30  * - CPU_ZERO_S
31  * - CPU_SET_S
32  * - CPU_FREE
33  * - CPU_ALLOC
34  * - CPU_ALLOC_SIZE
35  */
36 #define _GNU_SOURCE
37
38 #include <asm/msr-index.h>
39 #include <stdarg.h>
40 #include <stdio.h>
41 #include <err.h>
42 #include <unistd.h>
43 #include <sys/types.h>
44 #include <sys/wait.h>
45 #include <sys/stat.h>
46 #include <sys/resource.h>
47 #include <fcntl.h>
48 #include <signal.h>
49 #include <sys/time.h>
50 #include <stdlib.h>
51 #include <dirent.h>
52 #include <string.h>
53 #include <ctype.h>
54 #include <sched.h>
55 #include <cpuid.h>
56
57 #include "collectd.h"
58 #include "common.h"
59 #include "plugin.h"
60
61 #define PLUGIN_NAME "turbostat"
62
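/*
 * Capability flags and RAPL unit multipliers below are filled in once,
 * at plugin init: check_cpuid() sets the CPUID-derived flags
 * (genuine_intel, has_aperf, do_*_cstates, do_dts, do_ptm, ...) and
 * rapl_probe() sets do_rapl and the rapl_*_units conversion factors.
 */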
63 static const char *proc_stat = "/proc/stat";
64 static unsigned int skip_c0;
65 static unsigned int skip_c1;
66 static unsigned int do_nhm_cstates;
67 static unsigned int do_snb_cstates;
68 static unsigned int do_c8_c9_c10;
69 static unsigned int do_slm_cstates;
70 static unsigned int has_aperf;
71 static unsigned int has_epb;
72 static unsigned int genuine_intel;
73 static unsigned int has_invariant_tsc;
74 static unsigned int do_nehalem_platform_info;
75 static int do_smi;
76 static unsigned int do_rapl;
77 static unsigned int do_dts;
78 static unsigned int do_ptm;
79 static unsigned int tcc_activation_temp;
80 static unsigned int tcc_activation_temp_override;
81 static double rapl_power_units, rapl_energy_units, rapl_time_units;
82 static double rapl_joule_counter_range;
83
84 #define RAPL_PKG                (1 << 0)
85                                         /* 0x610 MSR_PKG_POWER_LIMIT */
86                                         /* 0x611 MSR_PKG_ENERGY_STATUS */
87 #define RAPL_PKG_PERF_STATUS    (1 << 1)
88                                         /* 0x613 MSR_PKG_PERF_STATUS */
89 #define RAPL_PKG_POWER_INFO     (1 << 2)
90                                         /* 0x614 MSR_PKG_POWER_INFO */
91
92 #define RAPL_DRAM               (1 << 3)
93                                         /* 0x618 MSR_DRAM_POWER_LIMIT */
94                                         /* 0x619 MSR_DRAM_ENERGY_STATUS */
95                                         /* 0x61c MSR_DRAM_POWER_INFO */
96 #define RAPL_DRAM_PERF_STATUS   (1 << 4)
97                                         /* 0x61b MSR_DRAM_PERF_STATUS */
98
99 #define RAPL_CORES              (1 << 5)
100                                         /* 0x638 MSR_PP0_POWER_LIMIT */
101                                         /* 0x639 MSR_PP0_ENERGY_STATUS */
102 #define RAPL_CORE_POLICY        (1 << 6)
103                                         /* 0x63a MSR_PP0_POLICY */
104
105
106 #define RAPL_GFX                (1 << 7)
107                                         /* 0x640 MSR_PP1_POWER_LIMIT */
108                                         /* 0x641 MSR_PP1_ENERGY_STATUS */
109                                         /* 0x642 MSR_PP1_POLICY */
110 #define TJMAX_DEFAULT   100
111
112 int aperf_mperf_unstable;
113 int backwards_count;
114 char *progname;
115
116 cpu_set_t *cpu_present_set, *cpu_affinity_set;
117 size_t cpu_present_setsize, cpu_affinity_setsize;
118
119 struct thread_data {
120         unsigned long long tsc;
121         unsigned long long aperf;
122         unsigned long long mperf;
123         unsigned long long c1;
124         unsigned int smi_count;
125         unsigned int cpu_id;
126         unsigned int flags;
127 #define CPU_IS_FIRST_THREAD_IN_CORE     0x2
128 #define CPU_IS_FIRST_CORE_IN_PACKAGE    0x4
129 } *thread_even, *thread_odd;
130
131 struct core_data {
132         unsigned long long c3;
133         unsigned long long c6;
134         unsigned long long c7;
135         unsigned int core_temp_c;
136         unsigned int core_id;
137 } *core_even, *core_odd;
138
139 struct pkg_data {
140         unsigned long long pc2;
141         unsigned long long pc3;
142         unsigned long long pc6;
143         unsigned long long pc7;
144         unsigned long long pc8;
145         unsigned long long pc9;
146         unsigned long long pc10;
147         unsigned int package_id;
148         unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
149         unsigned int energy_dram;       /* MSR_DRAM_ENERGY_STATUS */
150         unsigned int energy_cores;      /* MSR_PP0_ENERGY_STATUS */
151         unsigned int energy_gfx;        /* MSR_PP1_ENERGY_STATUS */
152         unsigned int rapl_pkg_perf_status;      /* MSR_PKG_PERF_STATUS */
153         unsigned int rapl_dram_perf_status;     /* MSR_DRAM_PERF_STATUS */
154         unsigned int pkg_temp_c;
155
156 } *package_even, *package_odd;
157
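/*
 * Counters are kept in two full sets of buffers ("even" and "odd").
 * turbostat_read() alternately fills one set, then computes the delta
 * against the other set, so every dispatch covers one read interval.
 */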
158 #define ODD_COUNTERS thread_odd, core_odd, package_odd
159 #define EVEN_COUNTERS thread_even, core_even, package_even
160 static _Bool is_even = 1;
161
162 static _Bool allocated = 0;
163 static _Bool initialized = 0;
164
165 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
166         (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
167                 topo.num_threads_per_core + \
168                 (core_no) * topo.num_threads_per_core + (thread_no))
169 #define GET_CORE(core_base, core_no, pkg_no) \
170         (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
171 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
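/*
 * The per-thread/core/package structures are stored in flat arrays
 * indexed by the macros above, e.g. with 2 packages, 4 cores per
 * package and 2 threads per core, GET_THREAD(base, 1, 2, 1) resolves
 * to base + 1*4*2 + 2*2 + 1 = base + 13.
 */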
172
173 struct topo_params {
174         int num_packages;
175         int num_cpus;
176         int num_cores;
177         int max_cpu_num;
178         int num_cores_per_pkg;
179         int num_threads_per_core;
180 } topo;
181
182 struct timeval tv_even, tv_odd, tv_delta;
183
184 enum return_values {
185         OK = 0,
186         ERR_CPU_MIGRATE,
187         ERR_MSR_IA32_APERF,
188         ERR_MSR_IA32_MPERF,
189         ERR_MSR_SMI_COUNT,
190         ERR_MSR_CORE_C3_RESIDENCY,
191         ERR_MSR_CORE_C6_RESIDENCY,
192         ERR_MSR_CORE_C7_RESIDENCY,
193         ERR_MSR_IA32_THERM_STATUS,
194         ERR_MSR_PKG_C3_RESIDENCY,
195         ERR_MSR_PKG_C6_RESIDENCY,
196         ERR_MSR_PKG_C2_RESIDENCY,
197         ERR_MSR_PKG_C7_RESIDENCY,
198         ERR_MSR_PKG_C8_RESIDENCY,
199         ERR_MSR_PKG_C9_RESIDENCY,
200         ERR_MSR_PKG_C10_RESIDENCY,
201         ERR_MSR_PKG_ENERGY_STATUS,
202         ERR_MSR_PP0_ENERGY_STATUS,
203         ERR_MSR_DRAM_ENERGY_STATUS,
204         ERR_MSR_PP1_ENERGY_STATUS,
205         ERR_MSR_PKG_PERF_STATUS,
206         ERR_MSR_DRAM_PERF_STATUS,
207         ERR_MSR_IA32_PACKAGE_THERM_STATUS,
208         ERR_CPU_NOT_PRESENT,
209         ERR_NO_MSR,
210         ERR_CANT_OPEN_FILE,
211         ERR_CANT_READ_NUMBER,
212         ERR_CANT_READ_PROC_STAT,
213         ERR_NO_INVARIANT_TSC,
214         ERR_NO_APERF,
215         ERR_CALLOC,
216         ERR_CPU_ALLOC,
217         ERR_NOT_ROOT,
218 };
219
220 static int setup_all_buffers(void);
221
222 static int
223 cpu_is_not_present(int cpu)
224 {
225         return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
226 }
227 /*
228  * run func(thread, core, package) in topology order
229  * skip non-present cpus
230  */
231
232 static int __attribute__((warn_unused_result))
233 for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
234         struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
235 {
236         int retval, pkg_no, core_no, thread_no;
237
238         for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
239                 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
240                         for (thread_no = 0; thread_no <
241                                 topo.num_threads_per_core; ++thread_no) {
242                                 struct thread_data *t;
243                                 struct core_data *c;
244                                 struct pkg_data *p;
245
246                                 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
247
248                                 if (cpu_is_not_present(t->cpu_id))
249                                         continue;
250
251                                 c = GET_CORE(core_base, core_no, pkg_no);
252                                 p = GET_PKG(pkg_base, pkg_no);
253
254                                 retval = func(t, c, p);
255                                 if (retval)
256                                         return retval;
257                         }
258                 }
259         }
260         return 0;
261 }
262
263 static int __attribute__((warn_unused_result))
264 cpu_migrate(int cpu)
265 {
266         CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
267         CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
268         if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
269                 return -ERR_CPU_MIGRATE;
270         else
271                 return 0;
272 }
273
274 static int __attribute__((warn_unused_result))
275 get_msr(int cpu, off_t offset, unsigned long long *msr)
276 {
277         ssize_t retval;
278         char pathname[32];
279         int fd;
280
281         ssnprintf(pathname, 32, "/dev/cpu/%d/msr", cpu);
282         fd = open(pathname, O_RDONLY);
283         if (fd < 0)
284                 return -1;
285
286         retval = pread(fd, msr, sizeof *msr, offset);
287         close(fd);
288
289         if (retval != sizeof *msr) {
290                 ERROR ("%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset);
291                 return -1;
292         }
293
294         return 0;
295 }
296
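/*
 * The RAPL energy and perf status MSRs are 32-bit counters that wrap.
 * DELTA_WRAP32 stores (new - old) into 'old', adding 2^32 when the
 * counter wrapped: e.g. old = 0xFFFFFFF0, new = 0x10 yields 0x20.
 */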
297 #define DELTA_WRAP32(new, old)                  \
298         if (new > old) {                        \
299                 old = new - old;                \
300         } else {                                \
301                 old = 0x100000000 + new - old;  \
302         }
303
304 static void
305 delta_package(struct pkg_data *new, struct pkg_data *old)
306 {
307         old->pc2 = new->pc2 - old->pc2;
308         old->pc3 = new->pc3 - old->pc3;
309         old->pc6 = new->pc6 - old->pc6;
310         old->pc7 = new->pc7 - old->pc7;
311         old->pc8 = new->pc8 - old->pc8;
312         old->pc9 = new->pc9 - old->pc9;
313         old->pc10 = new->pc10 - old->pc10;
314         old->pkg_temp_c = new->pkg_temp_c;
315
316         DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
317         DELTA_WRAP32(new->energy_cores, old->energy_cores);
318         DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
319         DELTA_WRAP32(new->energy_dram, old->energy_dram);
320         DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
321         DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
322 }
323
324 static void
325 delta_core(struct core_data *new, struct core_data *old)
326 {
327         old->c3 = new->c3 - old->c3;
328         old->c6 = new->c6 - old->c6;
329         old->c7 = new->c7 - old->c7;
330         old->core_temp_c = new->core_temp_c;
331 }
332
333 /*
334  * old = new - old
335  */
336 static int __attribute__((warn_unused_result))
337 delta_thread(struct thread_data *new, struct thread_data *old,
338         struct core_data *core_delta)
339 {
340         old->tsc = new->tsc - old->tsc;
341
342         /* check for TSC < 1 Mcycles over interval */
343         if (old->tsc < (1000 * 1000)) {
344                 WARNING("Insanely slow TSC rate, TSC stops in idle?\n"
345                         "You can disable all c-states by booting with \"idle=poll\"\n"
346                         "or just the deep ones with \"processor.max_cstate=1\"");
347                 return -1;
348         }
349
350         old->c1 = new->c1 - old->c1;
351
352         if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
353                 old->aperf = new->aperf - old->aperf;
354                 old->mperf = new->mperf - old->mperf;
355         } else {
356
357                 if (!aperf_mperf_unstable) {
358                         WARNING("APERF or MPERF went backwards\n");
359                         WARNING("Frequency results do not cover entire interval\n");
360                         WARNING("Fix this by running Linux-2.6.30 or later\n");
361
362                         aperf_mperf_unstable = 1;
363                 }
364                 /*
365                  * mperf delta is likely a huge "positive" number
366                  * can not use it for calculating c0 time
367                  */
368                 skip_c0 = 1;
369                 skip_c1 = 1;
370         }
371
372
373         /*
374          * As counter collection is not atomic,
375          * it is possible for mperf's non-halted cycles + idle states
376          * to exceed TSC's all cycles: show c1 = 0% in that case.
377          */
378         if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
379                 old->c1 = 0;
380         else {
381                 /* normal case, derive c1 */
382                 old->c1 = old->tsc - old->mperf - core_delta->c3
383                         - core_delta->c6 - core_delta->c7;
384         }
385
386         if (old->mperf == 0) {
387                 WARNING("cpu%d MPERF 0!\n", old->cpu_id);
388                 old->mperf = 1; /* divide by 0 protection */
389         }
390
391         if (do_smi)
392                 old->smi_count = new->smi_count - old->smi_count;
393
394         return 0;
395 }
396
397 static int __attribute__((warn_unused_result))
398 delta_cpu(struct thread_data *t, struct core_data *c,
399         struct pkg_data *p, struct thread_data *t2,
400         struct core_data *c2, struct pkg_data *p2)
401 {
402         int ret;
403
404         /* calculate core delta only for 1st thread in core */
405         if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
406                 delta_core(c, c2);
407
408         /* always calculate thread delta */
409         ret = delta_thread(t, t2, c2);  /* c2 is core delta */
410         if (ret != 0)
411                 return ret;
412
413         /* calculate package delta only for 1st core in package */
414         if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
415                 delta_package(p, p2);
416
417         return 0;
418 }
419
420 static unsigned long long
421 rdtsc(void)
422 {
423         unsigned int low, high;
424
425         asm volatile("rdtsc" : "=a" (low), "=d" (high));
426
427         return low | ((unsigned long long)high) << 32;
428 }
429
430
431 /*
432  * get_counters(...)
433  * migrate to cpu
434  * acquire and record local counters for that cpu
435  */
436 static int __attribute__((warn_unused_result))
437 get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
438 {
439         int cpu = t->cpu_id;
440         unsigned long long msr;
441
442         if (cpu_migrate(cpu)) {
443                 WARNING("Could not migrate to CPU %d\n", cpu);
444                 return -ERR_CPU_MIGRATE;
445         }
446
447         t->tsc = rdtsc();       /* we are running on local CPU of interest */
448
449         if (has_aperf) {
450                 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
451                         return -ERR_MSR_IA32_APERF;
452                 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
453                         return -ERR_MSR_IA32_MPERF;
454         }
455
456         if (do_smi) {
457                 if (get_msr(cpu, MSR_SMI_COUNT, &msr))
458                         return -ERR_MSR_SMI_COUNT;
459                 t->smi_count = msr & 0xFFFFFFFF;
460         }
461
462         /* collect core counters only for 1st thread in core */
463         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
464                 return 0;
465
466         if (do_nhm_cstates && !do_slm_cstates) {
467                 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
468                         return -ERR_MSR_CORE_C3_RESIDENCY;
469         }
470
471         if (do_nhm_cstates) {
472                 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
473                         return -ERR_MSR_CORE_C6_RESIDENCY;
474         }
475
476         if (do_snb_cstates)
477                 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
478                         return -ERR_MSR_CORE_C7_RESIDENCY;
479
480         if (do_dts) {
481                 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
482                         return -ERR_MSR_IA32_THERM_STATUS;
483                 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
484         }
485
486
487         /* collect package counters only for 1st core in package */
488         if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
489                 return 0;
490
491         if (do_nhm_cstates && !do_slm_cstates) {
492                 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
493                         return -ERR_MSR_PKG_C3_RESIDENCY;
494                 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
495                         return -ERR_MSR_PKG_C6_RESIDENCY;
496         }
497         if (do_snb_cstates) {
498                 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
499                         return -ERR_MSR_PKG_C2_RESIDENCY;
500                 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
501                         return -ERR_MSR_PKG_C7_RESIDENCY;
502         }
503         if (do_c8_c9_c10) {
504                 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
505                         return -ERR_MSR_PKG_C8_RESIDENCY;
506                 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
507                         return -ERR_MSR_PKG_C9_RESIDENCY;
508                 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
509                         return -ERR_MSR_PKG_C10_RESIDENCY;
510         }
511         if (do_rapl & RAPL_PKG) {
512                 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
513                         return -ERR_MSR_PKG_ENERGY_STATUS;
514                 p->energy_pkg = msr & 0xFFFFFFFF;
515         }
516         if (do_rapl & RAPL_CORES) {
517                 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
518                         return -ERR_MSR_PP0_ENERGY_STATUS;
519                 p->energy_cores = msr & 0xFFFFFFFF;
520         }
521         if (do_rapl & RAPL_DRAM) {
522                 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
523                         return -ERR_MSR_DRAM_ENERGY_STATUS;
524                 p->energy_dram = msr & 0xFFFFFFFF;
525         }
526         if (do_rapl & RAPL_GFX) {
527                 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
528                         return -ERR_MSR_PP1_ENERGY_STATUS;
529                 p->energy_gfx = msr & 0xFFFFFFFF;
530         }
531         if (do_rapl & RAPL_PKG_PERF_STATUS) {
532                 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
533                         return -ERR_MSR_PKG_PERF_STATUS;
534                 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
535         }
536         if (do_rapl & RAPL_DRAM_PERF_STATUS) {
537                 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
538                         return -ERR_MSR_DRAM_PERF_STATUS;
539                 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
540         }
541         if (do_ptm) {
542                 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
543                         return -ERR_MSR_IA32_PACKAGE_THERM_STATUS;
544                 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
545         }
546         return 0;
547 }
548
549 static void
550 free_all_buffers(void)
551 {
552         allocated = 0;
553         initialized = 0;
554
555         CPU_FREE(cpu_present_set);
556         cpu_present_set = NULL;
557         cpu_present_setsize = 0;
558
559         CPU_FREE(cpu_affinity_set);
560         cpu_affinity_set = NULL;
561         cpu_affinity_setsize = 0;
562
563         free(thread_even);
564         free(core_even);
565         free(package_even);
566
567         thread_even = NULL;
568         core_even = NULL;
569         package_even = NULL;
570
571         free(thread_odd);
572         free(core_odd);
573         free(package_odd);
574
575         thread_odd = NULL;
576         core_odd = NULL;
577         package_odd = NULL;
578 }
579
580 /*
581  * Parse a file containing a single int.
582  */
583 static int __attribute__ ((format(printf,1,2)))
584 parse_int_file(const char *fmt, ...)
585 {
586         va_list args;
587         char path[PATH_MAX];
588         FILE *filep;
589         int value;
590
591         va_start(args, fmt);
592         vsnprintf(path, sizeof(path), fmt, args);
593         va_end(args);
594         filep = fopen(path, "r");
595         if (!filep) {
596                 ERROR("%s: open failed", path);
597                 return -ERR_CANT_OPEN_FILE;
598         }
599         if (fscanf(filep, "%d", &value) != 1) {
600                 ERROR("%s: failed to parse number from file", path);
                    fclose(filep);
601                 return -ERR_CANT_READ_NUMBER;
602         }
603         fclose(filep);
604         return value;
605 }
606
607 /*
608  * cpu_is_first_sibling_in_core(cpu)
609  * return 1 if given CPU is 1st HT sibling in the core
610  */
611 static int
612 cpu_is_first_sibling_in_core(int cpu)
613 {
614         return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
615 }
616
617 /*
618  * cpu_is_first_core_in_package(cpu)
619  * return 1 if given CPU is 1st core in package
620  */
621 static int
622 cpu_is_first_core_in_package(int cpu)
623 {
624         return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
625 }
626
627 static int
628 get_physical_package_id(int cpu)
629 {
630         return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
631 }
632
633 static int
634 get_core_id(int cpu)
635 {
636         return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
637 }
638
639 static int
640 get_num_ht_siblings(int cpu)
641 {
642         char path[80];
643         FILE *filep;
644         int sib1, sib2;
645         int matches;
646         char character;
647
648         ssnprintf(path, 80, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
649         filep = fopen(path, "r");
650         if (!filep) {
651                 ERROR("%s: open failed", path);
652                 return -ERR_CANT_OPEN_FILE;
653         }
654         /*
655          * file format:
656  * a pair of numbers separated by a character: 2 siblings (e.g. 1-2, or 1,4)
657  * otherwise: 1 sibling (self).
658          */
659         matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
660
661         fclose(filep);
662
663         if (matches == 3)
664                 return 2;
665         else
666                 return 1;
667 }
668
669 /*
670  * run func(thread, core, package, thread2, core2, package2)
671  * in topology order, skipping non-present cpus
672  */
673
674
675 static int __attribute__((warn_unused_result))
676 for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
677         struct pkg_data *, struct thread_data *, struct core_data *,
678         struct pkg_data *), struct thread_data *thread_base,
679         struct core_data *core_base, struct pkg_data *pkg_base,
680         struct thread_data *thread_base2, struct core_data *core_base2,
681         struct pkg_data *pkg_base2)
682 {
683         int retval, pkg_no, core_no, thread_no;
684
685         for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
686                 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
687                         for (thread_no = 0; thread_no <
688                                 topo.num_threads_per_core; ++thread_no) {
689                                 struct thread_data *t, *t2;
690                                 struct core_data *c, *c2;
691                                 struct pkg_data *p, *p2;
692
693                                 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
694
695                                 if (cpu_is_not_present(t->cpu_id))
696                                         continue;
697
698                                 t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
699
700                                 c = GET_CORE(core_base, core_no, pkg_no);
701                                 c2 = GET_CORE(core_base2, core_no, pkg_no);
702
703                                 p = GET_PKG(pkg_base, pkg_no);
704                                 p2 = GET_PKG(pkg_base2, pkg_no);
705
706                                 retval = func(t, c, p, t2, c2, p2);
707                                 if (retval)
708                                         return retval;
709                         }
710                 }
711         }
712         return 0;
713 }
714
715 /*
716  * run func(cpu) on every cpu in /proc/stat
717  * return 0 on success, a negative error code otherwise
718  */
719 static int __attribute__((warn_unused_result))
720 for_all_proc_cpus(int (func)(int))
721 {
722         FILE *fp;
723         int cpu_num;
724         int retval;
725
726         fp = fopen(proc_stat, "r");
727         if (!fp) {
728                 ERROR("%s: open failed", proc_stat);
729                 return -ERR_CANT_OPEN_FILE;
730         }
731
732         retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
733         if (retval != 0) {
734                 ERROR("%s: failed to parse format", proc_stat);
                    fclose(fp);
735                 return -ERR_CANT_READ_PROC_STAT;
736         }
737
738         while (1) {
739                 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
740                 if (retval != 1)
741                         break;
742
743                 retval = func(cpu_num);
744                 if (retval) {
745                         fclose(fp);
746                         return(retval);
747                 }
748         }
749         fclose(fp);
750         return 0;
751 }
752
753 /*
754  * count_cpus()
755  * remember the last one seen, it will be the max
756  */
757 static int
758 count_cpus(int cpu)
759 {
760         if (topo.max_cpu_num < cpu)
761                 topo.max_cpu_num = cpu;
762
763         topo.num_cpus += 1;
764         return 0;
765 }
766 static int
767 mark_cpu_present(int cpu)
768 {
769         CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
770         return 0;
771 }
772
773
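/*
 * turbostat_submit()
 * Dispatch a single gauge value to collectd under plugin "turbostat",
 * with the given plugin instance (cpuNN / coreNN / pkgNN or NULL),
 * type and type instance.
 */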
774 static void
775 turbostat_submit (const char *plugin_instance,
776         const char *type, const char *type_instance,
777         gauge_t value)
778 {
779         value_list_t vl = VALUE_LIST_INIT;
780         value_t v;
781
782         v.gauge = value;
783         vl.values = &v;
784         vl.values_len = 1;
785         sstrncpy (vl.host, hostname_g, sizeof (vl.host));
786         sstrncpy (vl.plugin, PLUGIN_NAME, sizeof (vl.plugin));
787         if (plugin_instance != NULL)
788                 sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance));
789         sstrncpy (vl.type, type, sizeof (vl.type));
790         if (type_instance != NULL)
791                 sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance));
792
793         plugin_dispatch_values (&vl);
794 }
795
796 /*
797  * Metric naming used by submit_counters():
798  * per-thread:  plugin instance "cpuNN", type "percent", instances "c0"/"c1";
799  *              type "frequency" (average busy clock, GHz) and type
800  *              "current" (SMI count) use "cpuNN" as type instance instead
801  * per-core:    plugin instance "coreNN", type "percent", "c3"/"c6"/"c7";
802  *              type "temperature" uses "coreNN" as type instance
803  * per-package: plugin instance "pkgNN", type "percent", "pc2".."pc10",
804  *              and type "power", "Pkg_W"/"Cor_W"/"GFX_W"/"RAM_W";
805  *              type "temperature" uses "pkgNN" as type instance
806  */
812 #define NAME_LEN 12
813 static int
814 submit_counters(struct thread_data *t, struct core_data *c,
815         struct pkg_data *p)
816 {
817         char name[NAME_LEN];
818         double interval_float;
819
820         interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
821
822         snprintf(name, NAME_LEN, "cpu%02d", t->cpu_id);
823
824         if (do_nhm_cstates) {
825                 if (!skip_c0)
826                         turbostat_submit(name, "percent", "c0", 100.0 * t->mperf/t->tsc);
827                 if (!skip_c1)
828                         turbostat_submit(name, "percent", "c1", 100.0 * t->c1/t->tsc);
829         }
830
831         /* GHz */
832         if (has_aperf && ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc))))
833                 turbostat_submit(NULL, "frequency", name, 1.0 * t->tsc / 1000000000 * t->aperf / t->mperf / interval_float);
834
835         /* SMI */
836         if (do_smi)
837                 turbostat_submit(NULL, "current", name, t->smi_count);
838
839         /* print per-core data only for 1st thread in core */
840         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
841                 goto done;
842
843         snprintf(name, NAME_LEN, "core%02d", c->core_id);
844
845         if (do_nhm_cstates && !do_slm_cstates)
846                 turbostat_submit(name, "percent", "c3", 100.0 * c->c3/t->tsc);
847         if (do_nhm_cstates)
848                 turbostat_submit(name, "percent", "c6", 100.0 * c->c6/t->tsc);
849         if (do_snb_cstates)
850                 turbostat_submit(name, "percent", "c7", 100.0 * c->c7/t->tsc);
851
852         if (do_dts)
853                 turbostat_submit(NULL, "temperature", name, c->core_temp_c);
854
855         /* print per-package data only for 1st core in package */
856         if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
857                 goto done;
858
859         snprintf(name, NAME_LEN, "pkg%02d", p->package_id);
860
861         if (do_ptm)
862                 turbostat_submit(NULL, "temperature", name, p->pkg_temp_c);
863
864         if (do_snb_cstates)
865                 turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2/t->tsc);
866         if (do_nhm_cstates && !do_slm_cstates)
867                 turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3/t->tsc);
868         if (do_nhm_cstates && !do_slm_cstates)
869                 turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6/t->tsc);
870         if (do_snb_cstates)
871                 turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7/t->tsc);
872         if (do_c8_c9_c10) {
873                 turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8/t->tsc);
874                 turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9/t->tsc);
875                 turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10/t->tsc);
876         }
877
878         if (do_rapl) {
879                 if (do_rapl & RAPL_PKG)
880                         turbostat_submit(name, "power", "Pkg_W", p->energy_pkg * rapl_energy_units / interval_float);
881                 if (do_rapl & RAPL_CORES)
882                         turbostat_submit(name, "power", "Cor_W", p->energy_cores * rapl_energy_units / interval_float);
883                 if (do_rapl & RAPL_GFX)
884                         turbostat_submit(name, "power", "GFX_W", p->energy_gfx * rapl_energy_units / interval_float);
885                 if (do_rapl & RAPL_DRAM)
886                         turbostat_submit(name, "power", "RAM_W", p->energy_dram * rapl_energy_units / interval_float);
887         }
888 done:
889         return 0;
890 }
891
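/*
 * turbostat_read()
 * collectd read callback: (re)allocate buffers if needed, take a fresh
 * snapshot into the even or odd buffer set (alternating), compute the
 * delta against the previous snapshot and submit the results.
 */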
892 static int
893 turbostat_read(user_data_t * not_used)
894 {
895         int ret;
896
897         if (!allocated) {
898                 if ((ret = setup_all_buffers()) < 0)
899                         return ret;
900         }
901
902         if (for_all_proc_cpus(cpu_is_not_present)) {
903                 free_all_buffers();
904                 if ((ret = setup_all_buffers()) < 0)
905                         return ret;
906                 if (for_all_proc_cpus(cpu_is_not_present))
907                         return -ERR_CPU_NOT_PRESENT;
908         }
909
910         if (!initialized) {
911                 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
912                         return ret;
913                 gettimeofday(&tv_even, (struct timezone *)NULL);
914                 is_even = 1;
915                 initialized = 1;
916                 return 0;
917         }
918
919         if (is_even) {
920                 if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
921                         return ret;
922                 gettimeofday(&tv_odd, (struct timezone *)NULL);
923                 is_even = 0;
924                 timersub(&tv_odd, &tv_even, &tv_delta);
925                 if ((ret = for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) < 0)
926                         return ret;
927                 if ((ret = for_all_cpus(submit_counters, EVEN_COUNTERS)) < 0)
928                         return ret;
929         } else {
930                 if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
931                         return ret;
932                 gettimeofday(&tv_even, (struct timezone *)NULL);
933                 is_even = 1;
934                 timersub(&tv_even, &tv_odd, &tv_delta);
935                 if ((ret = for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) < 0)
936                         return ret;
937                 if ((ret = for_all_cpus(submit_counters, ODD_COUNTERS)) < 0)
938                         return ret;
939         }
940         return 0;
941 }
942
943 static int __attribute__((warn_unused_result))
944 check_dev_msr()
945 {
946         struct stat sb;
947
948         if (stat("/dev/cpu/0/msr", &sb)) {
949                 ERROR("no /dev/cpu/0/msr\n"
950                         "Try \"# modprobe msr\"");
951                 return -ERR_NO_MSR;
952         }
953         return 0;
954 }
955
956 static int __attribute__((warn_unused_result))
957 check_super_user()
958 {
959         if (getuid() != 0) {
960                 ERROR("must be root");
961                 return -ERR_NOT_ROOT;
962         }
963         return 0;
964 }
965
966
967 #define RAPL_POWER_GRANULARITY  0x7FFF  /* 15 bit power granularity */
968 #define RAPL_TIME_GRANULARITY   0x3F /* 6 bit time granularity */
969
970 static double
971 get_tdp(unsigned int model)
972 {
973         unsigned long long msr;
974
975         if (do_rapl & RAPL_PKG_POWER_INFO)
976                 if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
977                         return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
978
979         switch (model) {
980         case 0x37:
981         case 0x4D:
982                 return 30.0;
983         default:
984                 return 135.0;
985         }
986 }
987
988
989 /*
990  * rapl_probe()
991  *
992  * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
993  */
994 static void
995 rapl_probe(unsigned int family, unsigned int model)
996 {
997         unsigned long long msr;
998         unsigned int time_unit;
999         double tdp;
1000
1001         if (!genuine_intel)
1002                 return;
1003
1004         if (family != 6)
1005                 return;
1006
1007         switch (model) {
1008         case 0x2A:
1009         case 0x3A:
1010         case 0x3C:      /* HSW */
1011         case 0x45:      /* HSW */
1012         case 0x46:      /* HSW */
1013                 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
1014                 break;
1015         case 0x3F:      /* HSX */
1016                 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
1017                 break;
1018         case 0x2D:
1019         case 0x3E:
1020                 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
1021                 break;
1022         case 0x37:      /* BYT */
1023         case 0x4D:      /* AVN */
1024                 do_rapl = RAPL_PKG | RAPL_CORES ;
1025                 break;
1026         default:
1027                 return;
1028         }
1029
1030         /* units on package 0, verify later other packages match */
1031         if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
1032                 return;
1033
1034         rapl_power_units = 1.0 / (1 << (msr & 0xF));
1035         if (model == 0x37)
1036                 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
1037         else
1038                 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
1039
1040         time_unit = msr >> 16 & 0xF;
1041         if (time_unit == 0)
1042                 time_unit = 0xA;
1043
1044         rapl_time_units = 1.0 / (1 << (time_unit));
1045
1046         tdp = get_tdp(model);
1047
1048         rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
1049         DEBUG("RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n",
1050                 rapl_joule_counter_range, tdp);
1051
1052         return;
1053 }
1054
1055 static int
1056 is_snb(unsigned int family, unsigned int model)
1057 {
1058         if (!genuine_intel)
1059                 return 0;
1060
1061         switch (model) {
1062         case 0x2A:
1063         case 0x2D:
1064         case 0x3A:      /* IVB */
1065         case 0x3E:      /* IVB Xeon */
1066         case 0x3C:      /* HSW */
1067         case 0x3F:      /* HSX */
1068         case 0x45:      /* HSW */
1069         case 0x46:      /* HSW */
1070                 return 1;
1071         }
1072         return 0;
1073 }
1074
1075 static int
1076 has_c8_c9_c10(unsigned int family, unsigned int model)
1077 {
1078         if (!genuine_intel)
1079                 return 0;
1080
1081         switch (model) {
1082         case 0x45:
1083                 return 1;
1084         }
1085         return 0;
1086 }
1087
1088
1089 static int
1090 is_slm(unsigned int family, unsigned int model)
1091 {
1092         if (!genuine_intel)
1093                 return 0;
1094         switch (model) {
1095         case 0x37:      /* BYT */
1096         case 0x4D:      /* AVN */
1097                 return 1;
1098         }
1099         return 0;
1100 }
1101
1102 /*
1103  * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
1104  * the Thermal Control Circuit (TCC) activates.
1105  * This is usually equal to tjMax.
1106  *
1107  * Older processors do not have this MSR, so there we guess,
1108  * but also allow an override via tcc_activation_temp_override.
1109  *
1110  * Several MSR temperature values are in units of degrees-C
1111  * below this value, including the Digital Thermal Sensor (DTS),
1112  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
1113  */
1114 static int __attribute__((warn_unused_result))
1115 set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1116 {
1117         unsigned long long msr;
1118         unsigned int target_c_local;
1119         int cpu;
1120
1121         /* tcc_activation_temp is used only for dts or ptm */
1122         if (!(do_dts || do_ptm))
1123                 return 0;
1124
1125         /* this is a per-package concept */
1126         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1127                 return 0;
1128
1129         cpu = t->cpu_id;
1130         if (cpu_migrate(cpu)) {
1131                 ERROR("Could not migrate to CPU %d\n", cpu);
1132                 return -ERR_CPU_MIGRATE;
1133         }
1134
1135         if (tcc_activation_temp_override != 0) {
1136                 tcc_activation_temp = tcc_activation_temp_override;
1137                 NOTICE("cpu%d: Using TCC Target override (%d C)\n",
1138                         cpu, tcc_activation_temp);
1139                 return 0;
1140         }
1141
1142         /* Temperature Target MSR is Nehalem and newer only */
1143         if (!do_nehalem_platform_info)
1144                 goto guess;
1145
1146         if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
1147                 goto guess;
1148
1149         target_c_local = (msr >> 16) & 0x7F;
1150
1151         if (target_c_local < 85 || target_c_local > 127)
1152                 goto guess;
1153
1154         tcc_activation_temp = target_c_local;
1155
1156         return 0;
1157
1158 guess:
1159         tcc_activation_temp = TJMAX_DEFAULT;
1160         WARNING("cpu%d: Guessing tjMax %d C\n",
1161                 cpu, tcc_activation_temp);
1162
1163         return 0;
1164 }
1165
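/*
 * check_cpuid()
 * Detect vendor, family/model and feature bits via CPUID and set the
 * global capability flags (invariant TSC, APERF/MPERF, DTS, PTM, the
 * per-model C-state groups) before probing RAPL support.
 */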
1166 static int __attribute__((warn_unused_result))
1167 check_cpuid()
1168 {
1169         unsigned int eax, ebx, ecx, edx, max_level;
1170         unsigned int fms, family, model;
1171
1172         eax = ebx = ecx = edx = 0;
1173
1174         __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
1175
1176         if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
1177                 genuine_intel = 1;
1178
1179         fms = 0;
1180         __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1181         family = (fms >> 8) & 0xf;
1182         model = (fms >> 4) & 0xf;
1183         if (family == 6 || family == 0xf)
1184                 model += ((fms >> 16) & 0xf) << 4;
1185
1186         if (!(edx & (1 << 5))) {
1187                 ERROR("CPUID: no MSR");
1188                 return -ERR_NO_MSR;
1189         }
1190
1191         /*
1192          * check max extended function levels of CPUID.
1193          * This is needed to check for invariant TSC.
1194          * This check is valid for both Intel and AMD.
1195          */
1196         ebx = ecx = edx = 0;
1197         __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
1198
1199         if (max_level < 0x80000007) {
1200                 ERROR("CPUID: no invariant TSC (max_level 0x%x)", max_level);
1201                 return -ERR_NO_INVARIANT_TSC;
1202         }
1203
1204         /*
1205          * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
1206          * this check is valid for both Intel and AMD
1207          */
1208         __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
1209         has_invariant_tsc = edx & (1 << 8);
1210
1211         if (!has_invariant_tsc) {
1212                 ERROR("No invariant TSC");
1213                 return -ERR_NO_INVARIANT_TSC;
1214         }
1215
1216         /*
1217          * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
1218          * this check is valid for both Intel and AMD
1219          */
1220
1221         __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
1222         has_aperf = ecx & (1 << 0);
1223         do_dts = eax & (1 << 0);
1224         do_ptm = eax & (1 << 6);
1225         has_epb = ecx & (1 << 3);
1226
1227         if (!has_aperf) {
1228                 ERROR("No APERF");
1229                 return -ERR_NO_APERF;
1230         }
1231
1232         do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
1233         do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
1234         do_smi = do_nhm_cstates;
1235         do_snb_cstates = is_snb(family, model);
1236         do_c8_c9_c10 = has_c8_c9_c10(family, model);
1237         do_slm_cstates = is_slm(family, model);
1238
1239         rapl_probe(family, model);
1240
1241         return 0;
1242 }
1243
1244
1245
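/*
 * topology_probe()
 * Enumerate CPUs from /proc/stat and the sysfs topology files,
 * allocate the present/affinity cpu sets and size topo.* (packages,
 * cores per package, threads per core).
 */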
1246 static int __attribute__((warn_unused_result))
1247 topology_probe()
1248 {
1249         int i;
1250         int ret;
1251         int max_core_id = 0;
1252         int max_package_id = 0;
1253         int max_siblings = 0;
1254         struct cpu_topology {
1255                 int core_id;
1256                 int physical_package_id;
1257         } *cpus;
1258
1259         /* Initialize num_cpus, max_cpu_num */
1260         topo.num_cpus = 0;
1261         topo.max_cpu_num = 0;
1262         ret = for_all_proc_cpus(count_cpus);
1263         if (ret < 0)
1264                 return ret;
1265
1266         DEBUG("num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
1267
1268         cpus = calloc(1, (topo.max_cpu_num  + 1) * sizeof(struct cpu_topology));
1269         if (cpus == NULL) {
1270                 ERROR("calloc cpus");
1271                 return -ERR_CALLOC;
1272         }
1273
1274         /*
1275          * Allocate and initialize cpu_present_set
1276          */
1277         cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
1278         if (cpu_present_set == NULL) {
1279                 free(cpus);
1280                 ERROR("CPU_ALLOC");
1281                 return -ERR_CPU_ALLOC;
1282         }
1283         cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
1284         CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
1285         ret = for_all_proc_cpus(mark_cpu_present);
1286         if (ret < 0) {
1287                 free(cpus);
1288                 return ret;
1289         }
1290
1291         /*
1292          * Allocate and initialize cpu_affinity_set
1293          */
1294         cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
1295         if (cpu_affinity_set == NULL) {
1296                 free(cpus);
1297                 ERROR("CPU_ALLOC");
1298                 return -ERR_CPU_ALLOC;
1299         }
1300         cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
1301         CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
1302
1303
1304         /*
1305          * For online cpus
1306          * find max_core_id, max_package_id
1307          */
1308         for (i = 0; i <= topo.max_cpu_num; ++i) {
1309                 int siblings;
1310
1311                 if (cpu_is_not_present(i)) {
1312                         /* not-present CPUs are skipped */
1313                         DEBUG("cpu%d NOT PRESENT\n", i);
1314                         continue;
1315                 }
1316                 cpus[i].core_id = get_core_id(i);
1317                 if (cpus[i].core_id < 0)
1318                         return cpus[i].core_id;
1319                 if (cpus[i].core_id > max_core_id)
1320                         max_core_id = cpus[i].core_id;
1321
1322                 cpus[i].physical_package_id = get_physical_package_id(i);
1323                 if (cpus[i].physical_package_id < 0)
1324                         return cpus[i].physical_package_id;
1325                 if (cpus[i].physical_package_id > max_package_id)
1326                         max_package_id = cpus[i].physical_package_id;
1327
1328                 siblings = get_num_ht_siblings(i);
1329                 if (siblings < 0)
1330                         return siblings;
1331                 if (siblings > max_siblings)
1332                         max_siblings = siblings;
1333                 DEBUG("cpu %d pkg %d core %d\n",
1334                         i, cpus[i].physical_package_id, cpus[i].core_id);
1335         }
1336         topo.num_cores_per_pkg = max_core_id + 1;
1337         DEBUG("max_core_id %d, sizing for %d cores per package\n",
1338                 max_core_id, topo.num_cores_per_pkg);
1339
1340         topo.num_packages = max_package_id + 1;
1341         DEBUG("max_package_id %d, sizing for %d packages\n",
1342                 max_package_id, topo.num_packages);
1343
1344         topo.num_threads_per_core = max_siblings;
1345         DEBUG("max_siblings %d\n", max_siblings);
1346
1347         free(cpus);
1348         return 0;
1349 }
1350
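/*
 * allocate_counters()
 * Allocate one set of thread/core/package counter arrays sized from
 * the probed topology; thread and core ids start out as -1, package
 * ids as their index.
 */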
1351 static int
1352 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
1353 {
1354         int i;
1355
1356         *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
1357                 topo.num_packages, sizeof(struct thread_data));
1358         if (*t == NULL)
1359                 goto error;
1360
1361         for (i = 0; i < topo.num_threads_per_core *
1362                 topo.num_cores_per_pkg * topo.num_packages; i++)
1363                 (*t)[i].cpu_id = -1;
1364
1365         *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
1366                 sizeof(struct core_data));
1367         if (*c == NULL)
1368                 goto error;
1369
1370         for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
1371                 (*c)[i].core_id = -1;
1372
1373         *p = calloc(topo.num_packages, sizeof(struct pkg_data));
1374         if (*p == NULL)
1375                 goto error;
1376
1377         for (i = 0; i < topo.num_packages; i++)
1378                 (*p)[i].package_id = i;
1379
1380         return 0;
1381 error:
1382         ERROR("calloc counters");
1383         return -ERR_CALLOC;
1384 }
1385 /*
1386  * init_counter()
1387  *
1388  * set cpu_id, core_num, pkg_num
1389  * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
1390  *
1391  * increment topo.num_cores when 1st core in pkg seen
1392  */
1393 static int
1394 init_counter(struct thread_data *thread_base, struct core_data *core_base,
1395         struct pkg_data *pkg_base, int thread_num, int core_num,
1396         int pkg_num, int cpu_id)
1397 {
1398         int ret;
1399         struct thread_data *t;
1400         struct core_data *c;
1401         struct pkg_data *p;
1402
1403         t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
1404         c = GET_CORE(core_base, core_num, pkg_num);
1405         p = GET_PKG(pkg_base, pkg_num);
1406
1407         t->cpu_id = cpu_id;
1408         if (thread_num == 0) {
1409                 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
1410                 if ((ret = cpu_is_first_core_in_package(cpu_id)) < 0) {
1411                         return ret;
1412                 } else if (ret != 0) {
1413                         t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
1414                 }
1415         }
1416
1417         c->core_id = core_num;
1418         p->package_id = pkg_num;
1419
1420         return 0;
1421 }
1422
1423
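/*
 * initialize_counters()
 * Determine this CPU's package/core/thread position from sysfs and
 * initialize the corresponding slot in both the even and odd sets.
 */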
1424 static int
1425 initialize_counters(int cpu_id)
1426 {
1427         int my_thread_id, my_core_id, my_package_id;
1428         int ret;
1429
1430         my_package_id = get_physical_package_id(cpu_id);
1431         if (my_package_id < 0)
1432                 return my_package_id;
1433         my_core_id = get_core_id(cpu_id);
1434         if (my_core_id < 0)
1435                 return my_core_id;
1436
1437         if ((ret = cpu_is_first_sibling_in_core(cpu_id)) < 0) {
1438                 return ret;
1439         } else if (ret != 0) {
1440                 my_thread_id = 0;
1441                 topo.num_cores++;
1442         } else {
1443                 my_thread_id = 1;
1444         }
1445
1446         ret = init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
1447         if (ret < 0)
1448                 return ret;
1449         ret = init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
1450         if (ret < 0)
1451                 return ret;
1452         return 0;
1453 }
1454
1455 #define DO_OR_GOTO_ERR(something) \
1456 do {                         \
1457         ret = (something);     \
1458         if (ret < 0)         \
1459                 goto err;    \
1460 } while (0)
1461
1462 static int setup_all_buffers(void)
1463 {
1464         int ret;
1465
1466         DO_OR_GOTO_ERR(topology_probe());
1467         DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
1468         DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
1469         DO_OR_GOTO_ERR(for_all_proc_cpus(initialize_counters));
1470
1471         allocated = 1;
1472         return 0;
1473 err:
1474         free_all_buffers();
1475         return ret;
1476 }
1477
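/*
 * turbostat_init()
 * Plugin init: verify CPUID features, access to the per-CPU msr device
 * files and root privileges, allocate the counter buffers, read the
 * TCC activation temperature, then register the read callback.
 */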
1478 static int
1479 turbostat_init(void)
1480 {
1481         int ret;
1482
1483         DO_OR_GOTO_ERR(check_cpuid());
1484         DO_OR_GOTO_ERR(check_dev_msr());
1485         DO_OR_GOTO_ERR(check_super_user());
1486         DO_OR_GOTO_ERR(setup_all_buffers());
1487         DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
1488
1489         plugin_register_complex_read(NULL, PLUGIN_NAME, turbostat_read, NULL, NULL);
1490
1491         return 0;
1492 err:
1493         free_all_buffers();
1494         return ret;
1495 }
1496
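/*
 * Typical usage (assuming the stock collectd configuration syntax):
 *
 *   LoadPlugin turbostat
 *
 * The plugin registers no configuration callback, so it currently takes
 * no options; it needs root privileges and the msr kernel module (see
 * check_super_user() and check_dev_msr()).
 */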
1497 void module_register(void);
1498 void module_register(void)
1499 {
1500         plugin_register_init(PLUGIN_NAME, turbostat_init);
1501 }