projects
/
collectd.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
treewide: declare loop variable in loop expression
[collectd.git]
/
src
/
turbostat.c
diff --git a/src/turbostat.c b/src/turbostat.c
index 14ce29f..913511f 100644 (file)
--- a/src/turbostat.c
+++ b/src/turbostat.c
@@ -36,6 +36,7 @@
#define _GNU_SOURCE
#include "collectd.h"
#define _GNU_SOURCE
#include "collectd.h"
+
#include "common.h"
#include "plugin.h"
#include "utils_time.h"
#include "common.h"
#include "plugin.h"
#include "utils_time.h"
@@ -457,7 +458,7 @@ delta_core(struct core_data *delta, const struct core_data *new, const struct co
*/
static inline int __attribute__((warn_unused_result))
delta_thread(struct thread_data *delta, const struct thread_data *new, const struct thread_data *old,
*/
static inline int __attribute__((warn_unused_result))
delta_thread(struct thread_data *delta, const struct thread_data *new, const struct thread_data *old,
-	     const struct core_data *core_delta)
+	     const struct core_data *cdelta)
{
delta->tsc = new->tsc - old->tsc;
{
delta->tsc = new->tsc - old->tsc;
@@ -491,12 +492,12 @@ delta_thread(struct thread_data *delta, const struct thread_data *new, const str
* it is possible for mperf's non-halted cycles + idle states
* to exceed TSC's all cycles: show c1 = 0% in that case.
*/
* it is possible for mperf's non-halted cycles + idle states
* to exceed TSC's all cycles: show c1 = 0% in that case.
*/
-	if ((delta->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > delta->tsc)
+	if ((delta->mperf + cdelta->c3 + cdelta->c6 + cdelta->c7) > delta->tsc)
delta->c1 = 0;
else {
/* normal case, derive c1 */
delta->c1 = 0;
else {
/* normal case, derive c1 */
-		delta->c1 = delta->tsc - delta->mperf - core_delta->c3
-			- core_delta->c6 - core_delta->c7;
+		delta->c1 = delta->tsc - delta->mperf - cdelta->c3
+			- cdelta->c6 - cdelta->c7;
}
if (delta->mperf == 0) {
}
if (delta->mperf == 0) {
@@ -651,11 +652,10 @@ for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_dat
struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
{
int retval;
struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
{
int retval;
- unsigned int pkg_no, core_no, thread_no;
- for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
- for (core_no = 0; core_no < topology.num_cores; ++core_no) {
- for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
+	for (unsigned int pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
+		for (unsigned int core_no = 0; core_no < topology.num_cores; ++core_no) {
+			for (unsigned int thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
struct thread_data *t;
struct core_data *c;
struct pkg_data *p;
struct thread_data *t;
struct core_data *c;
struct pkg_data *p;
@@ -691,11 +691,10 @@ for_all_cpus_delta(const struct thread_data *thread_new_base, const struct core_
const struct thread_data *thread_old_base, const struct core_data *core_old_base, const struct pkg_data *pkg_old_base)
{
int retval;
const struct thread_data *thread_old_base, const struct core_data *core_old_base, const struct pkg_data *pkg_old_base)
{
int retval;
- unsigned int pkg_no, core_no, thread_no;
- for (pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
- for (core_no = 0; core_no < topology.num_cores; ++core_no) {
- for (thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
+	for (unsigned int pkg_no = 0; pkg_no < topology.num_packages; ++pkg_no) {
+		for (unsigned int core_no = 0; core_no < topology.num_cores; ++core_no) {
+			for (unsigned int thread_no = 0; thread_no < topology.num_threads; ++thread_no) {
struct thread_data *t_delta;
const struct thread_data *t_old, *t_new;
struct core_data *c_delta;
struct thread_data *t_delta;
const struct thread_data *t_old, *t_new;
struct core_data *c_delta;
@@ -805,7 +804,7 @@ guess:
* Identify the functionality of the CPU
*/
static int __attribute__((warn_unused_result))
* Identify the functionality of the CPU
*/
static int __attribute__((warn_unused_result))
-probe_cpu()
+probe_cpu(void)
{
unsigned int eax, ebx, ecx, edx, max_level;
unsigned int fms, family, model;
{
unsigned int eax, ebx, ecx, edx, max_level;
unsigned int fms, family, model;
@@ -1147,9 +1146,8 @@ allocate_cpu_set(cpu_set_t ** set, size_t * size) {
* Build a local representation of the cpu distribution
*/
static int __attribute__((warn_unused_result))
* Build a local representation of the cpu distribution
*/
static int __attribute__((warn_unused_result))
-topology_probe()
+topology_probe(void)
{
{
- unsigned int i;
int ret;
unsigned int max_package_id, max_core_id, max_threads;
max_package_id = max_core_id = max_threads = 0;
int ret;
unsigned int max_package_id, max_core_id, max_threads;
max_package_id = max_core_id = max_threads = 0;
@@ -1186,7 +1184,7 @@ topology_probe()
* For online cpus
* find max_core_id, max_package_id
*/
* For online cpus
* find max_core_id, max_package_id
*/
- for (i = 0; i <= topology.max_cpu_id; ++i) {
+	for (unsigned int i = 0; i <= topology.max_cpu_id; ++i) {
unsigned int num_threads;
struct cpu_topology *cpu = &topology.cpus[i];
unsigned int num_threads;
struct cpu_topology *cpu = &topology.cpus[i];
@@ -1251,7 +1249,6 @@ err:
err:
static int
allocate_counters(struct thread_data **threads, struct core_data **cores, struct pkg_data **packages)
{
static int
allocate_counters(struct thread_data **threads, struct core_data **cores, struct pkg_data **packages)
{
- unsigned int i;
unsigned int total_threads, total_cores;
if ((topology.num_threads == 0)
unsigned int total_threads, total_cores;
if ((topology.num_threads == 0)
@@ -1271,7 +1268,7 @@ allocate_counters(struct thread_data **threads, struct core_data **cores, struct
return -1;
}
return -1;
}
- for (i = 0; i < total_threads; ++i)
+	for (unsigned int i = 0; i < total_threads; ++i)
(*threads)[i].cpu_id = topology.max_cpu_id + 1;
total_cores = topology.num_cores * topology.num_packages;
(*threads)[i].cpu_id = topology.max_cpu_id + 1;
total_cores = topology.num_cores * topology.num_packages;
@@ -1321,9 +1318,7 @@ init_counter(struct thread_data *thread_base, struct core_data *core_base,
static void
initialize_counters(void)
{
static void
initialize_counters(void)
{
- unsigned int cpu_id;
-
- for (cpu_id = 0; cpu_id <= topology.max_cpu_id; ++cpu_id) {
+ for (unsigned int cpu_id = 0; cpu_id <= topology.max_cpu_id; ++cpu_id) {
if (cpu_is_not_present(cpu_id))
continue;
init_counter(EVEN_COUNTERS, cpu_id);
if (cpu_is_not_present(cpu_id))
continue;
init_counter(EVEN_COUNTERS, cpu_id);
@@ -1342,7 +1337,7 @@ free_all_buffers(void)
free_all_buffers(void)
CPU_FREE(cpu_present_set);
cpu_present_set = NULL;
CPU_FREE(cpu_present_set);
cpu_present_set = NULL;
- cpu_present_set = 0;
+	cpu_present_setsize = 0;
CPU_FREE(cpu_affinity_set);
cpu_affinity_set = NULL;
CPU_FREE(cpu_affinity_set);
cpu_affinity_set = NULL;