cpufreq_stats.c revision 5c720d37bf5c2864cd7e834afff88321d6e4d97d
/*
 * drivers/cpufreq/cpufreq_stats.c
 *
 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <asm/cputime.h>

static spinlock_t cpufreq_stats_lock;

#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
static struct freq_attr _attr_##_name = {\
	.attr = {.name = __stringify(_name), .mode = _mode, }, \
	.show = _show,\
};

struct cpufreq_stats {
	unsigned int cpu;
	unsigned int total_trans;
	unsigned long long last_time;
	unsigned int max_state;
	unsigned int state_num;
	unsigned int last_index;
	cputime64_t *time_in_state;
	unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;
#endif
};

static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);

struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t(*show) (struct cpufreq_stats *, char *);
};

static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	return sprintf(buf, "%d\n",
			per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
}

static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			cputime64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table);
#endif

CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans);
CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state);

static struct attribute *default_attrs[] = {
	&_attr_total_trans.attr,
	&_attr_time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&_attr_trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int index;
	for (index = 0; index < stat->max_state; index++)
		if (stat->freq_table[index] == freq)
			return index;
	return -1;
}

/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
	if (stat) {
		kfree(stat->time_in_state);
		kfree(stat);
	}
	per_cpu(cpufreq_stats_table, cpu) = NULL;
}

/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	if (policy && policy->cpu == cpu)
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	if (policy)
		cpufreq_cpu_put(policy);
}

static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;
	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if ((stat) == NULL)
		return -ENOMEM;

	data = cpufreq_cpu_get(cpu);
	if (data == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}

static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	unsigned int cpu = policy->cpu;
	if (val != CPUFREQ_NOTIFY)
		return 0;
	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;
	ret = cpufreq_stats_create_table(policy, table);
	if (ret)
		return ret;
	return 0;
}

static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1]= .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpufreq_update_policy(cpu);
		break;
	case CPU_DOWN_PREPARE:
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpufreq_stats_free_table(cpu);
		break;
	}
	return NOTIFY_OK;
}

/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};

static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};

static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		return ret;
	}

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_update_policy(cpu);
	}
	return 0;
}
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
}

MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
			"through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);