intel_pstate.c revision 91a4cd4f3d8169d7398f9123683f64575927c682
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>

#define SAMPLE_COUNT		3

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}

struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	int freq;
};

struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int turbo_pstate;
};

struct vid_data {
	int32_t min;
	int32_t max;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	char name[64];

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64 prev_aperf;
	u64 prev_mperf;
	int sample_ptr;
	struct sample samples[SAMPLE_COUNT];
};

static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;

struct perf_limits {
	int no_turbo;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = setpoint - busy;
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
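
/*
 * One PID step in 8.8 fixed point.  fp_error is the fixed-point distance
 * from the setpoint; errors inside the deadband are ignored and the
 * integral term is clamped to +/-30 before the result is converted back
 * to a whole number of P-state steps.  A positive return value means the
 * core was less busy than the setpoint, so the caller steps the P state
 * down; a negative one steps it up.
 *
 * Worked example with illustrative numbers: setpoint 97, deadband 0,
 * p_gain_pct 20 and i/d gains 0 (the core_params defaults), with
 * busy = int_tofp(90).  Then fp_error = int_tofp(7) and the proportional
 * term alone is roughly mul_fp(0.20, 7.0) = 1.4, which fp_toint()
 * truncates to a request for one P-state step down.
 */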

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid,
		pid_params.setpoint,
		100,
		pid_params.deadband,
		0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}
static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
			pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				debugfs_parent, pid_files[i].value,
				&fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.no_turbo = clamp_t(int, input, 0, 1);

	return count;
}
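
/*
 * The attributes below hang off the "intel_pstate" kobject created in
 * intel_pstate_sysfs_expose_params(), i.e. (with sysfs mounted in the
 * usual place) /sys/devices/system/cpu/intel_pstate/.  An illustrative
 * use, capping performance at 80% of the available range:
 *
 *   # echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *
 * The value is clamped to 0..100 and combined with the cpufreq policy
 * limit via min(max_policy_pct, max_sysfs_pct).
 */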

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.min_perf_pct = clamp_t(int, input, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	return count;
}

show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static struct kobject *intel_pstate_kobject;

static void intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject,
				&intel_pstate_attr_group);
	BUG_ON(rc);
}

/************************** sysfs end ************************/
static int byt_get_min_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return value & 0xFF;
}

static int byt_get_max_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0xFF;
}

static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = fp_toint(vid_fp);

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));
}
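
/*
 * On the "core" side the ratio limits come from MSR_PLATFORM_INFO:
 * bits 15:8 hold the maximum non-turbo ratio and bits 47:40 the maximum
 * efficiency (minimum) ratio, which is what the shifts and 0xFF masks
 * below extract.  The one-core turbo ratio is read from the low byte of
 * MSR_NHM_TURBO_RATIO_LIMIT and is never allowed to fall below the
 * non-turbo maximum.
 */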

static int core_get_min_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;
	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_max_pstate,
		.set = byt_set_pstate,
		.get_vid = byt_get_vid,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	if (limits.no_turbo)
		max_perf = cpu->pstate.max_pstate;

	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf,
			cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * 100000, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate + steps;

	intel_pstate_set_pstate(cpu, target);
}

static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate - steps;
	intel_pstate_set_pstate(cpu, target);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	sprintf(cpu->name, "Intel 2nd generation core");

	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	/*
	 * Go to max pstate so we don't slow up boot if we are built-in.
	 * If we are a module we will take care of it during normal
	 * operation.
	 */
	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
}
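
/*
 * Busy/frequency math, in the same 8.8 fixed-point format.  With
 * illustrative deltas of aperf = 800 and mperf = 1000 on a part whose
 * max_pstate is 34 (3.4 GHz):
 *
 *   core_pct     = int_tofp(800 * 100) / 1000 = 80.0 (0x5000)
 *   sample->freq = fp_toint(34 * 0x5000 * 1000) = 2720000 kHz
 *
 * i.e. the core ran at roughly 80% of its guaranteed maximum, 2.72 GHz.
 */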

static inline void intel_pstate_calc_busy(struct cpudata *cpu,
					struct sample *sample)
{
	u64 core_pct;
	core_pct = div64_u64(int_tofp(sample->aperf * 100),
			     sample->mperf);
	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);

	sample->core_pct_busy = core_pct;
}

static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
	cpu->samples[cpu->sample_ptr].aperf = aperf;
	cpu->samples[cpu->sample_ptr].mperf = mperf;
	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int sample_time, delay;

	sample_time = pid_params.sample_rate_ms;
	delay = msecs_to_jiffies(sample_time);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate;

	core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl = 0;
	int steps;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	steps = abs(ctl);
	if (ctl < 0)
		intel_pstate_pstate_increase(cpu, steps);
	else
		intel_pstate_pstate_decrease(cpu, steps);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_pstate_adjust_busy_pstate(cpu);
	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	const struct x86_cpu_id *id;
	struct cpudata *cpu;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	/*
	 * Set cpu->cpu before the first intel_pstate_set_pstate() call so
	 * that the cpu_frequency trace event reports the correct CPU.
	 */
	cpu->cpu = cpunum;

	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.function = intel_pstate_timer_func;
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;
	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

	add_timer_on(&cpu->timer, cpunum);

	pr_info("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->samples[cpu->sample_ptr];
	return sample->freq;
}
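
/*
 * Policy mapping: the cpufreq min/max frequencies are turned into
 * percentages of cpuinfo.max_freq.  For example (illustrative values),
 * policy->min = 1600000 kHz against cpuinfo.max_freq = 3200000 kHz
 * yields min_perf_pct = 50, i.e. min_perf = 0.5 in 8.8 fixed point.
 */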

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}
	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
		return -EINVAL;

	return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;

	del_timer(&all_cpu_data[cpu]->timer);
	kfree(all_cpu_data[cpu]);
	all_cpu_data[cpu] = NULL;
	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (!limits.no_turbo &&
	    limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * 100000;
	policy->max = cpu->pstate.turbo_pstate * 100000;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.name		= "intel_pstate",
};

static int __initdata no_load;
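
/*
 * Sanity probe at load time: read APERF/MPERF twice and require both
 * counters to have moved in between, and require the ratio callbacks to
 * return non-zero values.  On hardware (or virtual machines) where these
 * MSRs do not tick, the driver refuses to load.
 */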

static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the MSRs we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant"},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;

	if (acpi_disabled
	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
		    && intel_pstate_no_acpi_pss())
			return true;
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */
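
/*
 * Load-time checks, in order: the "intel_pstate=disable" kernel
 * parameter, a CPUID model match, the firmware power-management check
 * (vendor/OEM table match plus absence of ACPI _PSS), and finally the
 * APERF/MPERF MSR probe above.
 */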

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();
	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");