1/* 2 * Copyright (C) STMicroelectronics 2009 3 * Copyright (C) ST-Ericsson SA 2010 4 * 5 * License Terms: GNU General Public License v2 6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> 7 * Author: Sundar Iyer <sundar.iyer@stericsson.com> 8 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> 9 * 10 * U8500 PRCM Unit interface driver 11 * 12 */ 13#include <linux/module.h> 14#include <linux/kernel.h> 15#include <linux/delay.h> 16#include <linux/errno.h> 17#include <linux/err.h> 18#include <linux/spinlock.h> 19#include <linux/io.h> 20#include <linux/slab.h> 21#include <linux/mutex.h> 22#include <linux/completion.h> 23#include <linux/irq.h> 24#include <linux/jiffies.h> 25#include <linux/bitops.h> 26#include <linux/fs.h> 27#include <linux/platform_device.h> 28#include <linux/uaccess.h> 29#include <linux/mfd/core.h> 30#include <linux/mfd/dbx500-prcmu.h> 31#include <linux/regulator/db8500-prcmu.h> 32#include <linux/regulator/machine.h> 33#include <asm/hardware/gic.h> 34#include <mach/hardware.h> 35#include <mach/irqs.h> 36#include <mach/db8500-regs.h> 37#include <mach/id.h> 38#include "dbx500-prcmu-regs.h" 39 40/* Offset for the firmware version within the TCPM */ 41#define PRCMU_FW_VERSION_OFFSET 0xA4 42 43/* Index of different voltages to be used when accessing AVSData */ 44#define PRCM_AVS_BASE 0x2FC 45#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0) 46#define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1) 47#define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2) 48#define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3) 49#define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4) 50#define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5) 51#define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6) 52#define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7) 53#define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8) 54#define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9) 55#define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA) 56#define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB) 57#define PRCM_AVS_VSAFE 
(PRCM_AVS_BASE + 0xC) 58 59#define PRCM_AVS_VOLTAGE 0 60#define PRCM_AVS_VOLTAGE_MASK 0x3f 61#define PRCM_AVS_ISSLOWSTARTUP 6 62#define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP) 63#define PRCM_AVS_ISMODEENABLE 7 64#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE) 65 66#define PRCM_BOOT_STATUS 0xFFF 67#define PRCM_ROMCODE_A2P 0xFFE 68#define PRCM_ROMCODE_P2A 0xFFD 69#define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */ 70 71#define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */ 72 73#define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */ 74#define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0) 75#define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1) 76#define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2) 77#define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3) 78#define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4) 79#define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5) 80#define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8) 81 82/* Req Mailboxes */ 83#define PRCM_REQ_MB0 0xFDC /* 12 bytes */ 84#define PRCM_REQ_MB1 0xFD0 /* 12 bytes */ 85#define PRCM_REQ_MB2 0xFC0 /* 16 bytes */ 86#define PRCM_REQ_MB3 0xE4C /* 372 bytes */ 87#define PRCM_REQ_MB4 0xE48 /* 4 bytes */ 88#define PRCM_REQ_MB5 0xE44 /* 4 bytes */ 89 90/* Ack Mailboxes */ 91#define PRCM_ACK_MB0 0xE08 /* 52 bytes */ 92#define PRCM_ACK_MB1 0xE04 /* 4 bytes */ 93#define PRCM_ACK_MB2 0xE00 /* 4 bytes */ 94#define PRCM_ACK_MB3 0xDFC /* 4 bytes */ 95#define PRCM_ACK_MB4 0xDF8 /* 4 bytes */ 96#define PRCM_ACK_MB5 0xDF4 /* 4 bytes */ 97 98/* Mailbox 0 headers */ 99#define MB0H_POWER_STATE_TRANS 0 100#define MB0H_CONFIG_WAKEUPS_EXE 1 101#define MB0H_READ_WAKEUP_ACK 3 102#define MB0H_CONFIG_WAKEUPS_SLEEP 4 103 104#define MB0H_WAKEUP_EXE 2 105#define MB0H_WAKEUP_SLEEP 5 106 107/* Mailbox 0 REQs */ 108#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0) 109#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1) 110#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2) 
111#define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3) 112#define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4) 113#define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8) 114 115/* Mailbox 0 ACKs */ 116#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0) 117#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1) 118#define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4) 119#define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8) 120#define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C) 121#define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20) 122#define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20 123 124/* Mailbox 1 headers */ 125#define MB1H_ARM_APE_OPP 0x0 126#define MB1H_RESET_MODEM 0x2 127#define MB1H_REQUEST_APE_OPP_100_VOLT 0x3 128#define MB1H_RELEASE_APE_OPP_100_VOLT 0x4 129#define MB1H_RELEASE_USB_WAKEUP 0x5 130#define MB1H_PLL_ON_OFF 0x6 131 132/* Mailbox 1 Requests */ 133#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0) 134#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1) 135#define PRCM_REQ_MB1_PLL_ON_OFF (PRCM_REQ_MB1 + 0x4) 136#define PLL_SOC0_OFF 0x1 137#define PLL_SOC0_ON 0x2 138#define PLL_SOC1_OFF 0x4 139#define PLL_SOC1_ON 0x8 140 141/* Mailbox 1 ACKs */ 142#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0) 143#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1) 144#define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2) 145#define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3) 146 147/* Mailbox 2 headers */ 148#define MB2H_DPS 0x0 149#define MB2H_AUTO_PWR 0x1 150 151/* Mailbox 2 REQs */ 152#define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0) 153#define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1) 154#define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2) 155#define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3) 156#define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4) 157#define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5) 158#define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6) 159#define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7) 160#define 
PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8) 161#define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC) 162 163/* Mailbox 2 ACKs */ 164#define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0) 165#define HWACC_PWR_ST_OK 0xFE 166 167/* Mailbox 3 headers */ 168#define MB3H_ANC 0x0 169#define MB3H_SIDETONE 0x1 170#define MB3H_SYSCLK 0xE 171 172/* Mailbox 3 Requests */ 173#define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0) 174#define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20) 175#define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60) 176#define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64) 177#define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68) 178#define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C) 179#define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C) 180 181/* Mailbox 4 headers */ 182#define MB4H_DDR_INIT 0x0 183#define MB4H_MEM_ST 0x1 184#define MB4H_HOTDOG 0x12 185#define MB4H_HOTMON 0x13 186#define MB4H_HOT_PERIOD 0x14 187#define MB4H_A9WDOG_CONF 0x16 188#define MB4H_A9WDOG_EN 0x17 189#define MB4H_A9WDOG_DIS 0x18 190#define MB4H_A9WDOG_LOAD 0x19 191#define MB4H_A9WDOG_KICK 0x20 192 193/* Mailbox 4 Requests */ 194#define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0) 195#define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1) 196#define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3) 197#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0) 198#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0) 199#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1) 200#define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2) 201#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0) 202#define HOTMON_CONFIG_LOW BIT(0) 203#define HOTMON_CONFIG_HIGH BIT(1) 204#define PRCM_REQ_MB4_A9WDOG_0 (PRCM_REQ_MB4 + 0x0) 205#define PRCM_REQ_MB4_A9WDOG_1 (PRCM_REQ_MB4 + 0x1) 206#define PRCM_REQ_MB4_A9WDOG_2 (PRCM_REQ_MB4 + 0x2) 207#define PRCM_REQ_MB4_A9WDOG_3 (PRCM_REQ_MB4 + 0x3) 208#define A9WDOG_AUTO_OFF_EN BIT(7) 209#define A9WDOG_AUTO_OFF_DIS 0 210#define 
A9WDOG_ID_MASK 0xf

/* Mailbox 5 Requests */
#define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0)
#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
#define PRCMU_I2C_WRITE(slave) \
	(((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
#define PRCMU_I2C_READ(slave) \
	(((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
#define PRCMU_I2C_STOP_EN BIT(3)

/* Mailbox 5 ACKs */
#define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1)
#define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3)
#define I2C_WR_OK 0x1
#define I2C_RD_OK 0x2

#define NUM_MB 8
#define MBOX_BIT BIT
#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)

/*
 * Wakeups/IRQs
 */

#define WAKEUP_BIT_RTC BIT(0)
#define WAKEUP_BIT_RTT0 BIT(1)
#define WAKEUP_BIT_RTT1 BIT(2)
#define WAKEUP_BIT_HSI0 BIT(3)
#define WAKEUP_BIT_HSI1 BIT(4)
#define WAKEUP_BIT_CA_WAKE BIT(5)
#define WAKEUP_BIT_USB BIT(6)
#define WAKEUP_BIT_ABB BIT(7)
#define WAKEUP_BIT_ABB_FIFO BIT(8)
#define WAKEUP_BIT_SYSCLK_OK BIT(9)
#define WAKEUP_BIT_CA_SLEEP BIT(10)
#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
#define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
#define WAKEUP_BIT_ANC_OK BIT(13)
#define WAKEUP_BIT_SW_ERROR BIT(14)
#define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
#define WAKEUP_BIT_ARM BIT(17)
#define WAKEUP_BIT_HOTMON_LOW BIT(18)
#define WAKEUP_BIT_HOTMON_HIGH BIT(19)
#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
#define WAKEUP_BIT_GPIO0 BIT(23)
#define WAKEUP_BIT_GPIO1 BIT(24)
#define WAKEUP_BIT_GPIO2 BIT(25)
#define WAKEUP_BIT_GPIO3 BIT(26)
#define WAKEUP_BIT_GPIO4 BIT(27)
#define WAKEUP_BIT_GPIO5 BIT(28)
#define WAKEUP_BIT_GPIO6 BIT(29)
#define WAKEUP_BIT_GPIO7 BIT(30)
#define WAKEUP_BIT_GPIO8 BIT(31)

/*
 * Cached PRCMU firmware version.  prcmu_get_fw_version() returns &version
 * only when @valid is set (presumably set at probe after reading the TCPM
 * version field at PRCMU_FW_VERSION_OFFSET — the code doing so is outside
 * this chunk; confirm against the probe path).
 */
static struct {
	bool valid;
	struct prcmu_fw_version version;
} fw_info;
/*
 * This vector maps irq numbers to the bits in the bit field used in
 * communication with the PRCMU firmware.
 *
 * The reason for having this is to keep the irq numbers contiguous even though
 * the bits in the bit field are not. (The bits also have a tendency to move
 * around, to further complicate matters.)
 */
#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
	IRQ_ENTRY(RTC),
	IRQ_ENTRY(RTT0),
	IRQ_ENTRY(RTT1),
	IRQ_ENTRY(HSI0),
	IRQ_ENTRY(HSI1),
	IRQ_ENTRY(CA_WAKE),
	IRQ_ENTRY(USB),
	IRQ_ENTRY(ABB),
	IRQ_ENTRY(ABB_FIFO),
	IRQ_ENTRY(CA_SLEEP),
	IRQ_ENTRY(ARM),
	IRQ_ENTRY(HOTMON_LOW),
	IRQ_ENTRY(HOTMON_HIGH),
	IRQ_ENTRY(MODEM_SW_RESET_REQ),
	IRQ_ENTRY(GPIO0),
	IRQ_ENTRY(GPIO1),
	IRQ_ENTRY(GPIO2),
	IRQ_ENTRY(GPIO3),
	IRQ_ENTRY(GPIO4),
	IRQ_ENTRY(GPIO5),
	IRQ_ENTRY(GPIO6),
	IRQ_ENTRY(GPIO7),
	IRQ_ENTRY(GPIO8)
};

#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
	WAKEUP_ENTRY(RTC),
	WAKEUP_ENTRY(RTT0),
	WAKEUP_ENTRY(RTT1),
	WAKEUP_ENTRY(HSI0),
	WAKEUP_ENTRY(HSI1),
	WAKEUP_ENTRY(USB),
	WAKEUP_ENTRY(ABB),
	WAKEUP_ENTRY(ABB_FIFO),
	WAKEUP_ENTRY(ARM)
};

/*
 * mb0_transfer - state needed for mailbox 0 communication.
 * @lock: The transaction lock.
 * @dbb_irqs_lock: A lock used to handle concurrent access to (parts of)
 *	the request data.  (The old comment called this @dbb_events_lock,
 *	which does not match the field name.)
 * @mask_work: Work structure used for (un)masking wakeup interrupts.
 * @ac_wake_lock: Mutex used by the modem (AC) wake request handling
 *	(users are outside this chunk).
 * @ac_wake_work: Completion used by the modem (AC) wake request handling.
 * @req: Request data that need to persist between requests.
 */
static struct {
	spinlock_t lock;
	spinlock_t dbb_irqs_lock;
	struct work_struct mask_work;
	struct mutex ac_wake_lock;
	struct completion ac_wake_work;
	struct {
		u32 dbb_irqs;
		u32 dbb_wakeups;
		u32 abb_events;
	} req;
} mb0_transfer;

/*
 * mb1_transfer - state needed for mailbox 1 communication.
 * @lock: The transaction lock.
 * @work: The transaction completion structure.
 * @ape_opp: The current APE OPP.
 * @ack: Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	u8 ape_opp;
	struct {
		u8 header;
		u8 arm_opp;
		u8 ape_opp;
		u8 ape_voltage_status;
	} ack;
} mb1_transfer;

/*
 * mb2_transfer - state needed for mailbox 2 communication.
 * @lock: The transaction lock.
 * @work: The transaction completion structure.
 * @auto_pm_lock: The autonomous power management configuration lock.
 * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
 * @ack: Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	spinlock_t auto_pm_lock;
	bool auto_pm_enabled;
	struct {
		u8 status;
	} ack;
} mb2_transfer;

/*
 * mb3_transfer - state needed for mailbox 3 communication.
 * @lock: The request lock.
 * @sysclk_lock: A lock used to handle concurrent sysclk requests.
 * @sysclk_work: Work structure used for sysclk requests.
 */
static struct {
	spinlock_t lock;
	struct mutex sysclk_lock;
	struct completion sysclk_work;
} mb3_transfer;

/*
 * mb4_transfer - state needed for mailbox 4 communication.
 * @lock: The transaction lock.
 * @work: The transaction completion structure.
 */
static struct {
	struct mutex lock;
	struct completion work;
} mb4_transfer;

/*
 * mb5_transfer - state needed for mailbox 5 communication.
 * @lock: The transaction lock.
 * @work: The transaction completion structure.
 * @ack: Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	struct {
		u8 status;
		u8 value;
	} ack;
} mb5_transfer;

/* Non-zero while a modem (AC) wake request is outstanding. */
static atomic_t ac_wake_req_state = ATOMIC_INIT(0);

/* Spinlocks */
static DEFINE_SPINLOCK(prcmu_lock);
static DEFINE_SPINLOCK(clkout_lock);

/* Global var to runtime determine TCDM base for v2 or v1 */
static __iomem void *tcdm_base;

/*
 * struct clk_mgt - bookkeeping for one PRCM_*_MGT clock register.
 * @reg: the clock management register.
 * @pllsw: saved PLL-switch bits (initialized to 0 in the table below).
 * @branch: PLL branch type (PLL_RAW, PLL_FIX or PLL_DIV).
 * @clk38div: whether this clock supports the 38.4 MHz divider path.
 */
struct clk_mgt {
	void __iomem *reg;
	u32 pllsw;
	int branch;
	bool clk38div;
};

enum {
	PLL_RAW,
	PLL_FIX,
	PLL_DIV
};

static DEFINE_SPINLOCK(clk_mgt_lock);

#define CLK_MGT_ENTRY(_name, _branch, _clk38div)[PRCMU_##_name] = \
	{ (PRCM_##_name##_MGT), 0 , _branch, _clk38div}
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
	CLK_MGT_ENTRY(SGACLK, PLL_DIV, false),
	CLK_MGT_ENTRY(UARTCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(MSP02CLK, PLL_FIX, true),
	CLK_MGT_ENTRY(MSP1CLK, PLL_FIX, true),
	CLK_MGT_ENTRY(I2CCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(SDMMCCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(SLIMCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(PER1CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER2CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER3CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER5CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HDMICLK, PLL_FIX, false),
	CLK_MGT_ENTRY(APEATCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(APETRACECLK, PLL_DIV, true),
	CLK_MGT_ENTRY(MCDECLK, PLL_DIV, true),
	CLK_MGT_ENTRY(IPI2CCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(DSIALTCLK, PLL_FIX, false),
	CLK_MGT_ENTRY(DMACLK, PLL_DIV, true),
	CLK_MGT_ENTRY(B2R2CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(TVCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(SSPCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(RNGCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(UICCCLK, PLL_FIX, false),
};

/* Per-DSI-link PLLOUT divider selection bits in PRCM_DSI_PLLOUT_SEL. */
struct dsiclk {
	u32 divsel_mask;
	u32 divsel_shift;
	u32 divsel;
};

static struct dsiclk dsiclk[2] = {
	{
		.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_MASK,
		.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_SHIFT,
		.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
	},
	{
		.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_MASK,
		.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_SHIFT,
		.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
	}
};

/* Per-DSI-link escape clock enable/divider bits in PRCM_DSITVCLK_DIV. */
struct dsiescclk {
	u32 en;
	u32 div_mask;
	u32 div_shift;
};

static struct dsiescclk dsiescclk[3] = {
	{
		.en = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_SHIFT,
	},
	{
		.en = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_SHIFT,
	},
	{
		.en = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_SHIFT,
	}
};

/*
 * Used by MCDE to setup all necessary PRCMU registers
 */
#define PRCMU_RESET_DSIPLL 0x00004000
#define PRCMU_UNCLAMP_DSIPLL 0x00400800

#define PRCMU_CLK_PLL_DIV_SHIFT 0
#define PRCMU_CLK_PLL_SW_SHIFT 5
#define PRCMU_CLK_38 (1 << 9)
#define PRCMU_CLK_38_SRC (1 << 10)
#define PRCMU_CLK_38_DIV (1 << 11)

/* PLLDIV=12, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING 0x0000008C

/* DPI 50000000 Hz */
#define \
PRCMU_DPI_CLOCK_SETTING ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
	(16 << PRCMU_CLK_PLL_DIV_SHIFT))
#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000E00

/* D=101, N=1, R=4, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165

#define PRCMU_ENABLE_PLLDSI 0x00000001
#define PRCMU_DISABLE_PLLDSI 0x00000000
#define PRCMU_RELEASE_RESET_DSS 0x0000400C
#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000202
/* ESC clk, div0=1, div1=1, div2=3 */
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x07030101
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00030101
#define PRCMU_DSI_RESET_SW 0x00000007

#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3

/*
 * Bring the DSI PLL out of reset, program its frequency and dividers, and
 * start it, polling briefly for lock.  Always returns 0 (the PLL is re-reset
 * released even if lock was not observed within the poll window).
 */
int db8500_prcmu_enable_dsipll(void)
{
	int i;

	/* Clear DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
	/* Unclamp DSIPLL in/out */
	writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);

	/* Set DSI PLL FREQ */
	writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
	writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
	/* Enable Escape clocks */
	writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);

	/* Start DSI PLL */
	writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
	/* Reset DSI PLL */
	writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
	/* Poll for PLL lock, at most 10 x 100 us; proceed either way. */
	for (i = 0; i < 10; i++) {
		if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
					== PRCMU_PLLDSI_LOCKP_LOCKED)
			break;
		udelay(100);
	}
	/* Set DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
	return 0;
}

/* Stop the DSI PLL and its escape clocks.  Always returns 0. */
int db8500_prcmu_disable_dsipll(void)
{
	/* Disable dsi pll */
	writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
	/* Disable escapeclock */
	writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
	return 0;
}

/*
 * Program the HDMI, TV and LCD clock management registers for the display,
 * under the PRCM hardware semaphore.  Always returns 0.
 */
int db8500_prcmu_set_display_clocks(void)
{
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
	writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
	writel(PRCMU_DPI_CLOCK_SETTING, PRCM_LCDCLK_MGT);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/* Read a PRCMU register at byte offset @reg from the PRCMU base. */
u32 db8500_prcmu_read(unsigned int reg)
{
	return readl(_PRCMU_BASE + reg);
}

/* Write @value to the PRCMU register at offset @reg, under prcmu_lock. */
void db8500_prcmu_write(unsigned int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&prcmu_lock, flags);
	writel(value, (_PRCMU_BASE + reg));
	spin_unlock_irqrestore(&prcmu_lock, flags);
}

/*
 * Atomic (w.r.t. prcmu_lock) read-modify-write: only the bits set in @mask
 * are replaced with the corresponding bits of @value.
 */
void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&prcmu_lock, flags);
	val = readl(_PRCMU_BASE + reg);
	val = ((val & ~mask) | (value & mask));
	writel(val, (_PRCMU_BASE + reg));
	spin_unlock_irqrestore(&prcmu_lock, flags);
}

/* Return the cached firmware version, or NULL if it has not been read. */
struct prcmu_fw_version *prcmu_get_fw_version(void)
{
	return fw_info.valid ? &fw_info.version : NULL;
}

/* True if the AVS data in TCDM marks the ARM MAX OPP mode as enabled. */
bool prcmu_has_arm_maxopp(void)
{
	return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
		PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}

/**
 * prcmu_get_boot_status - PRCMU boot status checking
 * Returns: the current PRCMU boot status
 */
int prcmu_get_boot_status(void)
{
	return readb(tcdm_base + PRCM_BOOT_STATUS);
}

/**
 * prcmu_set_rc_a2p - This function is used to run few power state sequences
 * @val: Value to be set, i.e. transition requested
 * Returns: 0 on success, -EINVAL on invalid argument
 *
 * This function is used to run the following power state sequences -
 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
 */
int prcmu_set_rc_a2p(enum romcode_write val)
{
	if (val < RDY_2_DS || val > RDY_2_XP70_RST)
		return -EINVAL;
	writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
	return 0;
}

/**
 * prcmu_get_rc_p2a - This function is used to get power state sequences
 * Returns: the power transition that has last happened
 *
 * This function can return the following transitions-
 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
 */
enum romcode_read prcmu_get_rc_p2a(void)
{
	return readb(tcdm_base + PRCM_ROMCODE_P2A);
}

/**
 * prcmu_get_xp70_current_state - Return the current XP70 power mode
 * Returns: Returns the current AP(ARM) power mode: init,
 * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
 */
enum ap_pwrst prcmu_get_xp70_current_state(void)
{
	return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
}

/**
 * prcmu_config_clkout - Configure one of the programmable clock outputs.
 * @clkout: The CLKOUT number (0 or 1).
 * @source: The clock to be used (one of the PRCMU_CLKSRC_*).
 * @div: The divider to be applied.
 *
 * Configures one of the programmable clock outputs (CLKOUTs).
 * @div should be in the range [1,63] to request a configuration, or 0 to
 * inform that the configuration is no longer requested.
 */
int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
{
	/* Per-CLKOUT reference count of active configuration requests. */
	static int requests[2];
	int r = 0;
	unsigned long flags;
	u32 val;
	u32 bits;
	u32 mask;
	u32 div_mask;

	BUG_ON(clkout > 1);
	BUG_ON(div > 63);
	BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));

	/* Dropping (div == 0) a request that was never made is an error. */
	if (!div && !requests[clkout])
		return -EINVAL;

	switch (clkout) {
	case 0:
		div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
		mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
		bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
			(div << PRCM_CLKOCR_CLKODIV0_SHIFT));
		break;
	case 1:
		div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
		mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
			PRCM_CLKOCR_CLK1TYPE);
		bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
			(div << PRCM_CLKOCR_CLKODIV1_SHIFT));
		break;
	}
	bits &= mask;

	spin_lock_irqsave(&clkout_lock, flags);

	val = readl(PRCM_CLKOCR);
	if (val & div_mask) {
		/*
		 * The CLKOUT is already configured: a new request must match
		 * the existing configuration exactly, and a release must
		 * match the configured source.
		 */
		if (div) {
			if ((val & mask) != bits) {
				r = -EBUSY;
				goto unlock_and_return;
			}
		} else {
			if ((val & mask & ~div_mask) != bits) {
				r = -EINVAL;
				goto unlock_and_return;
			}
		}
	}
	writel((bits | (val & ~mask)), PRCM_CLKOCR);
	requests[clkout] += (div ? 1 : -1);

unlock_and_return:
	spin_unlock_irqrestore(&clkout_lock, flags);

	return r;
}

/*
 * Request an AP power state transition via mailbox 0.
 * This only posts the request; the result is read back later with
 * db8500_prcmu_get_power_state_result().  Always returns 0.
 */
int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
{
	unsigned long flags;

	BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	/* Wait until the mailbox is free before writing the request. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
	writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
	writeb((keep_ulp_clk ?
1 : 0), 782 (tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE)); 783 writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI)); 784 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET); 785 786 spin_unlock_irqrestore(&mb0_transfer.lock, flags); 787 788 return 0; 789} 790 791u8 db8500_prcmu_get_power_state_result(void) 792{ 793 return readb(tcdm_base + PRCM_ACK_MB0_AP_PWRSTTR_STATUS); 794} 795 796/* This function decouple the gic from the prcmu */ 797int db8500_prcmu_gic_decouple(void) 798{ 799 u32 val = readl(PRCM_A9_MASK_REQ); 800 801 /* Set bit 0 register value to 1 */ 802 writel(val | PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ, 803 PRCM_A9_MASK_REQ); 804 805 /* Make sure the register is updated */ 806 readl(PRCM_A9_MASK_REQ); 807 808 /* Wait a few cycles for the gic mask completion */ 809 udelay(1); 810 811 return 0; 812} 813 814/* This function recouple the gic with the prcmu */ 815int db8500_prcmu_gic_recouple(void) 816{ 817 u32 val = readl(PRCM_A9_MASK_REQ); 818 819 /* Set bit 0 register value to 0 */ 820 writel(val & ~PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ, PRCM_A9_MASK_REQ); 821 822 return 0; 823} 824 825#define PRCMU_GIC_NUMBER_REGS 5 826 827/* 828 * This function checks if there are pending irq on the gic. It only 829 * makes sense if the gic has been decoupled before with the 830 * db8500_prcmu_gic_decouple function. Disabling an interrupt only 831 * disables the forwarding of the interrupt to any CPU interface. It 832 * does not prevent the interrupt from changing state, for example 833 * becoming pending, or active and pending if it is already 834 * active. Hence, we have to check the interrupt is pending *and* is 835 * active. 836 */ 837bool db8500_prcmu_gic_pending_irq(void) 838{ 839 u32 pr; /* Pending register */ 840 u32 er; /* Enable register */ 841 void __iomem *dist_base = __io_address(U8500_GIC_DIST_BASE); 842 int i; 843 844 /* 5 registers. 
STI & PPI not skipped */ 845 for (i = 0; i < PRCMU_GIC_NUMBER_REGS; i++) { 846 847 pr = readl_relaxed(dist_base + GIC_DIST_PENDING_SET + i * 4); 848 er = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 849 850 if (pr & er) 851 return true; /* There is a pending interrupt */ 852 } 853 854 return false; 855} 856 857/* 858 * This function checks if there are pending interrupt on the 859 * prcmu which has been delegated to monitor the irqs with the 860 * db8500_prcmu_copy_gic_settings function. 861 */ 862bool db8500_prcmu_pending_irq(void) 863{ 864 u32 it, im; 865 int i; 866 867 for (i = 0; i < PRCMU_GIC_NUMBER_REGS - 1; i++) { 868 it = readl(PRCM_ARMITVAL31TO0 + i * 4); 869 im = readl(PRCM_ARMITMSK31TO0 + i * 4); 870 if (it & im) 871 return true; /* There is a pending interrupt */ 872 } 873 874 return false; 875} 876 877/* 878 * This function checks if the specified cpu is in in WFI. It's usage 879 * makes sense only if the gic is decoupled with the db8500_prcmu_gic_decouple 880 * function. Of course passing smp_processor_id() to this function will 881 * always return false... 882 */ 883bool db8500_prcmu_is_cpu_in_wfi(int cpu) 884{ 885 return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : 886 PRCM_ARM_WFI_STANDBY_WFI0; 887} 888 889/* 890 * This function copies the gic SPI settings to the prcmu in order to 891 * monitor them and abort/finish the retention/off sequence or state. 892 */ 893int db8500_prcmu_copy_gic_settings(void) 894{ 895 u32 er; /* Enable register */ 896 void __iomem *dist_base = __io_address(U8500_GIC_DIST_BASE); 897 int i; 898 899 /* We skip the STI and PPI */ 900 for (i = 0; i < PRCMU_GIC_NUMBER_REGS - 1; i++) { 901 er = readl_relaxed(dist_base + 902 GIC_DIST_ENABLE_SET + (i + 1) * 4); 903 writel(er, PRCM_ARMITMSK31TO0 + i * 4); 904 } 905 906 return 0; 907} 908 909/* This function should only be called while mb0_transfer.lock is held. 
*/
static void config_wakeups(void)
{
	const u8 header[2] = {
		MB0H_CONFIG_WAKEUPS_EXE,
		MB0H_CONFIG_WAKEUPS_SLEEP
	};
	/* Last values sent to the firmware, to skip redundant transfers. */
	static u32 last_dbb_events;
	static u32 last_abb_events;
	u32 dbb_events;
	u32 abb_events;
	unsigned int i;

	dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
	/* The AC wake/sleep acks must always be enabled as wakeups. */
	dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);

	abb_events = mb0_transfer.req.abb_events;

	if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
		return;

	/* Send the configuration for both the EXE and the SLEEP case. */
	for (i = 0; i < 2; i++) {
		while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
			cpu_relax();
		writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
		writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
		writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
		writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
	}
	last_dbb_events = dbb_events;
	last_abb_events = abb_events;
}

/*
 * Enable the wakeups given by the @wakeups bit mask of
 * PRCMU_WAKEUP_INDEX_* indices (replaces the previous set).
 */
void db8500_prcmu_enable_wakeups(u32 wakeups)
{
	unsigned long flags;
	u32 bits;
	int i;

	BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));

	/* Translate generic wakeup indices into PRCMU wakeup bits. */
	for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
		if (wakeups & BIT(i))
			bits |= prcmu_wakeup_bit[i];
	}

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.dbb_wakeups = bits;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

/* Configure which ABB (AB8500) events are read out as wakeups. */
void db8500_prcmu_config_abb_event_readout(u32 abb_events)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.abb_events = abb_events;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

/*
 * Return (through @buf) the ack-mailbox-0 buffer currently holding the
 * ABB (4500) wakeup events, as selected by the firmware's read pointer.
 */
void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
{
	if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
	else
		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
}

/**
 * db8500_prcmu_set_arm_opp - set the appropriate ARM OPP
 * @opp: The new ARM operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the ARM.
 */
int db8500_prcmu_set_arm_opp(u8 opp)
{
	int r;

	if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
		return -EINVAL;

	r = 0;

	mutex_lock(&mb1_transfer.lock);

	/* Wait until mailbox 1 is free, then post the OPP request. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/* The ack must echo both the header and the requested OPP. */
	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.arm_opp != opp))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * db8500_prcmu_get_arm_opp - get the current ARM OPP
 *
 * Returns: the current ARM OPP
 */
int db8500_prcmu_get_arm_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
}

/**
 * db8500_prcmu_get_ddr_opp - get the current DDR OPP
 *
 * Returns: the current DDR OPP
 */
int db8500_prcmu_get_ddr_opp(void)
{
	return readb(PRCM_DDR_SUBSYS_APE_MINBW);
}

/**
 * db8500_prcmu_set_ddr_opp - set the appropriate DDR OPP
 * @opp: The new DDR operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the DDR.
 */
int db8500_prcmu_set_ddr_opp(u8 opp)
{
	if (opp < DDR_100_OPP || opp > DDR_25_OPP)
		return -EINVAL;
	/* Changing the DDR OPP can hang the hardware pre-v21 */
	if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
		writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);

	return 0;
}

/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
static void request_even_slower_clocks(bool enable)
{
	void __iomem *clock_reg[] = {
		PRCM_ACLK_MGT,
		PRCM_DMACLK_MGT
	};
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	for (i = 0; i < ARRAY_SIZE(clock_reg); i++) {
		u32 val;
		u32 div;

		val = readl(clock_reg[i]);
		div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
		if (enable) {
			/*
			 * Only double the divider when it is in (1, 15], so
			 * the doubled value still fits the divider field and
			 * we never double a "no divide" setting.
			 */
			if ((div <= 1) || (div > 15)) {
				pr_err("prcmu: Bad clock divider %d in %s\n",
					div, __func__);
				goto unlock_and_return;
			}
			div <<= 1;
		} else {
			/* Nothing to undo if the divider was never doubled. */
			if (div <= 2)
				goto unlock_and_return;
			div >>= 1;
		}
		val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
			(div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
		writel(val, clock_reg[i]);
	}

unlock_and_return:
	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);
}

/**
 * db8500_prcmu_set_ape_opp - set the appropriate APE OPP
 * @opp: The new APE operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the APE.
 */
int db8500_prcmu_set_ape_opp(u8 opp)
{
	int r = 0;

	if (opp == mb1_transfer.ape_opp)
		return 0;

	mutex_lock(&mb1_transfer.lock);

	/* Leaving APE_50_PARTLY_25_OPP: restore the halved clocks first. */
	if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)
		request_even_slower_clocks(false);

	/* No firmware message is needed unless the 100% OPP is involved. */
	if ((opp != APE_100_OPP) && (mb1_transfer.ape_opp != APE_100_OPP))
		goto skip_message;

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	/* APE_50_PARTLY_25_OPP is APE_50_OPP plus locally halved clocks. */
	writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp),
		(tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/*
	 * NOTE(review): for APE_50_PARTLY_25_OPP the request carries
	 * APE_50_OPP, yet the ack is compared against @opp — confirm the
	 * firmware echoes the originally requested value in that case.
	 */
	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.ape_opp != opp))
		r = -EIO;

skip_message:
	/*
	 * Halve the clocks when entering APE_50_PARTLY_25_OPP succeeded, or
	 * re-halve them if we failed to leave that state.
	 */
	if ((!r && (opp == APE_50_PARTLY_25_OPP)) ||
		(r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)))
		request_even_slower_clocks(true);
	if (!r)
		mb1_transfer.ape_opp = opp;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * db8500_prcmu_get_ape_opp - get the current APE OPP
 *
 * Returns: the current APE OPP
 */
int db8500_prcmu_get_ape_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
}

/**
 * prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
 * @enable: true to request the higher voltage, false to drop a request.
 *
 * Calls to this function to enable and disable requests must be balanced.
 */
int prcmu_request_ape_opp_100_voltage(bool enable)
{
	int r = 0;
	u8 header;
	/* Reference count of outstanding voltage requests (lock-protected). */
	static unsigned int requests;

	mutex_lock(&mb1_transfer.lock);

	if (enable) {
		/* Only the first request needs a firmware message. */
		if (0 != requests++)
			goto unlock_and_return;
		header = MB1H_REQUEST_APE_OPP_100_VOLT;
	} else {
		if (requests == 0) {
			/* Unbalanced release. */
			r = -EIO;
			goto unlock_and_return;
		} else if (1 != requests--) {
			/* Other requests remain; nothing to tell firmware. */
			goto unlock_and_return;
		}
		header = MB1H_RELEASE_APE_OPP_100_VOLT;
	}

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/* Bit 0 of the voltage status signals failure. */
	if ((mb1_transfer.ack.header != header) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
 *
 * This function releases the power state requirements of a USB wakeup.
 */
int prcmu_release_usb_wakeup_state(void)
{
	int r = 0;

	mutex_lock(&mb1_transfer.lock);

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_RELEASE_USB_WAKEUP,
		(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/*
 * Turn the SOC0 or SOC1 PLL on or off through a mailbox 1 request.
 * Returns 0 on success, -EINVAL for other clocks, -EIO on a bad ack.
 */
static int request_pll(u8 clock, bool enable)
{
	int r = 0;

	/* Translate the clock identifier into the firmware on/off command. */
	if (clock == PRCMU_PLLSOC0)
		clock = (enable ? PLL_SOC0_ON : PLL_SOC0_OFF);
	else if (clock == PRCMU_PLLSOC1)
		clock = (enable ? PLL_SOC1_ON : PLL_SOC1_OFF);
	else
		return -EINVAL;

	mutex_lock(&mb1_transfer.lock);

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_PLL_ON_OFF, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(clock, (tcdm_base + PRCM_REQ_MB1_PLL_ON_OFF));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if (mb1_transfer.ack.header != MB1H_PLL_ON_OFF)
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * db8500_prcmu_set_epod - set the state of a EPOD (power domain)
 * @epod_id: The EPOD to set
 * @epod_state: The new EPOD state
 *
 * This function sets the state of a EPOD (power domain). It may not be called
 * from interrupt context.
 */
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
{
	int r = 0;
	bool ram_retention = false;
	int i;

	/* check argument */
	BUG_ON(epod_id >= NUM_EPOD_ID);

	/* set flag if retention is possible */
	switch (epod_id) {
	case EPOD_ID_SVAMMDSP:
	case EPOD_ID_SIAMMDSP:
	case EPOD_ID_ESRAM12:
	case EPOD_ID_ESRAM34:
		ram_retention = true;
		break;
	}

	/* check argument */
	BUG_ON(epod_state > EPOD_STATE_ON);
	BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);

	/* get lock */
	mutex_lock(&mb2_transfer.lock);

	/* wait for mailbox */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
		cpu_relax();

	/* fill in mailbox: no change for all EPODs except the target one */
	for (i = 0; i < NUM_EPOD_ID; i++)
		writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
	writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));

	writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));

	writel(MBOX_BIT(2), PRCM_MBOX_CPU_SET);

	/*
	 * The current firmware version does not handle errors correctly,
	 * and we cannot recover if there is an error.
	 * This is expected to change when the firmware is updated.
	 */
	if (!wait_for_completion_timeout(&mb2_transfer.work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
		goto unlock_and_return;
	}

	if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb2_transfer.lock);
	return r;
}

/**
 * prcmu_configure_auto_pm - Configure autonomous power management.
 * @sleep: Configuration for ApSleep.
 * @idle: Configuration for ApIdle.
 */
void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
	struct prcmu_auto_pm_config *idle)
{
	u32 sleep_cfg;
	u32 idle_cfg;
	unsigned long flags;

	BUG_ON((sleep == NULL) || (idle == NULL));

	/* Pack the ApSleep configuration into one 32-bit word. */
	sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));

	/* Pack the ApIdle configuration the same way. */
	idle_cfg = (idle->sva_auto_pm_enable & 0xF);
	idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
	idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
	idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));

	spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);

	/*
	 * The autonomous power management configuration is done through
	 * fields in mailbox 2, but these fields are only used as shared
	 * variables - i.e. there is no need to send a message.
	 */
	writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
	writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));

	mb2_transfer.auto_pm_enabled =
		((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));

	spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
}
EXPORT_SYMBOL(prcmu_configure_auto_pm);

/* Report whether any autonomous power management mode was enabled. */
bool prcmu_is_auto_pm_enabled(void)
{
	return mb2_transfer.auto_pm_enabled;
}

/*
 * Request the system clock on or off through mailbox 3.
 * mb3_transfer.sysclk_lock serializes SysClk requests; the inner spinlock
 * protects the mailbox registers.
 */
static int request_sysclk(bool enable)
{
	int r;
	unsigned long flags;

	r = 0;

	mutex_lock(&mb3_transfer.sysclk_lock);

	spin_lock_irqsave(&mb3_transfer.lock, flags);

	/* Busy-wait until mailbox 3 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
		cpu_relax();

	writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));

	writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
	writel(MBOX_BIT(3), PRCM_MBOX_CPU_SET);

	spin_unlock_irqrestore(&mb3_transfer.lock, flags);

	/*
	 * The firmware only sends an ACK if we want to enable the
	 * SysClk, and it succeeds.
	 */
	if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	}

	mutex_unlock(&mb3_transfer.sysclk_lock);

	return r;
}

/* Start or stop the timer clock via the PRCM_TCR register. */
static int request_timclk(bool enable)
{
	u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);

	if (!enable)
		val |= PRCM_TCR_STOP_TIMERS;
	writel(val, PRCM_TCR);

	return 0;
}

/*
 * Gate or ungate one of the clk_mgt[] register clocks.  The PLL switch
 * setting is saved on disable and restored on enable.
 */
static int request_clock(u8 clock, bool enable)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(clk_mgt[clock].reg);
	if (enable) {
		val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
	} else {
		/* Remember the PLL switch setting for the next enable. */
		clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
		val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
	}
	writel(val, clk_mgt[clock].reg);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/*
 * The SGA clock additionally needs the ICN2 clock-gating bypass set while
 * it is enabled: set the bypass before enabling, clear it after disabling.
 */
static int request_sga_clock(u8 clock, bool enable)
{
	u32 val;
	int ret;

	if (enable) {
		val = readl(PRCM_CGATING_BYPASS);
		writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
	}

	ret = request_clock(clock, enable);

	if (!ret && !enable) {
		val = readl(PRCM_CGATING_BYPASS);
		writel(val & ~PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
	}

	return ret;
}

/* True when both DSI PLL lock indications are set. */
static inline bool plldsi_locked(void)
{
	return (readl(PRCM_PLLDSI_LOCKP) &
		(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
		 PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3)) ==
		(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
		 PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3);
}

/*
 * Power the DSI PLL up or down.  On enable, poll for lock (up to ~1 ms)
 * and release the DSI PLL reset; if lock is never reached, undo the
 * enable and return -EAGAIN.
 */
static int request_plldsi(bool enable)
{
	int r = 0;
	u32 val;

	/* Clear (on enable) or set (on disable) the DSI PLL clamps. */
	writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
		PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI), (enable ?
		PRCM_MMIP_LS_CLAMP_CLR : PRCM_MMIP_LS_CLAMP_SET));

	val = readl(PRCM_PLLDSI_ENABLE);
	if (enable)
		val |= PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
	else
		val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
	writel(val, PRCM_PLLDSI_ENABLE);

	if (enable) {
		unsigned int i;
		bool locked = plldsi_locked();

		/* Poll for PLL lock, 100 us at a time. */
		for (i = 10; !locked && (i > 0); --i) {
			udelay(100);
			locked = plldsi_locked();
		}
		if (locked) {
			writel(PRCM_APE_RESETN_DSIPLL_RESETN,
				PRCM_APE_RESETN_SET);
		} else {
			/* Lock failed: re-clamp and disable the PLL again. */
			writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
				PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI),
				PRCM_MMIP_LS_CLAMP_SET);
			val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
			writel(val, PRCM_PLLDSI_ENABLE);
			r = -EAGAIN;
		}
	} else {
		writel(PRCM_APE_RESETN_DSIPLL_RESETN, PRCM_APE_RESETN_CLR);
	}
	return r;
}

/* Route DSI clock n to its configured divider, or to "off". */
static int request_dsiclk(u8 n, bool enable)
{
	u32 val;

	val = readl(PRCM_DSI_PLLOUT_SEL);
	val &= ~dsiclk[n].divsel_mask;
	val |= ((enable ? dsiclk[n].divsel : PRCM_DSI_PLLOUT_SEL_OFF) <<
		dsiclk[n].divsel_shift);
	writel(val, PRCM_DSI_PLLOUT_SEL);
	return 0;
}

/* Set or clear the enable bit for DSI escape clock n. */
static int request_dsiescclk(u8 n, bool enable)
{
	u32 val;

	val = readl(PRCM_DSITVCLK_DIV);
	enable ? (val |= dsiescclk[n].en) : (val &= ~dsiescclk[n].en);
	writel(val, PRCM_DSITVCLK_DIV);
	return 0;
}

/**
 * db8500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
 * @clock: The clock for which the request is made.
 * @enable: Whether the clock should be enabled (true) or disabled (false).
 *
 * This function should only be used by the clock implementation.
 * Do not use it from any other place!
 */
int db8500_prcmu_request_clock(u8 clock, bool enable)
{
	/* Dispatch to the handler matching the clock class. */
	if (clock == PRCMU_SGACLK)
		return request_sga_clock(clock, enable);
	else if (clock < PRCMU_NUM_REG_CLOCKS)
		return request_clock(clock, enable);
	else if (clock == PRCMU_TIMCLK)
		return request_timclk(enable);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return request_dsiclk((clock - PRCMU_DSI0CLK), enable);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return request_dsiescclk((clock - PRCMU_DSI0ESCCLK), enable);
	else if (clock == PRCMU_PLLDSI)
		return request_plldsi(enable);
	else if (clock == PRCMU_SYSCLK)
		return request_sysclk(enable);
	else if ((clock == PRCMU_PLLSOC0) || (clock == PRCMU_PLLSOC1))
		return request_pll(clock, enable);
	else
		return -EINVAL;
}

/*
 * Compute a PLL output rate from its FREQ register: multiply the source
 * rate by the D factor and divide by the N and R factors (and by 2 for
 * SELDIV2 and, on the relevant branch, DIV2EN).
 */
static unsigned long pll_rate(void __iomem *reg, unsigned long src_rate,
	int branch)
{
	u64 rate;
	u32 val;
	u32 d;
	u32 div = 1;

	val = readl(reg);

	rate = src_rate;
	rate *= ((val & PRCM_PLL_FREQ_D_MASK) >> PRCM_PLL_FREQ_D_SHIFT);

	/* N and R factors of 0 or 1 mean no division. */
	d = ((val & PRCM_PLL_FREQ_N_MASK) >> PRCM_PLL_FREQ_N_SHIFT);
	if (d > 1)
		div *= d;

	d = ((val & PRCM_PLL_FREQ_R_MASK) >> PRCM_PLL_FREQ_R_SHIFT);
	if (d > 1)
		div *= d;

	if (val & PRCM_PLL_FREQ_SELDIV2)
		div *= 2;

	/* The fixed branch, and the divided branch of SOC0/DDR with DIV2EN
	 * set, run at half rate. */
	if ((branch == PLL_FIX) || ((branch == PLL_DIV) &&
		(val & PRCM_PLL_FREQ_DIV2EN) &&
		((reg == PRCM_PLLSOC0_FREQ) ||
		 (reg == PRCM_PLLDDR_FREQ))))
		div *= 2;

	(void)do_div(rate, div);

	return (unsigned long)rate;
}

#define ROOT_CLOCK_RATE 38400000

/*
 * Compute the current rate of a clk_mgt[] register clock from its clock
 * management register and the selected PLL.
 */
static unsigned long clock_rate(u8 clock)
{
	u32 val;
	u32 pllsw;
	unsigned long rate = ROOT_CLOCK_RATE;

	val = readl(clk_mgt[clock].reg);

	/* Clocked directly from the 38.4 MHz root, optionally halved. */
	if (val & PRCM_CLK_MGT_CLK38) {
		if (clk_mgt[clock].clk38div && (val & PRCM_CLK_MGT_CLK38DIV))
			rate /= 2;
		return rate;
	}

	/* Use the saved PLL switch bits if the clock is currently gated. */
	val |= clk_mgt[clock].pllsw;
	pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);

	if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC0)
		rate = pll_rate(PRCM_PLLSOC0_FREQ, rate, clk_mgt[clock].branch);
	else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC1)
		rate = pll_rate(PRCM_PLLSOC1_FREQ, rate, clk_mgt[clock].branch);
	else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_DDR)
		rate = pll_rate(PRCM_PLLDDR_FREQ, rate, clk_mgt[clock].branch);
	else
		return 0;

	/* The SGA clock has a special divide-by-2.5 mode. */
	if ((clock == PRCMU_SGACLK) &&
		(val & PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN)) {
		u64 r = (rate * 10);

		(void)do_div(r, 25);
		return (unsigned long)r;
	}
	val &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
	if (val)
		return rate / val;
	else
		return 0;
}

/*
 * Compute the current rate of DSI clock n from the PLLOUT selection:
 * PHI, PHI/2 or PHI/4 of the DSI PLL output.
 */
static unsigned long dsiclk_rate(u8 n)
{
	u32 divsel;
	u32 div = 1;

	divsel = readl(PRCM_DSI_PLLOUT_SEL);
	divsel = ((divsel & dsiclk[n].divsel_mask) >> dsiclk[n].divsel_shift);

	/* If the clock is off, report the rate it would have when enabled. */
	if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
		divsel = dsiclk[n].divsel;

	switch (divsel) {
	case PRCM_DSI_PLLOUT_SEL_PHI_4:
		div *= 2;
		/* fallthrough */
	case PRCM_DSI_PLLOUT_SEL_PHI_2:
		div *= 2;
		/* fallthrough */
	case PRCM_DSI_PLLOUT_SEL_PHI:
		return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
			PLL_RAW) / div;
	default:
		return 0;
	}
}

/* Compute the current rate of DSI escape clock n (TV clock / divider). */
static unsigned long dsiescclk_rate(u8 n)
{
	u32 div;

	div = readl(PRCM_DSITVCLK_DIV);
	div = ((div & dsiescclk[n].div_mask) >> (dsiescclk[n].div_shift));
	return clock_rate(PRCMU_TVCLK) / max((u32)1, div);
}

/* Return the current rate, in Hz, of the given clock (0 if unknown). */
unsigned long prcmu_clock_rate(u8 clock)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return clock_rate(clock);
	else if (clock == PRCMU_TIMCLK)
		return ROOT_CLOCK_RATE / 16;
	else if (clock == PRCMU_SYSCLK)
		return ROOT_CLOCK_RATE;
	else if (clock == PRCMU_PLLSOC0)
		return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLSOC1)
		return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLDDR)
		return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLDSI)
		return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
			PLL_RAW);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return dsiclk_rate(clock - PRCMU_DSI0CLK);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return dsiescclk_rate(clock - PRCMU_DSI0ESCCLK);
	else
		return 0;
}

/* Rate of the source feeding a clock, given its management register value. */
static unsigned long clock_source_rate(u32 clk_mgt_val, int branch)
{
	if (clk_mgt_val & PRCM_CLK_MGT_CLK38)
		return ROOT_CLOCK_RATE;
	clk_mgt_val &= PRCM_CLK_MGT_CLKPLLSW_MASK;
	if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC0)
		return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, branch);
	else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC1)
		return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, branch);
	else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_DDR)
		return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, branch);
	else
		return 0;
}

/*
 * Smallest divider (>= 1) such that (src_rate / divider) <= rate.
 */
static u32 clock_divider(unsigned long src_rate, unsigned long rate)
{
	u32 div;

	div = (src_rate / rate);
	if (div == 0)
		return 1;
	/* Round up so the resulting rate does not exceed the target. */
	if (rate < (src_rate / div))
		div++;
	return div;
}

/*
 * Round @rate to a rate achievable by the given register clock, honouring
 * the CLK38 divider limits, the SGA 2.5 divider, and the 5-bit PLL divider.
 */
static long round_clock_rate(u8 clock, unsigned long rate)
{
	u32 val;
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	val = readl(clk_mgt[clock].reg);
	src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
		clk_mgt[clock].branch);
	div = clock_divider(src_rate, rate);
	if (val & PRCM_CLK_MGT_CLK38) {
		/* The 38.4 MHz branch only supports dividers 1 and 2. */
		if (clk_mgt[clock].clk38div) {
			if (div > 2)
				div = 2;
		} else {
			div = 1;
		}
	} else if ((clock == PRCMU_SGACLK) && (div == 3)) {
		/* A divider of 3 is approximated by SGA's divide-by-2.5. */
		u64 r = (src_rate * 10);

		(void)do_div(r, 25);
		if (r <= rate)
			return (unsigned long)r;
	}
	rounded_rate = (src_rate / min(div, (u32)31));

	return rounded_rate;
}

#define MIN_PLL_VCO_RATE 600000000ULL
#define MAX_PLL_VCO_RATE 1680640000ULL

/*
 * Round @rate to a rate achievable by the DSI PLL: search R (1..7) and
 * D (6..255) for the best rate not above @rate while keeping the VCO
 * (2 * D * src_rate / R) within its legal range.
 */
static long round_plldsi_rate(unsigned long rate)
{
	long rounded_rate = 0;
	unsigned long src_rate;
	unsigned long rem;
	u32 r;

	src_rate = clock_rate(PRCMU_HDMICLK);
	rem = rate;

	for (r = 7; (rem > 0) && (r > 0); r--) {
		u64 d;

		d = (r * rate);
		(void)do_div(d, src_rate);
		if (d < 6)
			d = 6;
		else if (d > 255)
			d = 255;
		d *= src_rate;
		/* Skip combinations whose VCO rate is out of range. */
		if (((2 * d) < (r * MIN_PLL_VCO_RATE)) ||
			((r * MAX_PLL_VCO_RATE) < (2 * d)))
			continue;
		(void)do_div(d, r);
		if (rate < d) {
			if (rounded_rate == 0)
				rounded_rate = (long)d;
			break;
		}
		if ((rate - d) < rem) {
			rem = (rate - d);
			rounded_rate = (long)d;
		}
	}
	return rounded_rate;
}

/* Round @rate to what the DSI clock dividers (1, 2 or 4) can produce. */
static long round_dsiclk_rate(unsigned long rate)
{
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	src_rate = pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
		PLL_RAW);
	div = clock_divider(src_rate, rate);
	rounded_rate = (src_rate / ((div > 2) ? 4 : div));

	return rounded_rate;
}

/* Round @rate to what the 8-bit DSI escape clock divider can produce. */
static long round_dsiescclk_rate(unsigned long rate)
{
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	src_rate = clock_rate(PRCMU_TVCLK);
	div = clock_divider(src_rate, rate);
	rounded_rate = (src_rate / min(div, (u32)255));

	return rounded_rate;
}

/* Round @rate to the nearest achievable rate for the given clock. */
long prcmu_round_clock_rate(u8 clock, unsigned long rate)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return round_clock_rate(clock, rate);
	else if (clock == PRCMU_PLLDSI)
		return round_plldsi_rate(rate);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return round_dsiclk_rate(rate);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return round_dsiescclk_rate(rate);
	else
		return (long)prcmu_clock_rate(clock);
}

/*
 * Program the divider of a clk_mgt[] register clock to get as close to
 * @rate as possible (same rounding rules as round_clock_rate()).
 */
static void set_clock_rate(u8 clock, unsigned long rate)
{
	u32 val;
	u32 div;
	unsigned long src_rate;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(clk_mgt[clock].reg);
	src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
		clk_mgt[clock].branch);
	div = clock_divider(src_rate, rate);
	if (val & PRCM_CLK_MGT_CLK38) {
		if (clk_mgt[clock].clk38div) {
			if (div > 1)
				val |= PRCM_CLK_MGT_CLK38DIV;
			else
				val &= ~PRCM_CLK_MGT_CLK38DIV;
		}
	} else if (clock == PRCMU_SGACLK) {
		val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK |
			PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN);
		/* Prefer the divide-by-2.5 mode over a divider of 3. */
		if (div == 3) {
			u64 r = (src_rate * 10);

			(void)do_div(r, 25);
			if (r <= rate) {
				val |= PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN;
				div = 0;
			}
		}
		val |= min(div, (u32)31);
	} else {
		val &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
		val |= min(div, (u32)31);
	}
	writel(val, clk_mgt[clock].reg);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);
}

/*
 * Program the DSI PLL FREQ register for the rate closest to (but ideally
 * not above) @rate; same search as round_plldsi_rate().  Returns -EINVAL
 * if no legal D/R combination exists.
 */
static int set_plldsi_rate(unsigned long rate)
{
	unsigned long src_rate;
	unsigned long rem;
	u32 pll_freq = 0;
	u32 r;

	src_rate = clock_rate(PRCMU_HDMICLK);
	rem = rate;

	for (r = 7; (rem > 0) && (r > 0); r--) {
		u64 d;
		u64 hwrate;

		d = (r * rate);
		(void)do_div(d, src_rate);
		if (d < 6)
			d = 6;
		else if (d > 255)
			d = 255;
		hwrate = (d * src_rate);
		/* Skip combinations whose VCO rate is out of range. */
		if (((2 * hwrate) < (r * MIN_PLL_VCO_RATE)) ||
			((r * MAX_PLL_VCO_RATE) < (2 * hwrate)))
			continue;
		(void)do_div(hwrate, r);
		if (rate < hwrate) {
			if (pll_freq == 0)
				pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
					(r << PRCM_PLL_FREQ_R_SHIFT));
			break;
		}
		if ((rate - hwrate) < rem) {
			rem = (rate - hwrate);
			pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
				(r << PRCM_PLL_FREQ_R_SHIFT));
		}
	}
	if (pll_freq == 0)
		return -EINVAL;

	/* N is fixed at 1. */
	pll_freq |= (1 << PRCM_PLL_FREQ_N_SHIFT);
	writel(pll_freq, PRCM_PLLDSI_FREQ);

	return 0;
}

/* Select the PHI, PHI/2 or PHI/4 tap for DSI clock n to approach @rate. */
static void set_dsiclk_rate(u8 n, unsigned long rate)
{
	u32 val;
	u32 div;

	div = clock_divider(pll_rate(PRCM_PLLDSI_FREQ,
		clock_rate(PRCMU_HDMICLK), PLL_RAW), rate);

	dsiclk[n].divsel = (div == 1) ? PRCM_DSI_PLLOUT_SEL_PHI :
			   (div == 2) ? PRCM_DSI_PLLOUT_SEL_PHI_2 :
			   /* else */	PRCM_DSI_PLLOUT_SEL_PHI_4;

	val = readl(PRCM_DSI_PLLOUT_SEL);
	val &= ~dsiclk[n].divsel_mask;
	val |= (dsiclk[n].divsel << dsiclk[n].divsel_shift);
	writel(val, PRCM_DSI_PLLOUT_SEL);
}

/* Program the 8-bit divider of DSI escape clock n for @rate. */
static void set_dsiescclk_rate(u8 n, unsigned long rate)
{
	u32 val;
	u32 div;

	div = clock_divider(clock_rate(PRCMU_TVCLK), rate);
	val = readl(PRCM_DSITVCLK_DIV);
	val &= ~dsiescclk[n].div_mask;
	val |= (min(div, (u32)255) << dsiescclk[n].div_shift);
	writel(val, PRCM_DSITVCLK_DIV);
}

/* Set the rate of the given clock; dispatches on the clock class. */
int prcmu_set_clock_rate(u8 clock, unsigned long rate)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		set_clock_rate(clock, rate);
	else if (clock == PRCMU_PLLDSI)
		return set_plldsi_rate(rate);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		set_dsiclk_rate((clock - PRCMU_DSI0CLK), rate);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		set_dsiescclk_rate((clock - PRCMU_DSI0ESCCLK), rate);
	return 0;
}

/*
 * Configure the ESRAM0 state to use in deep sleep (mailbox 4).  The DDR
 * states for the other AP power states are re-sent unchanged.
 */
int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
	if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
		(state < ESRAM0_DEEP_SLEEP_STATE_OFF))
		return -EINVAL;

	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
	writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
		(tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
	writeb(DDR_PWR_STATE_ON,
		(tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
	writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Configure the "hotdog" thermal shutdown threshold (mailbox 4). */
int db8500_prcmu_config_hotdog(u8 threshold)
{
	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
	writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Configure the "hotmon" low/high thermal alarm thresholds (mailbox 4). */
int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
	writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
	writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
		(tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
	writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Set the temperature measurement period, in 32 kHz cycles (mailbox 4). */
static int config_hot_period(u16 val)
{
	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
	writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/*
 * Start periodic temperature measurement every @cycles32k 32 kHz cycles.
 * 0xFFFF is reserved as the "stop" value (see db8500_prcmu_stop_temp_sense).
 */
int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
	if (cycles32k == 0xFFFF)
		return -EINVAL;

	return config_hot_period(cycles32k);
}

/* Stop the periodic temperature measurement. */
int db8500_prcmu_stop_temp_sense(void)
{
	return config_hot_period(0xFFFF);
}

/*
 * Send an A9 watchdog command with its four data bytes through mailbox 4.
 */
static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
{

	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(d0, (tcdm_base + PRCM_REQ_MB4_A9WDOG_0));
	writeb(d1, (tcdm_base + PRCM_REQ_MB4_A9WDOG_1));
	writeb(d2, (tcdm_base + PRCM_REQ_MB4_A9WDOG_2));
	writeb(d3, (tcdm_base + PRCM_REQ_MB4_A9WDOG_3));

	writeb(cmd, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;

}

/* Configure the number of watchdogs and the sleep auto-off behaviour. */
int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
{
	BUG_ON(num == 0 || num > 0xf);
	return prcmu_a9wdog(MB4H_A9WDOG_CONF, num, 0, 0,
		sleep_auto_off ? A9WDOG_AUTO_OFF_EN :
		A9WDOG_AUTO_OFF_DIS);
}

/* Enable A9 watchdog @id. */
int db8500_prcmu_enable_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
}

/* Disable A9 watchdog @id. */
int db8500_prcmu_disable_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
}

/* Kick (refresh) A9 watchdog @id. */
int db8500_prcmu_kick_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
}

/*
 * timeout is 28 bit, in ms.
 */
int db8500_prcmu_load_a9wdog(u8 id, u32 timeout)
{
	return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
		(id & A9WDOG_ID_MASK) |
		/*
		 * Put the lowest 28 bits of timeout at
		 * offset 4. Four first bits are used for id.
		 */
		(u8)((timeout << 4) & 0xf0),
		(u8)((timeout >> 4) & 0xff),
		(u8)((timeout >> 12) & 0xff),
		(u8)((timeout >> 20) & 0xff));
}

/**
 * prcmu_abb_read() - Read register value(s) from the ABB.
 * @slave: The I2C slave address.
 * @reg: The (start) register address.
 * @value: The read out value(s).
 * @size: The number of registers to read.
 *
 * Reads register value(s) from the ABB.
 * @size has to be 1 for the current firmware version.
 */
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
{
	int r;

	if (size != 1)
		return -EINVAL;

	mutex_lock(&mb5_transfer.lock);

	/* Busy-wait until mailbox 5 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
		cpu_relax();

	writeb(0, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5));
	writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
	writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));

	writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);

	if (!wait_for_completion_timeout(&mb5_transfer.work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	} else {
		r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
	}

	/* On success, hand back the byte the firmware read over I2C. */
	if (!r)
		*value = mb5_transfer.ack.value;

	mutex_unlock(&mb5_transfer.lock);

	return r;
}

/**
 * prcmu_abb_write_masked() - Write masked register value(s) to the ABB.
 * @slave: The I2C slave address.
 * @reg: The (start) register address.
 * @value: The value(s) to write.
 * @mask: The mask(s) to use.
 * @size: The number of registers to write.
 *
 * Writes masked register value(s) to the ABB.
 * For each @value, only the bits set to 1 in the corresponding @mask
 * will be written. The other bits are not changed.
 * @size has to be 1 for the current firmware version.
2217 */ 2218int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size) 2219{ 2220 int r; 2221 2222 if (size != 1) 2223 return -EINVAL; 2224 2225 mutex_lock(&mb5_transfer.lock); 2226 2227 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) 2228 cpu_relax(); 2229 2230 writeb(~*mask, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5)); 2231 writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP)); 2232 writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS)); 2233 writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG)); 2234 writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL)); 2235 2236 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET); 2237 2238 if (!wait_for_completion_timeout(&mb5_transfer.work, 2239 msecs_to_jiffies(20000))) { 2240 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", 2241 __func__); 2242 r = -EIO; 2243 } else { 2244 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO); 2245 } 2246 2247 mutex_unlock(&mb5_transfer.lock); 2248 2249 return r; 2250} 2251 2252/** 2253 * prcmu_abb_write() - Write register value(s) to the ABB. 2254 * @slave: The I2C slave address. 2255 * @reg: The (start) register address. 2256 * @value: The value(s) to write. 2257 * @size: The number of registers to write. 2258 * 2259 * Writes register value(s) to the ABB. 2260 * @size has to be 1 for the current firmware version. 
2261 */ 2262int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) 2263{ 2264 u8 mask = ~0; 2265 2266 return prcmu_abb_write_masked(slave, reg, value, &mask, size); 2267} 2268 2269/** 2270 * prcmu_ac_wake_req - should be called whenever ARM wants to wakeup Modem 2271 */ 2272void prcmu_ac_wake_req(void) 2273{ 2274 u32 val; 2275 u32 status; 2276 2277 mutex_lock(&mb0_transfer.ac_wake_lock); 2278 2279 val = readl(PRCM_HOSTACCESS_REQ); 2280 if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ) 2281 goto unlock_and_return; 2282 2283 atomic_set(&ac_wake_req_state, 1); 2284 2285retry: 2286 writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), PRCM_HOSTACCESS_REQ); 2287 2288 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, 2289 msecs_to_jiffies(5000))) { 2290 pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n", 2291 __func__); 2292 goto unlock_and_return; 2293 } 2294 2295 /* 2296 * The modem can generate an AC_WAKE_ACK, and then still go to sleep. 2297 * As a workaround, we wait, and then check that the modem is indeed 2298 * awake (in terms of the value of the PRCM_MOD_AWAKE_STATUS 2299 * register, which may not be the whole truth). 
2300 */ 2301 udelay(400); 2302 status = (readl(PRCM_MOD_AWAKE_STATUS) & BITS(0, 2)); 2303 if (status != (PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE | 2304 PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE)) { 2305 pr_err("prcmu: %s received ack, but modem not awake (0x%X).\n", 2306 __func__, status); 2307 udelay(1200); 2308 writel(val, PRCM_HOSTACCESS_REQ); 2309 if (wait_for_completion_timeout(&mb0_transfer.ac_wake_work, 2310 msecs_to_jiffies(5000))) 2311 goto retry; 2312 pr_crit("prcmu: %s timed out (5 s) waiting for AC_SLEEP_ACK.\n", 2313 __func__); 2314 } 2315 2316unlock_and_return: 2317 mutex_unlock(&mb0_transfer.ac_wake_lock); 2318} 2319 2320/** 2321 * prcmu_ac_sleep_req - called when ARM no longer needs to talk to modem 2322 */ 2323void prcmu_ac_sleep_req() 2324{ 2325 u32 val; 2326 2327 mutex_lock(&mb0_transfer.ac_wake_lock); 2328 2329 val = readl(PRCM_HOSTACCESS_REQ); 2330 if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)) 2331 goto unlock_and_return; 2332 2333 writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), 2334 PRCM_HOSTACCESS_REQ); 2335 2336 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, 2337 msecs_to_jiffies(5000))) { 2338 pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n", 2339 __func__); 2340 } 2341 2342 atomic_set(&ac_wake_req_state, 0); 2343 2344unlock_and_return: 2345 mutex_unlock(&mb0_transfer.ac_wake_lock); 2346} 2347 2348bool db8500_prcmu_is_ac_wake_requested(void) 2349{ 2350 return (atomic_read(&ac_wake_req_state) != 0); 2351} 2352 2353/** 2354 * db8500_prcmu_system_reset - System reset 2355 * 2356 * Saves the reset reason code and then sets the APE_SOFTRST register which 2357 * fires interrupt to fw 2358 */ 2359void db8500_prcmu_system_reset(u16 reset_code) 2360{ 2361 writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON)); 2362 writel(1, PRCM_APE_SOFTRST); 2363} 2364 2365/** 2366 * db8500_prcmu_get_reset_code - Retrieve SW reset reason code 2367 * 2368 * Retrieves the reset reason code stored by prcmu_system_reset() before 2369 * 
last restart. 2370 */ 2371u16 db8500_prcmu_get_reset_code(void) 2372{ 2373 return readw(tcdm_base + PRCM_SW_RST_REASON); 2374} 2375 2376/** 2377 * db8500_prcmu_reset_modem - ask the PRCMU to reset modem 2378 */ 2379void db8500_prcmu_modem_reset(void) 2380{ 2381 mutex_lock(&mb1_transfer.lock); 2382 2383 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) 2384 cpu_relax(); 2385 2386 writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); 2387 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET); 2388 wait_for_completion(&mb1_transfer.work); 2389 2390 /* 2391 * No need to check return from PRCMU as modem should go in reset state 2392 * This state is already managed by upper layer 2393 */ 2394 2395 mutex_unlock(&mb1_transfer.lock); 2396} 2397 2398static void ack_dbb_wakeup(void) 2399{ 2400 unsigned long flags; 2401 2402 spin_lock_irqsave(&mb0_transfer.lock, flags); 2403 2404 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) 2405 cpu_relax(); 2406 2407 writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0)); 2408 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET); 2409 2410 spin_unlock_irqrestore(&mb0_transfer.lock, flags); 2411} 2412 2413static inline void print_unknown_header_warning(u8 n, u8 header) 2414{ 2415 pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n", 2416 header, n); 2417} 2418 2419static bool read_mailbox_0(void) 2420{ 2421 bool r; 2422 u32 ev; 2423 unsigned int n; 2424 u8 header; 2425 2426 header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0); 2427 switch (header) { 2428 case MB0H_WAKEUP_EXE: 2429 case MB0H_WAKEUP_SLEEP: 2430 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1) 2431 ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500); 2432 else 2433 ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500); 2434 2435 if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK)) 2436 complete(&mb0_transfer.ac_wake_work); 2437 if (ev & WAKEUP_BIT_SYSCLK_OK) 2438 complete(&mb3_transfer.sysclk_work); 2439 2440 ev &= mb0_transfer.req.dbb_irqs; 2441 2442 for (n = 0; 
n < NUM_PRCMU_WAKEUPS; n++) { 2443 if (ev & prcmu_irq_bit[n]) 2444 generic_handle_irq(IRQ_PRCMU_BASE + n); 2445 } 2446 r = true; 2447 break; 2448 default: 2449 print_unknown_header_warning(0, header); 2450 r = false; 2451 break; 2452 } 2453 writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR); 2454 return r; 2455} 2456 2457static bool read_mailbox_1(void) 2458{ 2459 mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1); 2460 mb1_transfer.ack.arm_opp = readb(tcdm_base + 2461 PRCM_ACK_MB1_CURRENT_ARM_OPP); 2462 mb1_transfer.ack.ape_opp = readb(tcdm_base + 2463 PRCM_ACK_MB1_CURRENT_APE_OPP); 2464 mb1_transfer.ack.ape_voltage_status = readb(tcdm_base + 2465 PRCM_ACK_MB1_APE_VOLTAGE_STATUS); 2466 writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR); 2467 complete(&mb1_transfer.work); 2468 return false; 2469} 2470 2471static bool read_mailbox_2(void) 2472{ 2473 mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS); 2474 writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR); 2475 complete(&mb2_transfer.work); 2476 return false; 2477} 2478 2479static bool read_mailbox_3(void) 2480{ 2481 writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR); 2482 return false; 2483} 2484 2485static bool read_mailbox_4(void) 2486{ 2487 u8 header; 2488 bool do_complete = true; 2489 2490 header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4); 2491 switch (header) { 2492 case MB4H_MEM_ST: 2493 case MB4H_HOTDOG: 2494 case MB4H_HOTMON: 2495 case MB4H_HOT_PERIOD: 2496 case MB4H_A9WDOG_CONF: 2497 case MB4H_A9WDOG_EN: 2498 case MB4H_A9WDOG_DIS: 2499 case MB4H_A9WDOG_LOAD: 2500 case MB4H_A9WDOG_KICK: 2501 break; 2502 default: 2503 print_unknown_header_warning(4, header); 2504 do_complete = false; 2505 break; 2506 } 2507 2508 writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR); 2509 2510 if (do_complete) 2511 complete(&mb4_transfer.work); 2512 2513 return false; 2514} 2515 2516static bool read_mailbox_5(void) 2517{ 2518 mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS); 2519 mb5_transfer.ack.value = readb(tcdm_base + 
PRCM_ACK_MB5_I2C_VAL); 2520 writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR); 2521 complete(&mb5_transfer.work); 2522 return false; 2523} 2524 2525static bool read_mailbox_6(void) 2526{ 2527 writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR); 2528 return false; 2529} 2530 2531static bool read_mailbox_7(void) 2532{ 2533 writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR); 2534 return false; 2535} 2536 2537static bool (* const read_mailbox[NUM_MB])(void) = { 2538 read_mailbox_0, 2539 read_mailbox_1, 2540 read_mailbox_2, 2541 read_mailbox_3, 2542 read_mailbox_4, 2543 read_mailbox_5, 2544 read_mailbox_6, 2545 read_mailbox_7 2546}; 2547 2548static irqreturn_t prcmu_irq_handler(int irq, void *data) 2549{ 2550 u32 bits; 2551 u8 n; 2552 irqreturn_t r; 2553 2554 bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS); 2555 if (unlikely(!bits)) 2556 return IRQ_NONE; 2557 2558 r = IRQ_HANDLED; 2559 for (n = 0; bits; n++) { 2560 if (bits & MBOX_BIT(n)) { 2561 bits -= MBOX_BIT(n); 2562 if (read_mailbox[n]()) 2563 r = IRQ_WAKE_THREAD; 2564 } 2565 } 2566 return r; 2567} 2568 2569static irqreturn_t prcmu_irq_thread_fn(int irq, void *data) 2570{ 2571 ack_dbb_wakeup(); 2572 return IRQ_HANDLED; 2573} 2574 2575static void prcmu_mask_work(struct work_struct *work) 2576{ 2577 unsigned long flags; 2578 2579 spin_lock_irqsave(&mb0_transfer.lock, flags); 2580 2581 config_wakeups(); 2582 2583 spin_unlock_irqrestore(&mb0_transfer.lock, flags); 2584} 2585 2586static void prcmu_irq_mask(struct irq_data *d) 2587{ 2588 unsigned long flags; 2589 2590 spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags); 2591 2592 mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE]; 2593 2594 spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags); 2595 2596 if (d->irq != IRQ_PRCMU_CA_SLEEP) 2597 schedule_work(&mb0_transfer.mask_work); 2598} 2599 2600static void prcmu_irq_unmask(struct irq_data *d) 2601{ 2602 unsigned long flags; 2603 2604 spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags); 2605 2606 mb0_transfer.req.dbb_irqs |= 
prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE]; 2607 2608 spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags); 2609 2610 if (d->irq != IRQ_PRCMU_CA_SLEEP) 2611 schedule_work(&mb0_transfer.mask_work); 2612} 2613 2614static void noop(struct irq_data *d) 2615{ 2616} 2617 2618static struct irq_chip prcmu_irq_chip = { 2619 .name = "prcmu", 2620 .irq_disable = prcmu_irq_mask, 2621 .irq_ack = noop, 2622 .irq_mask = prcmu_irq_mask, 2623 .irq_unmask = prcmu_irq_unmask, 2624}; 2625 2626static char *fw_project_name(u8 project) 2627{ 2628 switch (project) { 2629 case PRCMU_FW_PROJECT_U8500: 2630 return "U8500"; 2631 case PRCMU_FW_PROJECT_U8500_C2: 2632 return "U8500 C2"; 2633 case PRCMU_FW_PROJECT_U9500: 2634 return "U9500"; 2635 case PRCMU_FW_PROJECT_U9500_C2: 2636 return "U9500 C2"; 2637 case PRCMU_FW_PROJECT_U8520: 2638 return "U8520"; 2639 case PRCMU_FW_PROJECT_U8420: 2640 return "U8420"; 2641 default: 2642 return "Unknown"; 2643 } 2644} 2645 2646void __init db8500_prcmu_early_init(void) 2647{ 2648 unsigned int i; 2649 if (cpu_is_u8500v2()) { 2650 void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K); 2651 2652 if (tcpm_base != NULL) { 2653 u32 version; 2654 version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET); 2655 fw_info.version.project = version & 0xFF; 2656 fw_info.version.api_version = (version >> 8) & 0xFF; 2657 fw_info.version.func_version = (version >> 16) & 0xFF; 2658 fw_info.version.errata = (version >> 24) & 0xFF; 2659 fw_info.valid = true; 2660 pr_info("PRCMU firmware: %s, version %d.%d.%d\n", 2661 fw_project_name(fw_info.version.project), 2662 (version >> 8) & 0xFF, (version >> 16) & 0xFF, 2663 (version >> 24) & 0xFF); 2664 iounmap(tcpm_base); 2665 } 2666 2667 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE); 2668 } else { 2669 pr_err("prcmu: Unsupported chip version\n"); 2670 BUG(); 2671 } 2672 2673 spin_lock_init(&mb0_transfer.lock); 2674 spin_lock_init(&mb0_transfer.dbb_irqs_lock); 2675 mutex_init(&mb0_transfer.ac_wake_lock); 2676 
init_completion(&mb0_transfer.ac_wake_work); 2677 mutex_init(&mb1_transfer.lock); 2678 init_completion(&mb1_transfer.work); 2679 mb1_transfer.ape_opp = APE_NO_CHANGE; 2680 mutex_init(&mb2_transfer.lock); 2681 init_completion(&mb2_transfer.work); 2682 spin_lock_init(&mb2_transfer.auto_pm_lock); 2683 spin_lock_init(&mb3_transfer.lock); 2684 mutex_init(&mb3_transfer.sysclk_lock); 2685 init_completion(&mb3_transfer.sysclk_work); 2686 mutex_init(&mb4_transfer.lock); 2687 init_completion(&mb4_transfer.work); 2688 mutex_init(&mb5_transfer.lock); 2689 init_completion(&mb5_transfer.work); 2690 2691 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); 2692 2693 /* Initalize irqs. */ 2694 for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) { 2695 unsigned int irq; 2696 2697 irq = IRQ_PRCMU_BASE + i; 2698 irq_set_chip_and_handler(irq, &prcmu_irq_chip, 2699 handle_simple_irq); 2700 set_irq_flags(irq, IRQF_VALID); 2701 } 2702} 2703 2704static void __init init_prcm_registers(void) 2705{ 2706 u32 val; 2707 2708 val = readl(PRCM_A9PL_FORCE_CLKEN); 2709 val &= ~(PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN | 2710 PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN); 2711 writel(val, (PRCM_A9PL_FORCE_CLKEN)); 2712} 2713 2714/* 2715 * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC 2716 */ 2717static struct regulator_consumer_supply db8500_vape_consumers[] = { 2718 REGULATOR_SUPPLY("v-ape", NULL), 2719 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"), 2720 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"), 2721 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"), 2722 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"), 2723 /* "v-mmc" changed to "vcore" in the mainline kernel */ 2724 REGULATOR_SUPPLY("vcore", "sdi0"), 2725 REGULATOR_SUPPLY("vcore", "sdi1"), 2726 REGULATOR_SUPPLY("vcore", "sdi2"), 2727 REGULATOR_SUPPLY("vcore", "sdi3"), 2728 REGULATOR_SUPPLY("vcore", "sdi4"), 2729 REGULATOR_SUPPLY("v-dma", "dma40.0"), 2730 REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"), 2731 /* "v-uart" changed to "vcore" in the mainline kernel */ 
2732 REGULATOR_SUPPLY("vcore", "uart0"), 2733 REGULATOR_SUPPLY("vcore", "uart1"), 2734 REGULATOR_SUPPLY("vcore", "uart2"), 2735 REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"), 2736 REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"), 2737}; 2738 2739static struct regulator_consumer_supply db8500_vsmps2_consumers[] = { 2740 REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"), 2741 /* AV8100 regulator */ 2742 REGULATOR_SUPPLY("hdmi_1v8", "0-0070"), 2743}; 2744 2745static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = { 2746 REGULATOR_SUPPLY("vsupply", "b2r2_bus"), 2747 REGULATOR_SUPPLY("vsupply", "mcde"), 2748}; 2749 2750/* SVA MMDSP regulator switch */ 2751static struct regulator_consumer_supply db8500_svammdsp_consumers[] = { 2752 REGULATOR_SUPPLY("sva-mmdsp", "cm_control"), 2753}; 2754 2755/* SVA pipe regulator switch */ 2756static struct regulator_consumer_supply db8500_svapipe_consumers[] = { 2757 REGULATOR_SUPPLY("sva-pipe", "cm_control"), 2758}; 2759 2760/* SIA MMDSP regulator switch */ 2761static struct regulator_consumer_supply db8500_siammdsp_consumers[] = { 2762 REGULATOR_SUPPLY("sia-mmdsp", "cm_control"), 2763}; 2764 2765/* SIA pipe regulator switch */ 2766static struct regulator_consumer_supply db8500_siapipe_consumers[] = { 2767 REGULATOR_SUPPLY("sia-pipe", "cm_control"), 2768}; 2769 2770static struct regulator_consumer_supply db8500_sga_consumers[] = { 2771 REGULATOR_SUPPLY("v-mali", NULL), 2772}; 2773 2774/* ESRAM1 and 2 regulator switch */ 2775static struct regulator_consumer_supply db8500_esram12_consumers[] = { 2776 REGULATOR_SUPPLY("esram12", "cm_control"), 2777}; 2778 2779/* ESRAM3 and 4 regulator switch */ 2780static struct regulator_consumer_supply db8500_esram34_consumers[] = { 2781 REGULATOR_SUPPLY("v-esram34", "mcde"), 2782 REGULATOR_SUPPLY("esram34", "cm_control"), 2783 REGULATOR_SUPPLY("lcla_esram", "dma40.0"), 2784}; 2785 2786static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = { 2787 [DB8500_REGULATOR_VAPE] = { 2788 
.constraints = { 2789 .name = "db8500-vape", 2790 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2791 .always_on = true, 2792 }, 2793 .consumer_supplies = db8500_vape_consumers, 2794 .num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers), 2795 }, 2796 [DB8500_REGULATOR_VARM] = { 2797 .constraints = { 2798 .name = "db8500-varm", 2799 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2800 }, 2801 }, 2802 [DB8500_REGULATOR_VMODEM] = { 2803 .constraints = { 2804 .name = "db8500-vmodem", 2805 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2806 }, 2807 }, 2808 [DB8500_REGULATOR_VPLL] = { 2809 .constraints = { 2810 .name = "db8500-vpll", 2811 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2812 }, 2813 }, 2814 [DB8500_REGULATOR_VSMPS1] = { 2815 .constraints = { 2816 .name = "db8500-vsmps1", 2817 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2818 }, 2819 }, 2820 [DB8500_REGULATOR_VSMPS2] = { 2821 .constraints = { 2822 .name = "db8500-vsmps2", 2823 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2824 }, 2825 .consumer_supplies = db8500_vsmps2_consumers, 2826 .num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers), 2827 }, 2828 [DB8500_REGULATOR_VSMPS3] = { 2829 .constraints = { 2830 .name = "db8500-vsmps3", 2831 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2832 }, 2833 }, 2834 [DB8500_REGULATOR_VRF1] = { 2835 .constraints = { 2836 .name = "db8500-vrf1", 2837 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2838 }, 2839 }, 2840 [DB8500_REGULATOR_SWITCH_SVAMMDSP] = { 2841 /* dependency to u8500-vape is handled outside regulator framework */ 2842 .constraints = { 2843 .name = "db8500-sva-mmdsp", 2844 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2845 }, 2846 .consumer_supplies = db8500_svammdsp_consumers, 2847 .num_consumer_supplies = ARRAY_SIZE(db8500_svammdsp_consumers), 2848 }, 2849 [DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = { 2850 .constraints = { 2851 /* "ret" means "retention" */ 2852 .name = "db8500-sva-mmdsp-ret", 2853 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2854 }, 2855 }, 2856 
[DB8500_REGULATOR_SWITCH_SVAPIPE] = { 2857 /* dependency to u8500-vape is handled outside regulator framework */ 2858 .constraints = { 2859 .name = "db8500-sva-pipe", 2860 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2861 }, 2862 .consumer_supplies = db8500_svapipe_consumers, 2863 .num_consumer_supplies = ARRAY_SIZE(db8500_svapipe_consumers), 2864 }, 2865 [DB8500_REGULATOR_SWITCH_SIAMMDSP] = { 2866 /* dependency to u8500-vape is handled outside regulator framework */ 2867 .constraints = { 2868 .name = "db8500-sia-mmdsp", 2869 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2870 }, 2871 .consumer_supplies = db8500_siammdsp_consumers, 2872 .num_consumer_supplies = ARRAY_SIZE(db8500_siammdsp_consumers), 2873 }, 2874 [DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = { 2875 .constraints = { 2876 .name = "db8500-sia-mmdsp-ret", 2877 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2878 }, 2879 }, 2880 [DB8500_REGULATOR_SWITCH_SIAPIPE] = { 2881 /* dependency to u8500-vape is handled outside regulator framework */ 2882 .constraints = { 2883 .name = "db8500-sia-pipe", 2884 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2885 }, 2886 .consumer_supplies = db8500_siapipe_consumers, 2887 .num_consumer_supplies = ARRAY_SIZE(db8500_siapipe_consumers), 2888 }, 2889 [DB8500_REGULATOR_SWITCH_SGA] = { 2890 .supply_regulator = "db8500-vape", 2891 .constraints = { 2892 .name = "db8500-sga", 2893 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2894 }, 2895 .consumer_supplies = db8500_sga_consumers, 2896 .num_consumer_supplies = ARRAY_SIZE(db8500_sga_consumers), 2897 2898 }, 2899 [DB8500_REGULATOR_SWITCH_B2R2_MCDE] = { 2900 .supply_regulator = "db8500-vape", 2901 .constraints = { 2902 .name = "db8500-b2r2-mcde", 2903 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2904 }, 2905 .consumer_supplies = db8500_b2r2_mcde_consumers, 2906 .num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers), 2907 }, 2908 [DB8500_REGULATOR_SWITCH_ESRAM12] = { 2909 /* 2910 * esram12 is set in retention and supplied by Vsafe when Vape is off, 
2911 * no need to hold Vape 2912 */ 2913 .constraints = { 2914 .name = "db8500-esram12", 2915 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2916 }, 2917 .consumer_supplies = db8500_esram12_consumers, 2918 .num_consumer_supplies = ARRAY_SIZE(db8500_esram12_consumers), 2919 }, 2920 [DB8500_REGULATOR_SWITCH_ESRAM12RET] = { 2921 .constraints = { 2922 .name = "db8500-esram12-ret", 2923 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2924 }, 2925 }, 2926 [DB8500_REGULATOR_SWITCH_ESRAM34] = { 2927 /* 2928 * esram34 is set in retention and supplied by Vsafe when Vape is off, 2929 * no need to hold Vape 2930 */ 2931 .constraints = { 2932 .name = "db8500-esram34", 2933 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2934 }, 2935 .consumer_supplies = db8500_esram34_consumers, 2936 .num_consumer_supplies = ARRAY_SIZE(db8500_esram34_consumers), 2937 }, 2938 [DB8500_REGULATOR_SWITCH_ESRAM34RET] = { 2939 .constraints = { 2940 .name = "db8500-esram34-ret", 2941 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 2942 }, 2943 }, 2944}; 2945 2946static struct mfd_cell db8500_prcmu_devs[] = { 2947 { 2948 .name = "db8500-prcmu-regulators", 2949 .platform_data = &db8500_regulators, 2950 .pdata_size = sizeof(db8500_regulators), 2951 }, 2952 { 2953 .name = "cpufreq-u8500", 2954 }, 2955}; 2956 2957/** 2958 * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic 2959 * 2960 */ 2961static int __init db8500_prcmu_probe(struct platform_device *pdev) 2962{ 2963 int err = 0; 2964 2965 if (ux500_is_svp()) 2966 return -ENODEV; 2967 2968 init_prcm_registers(); 2969 2970 /* Clean up the mailbox interrupts after pre-kernel code. 
*/ 2971 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); 2972 2973 err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler, 2974 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); 2975 if (err < 0) { 2976 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n"); 2977 err = -EBUSY; 2978 goto no_irq_return; 2979 } 2980 2981 if (cpu_is_u8500v20_or_later()) 2982 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); 2983 2984 err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs, 2985 ARRAY_SIZE(db8500_prcmu_devs), NULL, 2986 0); 2987 2988 if (err) 2989 pr_err("prcmu: Failed to add subdevices\n"); 2990 else 2991 pr_info("DB8500 PRCMU initialized\n"); 2992 2993no_irq_return: 2994 return err; 2995} 2996 2997static struct platform_driver db8500_prcmu_driver = { 2998 .driver = { 2999 .name = "db8500-prcmu", 3000 .owner = THIS_MODULE, 3001 }, 3002}; 3003 3004static int __init db8500_prcmu_init(void) 3005{ 3006 return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe); 3007} 3008 3009arch_initcall(db8500_prcmu_init); 3010 3011MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>"); 3012MODULE_DESCRIPTION("DB8500 PRCM Unit driver"); 3013MODULE_LICENSE("GPL v2"); 3014