/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_hw.h"
#include "nouveau_pm.h"
#include "nouveau_hwsq.h"
#include "nv50_display.h"

enum clk_src {
	clk_src_crystal,
	clk_src_href,
	clk_src_hclk,
	clk_src_hclkm3,
	clk_src_hclkm3d2,
	clk_src_host,
	clk_src_nvclk,
	clk_src_sclk,
	clk_src_mclk,
	clk_src_vdec,
	clk_src_dom6
};

static u32 read_clk(struct drm_device *, enum clk_src);

static u32
read_div(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (dev_priv->chipset) {
	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
	case 0x84:
	case 0x86:
	case 0x98:
	case 0xa0:
		return nv_rd32(dev, 0x004700);
	case 0x92:
	case 0x94:
	case 0x96:
		return nv_rd32(dev, 0x004800);
	default:
		return 0x00000000;
	}
}

static u32
read_pll_src(struct drm_device *dev, u32 base)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 coef, ref = read_clk(dev, clk_src_crystal);
	u32 rsel = nv_rd32(dev, 0x00e18c);
	int P, N, M, id;

	switch (dev_priv->chipset) {
	case 0x50:
	case 0xa0:
		switch (base) {
		case 0x4020:
		case 0x4028: id = !!(rsel & 0x00000004); break;
		case 0x4008: id = !!(rsel & 0x00000008); break;
		case 0x4030: id = 0; break;
		default:
			NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
			return 0;
		}

		coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
		ref *= (coef & 0x01000000) ? 2 : 4;
		P    = (coef & 0x00070000) >> 16;
		N    = ((coef & 0x0000ff00) >>  8) + 1;
		M    = ((coef & 0x000000ff) >>  0) + 1;
		break;
	case 0x84:
	case 0x86:
	case 0x92:
		coef = nv_rd32(dev, 0x00e81c);
		P    = (coef & 0x00070000) >> 16;
		N    = (coef & 0x0000ff00) >>  8;
		M    = (coef & 0x000000ff) >>  0;
		break;
	case 0x94:
	case 0x96:
	case 0x98:
		rsel = nv_rd32(dev, 0x00c050);
		switch (base) {
		case 0x4020: rsel = (rsel & 0x00000003) >>  0; break;
		case 0x4008: rsel = (rsel & 0x0000000c) >>  2; break;
		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
		case 0x4030: rsel = 3; break;
		default:
			NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
			return 0;
		}

		switch (rsel) {
		case 0: id = 1; break;
		case 1: return read_clk(dev, clk_src_crystal);
		case 2: return read_clk(dev, clk_src_href);
		case 3: id = 0; break;
		}

		coef =  nv_rd32(dev, 0x00e81c + (id * 0x28));
		P    = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
		P   += (coef & 0x00070000) >> 16;
		N    = (coef & 0x0000ff00) >>  8;
		M    = (coef & 0x000000ff) >>  0;
		break;
	default:
		BUG_ON(1);
	}

	if (M)
		return (ref * N / M) >> P;
	return 0;
}

static u32
read_pll_ref(struct drm_device *dev, u32 base)
{
	u32 src, mast = nv_rd32(dev, 0x00c040);

	switch (base) {
	case 0x004028:
		src = !!(mast & 0x00200000);
		break;
	case 0x004020:
		src = !!(mast & 0x00400000);
		break;
	case 0x004008:
		src = !!(mast & 0x00010000);
		break;
	case 0x004030:
		src = !!(mast & 0x02000000);
		break;
	case 0x00e810:
		return read_clk(dev, clk_src_crystal);
	default:
		NV_ERROR(dev, "bad pll 0x%06x\n", base);
		return 0;
	}

	if (src)
		return read_clk(dev, clk_src_href);
	return read_pll_src(dev, base);
}

static u32
read_pll(struct drm_device *dev, u32 base)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 mast = nv_rd32(dev, 0x00c040);
	u32 ctrl = nv_rd32(dev, base + 0);
	u32 coef = nv_rd32(dev, base + 4);
	u32 ref = read_pll_ref(dev, base);
	u32 clk = 0;
	int N1, N2, M1, M2;

	if (base == 0x004028 && (mast & 0x00100000)) {
		/* wtf, appears to only disable post-divider on nva0 */
		if (dev_priv->chipset != 0xa0)
			return read_clk(dev, clk_src_dom6);
	}

	N2 = (coef & 0xff000000) >> 24;
	M2 = (coef & 0x00ff0000) >> 16;
	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	if ((ctrl & 0x80000000) && M1) {
		clk = ref * N1 / M1;
		if ((ctrl & 0x40000100) == 0x40000000) {
			if (M2)
				clk = clk * N2 / M2;
			else
				clk = 0;
		}
	}

	return clk;
}

static u32
read_clk(struct drm_device *dev, enum clk_src src)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 mast = nv_rd32(dev, 0x00c040);
	u32 P = 0;

	switch (src) {
	case clk_src_crystal:
		return dev_priv->crystal;
	case clk_src_href:
		return 100000; /* PCIE reference clock */
	case clk_src_hclk:
		return read_clk(dev, clk_src_href) * 27778 / 10000;
	case clk_src_hclkm3:
		return read_clk(dev, clk_src_hclk) * 3;
	case clk_src_hclkm3d2:
		return read_clk(dev, clk_src_hclk) * 3 / 2;
	case clk_src_host:
		switch (mast & 0x30000000) {
		case 0x00000000: return read_clk(dev, clk_src_href);
		case 0x10000000: break;
		case 0x20000000: /* !0x50 */
		case 0x30000000: return read_clk(dev, clk_src_hclk);
		}
		break;
	case clk_src_nvclk:
		if (!(mast & 0x00100000))
			P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
		switch (mast & 0x00000003) {
		case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
		case 0x00000001: return read_clk(dev, clk_src_dom6);
		case 0x00000002: return read_pll(dev, 0x004020) >> P;
		case 0x00000003: return read_pll(dev, 0x004028) >> P;
		}
		break;
	case clk_src_sclk:
		P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000080)
				return read_clk(dev, clk_src_host) >> P;
			return read_clk(dev, clk_src_crystal) >> P;
		case 0x00000010: break;
		case 0x00000020: return read_pll(dev, 0x004028) >> P;
		case 0x00000030: return read_pll(dev, 0x004020) >> P;
		}
		break;
	case clk_src_mclk:
		P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
		if (nv_rd32(dev, 0x004008) & 0x00000200) {
			switch (mast & 0x0000c000) {
			case 0x00000000:
				return read_clk(dev, clk_src_crystal) >> P;
			case 0x00008000:
			case 0x0000c000:
				return read_clk(dev, clk_src_href) >> P;
			}
		} else {
			return read_pll(dev, 0x004008) >> P;
		}
		break;
	case clk_src_vdec:
		P = (read_div(dev) & 0x00000700) >> 8;
		switch (dev_priv->chipset) {
		case 0x84:
		case 0x86:
		case 0x92:
		case 0x94:
		case 0x96:
		case 0xa0:
			switch (mast & 0x00000c00) {
			case 0x00000000:
				if (dev_priv->chipset == 0xa0) /* wtf?? */
					return read_clk(dev, clk_src_nvclk) >> P;
				return read_clk(dev, clk_src_crystal) >> P;
			case 0x00000400:
				return 0;
			case 0x00000800:
				if (mast & 0x01000000)
					return read_pll(dev, 0x004028) >> P;
				return read_pll(dev, 0x004030) >> P;
			case 0x00000c00:
				return read_clk(dev, clk_src_nvclk) >> P;
			}
			break;
		case 0x98:
			switch (mast & 0x00000c00) {
			case 0x00000000:
				return read_clk(dev, clk_src_nvclk) >> P;
			case 0x00000400:
				return 0;
			case 0x00000800:
				return read_clk(dev, clk_src_hclkm3d2) >> P;
			case 0x00000c00:
				return read_clk(dev, clk_src_mclk) >> P;
			}
			break;
		}
		break;
	case clk_src_dom6:
		switch (dev_priv->chipset) {
		case 0x50:
		case 0xa0:
			return read_pll(dev, 0x00e810) >> 2;
		case 0x84:
		case 0x86:
		case 0x92:
		case 0x94:
		case 0x96:
		case 0x98:
			P = (read_div(dev) & 0x00000007) >> 0;
			switch (mast & 0x0c000000) {
			case 0x00000000: return read_clk(dev, clk_src_href);
			case 0x04000000: break;
			case 0x08000000: return read_clk(dev, clk_src_hclk);
			case 0x0c000000:
				return read_clk(dev, clk_src_hclkm3) >> P;
			}
			break;
		default:
			break;
		}
	default:
		break;
	}

	NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
	return 0;
}

int
nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset == 0xaa ||
	    dev_priv->chipset == 0xac)
		return 0;

	perflvl->core   = read_clk(dev, clk_src_nvclk);
	perflvl->shader = read_clk(dev, clk_src_sclk);
	perflvl->memory = read_clk(dev, clk_src_mclk);
	if (dev_priv->chipset != 0x50) {
		perflvl->vdec = read_clk(dev, clk_src_vdec);
		perflvl->dom6 = read_clk(dev, clk_src_dom6);
	}

	return 0;
}

struct nv50_pm_state {
	struct nouveau_pm_level *perflvl;
	struct hwsq_ucode eclk_hwsq;
	struct hwsq_ucode mclk_hwsq;
	u32 mscript;
	u32 mmast;
	u32 mctrl;
	u32 mcoef;
};
static u32
calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
	 u32 clk, int *N1, int *M1, int *log2P)
{
	struct nouveau_pll_vals coef;
	int ret;

	ret = get_pll_limits(dev, reg, pll);
	if (ret)
		return 0;

	pll->vco2.maxfreq = 0;
	pll->refclk = read_pll_ref(dev, reg);
	if (!pll->refclk)
		return 0;

	ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
	if (ret == 0)
		return 0;

	*N1 = coef.N1;
	*M1 = coef.M1;
	*log2P = coef.log2P;
	return ret;
}

static inline u32
calc_div(u32 src, u32 target, int *div)
{
	u32 clk0 = src, clk1 = src;
	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

static inline u32
clk_same(u32 a, u32 b)
{
	return ((a / 1000) == (b / 1000));
}

static void
mclk_precharge(struct nouveau_mem_exec_func *exec)
{
	struct nv50_pm_state *info = exec->priv;
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;

	hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
}

static void
mclk_refresh(struct nouveau_mem_exec_func *exec)
{
	struct nv50_pm_state *info = exec->priv;
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;

	hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
}

static void
mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
{
	struct nv50_pm_state *info = exec->priv;
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;

	hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
}

static void
mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
{
	struct nv50_pm_state *info = exec->priv;
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;

	hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
}

static void
mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
{
	struct nv50_pm_state *info = exec->priv;
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;

	if (nsec > 1000)
		hwsq_usec(hwsq, (nsec + 500) / 1000);
}

static u32
mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
{
	if (mr <= 1)
		return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
	if (mr <= 3)
		return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
	return 0;
}

static void
mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
{
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
	struct nv50_pm_state *info = exec->priv;
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;

	if (mr <= 1) {
		if (dev_priv->vram_rank_B)
			hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
		hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
	} else
	if (mr <= 3) {
		if (dev_priv->vram_rank_B)
			hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
		hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
	}
}

static void
mclk_clock_set(struct nouveau_mem_exec_func *exec)
{
	struct nv50_pm_state *info = exec->priv;
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
	u32 ctrl = nv_rd32(exec->dev, 0x004008);

	info->mmast  = nv_rd32(exec->dev, 0x00c040);
	info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
	info->mmast |=  0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */

	hwsq_wr32(hwsq, 0xc040, info->mmast);
	hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
	if (info->mctrl & 0x80000000)
		hwsq_wr32(hwsq, 0x400c, info->mcoef);
	hwsq_wr32(hwsq, 0x4008, info->mctrl);
}

static void
mclk_timing_set(struct nouveau_mem_exec_func *exec)
{
	struct drm_device *dev = exec->dev;
	struct nv50_pm_state *info = exec->priv;
	struct nouveau_pm_level *perflvl = info->perflvl;
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
	int i;

	for (i = 0; i < 9; i++) {
		u32 reg = 0x100220 + (i * 4);
		u32 val = nv_rd32(dev, reg);
		if (val != perflvl->timing.reg[i])
			hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
	}
}

static int
calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
	  struct nv50_pm_state *info)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 crtc_mask = nv50_display_active_crtcs(dev);
	struct nouveau_mem_exec_func exec = {
		.dev = dev,
		.precharge = mclk_precharge,
		.refresh = mclk_refresh,
		.refresh_auto = mclk_refresh_auto,
		.refresh_self = mclk_refresh_self,
		.wait = mclk_wait,
		.mrg = mclk_mrg,
		.mrs = mclk_mrs,
		.clock_set = mclk_clock_set,
		.timing_set = mclk_timing_set,
		.priv = info
	};
	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
	struct pll_lims pll;
	int N, M, P;
	int ret;

	/* use pcie refclock if possible, otherwise use mpll.  read the pll
	 * limits up front so that log2p_bias is valid in both paths below
	 */
	ret = get_pll_limits(dev, 0x004008, &pll);
	if (ret)
		return ret;

	info->mctrl  = nv_rd32(dev, 0x004008);
	info->mctrl &= ~0x81ff0200;
	if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
		info->mctrl |= 0x00000200 | (pll.log2p_bias << 19);
	} else {
		ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
		if (ret == 0)
			return -EINVAL;

		info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
		info->mctrl |= pll.log2p_bias << 19;
		info->mcoef  = (N << 8) | M;
	}

	/* build the ucode which will reclock the memory for us */
	hwsq_init(hwsq);
	if (crtc_mask) {
		hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
		hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
	}
	if (dev_priv->chipset >= 0x92)
		hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
	hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */

	ret = nouveau_mem_exec(&exec, perflvl);
	if (ret)
		return ret;

	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
	hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
	if (dev_priv->chipset >= 0x92)
		hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
	hwsq_fini(hwsq);
	return 0;
}

void *
nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_pm_state *info;
	struct hwsq_ucode *hwsq;
	struct pll_lims pll;
	u32 out, mast, divs, ctrl;
	int clk, ret = -EINVAL;
	int N, M, P1, P2;

	if (dev_priv->chipset == 0xaa ||
	    dev_priv->chipset == 0xac)
		return ERR_PTR(-ENODEV);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);
	info->perflvl = perflvl;

	/* memory: build hwsq ucode which we'll use to reclock memory.
	 *         use pcie refclock if possible, otherwise use mpll */
	info->mclk_hwsq.len = 0;
	if (perflvl->memory) {
		ret = calc_mclk(dev, perflvl, info);
		if (ret)
			goto error;
		info->mscript = perflvl->memscript;
	}

	divs = read_div(dev);
	mast = info->mmast;

	/* start building HWSQ script for engine reclocking */
	hwsq = &info->eclk_hwsq;
	hwsq_init(hwsq);
	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
	hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */

	/* vdec/dom6: switch to "safe" clocks temporarily */
	if (perflvl->vdec) {
		mast &= ~0x00000c00;
		divs &= ~0x00000700;
	}

	if (perflvl->dom6) {
		mast &= ~0x0c000000;
		divs &= ~0x00000007;
	}

	hwsq_wr32(hwsq, 0x00c040, mast);

	/* vdec: avoid modifying xpll until we know exactly how the other
	 * clock domains work, i suspect at least some of them can also be
	 * tied to xpll...
	 */
	if (perflvl->vdec) {
		/* see how close we can get using nvclk as a source */
		clk = calc_div(perflvl->core, perflvl->vdec, &P1);

		/* see how close we can get using xpll/hclk as a source */
		if (dev_priv->chipset != 0x98)
			out = read_pll(dev, 0x004030);
		else
			out = read_clk(dev, clk_src_hclkm3d2);
		out = calc_div(out, perflvl->vdec, &P2);

		/* select whichever gets us closest */
		if (abs((int)perflvl->vdec - clk) <=
		    abs((int)perflvl->vdec - out)) {
			if (dev_priv->chipset != 0x98)
				mast |= 0x00000c00;
			divs |= P1 << 8;
		} else {
			mast |= 0x00000800;
			divs |= P2 << 8;
		}
	}

	/* dom6: nfi what this is, but we're limited to various combinations
	 * of the host clock frequency
	 */
	if (perflvl->dom6) {
		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
			mast |= 0x00000000;
		} else
		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
			mast |= 0x08000000;
		} else {
			clk = read_clk(dev, clk_src_hclk) * 3;
			clk = calc_div(clk, perflvl->dom6, &P1);

			mast |= 0x0c000000;
			divs |= P1;
		}
	}

	/* vdec/dom6: complete switch to new clocks */
	switch (dev_priv->chipset) {
	case 0x92:
	case 0x94:
	case 0x96:
		hwsq_wr32(hwsq, 0x004800, divs);
		break;
	default:
		hwsq_wr32(hwsq, 0x004700, divs);
		break;
	}

	hwsq_wr32(hwsq, 0x00c040, mast);

	/* core/shader: make sure sclk/nvclk are disconnected from their
	 * PLLs (nvclk to dom6, sclk to hclk)
	 */
	if (dev_priv->chipset < 0x92)
		mast = (mast & ~0x001000b0) | 0x00100080;
	else
		mast = (mast & ~0x000000b3) | 0x00000081;

	hwsq_wr32(hwsq, 0x00c040, mast);

	/* core: for the moment at least, always use nvpll */
	clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
	if (clk == 0)
		goto error;

	ctrl  = nv_rd32(dev, 0x004028) & ~0xc03f0100;
	mast &= ~0x00100000;
	mast |= 3;

	hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
	hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);

	/* shader: tie to nvclk if possible, otherwise use spll.  have to be
	 * very careful that the shader clock is at least twice the core, or
	 * some chipsets will be very unhappy.  i expect most or all of these
	 * cases will be handled by tying to nvclk, but it's possible there's
	 * corners
	 */
	ctrl = nv_rd32(dev, 0x004020) & ~0xc03f0100;

	if (P1-- && perflvl->shader == (perflvl->core << 1)) {
		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
		hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
	} else {
		clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
		if (clk == 0)
			goto error;
		ctrl |= 0x80000000;

		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
		hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
		hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
	}

	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
	hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
	hwsq_fini(hwsq);

	return info;
error:
	kfree(info);
	return ERR_PTR(ret);
}

static int
prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 hwsq_data, hwsq_kick;
	int i;

	if (dev_priv->chipset < 0x94) {
		hwsq_data = 0x001400;
		hwsq_kick = 0x00000003;
	} else {
		hwsq_data = 0x080000;
		hwsq_kick = 0x00000001;
	}

	/* upload hwsq ucode */
	nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
	nv_wr32(dev, 0x001304, 0x00000000);
	if (dev_priv->chipset >= 0x92)
		nv_wr32(dev, 0x001318, 0x00000000);
	for (i = 0; i < hwsq->len / 4; i++)
		nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
	nv_mask(dev, 0x001098, 0x00000018, 0x00000018);

	/* launch, and wait for completion */
	nv_wr32(dev, 0x00130c, hwsq_kick);
	if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
		NV_ERROR(dev, "hwsq ucode exec timed out\n");
		NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
		/* dump the ucode back from wherever it was uploaded */
		for (i = 0; i < hwsq->len / 4; i++) {
			NV_ERROR(dev, "0x%06x: 0x%08x\n", hwsq_data + (i * 4),
				 nv_rd32(dev, hwsq_data + (i * 4)));
		}

		return -EIO;
	}

	return 0;
}

int
nv50_pm_clocks_set(struct drm_device *dev, void *data)
{
	struct nv50_pm_state *info = data;
	struct bit_entry M;
	int ret = -EBUSY;

	/* halt and idle execution engines */
	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
		goto resume;
	if (!nv_wait(dev, 0x00251c, 0x0000003f, 0x0000003f))
		goto resume;

	/* program memory clock, if necessary - must come before engine clock
	 * reprogramming due to how we construct the hwsq scripts in pre()
	 */
	if (info->mclk_hwsq.len) {
		/* execute some scripts that do ??? from the vbios.. */
		if (!bit_table(dev, 'M', &M) && M.version == 1) {
			if (M.length >= 6)
				nouveau_bios_init_exec(dev, ROM16(M.data[5]));
			if (M.length >= 8)
				nouveau_bios_init_exec(dev, ROM16(M.data[7]));
			if (M.length >= 10)
				nouveau_bios_init_exec(dev, ROM16(M.data[9]));
			nouveau_bios_init_exec(dev, info->mscript);
		}

		ret = prog_hwsq(dev, &info->mclk_hwsq);
		if (ret)
			goto resume;
	}

	/* program engine clocks */
	ret = prog_hwsq(dev, &info->eclk_hwsq);

resume:
	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
	kfree(info);
	return ret;
}

static int
pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
{
	if (*line == 0x04) {
		*ctrl = 0x00e100;
		*line = 4;
		*indx = 0;
	} else
	if (*line == 0x09) {
		*ctrl = 0x00e100;
		*line = 9;
		*indx = 1;
	} else
	if (*line == 0x10) {
		*ctrl = 0x00e28c;
		*line = 0;
		*indx = 0;
	} else {
		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
		return -ENODEV;
	}

	return 0;
}

int
nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
{
	int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
	if (ret)
		return ret;

	if (nv_rd32(dev, ctrl) & (1 << line)) {
		*divs = nv_rd32(dev, 0x00e114 + (id * 8));
		*duty = nv_rd32(dev, 0x00e118 + (id * 8));
		return 0;
	}

	return -EINVAL;
}

int
nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
{
	int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
	if (ret)
		return ret;

	nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
	nv_wr32(dev, 0x00e114 + (id * 8), divs);
	nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
	return 0;
}