/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");

#define BTC_IO_MC_REGS_SIZE 29
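/* Per-ASIC MC IO register tables: each entry is a
 * {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pair that
 * ni_mc_load_microcode() programs before uploading the MC ucode.
 * The four tables differ only in the final (0x9f) entry.
 */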
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
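/*
 * ni_mc_load_microcode - load MC ucode into the hw
 *
 * Programs the chip-specific MC IO registers and uploads the MC ucode
 * (GDDR5 boards only; skipped if the MC sequencer is already running).
 * Returns 0 on success, -EINVAL if no MC firmware is loaded.
 */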
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
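/*
 * ni_init_microcode - fetch the CP/RLC/MC firmware images
 *
 * Requests the PFP, ME, RLC and (on discrete parts) MC firmware blobs
 * from userspace and validates their sizes for the current ASIC.
 * Returns 0 on success, error on failure.
 */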
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default:
		BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
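/*
 * cayman_get_tile_pipe_to_backend_map - build the pipe-to-backend mapping
 *
 * Clamps the requested pipe/backend/shader-engine counts to the limits of
 * the ASIC, distributes the enabled backends across the tile pipes
 * (optionally swizzled) and returns the packed 4-bit-per-pipe backend map.
 */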
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
					       u32 num_tile_pipes,
					       u32 num_backends_per_asic,
					       u32 *backend_disable_mask_per_asic,
					       u32 num_shader_engines)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 num_backends_per_se;
	u32 cur_pipe;
	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	/* force legal values */
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	if (num_shader_engines < 1)
		num_shader_engines = 1;
	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
		num_shader_engines = rdev->config.cayman.max_shader_engines;
	if (num_backends_per_asic < num_shader_engines)
		num_backends_per_asic = num_shader_engines;
	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
		num_backends_per_asic = rdev->config.cayman.max_backends_per_se *
			num_shader_engines;

	/* make sure we have the same number of backends per se */
	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
	/* set up the number of backends per se */
	num_backends_per_se = num_backends_per_asic / num_shader_engines;
	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
		num_backends_per_asic = num_backends_per_se * num_shader_engines;
	}

	/* create enable mask and count for enabled backends */
	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends_per_asic)
			break;
	}

	/* force the backends mask to match the current number of backends */
	if (enabled_backends_count != num_backends_per_asic) {
		u32 this_backend_enabled;
		u32 shader_engine;
		u32 backend_per_se;

		enabled_backends_mask = 0;
		enabled_backends_count = 0;
		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			/* calc the current se */
			shader_engine = i / rdev->config.cayman.max_backends_per_se;
			/* calc the backend per se */
			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
			/* default to not enabled */
			this_backend_enabled = 0;
			if ((shader_engine < num_shader_engines) &&
			    (backend_per_se < num_backends_per_se))
				this_backend_enabled = 1;
			if (this_backend_enabled) {
				enabled_backends_mask |= (1 << i);
				*backend_disable_mask_per_asic &= ~(1 << i);
				++enabled_backends_count;
			}
		}
	}

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
		force_no_swizzle = true;
		break;
	default:
		force_no_swizzle = false;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
	}

	return backend_map;
}
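/*
 * cayman_get_disable_mask_per_asic - widen a per-SE disable mask to the ASIC
 *
 * Replicates the per-shader-engine backend disable mask once per shader
 * engine (Cayman has at most two) to produce the ASIC-wide mask.
 */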
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
					    u32 disable_mask_per_se,
					    u32 max_disable_mask_per_se,
					    u32 num_shader_engines)
{
	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;

	if (num_shader_engines == 1)
		return disable_mask_per_asic;
	else if (num_shader_engines == 2)
		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
	else
		return 0xffffffff;
}
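/*
 * cayman_gpu_init - set up the 3D engine
 *
 * Fills rdev->config.cayman with the per-ASIC limits, derives the tile
 * pipe/backend/memory layout from the config registers, and programs
 * GB_ADDR_CONFIG, the backend map and the 3D engine defaults.
 */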
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 gc_user_shader_pipe_config;
	u32 gc_user_rb_backend_disable;
	u32 cgts_user_tcc_disable;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);

	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.backend_disable_mask_per_asic =
		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
						 rdev->config.cayman.num_shader_engines);
	rdev->config.cayman.backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
		rdev->config.cayman.mem_max_burst_length_bytes = 512;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	/* gb_addr_config = 0x02011003 */
#if 0
	gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
	gb_addr_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
	switch (rdev->config.cayman.num_gpus) {
	case 1:
	default:
		gb_addr_config |= NUM_GPUS(0);
		break;
	case 2:
		gb_addr_config |= NUM_GPUS(1);
		break;
	case 4:
		gb_addr_config |= NUM_GPUS(2);
		break;
	}
	switch (rdev->config.cayman.multi_gpu_tile_size) {
	case 16:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
		break;
	case 32:
	default:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
		break;
	case 64:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
		break;
	case 128:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
		break;
	}
	switch (rdev->config.cayman.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}
#endif

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	/* gb_backend_map = 0x76541032 */
#if 0
	gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
	gb_backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
#endif
	/* setup tiling info dword. gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
			rdev->config.cayman.tile_config |= 1 << 4;
		else
			rdev->config.cayman.tile_config |= 0 << 4;
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	rdev->config.cayman.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	/* primary versions */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);

	/* user versions */
	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
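/*
 * cayman_pcie_gart_tlb_flush - flush the system-context TLB
 *
 * Flushes the HDP cache, then writes VM_INVALIDATE_REQUEST to
 * invalidate the TLB for VM context 0 (the GART context).
 */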
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->gart.table_addr >> 12);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
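/*
 * cayman_fence_ring_emit - emit a fence on the requested ring
 *
 * Flushes the CP read caches over the GART via SURFACE_SYNC, then emits
 * an EVENT_WRITE_EOP packet that writes the fence sequence number and
 * raises an interrupt.
 */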
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, ib->vm_id);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
}

static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
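/*
 * cayman_cp_start - initialize the CP and emit the clear state
 *
 * Emits ME_INITIALIZE, enables the CP, then writes the default
 * clear-state sequence (cayman_default_state) to the gfx ring.
 */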
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
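/*
 * cayman_cp_resume - soft-reset the CP block and bring up the three rings
 *
 * Resets the gfx blocks tied to the CP, programs the ring buffer size,
 * read/write pointers and writeback addresses for rings 0-2, then starts
 * the CP and ring-tests ring 0 (the compute rings stay disabled for now).
 */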
int cayman_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}

bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, ring);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, ring, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_unlock_commit(rdev, ring);
	}
	/* XXX deal with CP0,1,2 */
	ring->rptr = RREG32(ring->rptr_reg);
	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}

static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);

	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev);
}
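/*
 * cayman_startup - bring the hw to a working state
 *
 * Loads the required firmware, programs the MC, enables the GART,
 * initializes the 3D engine, writeback, fences, interrupts and rings,
 * and runs an IB test. Called at init and resume time.
 */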
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}

		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		r = si_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX,
			   &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		rdev->accel_working = false;
		return r;
	}

	r = radeon_vm_manager_start(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	radeon_ib_pool_suspend(rdev);
	radeon_vm_manager_suspend(rdev);
	r600_blit_suspend(rdev);
	cayman_cp_enable(rdev, false);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more than
 * call the asic-specific functions. This should also allow us to
 * remove a bunch of callbacks such as vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}
	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
	}

	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			si_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		si_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_semaphore_driver_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * vm
 */
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}

int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
{
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
	return 0;
}

void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}

void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
{
	if (vm->id == -1)
		return;

	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
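/*
 * cayman_vm_page_flags - translate generic VM page flags to hw PTE bits
 *
 * Converts the RADEON_VM_PAGE_* flags into the R600_PTE_* bits expected
 * in the page table entries written by cayman_vm_set_page().
 */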
uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
			      struct radeon_vm *vm,
			      uint32_t flags)
{
	uint32_t r600_flags = 0;

	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		r600_flags |= R600_PTE_SYSTEM;
		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return r600_flags;
}

void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
			unsigned pfn, uint64_t addr, uint32_t flags)
{
	void __iomem *ptr = (void *)vm->pt;

	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= flags;
	writeq(addr, ptr + (pfn * 8));
}