nouveau_chan.c revision a7c6e75eb28d18ed8d53182891330f5c1fa4477c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/client.h>
#include <core/device.h>
#include <core/class.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>

#include <engine/software.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
static int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

int
nouveau_channel_idle(struct nouveau_channel *chan)
{
        struct nouveau_drm *drm = chan->drm;
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence);
        if (!ret) {
                ret = nouveau_fence_wait(fence, false, false);
                nouveau_fence_unref(&fence);
        }

        if (ret)
                NV_ERROR(drm, "failed to idle channel 0x%08x\n", chan->handle);
        return ret;
}

void
nouveau_channel_del(struct nouveau_channel **pchan)
{
        struct nouveau_channel *chan = *pchan;
        if (chan) {
                struct nouveau_object *client = nv_object(chan->cli);
                if (chan->fence) {
                        nouveau_channel_idle(chan);
                        nouveau_fence(chan->drm)->context_del(chan);
                }
                nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
                nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
                nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
                nouveau_bo_unmap(chan->push.buffer);
                nouveau_bo_ref(NULL, &chan->push.buffer);
                kfree(chan);
        }
        *pchan = NULL;
}
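
/* Channel creation happens in stages: nouveau_channel_prep() allocates the
 * channel struct and its DMA push buffer, nouveau_channel_ind() or
 * nouveau_channel_dma() wraps that in a hardware channel object, and
 * nouveau_channel_init() creates the VRAM/GART DMA objects and sets up the
 * DMA tracking state and fencing.  nouveau_channel_new() at the bottom of
 * this file drives the whole sequence, preferring an indirect (GPFIFO)
 * channel and falling back to a DMA channel on older hardware.
 */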

static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
                     u32 parent, u32 handle, u32 size,
                     struct nouveau_channel **pchan)
{
        struct nouveau_device *device = nv_device(drm->device);
        struct nouveau_instmem *imem = nouveau_instmem(device);
        struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
        struct nouveau_fb *pfb = nouveau_fb(device);
        struct nouveau_client *client = &cli->base;
        struct nv_dma_class args = {};
        struct nouveau_channel *chan;
        struct nouveau_object *push;
        u32 target;
        int ret;

        chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;

        chan->cli = cli;
        chan->drm = drm;
        chan->handle = handle;

        /* allocate memory for dma push buffer */
        target = TTM_PL_FLAG_TT;
        if (nouveau_vram_pushbuf)
                target = TTM_PL_FLAG_VRAM;

        ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
                             &chan->push.buffer);
        if (ret == 0) {
                ret = nouveau_bo_pin(chan->push.buffer, target);
                if (ret == 0)
                        ret = nouveau_bo_map(chan->push.buffer);
        }

        if (ret) {
                nouveau_channel_del(pchan);
                return ret;
        }

        /* create dma object covering the *entire* memory space that the
         * pushbuf lives in, this is because the GEM code requires that
         * we be able to call out to other (indirect) push buffers
         */
        chan->push.vma.offset = chan->push.buffer->bo.offset;
        chan->push.handle = NVDRM_PUSH | (handle & 0xffff);

        if (device->card_type >= NV_50) {
                ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
                                         &chan->push.vma);
                if (ret) {
                        nouveau_channel_del(pchan);
                        return ret;
                }

                args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
                args.start = 0;
                args.limit = client->vm->vmm->limit - 1;
        } else
        if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
                u64 limit = pfb->ram.size - imem->reserved - 1;
                if (device->card_type == NV_04) {
                        /* nv04 vram pushbuf hack, retarget to its location in
                         * the framebuffer bar rather than direct vram access..
                         * nfi why this exists, it came from the -nv ddx.
                         */
                        args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
                        args.start = pci_resource_start(device->pdev, 1);
                        args.limit = args.start + limit;
                } else {
                        args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
                        args.start = 0;
                        args.limit = limit;
                }
        } else {
                if (chan->drm->agp.stat == ENABLED) {
                        args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
                        args.start = chan->drm->agp.base;
                        args.limit = chan->drm->agp.base +
                                     chan->drm->agp.size - 1;
                } else {
                        args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
                        args.start = 0;
                        args.limit = vmm->limit - 1;
                }
        }

        ret = nouveau_object_new(nv_object(chan->cli), parent,
                                 chan->push.handle, 0x0002,
                                 &args, sizeof(args), &push);
        if (ret) {
                nouveau_channel_del(pchan);
                return ret;
        }

        return 0;
}

int
nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
                    u32 parent, u32 handle, u32 engine,
                    struct nouveau_channel **pchan)
{
        static const u16 oclasses[] = { 0xa06f, 0x906f, 0x826f, 0x506f, 0 };
        const u16 *oclass = oclasses;
        struct nve0_channel_ind_class args;
        struct nouveau_channel *chan;
        int ret;

        /* allocate dma push buffer */
        ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
        *pchan = chan;
        if (ret)
                return ret;

        /* create channel object */
        args.pushbuf = chan->push.handle;
        args.ioffset = 0x10000 + chan->push.vma.offset;
        args.ilength = 0x02000;
        args.engine = engine;

        do {
                ret = nouveau_object_new(nv_object(cli), parent, handle,
                                         *oclass++, &args, sizeof(args),
                                         &chan->object);
                if (ret == 0)
                        return ret;
        } while (*oclass);

        nouveau_channel_del(pchan);
        return ret;
}
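
/* As with the indirect class list above (0xa06f/0x906f/0x826f/0x506f,
 * i.e. the NVE0/NVC0/NV84/NV50 GPFIFO classes in core/class.h),
 * nouveau_channel_dma() below walks its class list newest-first
 * (0x406e NV40, 0x176e NV17, 0x006e NV10, 0x006b NV03) and settles on
 * the first class the core agrees to instantiate.
 */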

static int
nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
                    u32 parent, u32 handle, struct nouveau_channel **pchan)
{
        static const u16 oclasses[] = { 0x406e, 0x176e, 0x006e, 0x006b, 0 };
        const u16 *oclass = oclasses;
        struct nv03_channel_dma_class args;
        struct nouveau_channel *chan;
        int ret;

        /* allocate dma push buffer */
        ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
        *pchan = chan;
        if (ret)
                return ret;

        /* create channel object */
        args.pushbuf = chan->push.handle;
        args.offset = chan->push.vma.offset;

        do {
                ret = nouveau_object_new(nv_object(cli), parent, handle,
                                         *oclass++, &args, sizeof(args),
                                         &chan->object);
                if (ret == 0)
                        return ret;
        } while (ret && *oclass);

        nouveau_channel_del(pchan);
        return ret;
}

static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
        struct nouveau_client *client = nv_client(chan->cli);
        struct nouveau_device *device = nv_device(chan->drm->device);
        struct nouveau_instmem *imem = nouveau_instmem(device);
        struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
        struct nouveau_fb *pfb = nouveau_fb(device);
        struct nouveau_software_chan *swch;
        struct nouveau_object *object;
        struct nv_dma_class args;
        int ret, i;

        /* allocate dma objects to cover all allowed vram, and gart */
        if (device->card_type < NV_C0) {
                if (device->card_type >= NV_50) {
                        args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
                        args.start = 0;
                        args.limit = client->vm->vmm->limit - 1;
                } else {
                        args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
                        args.start = 0;
                        args.limit = pfb->ram.size - imem->reserved - 1;
                }

                ret = nouveau_object_new(nv_object(client), chan->handle, vram,
                                         0x003d, &args, sizeof(args), &object);
                if (ret)
                        return ret;

                if (device->card_type >= NV_50) {
                        args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
                        args.start = 0;
                        args.limit = client->vm->vmm->limit - 1;
                } else
                if (chan->drm->agp.stat == ENABLED) {
                        args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
                        args.start = chan->drm->agp.base;
                        args.limit = chan->drm->agp.base +
                                     chan->drm->agp.size - 1;
                } else {
                        args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
                        args.start = 0;
                        args.limit = vmm->limit - 1;
                }

                ret = nouveau_object_new(nv_object(client), chan->handle, gart,
                                         0x003d, &args, sizeof(args), &object);
                if (ret)
                        return ret;

                chan->vram = vram;
                chan->gart = gart;
        }

        /* initialise dma tracking parameters */
        switch (nv_hclass(chan->object) & 0x00ff) {
        case 0x006b:
        case 0x006e:
                chan->user_put = 0x40;
                chan->user_get = 0x44;
                chan->dma.max = (0x10000 / 4) - 2;
                break;
        default:
                chan->user_put = 0x40;
                chan->user_get = 0x44;
                chan->user_get_hi = 0x60;
                chan->dma.ib_base = 0x10000 / 4;
                chan->dma.ib_max = (0x02000 / 8) - 1;
                chan->dma.ib_put = 0;
                chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
                chan->dma.max = chan->dma.ib_base;
                break;
        }
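
        /* The 0x6b/0x6e cases above are the DMA channel classes; the default
         * case covers the GPFIFO (0x?06f) classes, which get a separate
         * GET_HI word in the user control window.  user_put/user_get are the
         * byte offsets of the PUT/GET registers in that window.  For GPFIFO
         * channels the IB ring of (0x02000 / 8) entries at word offset
         * (0x10000 / 4) matches the ioffset/ilength values programmed by
         * nouveau_channel_ind() above.
         */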

        chan->dma.put = 0;
        chan->dma.cur = chan->dma.put;
        chan->dma.free = chan->dma.max - chan->dma.cur;

        ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
        if (ret)
                return ret;

        for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
                OUT_RING(chan, 0x00000000);

        /* allocate software object class (used for fences on <= nv05, and
         * to signal flip completion), bind it to a subchannel.
         */
        if (chan != chan->drm->cechan) {
                ret = nouveau_object_new(nv_object(client), chan->handle,
                                         NvSw, nouveau_abi16_swclass(chan->drm),
                                         NULL, 0, &object);
                if (ret)
                        return ret;

                swch = (void *)object->parent;
                swch->flip = nouveau_flip_complete;
                swch->flip_data = chan;
        }

        if (device->card_type < NV_C0) {
                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
                OUT_RING (chan, NvSw);
                FIRE_RING (chan);
        }

        /* initialise synchronisation */
        return nouveau_fence(chan->drm)->context_new(chan);
}

int
nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
                    u32 parent, u32 handle, u32 arg0, u32 arg1,
                    struct nouveau_channel **pchan)
{
        int ret;

        ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
        if (ret) {
                NV_DEBUG(drm, "ib channel create, %d\n", ret);
                ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
                if (ret) {
                        NV_DEBUG(drm, "dma channel create, %d\n", ret);
                        return ret;
                }
        }

        ret = nouveau_channel_init(*pchan, arg0, arg1);
        if (ret) {
                NV_ERROR(drm, "channel failed to initialise, %d\n", ret);
                nouveau_channel_del(pchan);
                return ret;
        }

        return 0;
}
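
/* Illustrative usage, not part of this file: the acceleration setup in
 * nouveau_drm.c creates the DRM's general-purpose channel along these
 * lines (the exact handles/arguments belong to the caller, not here):
 *
 *      ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
 *                                NVDRM_CHAN, NvDmaFB, NvDmaTT,
 *                                &drm->channel);
 *
 * arg0/arg1 become the VRAM/GART DMA object handles passed on to
 * nouveau_channel_init(); on NV_C0 and later those DMA objects are not
 * created, and arg0 instead selects the engines an indirect channel may
 * address (see nouveau_channel_ind() above).
 */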