via_dmablit.c revision bfd8303af0c46bd094289ee4e65f1e4bcc4fb7d3
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include <drm/drmP.h>
#include <drm/via_drm.h>
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>
#include <linux/slab.h>

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;


/*
 * Unmap a DMA mapping.
 */

static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}
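
/*
 * Illustration only (not part of the driver): with three descriptors
 * D0..D2 built by via_map_blit_for_device() below, the mapped chain is
 * walked by the hardware starting at vsg->chain_start:
 *
 *	chain_start -> D2 -> D1 -> D0 -> VIA_DMA_DPR_EC	(end of chain)
 *
 * via_unmap_blit_from_device() above therefore starts at the last
 * descriptor written (D2) and follows the 'next' bus addresses back to
 * D0, undoing one dma_map_single() and one dma_map_page() per step.
 */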

/*
 * If mode = 0, count how many descriptors are needed.
 * If mode = 1, map the DMA pages for the device, and also assemble and map
 * the descriptors. Descriptors are run in reverse order by the hardware
 * because we are not allowed to update the 'next' field without syncing
 * calls when the descriptor is already mapped.
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
			const drm_via_dmablit_t *xfer,
			drm_via_sg_info_t *vsg,
			int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built, as long as the status enum is
 * consistent with the actual status of the used resources.
 */

static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	vfree(vsg->bounce_buffer);
	vsg->bounce_buffer = NULL;
	vsg->free_on_sequence = 0;
}
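
/*
 * Illustration only (not part of the driver): the switch in
 * via_free_sg_info() relies on fall-through, so each state releases its
 * own resources plus everything acquired in earlier states:
 *
 *	dr_via_device_mapped	unmap descriptor chain and user pages
 *	dr_via_desc_pages_alloc	free the descriptor pages
 *	dr_via_pages_locked	dirty and release the locked user pages
 *	dr_via_pages_alloc	free the page pointer array
 *
 * A caller that fails half-way through setup can thus always clean up
 * with a single call, e.g. (hypothetical):
 *
 *	if (via_alloc_desc_pages(vsg))		// vsg->state already set
 *		via_free_sg_info(dev->pdev, vsg);
 */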

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}

/*
 * Obtain a page pointer array and lock all pages into system memory. A
 * segmentation violation will occur here if the calling user does not have
 * access to the submitted address.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
	if (NULL == vsg->pages)
		return -ENOMEM;
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm,
			     (unsigned long)xfer->mem_addr,
			     vsg->num_pages,
			     (vsg->direction == DMA_FROM_DEVICE),
			     0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array
 * that keeps track of the pages we allocate. We don't want to use kmalloc
 * for the descriptor chain because it may be quite large for some blits,
 * and the pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
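
/*
 * Illustration only (not part of the driver): the register writes above
 * use two per-engine strides. MAR/DAR/BCR/DPR are spaced 0x10 apart per
 * engine, while CSR/MR are spaced 0x04 apart:
 *
 *	engine 0: VIA_PCI_DMA_CSR0 + 0*0x04, VIA_PCI_DMA_MAR0 + 0*0x10
 *	engine 1: VIA_PCI_DMA_CSR0 + 1*0x04, VIA_PCI_DMA_MAR0 + 1*0x10
 */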

/*
 * The dmablit part of the IRQ handler. Only reasonably fast things should be
 * done here. The rest, like unmapping and freeing memory for done blits, is
 * done in a separate workqueue task. Basically the task of the interrupt
 * handler is to submit a new blit to the engine, while the workqueue task
 * takes care of processing associated with the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
	  ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}



/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
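
/*
 * Illustration only (not part of the driver): the wraparound test in
 * via_dmablit_active() asks whether 'handle' lies in the window
 * (done_blit_handle, cur_blit_handle] on the 32-bit handle circle.
 * For example, with done_blit_handle = 0xfffffffe, cur_blit_handle =
 * 0x00000001 and handle = 0x00000000:
 *
 *	done_blit_handle - handle = 0xfffffffe	> (1 << 23): not yet done
 *	cur_blit_handle - handle  = 0x00000001	<= (1 << 23): already queued
 *
 * so the blit is correctly reported active even across the wrap.
 */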

/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, they will shorten the latency somewhat.
 */

static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}


/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
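
/*
 * Illustration only (not part of the driver): three ring indices chase
 * each other around the VIA_NUM_BLIT_SLOTS slots. via_dmablit() advances
 * 'head' when queueing, the IRQ/timer handler advances 'cur' when a
 * transfer completes, and the workqueue above advances 'serviced' as it
 * frees each finished blit, so at all times (modulo the ring size):
 *
 *	serviced <= cur <= head
 */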

/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */

void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
				(unsigned long)blitq);
	}
}
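
/*
 * Illustration only (not part of the driver): the driver-internal call
 * flow for a single blit, built from the functions below:
 *
 *	via_dmablit_grab_slot()		reserve a ring slot, may sleep
 *	via_build_sg_info()		lock pages, build descriptor chain
 *	via_dmablit_handler()		fire the engine if it is idle
 *	via_dmablit_sync()		wait on the returned sync handle
 */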

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that would
	 * open a DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses
	 * start on 16 byte boundaries. This seems a bit restrictive, however.
	 * VIA has been contacted about this. Meanwhile, impose the following
	 * restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}


/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	DRM_WAKEUP(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}
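
/*
 * Illustration only (not part of the driver): since both IOCTLs below
 * return -EAGAIN when interrupted by a signal, a user-space client is
 * expected to retry. A hypothetical sketch using the generic libdrm
 * entry point:
 *
 *	do {
 *		ret = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT,
 *					  &xfer, sizeof(xfer));
 *	} while (ret == -EAGAIN);
 */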

/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, so there is a high probability that this IOCTL will be
 * interrupted by a signal. In that case it returns with -EAGAIN for the
 * signal to be delivered. The caller should then reissue the IOCTL. This is
 * similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}


/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be
 * interrupted by a signal while waiting for a free slot in the blit queue.
 * In that case it returns with -EAGAIN and should be reissued. See the above
 * IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}