drm_fops.c revision d1794f2c5b5817eb79ccc5e00701ca748d1b073a
/**
 * \file drm_fops.c
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm_sarea.h"
#include <linux/poll.h>
#include <linux/smp_lock.h>

static int drm_open_helper(struct inode *inode, struct file *filp,
                           struct drm_device * dev);

static int drm_setup(struct drm_device * dev)
{
        drm_local_map_t *map;
        int i;
        int ret;
        u32 sareapage;

        if (dev->driver->firstopen) {
                ret = dev->driver->firstopen(dev);
                if (ret != 0)
                        return ret;
        }

        dev->magicfree.next = NULL;

        /* prebuild the SAREA */
        sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
        i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
        if (i != 0)
                return i;

        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        dev->buf_use = 0;
        atomic_set(&dev->buf_alloc, 0);

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i < 0)
                        return i;
        }

        for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
        INIT_LIST_HEAD(&dev->magicfree);

        dev->sigdata.lock = NULL;
        init_waitqueue_head(&dev->lock.lock_queue);
        dev->queue_count = 0;
        dev->queue_reserved = 0;
        dev->queue_slots = 0;
        dev->queuelist = NULL;
        dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_waitqueue_head(&dev->context_wait);
        dev->if_version = 0;

        dev->ctx_start = 0;
        dev->lck_start = 0;

        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        DRM_DEBUG("\n");

        /*
         * The kernel's context could be created here, but is now created
         * in drm_dma_enqueue.  This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */

        return 0;
}
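
/*
 * drm_setup() runs only when the open count goes from zero to one, and the
 * driver-specific part of that work is delegated to the ->firstopen hook.
 * A minimal sketch of how a driver might supply the hook is shown below;
 * example_firstopen, example_hw_init and example_driver are hypothetical
 * names used purely for illustration.  A nonzero return aborts drm_setup()
 * and is propagated back to drm_open().
 *
 *      static int example_firstopen(struct drm_device *dev)
 *      {
 *              return example_hw_init(dev);
 *      }
 *
 *      static struct drm_driver example_driver = {
 *              .driver_features = DRIVER_HAVE_DMA,
 *              .firstopen = example_firstopen,
 *      };
 */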

/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the DRM device with the same minor number, calls
 * drm_open_helper(), and increments the device open count. If the open
 * count was previously zero, i.e., this is the first time the device has
 * been opened, drm_setup() is called.
 */
int drm_open(struct inode *inode, struct file *filp)
{
        struct drm_device *dev = NULL;
        int minor_id = iminor(inode);
        struct drm_minor *minor;
        int retcode = 0;

        minor = idr_find(&drm_minors_idr, minor_id);
        if (!minor)
                return -ENODEV;

        if (!(dev = minor->dev))
                return -ENODEV;

        retcode = drm_open_helper(inode, filp, dev);
        if (!retcode) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                spin_lock(&dev->count_lock);
                if (!dev->open_count++) {
                        spin_unlock(&dev->count_lock);
                        return drm_setup(dev);
                }
                spin_unlock(&dev->count_lock);
        }

        return retcode;
}
EXPORT_SYMBOL(drm_open);
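
/*
 * From userspace, drm_open() is reached through an ordinary open() of the
 * DRM device node.  A hedged sketch, assuming the first card's node is
 * /dev/dri/card0 (the exact path depends on the system's udev setup):
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *
 *      int fd = open("/dev/dri/card0", O_RDWR);
 *      if (fd < 0)
 *              perror("open");
 *
 * O_EXCL opens are rejected with -EBUSY by drm_open_helper() below, and the
 * very first successful open of a device triggers drm_setup() above.
 */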

/**
 * File \c open operation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 *
 * Puts the \c dev->driver->fops corresponding to the device minor number
 * into \p filp, calls its \c open method, and restores the old file
 * operations if that call fails.
 */
int drm_stub_open(struct inode *inode, struct file *filp)
{
        struct drm_device *dev = NULL;
        struct drm_minor *minor;
        int minor_id = iminor(inode);
        int err = -ENODEV;
        const struct file_operations *old_fops;

        DRM_DEBUG("\n");

        /* BKL pushdown: note that nothing else serializes idr_find() */
        lock_kernel();
        minor = idr_find(&drm_minors_idr, minor_id);
        if (!minor)
                goto out;

        if (!(dev = minor->dev))
                goto out;

        old_fops = filp->f_op;
        filp->f_op = fops_get(&dev->driver->fops);
        if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
                fops_put(filp->f_op);
                filp->f_op = fops_get(old_fops);
        }
        fops_put(old_fops);

out:
        unlock_kernel();
        return err;
}

/**
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__i386__)
        if (boot_cpu_data.x86 == 3)
                return 0;       /* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
        return 0;               /* No cmpxchg before v9 sparc. */
#endif
        return 1;
}

/**
 * Called whenever a process opens /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in
 * \p filp and adds it to the doubly linked list in \p dev.
 */
static int drm_open_helper(struct inode *inode, struct file *filp,
                           struct drm_device * dev)
{
        int minor_id = iminor(inode);
        struct drm_file *priv;
        int ret;

        if (filp->f_flags & O_EXCL)
                return -EBUSY;  /* No exclusive opens */
        if (!drm_cpu_valid())
                return -EINVAL;

        DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);

        priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
        if (!priv)
                return -ENOMEM;

        memset(priv, 0, sizeof(*priv));
        filp->private_data = priv;
        priv->filp = filp;
        priv->uid = current->euid;
        priv->pid = task_pid_nr(current);
        priv->minor = idr_find(&drm_minors_idr, minor_id);
        priv->ioctl_count = 0;
        /* for compatibility root is always authenticated */
        priv->authenticated = capable(CAP_SYS_ADMIN);
        priv->lock_count = 0;

        INIT_LIST_HEAD(&priv->lhead);

        if (dev->driver->open) {
                ret = dev->driver->open(dev, priv);
                if (ret < 0)
                        goto out_free;
        }

        mutex_lock(&dev->struct_mutex);
        if (list_empty(&dev->filelist))
                priv->master = 1;

        list_add(&priv->lhead, &dev->filelist);
        mutex_unlock(&dev->struct_mutex);

#ifdef __alpha__
        /*
         * Default the hose
         */
        if (!dev->hose) {
                struct pci_dev *pci_dev;
                pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
                if (pci_dev) {
                        dev->hose = pci_dev->sysdata;
                        pci_dev_put(pci_dev);
                }
                if (!dev->hose) {
                        struct pci_bus *b = pci_bus_b(pci_root_buses.next);
                        if (b)
                                dev->hose = b->sysdata;
                }
        }
#endif

        return 0;
      out_free:
        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
        filp->private_data = NULL;
        return ret;
}

/** Add or remove \p filp from the fasync notification list via fasync_helper(). */
int drm_fasync(int fd, struct file *filp, int on)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int retcode;

        DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
                  (long)old_encode_dev(priv->minor->device));
        retcode = fasync_helper(fd, filp, on, &dev->buf_async);
        if (retcode < 0)
                return retcode;
        return 0;
}
EXPORT_SYMBOL(drm_fasync);
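
/*
 * drm_fasync() is reached through the fcntl() FASYNC path.  A hedged
 * userspace sketch, assuming fd was obtained from open() on the DRM device
 * node as shown earlier:
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      fcntl(fd, F_SETOWN, getpid());
 *      fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * Turning on FASYNC ends up invoking drm_fasync() with \c on set to 1,
 * which registers the file on dev->buf_async so the process can receive
 * SIGIO; clearing the flag or closing the file removes it again.
 */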

/**
 * Release file.
 *
 * \param inode device inode
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers. Unlink the file private
 * data from its list and free it. Decreases the open count and if it reaches
 * zero calls drm_lastclose().
 */
int drm_release(struct inode *inode, struct file *filp)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        int retcode = 0;

        lock_kernel();

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        if (dev->driver->preclose)
                dev->driver->preclose(dev, file_priv);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
                  task_pid_nr(current),
                  (long)old_encode_dev(file_priv->minor->device),
                  dev->open_count);

        if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
                if (drm_i_have_hw_lock(dev, file_priv)) {
                        dev->driver->reclaim_buffers_locked(dev, file_priv);
                } else {
                        unsigned long endtime = jiffies + 3 * DRM_HZ;
                        int locked = 0;

                        drm_idlelock_take(&dev->lock);

                        /*
                         * Wait for a while.
                         */

                        do {
                                spin_lock_bh(&dev->lock.spinlock);
                                locked = dev->lock.idle_has_lock;
                                spin_unlock_bh(&dev->lock.spinlock);
                                if (locked)
                                        break;
                                schedule();
                        } while (!time_after_eq(jiffies, endtime));

                        if (!locked) {
                                DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
                                          "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
                                          "\tI will go on reclaiming the buffers anyway.\n");
                        }

                        dev->driver->reclaim_buffers_locked(dev, file_priv);
                        drm_idlelock_release(&dev->lock);
                }
        }

        if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {

                drm_idlelock_take(&dev->lock);
                dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
                drm_idlelock_release(&dev->lock);

        }

        if (drm_i_have_hw_lock(dev, file_priv)) {
                DRM_DEBUG("File %p released, freeing lock for context %d\n",
                          filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                drm_lock_free(&dev->lock,
                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
        }


        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked) {
                dev->driver->reclaim_buffers(dev, file_priv);
        }

        drm_fasync(-1, filp, 0);

        mutex_lock(&dev->ctxlist_mutex);
        if (!list_empty(&dev->ctxlist)) {
                struct drm_ctx_list *pos, *n;

                list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
                        if (pos->tag == file_priv &&
                            pos->handle != DRM_KERNEL_CONTEXT) {
                                if (dev->driver->context_dtor)
                                        dev->driver->context_dtor(dev,
                                                                  pos->handle);

                                drm_ctxbitmap_free(dev, pos->handle);

                                list_del(&pos->head);
                                drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
                                --dev->ctx_count;
                        }
                }
        }
        mutex_unlock(&dev->ctxlist_mutex);

        mutex_lock(&dev->struct_mutex);
        if (file_priv->remove_auth_on_close == 1) {
                struct drm_file *temp;

                list_for_each_entry(temp, &dev->filelist, lhead)
                        temp->authenticated = 0;
        }
        list_del(&file_priv->lhead);
        mutex_unlock(&dev->struct_mutex);

        if (dev->driver->postclose)
                dev->driver->postclose(dev, file_priv);
        drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        spin_lock(&dev->count_lock);
        if (!--dev->open_count) {
                if (atomic_read(&dev->ioctl_count) || dev->blocked) {
                        DRM_ERROR("Device busy: %d %d\n",
                                  atomic_read(&dev->ioctl_count), dev->blocked);
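                        /*
                         * An ioctl is still in flight (or the device is
                         * marked blocked): bail out with -EBUSY instead of
                         * running drm_lastclose().
                         */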
                        spin_unlock(&dev->count_lock);
                        unlock_kernel();
                        return -EBUSY;
                }
                spin_unlock(&dev->count_lock);
                unlock_kernel();
                return drm_lastclose(dev);
        }
        spin_unlock(&dev->count_lock);

        unlock_kernel();

        return retcode;
}
EXPORT_SYMBOL(drm_release);

/** No-op. */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
        return 0;
}
EXPORT_SYMBOL(drm_poll);
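
/*
 * None of the helpers exported above are called directly by userspace; a
 * driver wires them into the file_operations embedded in its struct
 * drm_driver, which drm_stub_open() then swaps into the struct file.  A
 * hedged sketch, continuing the hypothetical example_driver used earlier
 * (the exact set of hooks varies from driver to driver):
 *
 *      static struct drm_driver example_driver = {
 *              .driver_features = DRIVER_HAVE_DMA,
 *              .firstopen = example_firstopen,
 *              .fops = {
 *                      .owner   = THIS_MODULE,
 *                      .open    = drm_open,
 *                      .release = drm_release,
 *                      .ioctl   = drm_ioctl,
 *                      .mmap    = drm_mmap,
 *                      .poll    = drm_poll,
 *                      .fasync  = drm_fasync,
 *              },
 *      };
 */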