ipath_file_ops.c revision 124b4dcb1dd3a6fb80051f1785117a732d785f70
1/* 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34#include <linux/pci.h> 35#include <linux/poll.h> 36#include <linux/cdev.h> 37#include <linux/swap.h> 38#include <linux/vmalloc.h> 39#include <linux/highmem.h> 40#include <linux/io.h> 41#include <linux/jiffies.h> 42#include <asm/pgtable.h> 43 44#include "ipath_kernel.h" 45#include "ipath_common.h" 46#include "ipath_user_sdma.h" 47 48static int ipath_open(struct inode *, struct file *); 49static int ipath_close(struct inode *, struct file *); 50static ssize_t ipath_write(struct file *, const char __user *, size_t, 51 loff_t *); 52static ssize_t ipath_writev(struct kiocb *, const struct iovec *, 53 unsigned long , loff_t); 54static unsigned int ipath_poll(struct file *, struct poll_table_struct *); 55static int ipath_mmap(struct file *, struct vm_area_struct *); 56 57static const struct file_operations ipath_file_ops = { 58 .owner = THIS_MODULE, 59 .write = ipath_write, 60 .aio_write = ipath_writev, 61 .open = ipath_open, 62 .release = ipath_close, 63 .poll = ipath_poll, 64 .mmap = ipath_mmap 65}; 66 67/* 68 * Convert kernel virtual addresses to physical addresses so they don't 69 * potentially conflict with the chip addresses used as mmap offsets. 70 * It doesn't really matter what mmap offset we use as long as we can 71 * interpret it correctly. 72 */ 73static u64 cvt_kvaddr(void *p) 74{ 75 struct page *page; 76 u64 paddr = 0; 77 78 page = vmalloc_to_page(p); 79 if (page) 80 paddr = page_to_pfn(page) << PAGE_SHIFT; 81 82 return paddr; 83} 84 85static int ipath_get_base_info(struct file *fp, 86 void __user *ubase, size_t ubase_size) 87{ 88 struct ipath_portdata *pd = port_fp(fp); 89 int ret = 0; 90 struct ipath_base_info *kinfo = NULL; 91 struct ipath_devdata *dd = pd->port_dd; 92 unsigned subport_cnt; 93 int shared, master; 94 size_t sz; 95 96 subport_cnt = pd->port_subport_cnt; 97 if (!subport_cnt) { 98 shared = 0; 99 master = 0; 100 subport_cnt = 1; 101 } else { 102 shared = 1; 103 master = !subport_fp(fp); 104 } 105 106 sz = sizeof(*kinfo); 107 /* If port sharing is not requested, allow the old size structure */ 108 if (!shared) 109 sz -= 7 * sizeof(u64); 110 if (ubase_size < sz) { 111 ipath_cdbg(PROC, 112 "Base size %zu, need %zu (version mismatch?)\n", 113 ubase_size, sz); 114 ret = -EINVAL; 115 goto bail; 116 } 117 118 kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL); 119 if (kinfo == NULL) { 120 ret = -ENOMEM; 121 goto bail; 122 } 123 124 ret = dd->ipath_f_get_base_info(pd, kinfo); 125 if (ret < 0) 126 goto bail; 127 128 kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt; 129 kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize; 130 kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt; 131 kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize; 132 /* 133 * have to mmap whole thing 134 */ 135 kinfo->spi_rcv_egrbuftotlen = 136 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; 137 kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk; 138 kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen / 139 pd->port_rcvegrbuf_chunks; 140 kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt; 141 if (master) 142 kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt; 143 /* 144 * for this use, may be ipath_cfgports summed over all chips that 145 * are are configured and present 146 */ 147 kinfo->spi_nports = dd->ipath_cfgports; 148 /* unit (chip/board) our port is on */ 149 kinfo->spi_unit = dd->ipath_unit; 150 /* for now, only a single page */ 151 kinfo->spi_tid_maxsize = PAGE_SIZE; 152 153 /* 154 * Doing this per port, and based on the skip value, etc. This has 155 * to be the actual buffer size, since the protocol code treats it 156 * as an array. 157 * 158 * These have to be set to user addresses in the user code via mmap. 159 * These values are used on return to user code for the mmap target 160 * addresses only. For 32 bit, same 44 bit address problem, so use 161 * the physical address, not virtual. Before 2.6.11, using the 162 * page_address() macro worked, but in 2.6.11, even that returns the 163 * full 64 bit address (upper bits all 1's). So far, using the 164 * physical addresses (or chip offsets, for chip mapping) works, but 165 * no doubt some future kernel release will change that, and we'll be 166 * on to yet another method of dealing with this. 167 */ 168 kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys; 169 kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys; 170 kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys; 171 kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys; 172 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + 173 (void *) dd->ipath_statusp - 174 (void *) dd->ipath_pioavailregs_dma; 175 if (!shared) { 176 kinfo->spi_piocnt = dd->ipath_pbufsport; 177 kinfo->spi_piobufbase = (u64) pd->port_piobufs; 178 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + 179 dd->ipath_ureg_align * pd->port_port; 180 } else if (master) { 181 kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) + 182 (dd->ipath_pbufsport % subport_cnt); 183 /* Master's PIO buffers are after all the slave's */ 184 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 185 dd->ipath_palign * 186 (dd->ipath_pbufsport - kinfo->spi_piocnt); 187 } else { 188 unsigned slave = subport_fp(fp) - 1; 189 190 kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt; 191 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 192 dd->ipath_palign * kinfo->spi_piocnt * slave; 193 } 194 195 /* 196 * Set the PIO avail update threshold to no larger 197 * than the number of buffers per process. Note that 198 * we decrease it here, but won't ever increase it. 199 */ 200 if (dd->ipath_pioupd_thresh && 201 kinfo->spi_piocnt < dd->ipath_pioupd_thresh) { 202 unsigned long flags; 203 204 dd->ipath_pioupd_thresh = kinfo->spi_piocnt; 205 ipath_dbg("Decreased pio update threshold to %u\n", 206 dd->ipath_pioupd_thresh); 207 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 208 dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK 209 << INFINIPATH_S_UPDTHRESH_SHIFT); 210 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh 211 << INFINIPATH_S_UPDTHRESH_SHIFT; 212 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 213 dd->ipath_sendctrl); 214 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 215 } 216 217 if (shared) { 218 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + 219 dd->ipath_ureg_align * pd->port_port; 220 kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs; 221 kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base; 222 kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr; 223 224 kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase + 225 PAGE_SIZE * subport_fp(fp)); 226 227 kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base + 228 pd->port_rcvhdrq_size * subport_fp(fp)); 229 kinfo->spi_rcvhdr_tailaddr = 0; 230 kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf + 231 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size * 232 subport_fp(fp)); 233 234 kinfo->spi_subport_uregbase = 235 cvt_kvaddr(pd->subport_uregbase); 236 kinfo->spi_subport_rcvegrbuf = 237 cvt_kvaddr(pd->subport_rcvegrbuf); 238 kinfo->spi_subport_rcvhdr_base = 239 cvt_kvaddr(pd->subport_rcvhdr_base); 240 ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n", 241 kinfo->spi_port, kinfo->spi_runtime_flags, 242 (unsigned long long) kinfo->spi_subport_uregbase, 243 (unsigned long long) kinfo->spi_subport_rcvegrbuf, 244 (unsigned long long) kinfo->spi_subport_rcvhdr_base); 245 } 246 247 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / 248 dd->ipath_palign; 249 kinfo->spi_pioalign = dd->ipath_palign; 250 251 kinfo->spi_qpair = IPATH_KD_QP; 252 /* 253 * user mode PIO buffers are always 2KB, even when 4KB can 254 * be received, and sent via the kernel; this is ibmaxlen 255 * for 2K MTU. 256 */ 257 kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32); 258 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ 259 kinfo->spi_port = pd->port_port; 260 kinfo->spi_subport = subport_fp(fp); 261 kinfo->spi_sw_version = IPATH_KERN_SWVERSION; 262 kinfo->spi_hw_version = dd->ipath_revision; 263 264 if (master) { 265 kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; 266 } 267 268 sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo); 269 if (copy_to_user(ubase, kinfo, sz)) 270 ret = -EFAULT; 271 272bail: 273 kfree(kinfo); 274 return ret; 275} 276 277/** 278 * ipath_tid_update - update a port TID 279 * @pd: the port 280 * @fp: the ipath device file 281 * @ti: the TID information 282 * 283 * The new implementation as of Oct 2004 is that the driver assigns 284 * the tid and returns it to the caller. To make it easier to 285 * catch bugs, and to reduce search time, we keep a cursor for 286 * each port, walking the shadow tid array to find one that's not 287 * in use. 288 * 289 * For now, if we can't allocate the full list, we fail, although 290 * in the long run, we'll allocate as many as we can, and the 291 * caller will deal with that by trying the remaining pages later. 292 * That means that when we fail, we have to mark the tids as not in 293 * use again, in our shadow copy. 294 * 295 * It's up to the caller to free the tids when they are done. 296 * We'll unlock the pages as they free them. 297 * 298 * Also, right now we are locking one page at a time, but since 299 * the intended use of this routine is for a single group of 300 * virtually contiguous pages, that should change to improve 301 * performance. 302 */ 303static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp, 304 const struct ipath_tid_info *ti) 305{ 306 int ret = 0, ntids; 307 u32 tid, porttid, cnt, i, tidcnt, tidoff; 308 u16 *tidlist; 309 struct ipath_devdata *dd = pd->port_dd; 310 u64 physaddr; 311 unsigned long vaddr; 312 u64 __iomem *tidbase; 313 unsigned long tidmap[8]; 314 struct page **pagep = NULL; 315 unsigned subport = subport_fp(fp); 316 317 if (!dd->ipath_pageshadow) { 318 ret = -ENOMEM; 319 goto done; 320 } 321 322 cnt = ti->tidcnt; 323 if (!cnt) { 324 ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n", 325 (unsigned long long) ti->tidlist); 326 /* 327 * Should we treat as success? likely a bug 328 */ 329 ret = -EFAULT; 330 goto done; 331 } 332 porttid = pd->port_port * dd->ipath_rcvtidcnt; 333 if (!pd->port_subport_cnt) { 334 tidcnt = dd->ipath_rcvtidcnt; 335 tid = pd->port_tidcursor; 336 tidoff = 0; 337 } else if (!subport) { 338 tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + 339 (dd->ipath_rcvtidcnt % pd->port_subport_cnt); 340 tidoff = dd->ipath_rcvtidcnt - tidcnt; 341 porttid += tidoff; 342 tid = tidcursor_fp(fp); 343 } else { 344 tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; 345 tidoff = tidcnt * (subport - 1); 346 porttid += tidoff; 347 tid = tidcursor_fp(fp); 348 } 349 if (cnt > tidcnt) { 350 /* make sure it all fits in port_tid_pg_list */ 351 dev_info(&dd->pcidev->dev, "Process tried to allocate %u " 352 "TIDs, only trying max (%u)\n", cnt, tidcnt); 353 cnt = tidcnt; 354 } 355 pagep = &((struct page **) pd->port_tid_pg_list)[tidoff]; 356 tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff]; 357 358 memset(tidmap, 0, sizeof(tidmap)); 359 /* before decrement; chip actual # */ 360 ntids = tidcnt; 361 tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) + 362 dd->ipath_rcvtidbase + 363 porttid * sizeof(*tidbase)); 364 365 ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n", 366 pd->port_port, cnt, tid, tidbase); 367 368 /* virtual address of first page in transfer */ 369 vaddr = ti->tidvaddr; 370 if (!access_ok(VERIFY_WRITE, (void __user *) vaddr, 371 cnt * PAGE_SIZE)) { 372 ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n", 373 (void *)vaddr, cnt); 374 ret = -EFAULT; 375 goto done; 376 } 377 ret = ipath_get_user_pages(vaddr, cnt, pagep); 378 if (ret) { 379 if (ret == -EBUSY) { 380 ipath_dbg("Failed to lock addr %p, %u pages " 381 "(already locked)\n", 382 (void *) vaddr, cnt); 383 /* 384 * for now, continue, and see what happens but with 385 * the new implementation, this should never happen, 386 * unless perhaps the user has mpin'ed the pages 387 * themselves (something we need to test) 388 */ 389 ret = 0; 390 } else { 391 dev_info(&dd->pcidev->dev, 392 "Failed to lock addr %p, %u pages: " 393 "errno %d\n", (void *) vaddr, cnt, -ret); 394 goto done; 395 } 396 } 397 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { 398 for (; ntids--; tid++) { 399 if (tid == tidcnt) 400 tid = 0; 401 if (!dd->ipath_pageshadow[porttid + tid]) 402 break; 403 } 404 if (ntids < 0) { 405 /* 406 * oops, wrapped all the way through their TIDs, 407 * and didn't have enough free; see comments at 408 * start of routine 409 */ 410 ipath_dbg("Not enough free TIDs for %u pages " 411 "(index %d), failing\n", cnt, i); 412 i--; /* last tidlist[i] not filled in */ 413 ret = -ENOMEM; 414 break; 415 } 416 tidlist[i] = tid + tidoff; 417 ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, " 418 "vaddr %lx\n", i, tid + tidoff, vaddr); 419 /* we "know" system pages and TID pages are same size */ 420 dd->ipath_pageshadow[porttid + tid] = pagep[i]; 421 dd->ipath_physshadow[porttid + tid] = ipath_map_page( 422 dd->pcidev, pagep[i], 0, PAGE_SIZE, 423 PCI_DMA_FROMDEVICE); 424 /* 425 * don't need atomic or it's overhead 426 */ 427 __set_bit(tid, tidmap); 428 physaddr = dd->ipath_physshadow[porttid + tid]; 429 ipath_stats.sps_pagelocks++; 430 ipath_cdbg(VERBOSE, 431 "TID %u, vaddr %lx, physaddr %llx pgp %p\n", 432 tid, vaddr, (unsigned long long) physaddr, 433 pagep[i]); 434 dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED, 435 physaddr); 436 /* 437 * don't check this tid in ipath_portshadow, since we 438 * just filled it in; start with the next one. 439 */ 440 tid++; 441 } 442 443 if (ret) { 444 u32 limit; 445 cleanup: 446 /* jump here if copy out of updated info failed... */ 447 ipath_dbg("After failure (ret=%d), undo %d of %d entries\n", 448 -ret, i, cnt); 449 /* same code that's in ipath_free_tid() */ 450 limit = sizeof(tidmap) * BITS_PER_BYTE; 451 if (limit > tidcnt) 452 /* just in case size changes in future */ 453 limit = tidcnt; 454 tid = find_first_bit((const unsigned long *)tidmap, limit); 455 for (; tid < limit; tid++) { 456 if (!test_bit(tid, tidmap)) 457 continue; 458 if (dd->ipath_pageshadow[porttid + tid]) { 459 ipath_cdbg(VERBOSE, "Freeing TID %u\n", 460 tid); 461 dd->ipath_f_put_tid(dd, &tidbase[tid], 462 RCVHQ_RCV_TYPE_EXPECTED, 463 dd->ipath_tidinvalid); 464 pci_unmap_page(dd->pcidev, 465 dd->ipath_physshadow[porttid + tid], 466 PAGE_SIZE, PCI_DMA_FROMDEVICE); 467 dd->ipath_pageshadow[porttid + tid] = NULL; 468 ipath_stats.sps_pageunlocks++; 469 } 470 } 471 ipath_release_user_pages(pagep, cnt); 472 } else { 473 /* 474 * Copy the updated array, with ipath_tid's filled in, back 475 * to user. Since we did the copy in already, this "should 476 * never fail" If it does, we have to clean up... 477 */ 478 if (copy_to_user((void __user *) 479 (unsigned long) ti->tidlist, 480 tidlist, cnt * sizeof(*tidlist))) { 481 ret = -EFAULT; 482 goto cleanup; 483 } 484 if (copy_to_user((void __user *) (unsigned long) ti->tidmap, 485 tidmap, sizeof tidmap)) { 486 ret = -EFAULT; 487 goto cleanup; 488 } 489 if (tid == tidcnt) 490 tid = 0; 491 if (!pd->port_subport_cnt) 492 pd->port_tidcursor = tid; 493 else 494 tidcursor_fp(fp) = tid; 495 } 496 497done: 498 if (ret) 499 ipath_dbg("Failed to map %u TID pages, failing with %d\n", 500 ti->tidcnt, -ret); 501 return ret; 502} 503 504/** 505 * ipath_tid_free - free a port TID 506 * @pd: the port 507 * @subport: the subport 508 * @ti: the TID info 509 * 510 * right now we are unlocking one page at a time, but since 511 * the intended use of this routine is for a single group of 512 * virtually contiguous pages, that should change to improve 513 * performance. We check that the TID is in range for this port 514 * but otherwise don't check validity; if user has an error and 515 * frees the wrong tid, it's only their own data that can thereby 516 * be corrupted. We do check that the TID was in use, for sanity 517 * We always use our idea of the saved address, not the address that 518 * they pass in to us. 519 */ 520 521static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport, 522 const struct ipath_tid_info *ti) 523{ 524 int ret = 0; 525 u32 tid, porttid, cnt, limit, tidcnt; 526 struct ipath_devdata *dd = pd->port_dd; 527 u64 __iomem *tidbase; 528 unsigned long tidmap[8]; 529 530 if (!dd->ipath_pageshadow) { 531 ret = -ENOMEM; 532 goto done; 533 } 534 535 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, 536 sizeof tidmap)) { 537 ret = -EFAULT; 538 goto done; 539 } 540 541 porttid = pd->port_port * dd->ipath_rcvtidcnt; 542 if (!pd->port_subport_cnt) 543 tidcnt = dd->ipath_rcvtidcnt; 544 else if (!subport) { 545 tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + 546 (dd->ipath_rcvtidcnt % pd->port_subport_cnt); 547 porttid += dd->ipath_rcvtidcnt - tidcnt; 548 } else { 549 tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; 550 porttid += tidcnt * (subport - 1); 551 } 552 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + 553 dd->ipath_rcvtidbase + 554 porttid * sizeof(*tidbase)); 555 556 limit = sizeof(tidmap) * BITS_PER_BYTE; 557 if (limit > tidcnt) 558 /* just in case size changes in future */ 559 limit = tidcnt; 560 tid = find_first_bit(tidmap, limit); 561 ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) " 562 "set is %d, porttid %u\n", pd->port_port, ti->tidcnt, 563 limit, tid, porttid); 564 for (cnt = 0; tid < limit; tid++) { 565 /* 566 * small optimization; if we detect a run of 3 or so without 567 * any set, use find_first_bit again. That's mainly to 568 * accelerate the case where we wrapped, so we have some at 569 * the beginning, and some at the end, and a big gap 570 * in the middle. 571 */ 572 if (!test_bit(tid, tidmap)) 573 continue; 574 cnt++; 575 if (dd->ipath_pageshadow[porttid + tid]) { 576 struct page *p; 577 p = dd->ipath_pageshadow[porttid + tid]; 578 dd->ipath_pageshadow[porttid + tid] = NULL; 579 ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n", 580 pd->port_pid, tid); 581 dd->ipath_f_put_tid(dd, &tidbase[tid], 582 RCVHQ_RCV_TYPE_EXPECTED, 583 dd->ipath_tidinvalid); 584 pci_unmap_page(dd->pcidev, 585 dd->ipath_physshadow[porttid + tid], 586 PAGE_SIZE, PCI_DMA_FROMDEVICE); 587 ipath_release_user_pages(&p, 1); 588 ipath_stats.sps_pageunlocks++; 589 } else 590 ipath_dbg("Unused tid %u, ignoring\n", tid); 591 } 592 if (cnt != ti->tidcnt) 593 ipath_dbg("passed in tidcnt %d, only %d bits set in map\n", 594 ti->tidcnt, cnt); 595done: 596 if (ret) 597 ipath_dbg("Failed to unmap %u TID pages, failing with %d\n", 598 ti->tidcnt, -ret); 599 return ret; 600} 601 602/** 603 * ipath_set_part_key - set a partition key 604 * @pd: the port 605 * @key: the key 606 * 607 * We can have up to 4 active at a time (other than the default, which is 608 * always allowed). This is somewhat tricky, since multiple ports may set 609 * the same key, so we reference count them, and clean up at exit. All 4 610 * partition keys are packed into a single infinipath register. It's an 611 * error for a process to set the same pkey multiple times. We provide no 612 * mechanism to de-allocate a pkey at this time, we may eventually need to 613 * do that. I've used the atomic operations, and no locking, and only make 614 * a single pass through what's available. This should be more than 615 * adequate for some time. I'll think about spinlocks or the like if and as 616 * it's necessary. 617 */ 618static int ipath_set_part_key(struct ipath_portdata *pd, u16 key) 619{ 620 struct ipath_devdata *dd = pd->port_dd; 621 int i, any = 0, pidx = -1; 622 u16 lkey = key & 0x7FFF; 623 int ret; 624 625 if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) { 626 /* nothing to do; this key always valid */ 627 ret = 0; 628 goto bail; 629 } 630 631 ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys " 632 "%hx:%x %hx:%x %hx:%x %hx:%x\n", 633 pd->port_port, key, dd->ipath_pkeys[0], 634 atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1], 635 atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2], 636 atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3], 637 atomic_read(&dd->ipath_pkeyrefs[3])); 638 639 if (!lkey) { 640 ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n", 641 pd->port_port); 642 ret = -EINVAL; 643 goto bail; 644 } 645 646 /* 647 * Set the full membership bit, because it has to be 648 * set in the register or the packet, and it seems 649 * cleaner to set in the register than to force all 650 * callers to set it. (see bug 4331) 651 */ 652 key |= 0x8000; 653 654 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { 655 if (!pd->port_pkeys[i] && pidx == -1) 656 pidx = i; 657 if (pd->port_pkeys[i] == key) { 658 ipath_cdbg(VERBOSE, "p%u tries to set same pkey " 659 "(%x) more than once\n", 660 pd->port_port, key); 661 ret = -EEXIST; 662 goto bail; 663 } 664 } 665 if (pidx == -1) { 666 ipath_dbg("All pkeys for port %u already in use, " 667 "can't set %x\n", pd->port_port, key); 668 ret = -EBUSY; 669 goto bail; 670 } 671 for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { 672 if (!dd->ipath_pkeys[i]) { 673 any++; 674 continue; 675 } 676 if (dd->ipath_pkeys[i] == key) { 677 atomic_t *pkrefs = &dd->ipath_pkeyrefs[i]; 678 679 if (atomic_inc_return(pkrefs) > 1) { 680 pd->port_pkeys[pidx] = key; 681 ipath_cdbg(VERBOSE, "p%u set key %x " 682 "matches #%d, count now %d\n", 683 pd->port_port, key, i, 684 atomic_read(pkrefs)); 685 ret = 0; 686 goto bail; 687 } else { 688 /* 689 * lost race, decrement count, catch below 690 */ 691 atomic_dec(pkrefs); 692 ipath_cdbg(VERBOSE, "Lost race, count was " 693 "0, after dec, it's %d\n", 694 atomic_read(pkrefs)); 695 any++; 696 } 697 } 698 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { 699 /* 700 * It makes no sense to have both the limited and 701 * full membership PKEY set at the same time since 702 * the unlimited one will disable the limited one. 703 */ 704 ret = -EEXIST; 705 goto bail; 706 } 707 } 708 if (!any) { 709 ipath_dbg("port %u, all pkeys already in use, " 710 "can't set %x\n", pd->port_port, key); 711 ret = -EBUSY; 712 goto bail; 713 } 714 for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { 715 if (!dd->ipath_pkeys[i] && 716 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { 717 u64 pkey; 718 719 /* for ipathstats, etc. */ 720 ipath_stats.sps_pkeys[i] = lkey; 721 pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key; 722 pkey = 723 (u64) dd->ipath_pkeys[0] | 724 ((u64) dd->ipath_pkeys[1] << 16) | 725 ((u64) dd->ipath_pkeys[2] << 32) | 726 ((u64) dd->ipath_pkeys[3] << 48); 727 ipath_cdbg(PROC, "p%u set key %x in #%d, " 728 "portidx %d, new pkey reg %llx\n", 729 pd->port_port, key, i, pidx, 730 (unsigned long long) pkey); 731 ipath_write_kreg( 732 dd, dd->ipath_kregs->kr_partitionkey, pkey); 733 734 ret = 0; 735 goto bail; 736 } 737 } 738 ipath_dbg("port %u, all pkeys already in use 2nd pass, " 739 "can't set %x\n", pd->port_port, key); 740 ret = -EBUSY; 741 742bail: 743 return ret; 744} 745 746/** 747 * ipath_manage_rcvq - manage a port's receive queue 748 * @pd: the port 749 * @subport: the subport 750 * @start_stop: action to carry out 751 * 752 * start_stop == 0 disables receive on the port, for use in queue 753 * overflow conditions. start_stop==1 re-enables, to be used to 754 * re-init the software copy of the head register 755 */ 756static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, 757 int start_stop) 758{ 759 struct ipath_devdata *dd = pd->port_dd; 760 761 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", 762 start_stop ? "en" : "dis", dd->ipath_unit, 763 pd->port_port, subport); 764 if (subport) 765 goto bail; 766 /* atomically clear receive enable port. */ 767 if (start_stop) { 768 /* 769 * On enable, force in-memory copy of the tail register to 770 * 0, so that protocol code doesn't have to worry about 771 * whether or not the chip has yet updated the in-memory 772 * copy or not on return from the system call. The chip 773 * always resets it's tail register back to 0 on a 774 * transition from disabled to enabled. This could cause a 775 * problem if software was broken, and did the enable w/o 776 * the disable, but eventually the in-memory copy will be 777 * updated and correct itself, even in the face of software 778 * bugs. 779 */ 780 if (pd->port_rcvhdrtail_kvaddr) 781 ipath_clear_rcvhdrtail(pd); 782 set_bit(dd->ipath_r_portenable_shift + pd->port_port, 783 &dd->ipath_rcvctrl); 784 } else 785 clear_bit(dd->ipath_r_portenable_shift + pd->port_port, 786 &dd->ipath_rcvctrl); 787 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 788 dd->ipath_rcvctrl); 789 /* now be sure chip saw it before we return */ 790 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 791 if (start_stop) { 792 /* 793 * And try to be sure that tail reg update has happened too. 794 * This should in theory interlock with the RXE changes to 795 * the tail register. Don't assign it to the tail register 796 * in memory copy, since we could overwrite an update by the 797 * chip if we did. 798 */ 799 ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 800 } 801 /* always; new head should be equal to new tail; see above */ 802bail: 803 return 0; 804} 805 806static void ipath_clean_part_key(struct ipath_portdata *pd, 807 struct ipath_devdata *dd) 808{ 809 int i, j, pchanged = 0; 810 u64 oldpkey; 811 812 /* for debugging only */ 813 oldpkey = (u64) dd->ipath_pkeys[0] | 814 ((u64) dd->ipath_pkeys[1] << 16) | 815 ((u64) dd->ipath_pkeys[2] << 32) | 816 ((u64) dd->ipath_pkeys[3] << 48); 817 818 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { 819 if (!pd->port_pkeys[i]) 820 continue; 821 ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i, 822 pd->port_pkeys[i]); 823 for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) { 824 /* check for match independent of the global bit */ 825 if ((dd->ipath_pkeys[j] & 0x7fff) != 826 (pd->port_pkeys[i] & 0x7fff)) 827 continue; 828 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) { 829 ipath_cdbg(VERBOSE, "p%u clear key " 830 "%x matches #%d\n", 831 pd->port_port, 832 pd->port_pkeys[i], j); 833 ipath_stats.sps_pkeys[j] = 834 dd->ipath_pkeys[j] = 0; 835 pchanged++; 836 } 837 else ipath_cdbg( 838 VERBOSE, "p%u key %x matches #%d, " 839 "but ref still %d\n", pd->port_port, 840 pd->port_pkeys[i], j, 841 atomic_read(&dd->ipath_pkeyrefs[j])); 842 break; 843 } 844 pd->port_pkeys[i] = 0; 845 } 846 if (pchanged) { 847 u64 pkey = (u64) dd->ipath_pkeys[0] | 848 ((u64) dd->ipath_pkeys[1] << 16) | 849 ((u64) dd->ipath_pkeys[2] << 32) | 850 ((u64) dd->ipath_pkeys[3] << 48); 851 ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, " 852 "new pkey reg %llx\n", pd->port_port, 853 (unsigned long long) oldpkey, 854 (unsigned long long) pkey); 855 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, 856 pkey); 857 } 858} 859 860/* 861 * Initialize the port data with the receive buffer sizes 862 * so this can be done while the master port is locked. 863 * Otherwise, there is a race with a slave opening the port 864 * and seeing these fields uninitialized. 865 */ 866static void init_user_egr_sizes(struct ipath_portdata *pd) 867{ 868 struct ipath_devdata *dd = pd->port_dd; 869 unsigned egrperchunk, egrcnt, size; 870 871 /* 872 * to avoid wasting a lot of memory, we allocate 32KB chunks of 873 * physically contiguous memory, advance through it until used up 874 * and then allocate more. Of course, we need memory to store those 875 * extra pointers, now. Started out with 256KB, but under heavy 876 * memory pressure (creating large files and then copying them over 877 * NFS while doing lots of MPI jobs), we hit some allocation 878 * failures, even though we can sleep... (2.6.10) Still get 879 * failures at 64K. 32K is the lowest we can go without wasting 880 * additional memory. 881 */ 882 size = 0x8000; 883 egrperchunk = size / dd->ipath_rcvegrbufsize; 884 egrcnt = dd->ipath_rcvegrcnt; 885 pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk; 886 pd->port_rcvegrbufs_perchunk = egrperchunk; 887 pd->port_rcvegrbuf_size = size; 888} 889 890/** 891 * ipath_create_user_egr - allocate eager TID buffers 892 * @pd: the port to allocate TID buffers for 893 * 894 * This routine is now quite different for user and kernel, because 895 * the kernel uses skb's, for the accelerated network performance 896 * This is the user port version 897 * 898 * Allocate the eager TID buffers and program them into infinipath 899 * They are no longer completely contiguous, we do multiple allocation 900 * calls. 901 */ 902static int ipath_create_user_egr(struct ipath_portdata *pd) 903{ 904 struct ipath_devdata *dd = pd->port_dd; 905 unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; 906 size_t size; 907 int ret; 908 gfp_t gfp_flags; 909 910 /* 911 * GFP_USER, but without GFP_FS, so buffer cache can be 912 * coalesced (we hope); otherwise, even at order 4, 913 * heavy filesystem activity makes these fail, and we can 914 * use compound pages. 915 */ 916 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; 917 918 egrcnt = dd->ipath_rcvegrcnt; 919 /* TID number offset for this port */ 920 egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt; 921 egrsize = dd->ipath_rcvegrbufsize; 922 ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid " 923 "offset %x, egrsize %u\n", egrcnt, egroff, egrsize); 924 925 chunk = pd->port_rcvegrbuf_chunks; 926 egrperchunk = pd->port_rcvegrbufs_perchunk; 927 size = pd->port_rcvegrbuf_size; 928 pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]), 929 GFP_KERNEL); 930 if (!pd->port_rcvegrbuf) { 931 ret = -ENOMEM; 932 goto bail; 933 } 934 pd->port_rcvegrbuf_phys = 935 kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]), 936 GFP_KERNEL); 937 if (!pd->port_rcvegrbuf_phys) { 938 ret = -ENOMEM; 939 goto bail_rcvegrbuf; 940 } 941 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) { 942 943 pd->port_rcvegrbuf[e] = dma_alloc_coherent( 944 &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e], 945 gfp_flags); 946 947 if (!pd->port_rcvegrbuf[e]) { 948 ret = -ENOMEM; 949 goto bail_rcvegrbuf_phys; 950 } 951 } 952 953 pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0]; 954 955 for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) { 956 dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk]; 957 unsigned i; 958 959 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { 960 dd->ipath_f_put_tid(dd, e + egroff + 961 (u64 __iomem *) 962 ((char __iomem *) 963 dd->ipath_kregbase + 964 dd->ipath_rcvegrbase), 965 RCVHQ_RCV_TYPE_EAGER, pa); 966 pa += egrsize; 967 } 968 cond_resched(); /* don't hog the cpu */ 969 } 970 971 ret = 0; 972 goto bail; 973 974bail_rcvegrbuf_phys: 975 for (e = 0; e < pd->port_rcvegrbuf_chunks && 976 pd->port_rcvegrbuf[e]; e++) { 977 dma_free_coherent(&dd->pcidev->dev, size, 978 pd->port_rcvegrbuf[e], 979 pd->port_rcvegrbuf_phys[e]); 980 981 } 982 kfree(pd->port_rcvegrbuf_phys); 983 pd->port_rcvegrbuf_phys = NULL; 984bail_rcvegrbuf: 985 kfree(pd->port_rcvegrbuf); 986 pd->port_rcvegrbuf = NULL; 987bail: 988 return ret; 989} 990 991 992/* common code for the mappings on dma_alloc_coherent mem */ 993static int ipath_mmap_mem(struct vm_area_struct *vma, 994 struct ipath_portdata *pd, unsigned len, int write_ok, 995 void *kvaddr, char *what) 996{ 997 struct ipath_devdata *dd = pd->port_dd; 998 unsigned long pfn; 999 int ret; 1000 1001 if ((vma->vm_end - vma->vm_start) > len) { 1002 dev_info(&dd->pcidev->dev, 1003 "FAIL on %s: len %lx > %x\n", what, 1004 vma->vm_end - vma->vm_start, len); 1005 ret = -EFAULT; 1006 goto bail; 1007 } 1008 1009 if (!write_ok) { 1010 if (vma->vm_flags & VM_WRITE) { 1011 dev_info(&dd->pcidev->dev, 1012 "%s must be mapped readonly\n", what); 1013 ret = -EPERM; 1014 goto bail; 1015 } 1016 1017 /* don't allow them to later change with mprotect */ 1018 vma->vm_flags &= ~VM_MAYWRITE; 1019 } 1020 1021 pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT; 1022 ret = remap_pfn_range(vma, vma->vm_start, pfn, 1023 len, vma->vm_page_prot); 1024 if (ret) 1025 dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x " 1026 "bytes r%c failed: %d\n", what, pd->port_port, 1027 pfn, len, write_ok?'w':'o', ret); 1028 else 1029 ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes " 1030 "r%c\n", what, pd->port_port, pfn, len, 1031 write_ok?'w':'o'); 1032bail: 1033 return ret; 1034} 1035 1036static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd, 1037 u64 ureg) 1038{ 1039 unsigned long phys; 1040 int ret; 1041 1042 /* 1043 * This is real hardware, so use io_remap. This is the mechanism 1044 * for the user process to update the head registers for their port 1045 * in the chip. 1046 */ 1047 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { 1048 dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen " 1049 "%lx > PAGE\n", vma->vm_end - vma->vm_start); 1050 ret = -EFAULT; 1051 } else { 1052 phys = dd->ipath_physaddr + ureg; 1053 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1054 1055 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 1056 ret = io_remap_pfn_range(vma, vma->vm_start, 1057 phys >> PAGE_SHIFT, 1058 vma->vm_end - vma->vm_start, 1059 vma->vm_page_prot); 1060 } 1061 return ret; 1062} 1063 1064static int mmap_piobufs(struct vm_area_struct *vma, 1065 struct ipath_devdata *dd, 1066 struct ipath_portdata *pd, 1067 unsigned piobufs, unsigned piocnt) 1068{ 1069 unsigned long phys; 1070 int ret; 1071 1072 /* 1073 * When we map the PIO buffers in the chip, we want to map them as 1074 * writeonly, no read possible. This prevents access to previous 1075 * process data, and catches users who might try to read the i/o 1076 * space due to a bug. 1077 */ 1078 if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) { 1079 dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " 1080 "reqlen %lx > PAGE\n", 1081 vma->vm_end - vma->vm_start); 1082 ret = -EINVAL; 1083 goto bail; 1084 } 1085 1086 phys = dd->ipath_physaddr + piobufs; 1087 1088#if defined(__powerpc__) 1089 /* There isn't a generic way to specify writethrough mappings */ 1090 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; 1091 pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; 1092 pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED; 1093#endif 1094 1095 /* 1096 * don't allow them to later change to readable with mprotect (for when 1097 * not initially mapped readable, as is normally the case) 1098 */ 1099 vma->vm_flags &= ~VM_MAYREAD; 1100 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 1101 1102 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, 1103 vma->vm_end - vma->vm_start, 1104 vma->vm_page_prot); 1105bail: 1106 return ret; 1107} 1108 1109static int mmap_rcvegrbufs(struct vm_area_struct *vma, 1110 struct ipath_portdata *pd) 1111{ 1112 struct ipath_devdata *dd = pd->port_dd; 1113 unsigned long start, size; 1114 size_t total_size, i; 1115 unsigned long pfn; 1116 int ret; 1117 1118 size = pd->port_rcvegrbuf_size; 1119 total_size = pd->port_rcvegrbuf_chunks * size; 1120 if ((vma->vm_end - vma->vm_start) > total_size) { 1121 dev_info(&dd->pcidev->dev, "FAIL on egr bufs: " 1122 "reqlen %lx > actual %lx\n", 1123 vma->vm_end - vma->vm_start, 1124 (unsigned long) total_size); 1125 ret = -EINVAL; 1126 goto bail; 1127 } 1128 1129 if (vma->vm_flags & VM_WRITE) { 1130 dev_info(&dd->pcidev->dev, "Can't map eager buffers as " 1131 "writable (flags=%lx)\n", vma->vm_flags); 1132 ret = -EPERM; 1133 goto bail; 1134 } 1135 /* don't allow them to later change to writeable with mprotect */ 1136 vma->vm_flags &= ~VM_MAYWRITE; 1137 1138 start = vma->vm_start; 1139 1140 for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { 1141 pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT; 1142 ret = remap_pfn_range(vma, start, pfn, size, 1143 vma->vm_page_prot); 1144 if (ret < 0) 1145 goto bail; 1146 } 1147 ret = 0; 1148 1149bail: 1150 return ret; 1151} 1152 1153/* 1154 * ipath_file_vma_fault - handle a VMA page fault. 1155 */ 1156static int ipath_file_vma_fault(struct vm_area_struct *vma, 1157 struct vm_fault *vmf) 1158{ 1159 struct page *page; 1160 1161 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); 1162 if (!page) 1163 return VM_FAULT_SIGBUS; 1164 get_page(page); 1165 vmf->page = page; 1166 1167 return 0; 1168} 1169 1170static struct vm_operations_struct ipath_file_vm_ops = { 1171 .fault = ipath_file_vma_fault, 1172}; 1173 1174static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, 1175 struct ipath_portdata *pd, unsigned subport) 1176{ 1177 unsigned long len; 1178 struct ipath_devdata *dd; 1179 void *addr; 1180 size_t size; 1181 int ret = 0; 1182 1183 /* If the port is not shared, all addresses should be physical */ 1184 if (!pd->port_subport_cnt) 1185 goto bail; 1186 1187 dd = pd->port_dd; 1188 size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; 1189 1190 /* 1191 * Each process has all the subport uregbase, rcvhdrq, and 1192 * rcvegrbufs mmapped - as an array for all the processes, 1193 * and also separately for this process. 1194 */ 1195 if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) { 1196 addr = pd->subport_uregbase; 1197 size = PAGE_SIZE * pd->port_subport_cnt; 1198 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) { 1199 addr = pd->subport_rcvhdr_base; 1200 size = pd->port_rcvhdrq_size * pd->port_subport_cnt; 1201 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) { 1202 addr = pd->subport_rcvegrbuf; 1203 size *= pd->port_subport_cnt; 1204 } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase + 1205 PAGE_SIZE * subport)) { 1206 addr = pd->subport_uregbase + PAGE_SIZE * subport; 1207 size = PAGE_SIZE; 1208 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base + 1209 pd->port_rcvhdrq_size * subport)) { 1210 addr = pd->subport_rcvhdr_base + 1211 pd->port_rcvhdrq_size * subport; 1212 size = pd->port_rcvhdrq_size; 1213 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf + 1214 size * subport)) { 1215 addr = pd->subport_rcvegrbuf + size * subport; 1216 /* rcvegrbufs are read-only on the slave */ 1217 if (vma->vm_flags & VM_WRITE) { 1218 dev_info(&dd->pcidev->dev, 1219 "Can't map eager buffers as " 1220 "writable (flags=%lx)\n", vma->vm_flags); 1221 ret = -EPERM; 1222 goto bail; 1223 } 1224 /* 1225 * Don't allow permission to later change to writeable 1226 * with mprotect. 1227 */ 1228 vma->vm_flags &= ~VM_MAYWRITE; 1229 } else { 1230 goto bail; 1231 } 1232 len = vma->vm_end - vma->vm_start; 1233 if (len > size) { 1234 ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size); 1235 ret = -EINVAL; 1236 goto bail; 1237 } 1238 1239 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; 1240 vma->vm_ops = &ipath_file_vm_ops; 1241 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 1242 ret = 1; 1243 1244bail: 1245 return ret; 1246} 1247 1248/** 1249 * ipath_mmap - mmap various structures into user space 1250 * @fp: the file pointer 1251 * @vma: the VM area 1252 * 1253 * We use this to have a shared buffer between the kernel and the user code 1254 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio 1255 * buffers in the chip. We have the open and close entries so we can bump 1256 * the ref count and keep the driver from being unloaded while still mapped. 1257 */ 1258static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) 1259{ 1260 struct ipath_portdata *pd; 1261 struct ipath_devdata *dd; 1262 u64 pgaddr, ureg; 1263 unsigned piobufs, piocnt; 1264 int ret; 1265 1266 pd = port_fp(fp); 1267 if (!pd) { 1268 ret = -EINVAL; 1269 goto bail; 1270 } 1271 dd = pd->port_dd; 1272 1273 /* 1274 * This is the ipath_do_user_init() code, mapping the shared buffers 1275 * into the user process. The address referred to by vm_pgoff is the 1276 * file offset passed via mmap(). For shared ports, this is the 1277 * kernel vmalloc() address of the pages to share with the master. 1278 * For non-shared or master ports, this is a physical address. 1279 * We only do one mmap for each space mapped. 1280 */ 1281 pgaddr = vma->vm_pgoff << PAGE_SHIFT; 1282 1283 /* 1284 * Check for 0 in case one of the allocations failed, but user 1285 * called mmap anyway. 1286 */ 1287 if (!pgaddr) { 1288 ret = -EINVAL; 1289 goto bail; 1290 } 1291 1292 ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n", 1293 (unsigned long long) pgaddr, vma->vm_start, 1294 vma->vm_end - vma->vm_start, dd->ipath_unit, 1295 pd->port_port, subport_fp(fp)); 1296 1297 /* 1298 * Physical addresses must fit in 40 bits for our hardware. 1299 * Check for kernel virtual addresses first, anything else must 1300 * match a HW or memory address. 1301 */ 1302 ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); 1303 if (ret) { 1304 if (ret > 0) 1305 ret = 0; 1306 goto bail; 1307 } 1308 1309 ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; 1310 if (!pd->port_subport_cnt) { 1311 /* port is not shared */ 1312 piocnt = dd->ipath_pbufsport; 1313 piobufs = pd->port_piobufs; 1314 } else if (!subport_fp(fp)) { 1315 /* caller is the master */ 1316 piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + 1317 (dd->ipath_pbufsport % pd->port_subport_cnt); 1318 piobufs = pd->port_piobufs + 1319 dd->ipath_palign * (dd->ipath_pbufsport - piocnt); 1320 } else { 1321 unsigned slave = subport_fp(fp) - 1; 1322 1323 /* caller is a slave */ 1324 piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; 1325 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; 1326 } 1327 1328 if (pgaddr == ureg) 1329 ret = mmap_ureg(vma, dd, ureg); 1330 else if (pgaddr == piobufs) 1331 ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt); 1332 else if (pgaddr == dd->ipath_pioavailregs_phys) 1333 /* in-memory copy of pioavail registers */ 1334 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, 1335 (void *) dd->ipath_pioavailregs_dma, 1336 "pioavail registers"); 1337 else if (pgaddr == pd->port_rcvegr_phys) 1338 ret = mmap_rcvegrbufs(vma, pd); 1339 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) 1340 /* 1341 * The rcvhdrq itself; readonly except on HT (so have 1342 * to allow writable mapping), multiple pages, contiguous 1343 * from an i/o perspective. 1344 */ 1345 ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1, 1346 pd->port_rcvhdrq, 1347 "rcvhdrq"); 1348 else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys) 1349 /* in-memory copy of rcvhdrq tail register */ 1350 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, 1351 pd->port_rcvhdrtail_kvaddr, 1352 "rcvhdrq tail"); 1353 else 1354 ret = -EINVAL; 1355 1356 vma->vm_private_data = NULL; 1357 1358 if (ret < 0) 1359 dev_info(&dd->pcidev->dev, 1360 "Failure %d on off %llx len %lx\n", 1361 -ret, (unsigned long long)pgaddr, 1362 vma->vm_end - vma->vm_start); 1363bail: 1364 return ret; 1365} 1366 1367static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd) 1368{ 1369 unsigned pollflag = 0; 1370 1371 if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) && 1372 pd->port_hdrqfull != pd->port_hdrqfull_poll) { 1373 pollflag |= POLLIN | POLLRDNORM; 1374 pd->port_hdrqfull_poll = pd->port_hdrqfull; 1375 } 1376 1377 return pollflag; 1378} 1379 1380static unsigned int ipath_poll_urgent(struct ipath_portdata *pd, 1381 struct file *fp, 1382 struct poll_table_struct *pt) 1383{ 1384 unsigned pollflag = 0; 1385 struct ipath_devdata *dd; 1386 1387 dd = pd->port_dd; 1388 1389 /* variable access in ipath_poll_hdrqfull() needs this */ 1390 rmb(); 1391 pollflag = ipath_poll_hdrqfull(pd); 1392 1393 if (pd->port_urgent != pd->port_urgent_poll) { 1394 pollflag |= POLLIN | POLLRDNORM; 1395 pd->port_urgent_poll = pd->port_urgent; 1396 } 1397 1398 if (!pollflag) { 1399 /* this saves a spin_lock/unlock in interrupt handler... */ 1400 set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag); 1401 /* flush waiting flag so don't miss an event... */ 1402 wmb(); 1403 poll_wait(fp, &pd->port_wait, pt); 1404 } 1405 1406 return pollflag; 1407} 1408 1409static unsigned int ipath_poll_next(struct ipath_portdata *pd, 1410 struct file *fp, 1411 struct poll_table_struct *pt) 1412{ 1413 u32 head; 1414 u32 tail; 1415 unsigned pollflag = 0; 1416 struct ipath_devdata *dd; 1417 1418 dd = pd->port_dd; 1419 1420 /* variable access in ipath_poll_hdrqfull() needs this */ 1421 rmb(); 1422 pollflag = ipath_poll_hdrqfull(pd); 1423 1424 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); 1425 if (pd->port_rcvhdrtail_kvaddr) 1426 tail = ipath_get_rcvhdrtail(pd); 1427 else 1428 tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 1429 1430 if (head != tail) 1431 pollflag |= POLLIN | POLLRDNORM; 1432 else { 1433 /* this saves a spin_lock/unlock in interrupt handler */ 1434 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); 1435 /* flush waiting flag so we don't miss an event */ 1436 wmb(); 1437 1438 set_bit(pd->port_port + dd->ipath_r_intravail_shift, 1439 &dd->ipath_rcvctrl); 1440 1441 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1442 dd->ipath_rcvctrl); 1443 1444 if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ 1445 ipath_write_ureg(dd, ur_rcvhdrhead, 1446 dd->ipath_rhdrhead_intr_off | head, 1447 pd->port_port); 1448 1449 poll_wait(fp, &pd->port_wait, pt); 1450 } 1451 1452 return pollflag; 1453} 1454 1455static unsigned int ipath_poll(struct file *fp, 1456 struct poll_table_struct *pt) 1457{ 1458 struct ipath_portdata *pd; 1459 unsigned pollflag; 1460 1461 pd = port_fp(fp); 1462 if (!pd) 1463 pollflag = 0; 1464 else if (pd->poll_type & IPATH_POLL_TYPE_URGENT) 1465 pollflag = ipath_poll_urgent(pd, fp, pt); 1466 else 1467 pollflag = ipath_poll_next(pd, fp, pt); 1468 1469 return pollflag; 1470} 1471 1472static int ipath_supports_subports(int user_swmajor, int user_swminor) 1473{ 1474 /* no subport implementation prior to software version 1.3 */ 1475 return (user_swmajor > 1) || (user_swminor >= 3); 1476} 1477 1478static int ipath_compatible_subports(int user_swmajor, int user_swminor) 1479{ 1480 /* this code is written long-hand for clarity */ 1481 if (IPATH_USER_SWMAJOR != user_swmajor) { 1482 /* no promise of compatibility if major mismatch */ 1483 return 0; 1484 } 1485 if (IPATH_USER_SWMAJOR == 1) { 1486 switch (IPATH_USER_SWMINOR) { 1487 case 0: 1488 case 1: 1489 case 2: 1490 /* no subport implementation so cannot be compatible */ 1491 return 0; 1492 case 3: 1493 /* 3 is only compatible with itself */ 1494 return user_swminor == 3; 1495 default: 1496 /* >= 4 are compatible (or are expected to be) */ 1497 return user_swminor >= 4; 1498 } 1499 } 1500 /* make no promises yet for future major versions */ 1501 return 0; 1502} 1503 1504static int init_subports(struct ipath_devdata *dd, 1505 struct ipath_portdata *pd, 1506 const struct ipath_user_info *uinfo) 1507{ 1508 int ret = 0; 1509 unsigned num_subports; 1510 size_t size; 1511 1512 /* 1513 * If the user is requesting zero subports, 1514 * skip the subport allocation. 1515 */ 1516 if (uinfo->spu_subport_cnt <= 0) 1517 goto bail; 1518 1519 /* Self-consistency check for ipath_compatible_subports() */ 1520 if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) && 1521 !ipath_compatible_subports(IPATH_USER_SWMAJOR, 1522 IPATH_USER_SWMINOR)) { 1523 dev_info(&dd->pcidev->dev, 1524 "Inconsistent ipath_compatible_subports()\n"); 1525 goto bail; 1526 } 1527 1528 /* Check for subport compatibility */ 1529 if (!ipath_compatible_subports(uinfo->spu_userversion >> 16, 1530 uinfo->spu_userversion & 0xffff)) { 1531 dev_info(&dd->pcidev->dev, 1532 "Mismatched user version (%d.%d) and driver " 1533 "version (%d.%d) while port sharing. Ensure " 1534 "that driver and library are from the same " 1535 "release.\n", 1536 (int) (uinfo->spu_userversion >> 16), 1537 (int) (uinfo->spu_userversion & 0xffff), 1538 IPATH_USER_SWMAJOR, 1539 IPATH_USER_SWMINOR); 1540 goto bail; 1541 } 1542 if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) { 1543 ret = -EINVAL; 1544 goto bail; 1545 } 1546 1547 num_subports = uinfo->spu_subport_cnt; 1548 pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports); 1549 if (!pd->subport_uregbase) { 1550 ret = -ENOMEM; 1551 goto bail; 1552 } 1553 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ 1554 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * 1555 sizeof(u32), PAGE_SIZE) * num_subports; 1556 pd->subport_rcvhdr_base = vmalloc(size); 1557 if (!pd->subport_rcvhdr_base) { 1558 ret = -ENOMEM; 1559 goto bail_ureg; 1560 } 1561 1562 pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * 1563 pd->port_rcvegrbuf_size * 1564 num_subports); 1565 if (!pd->subport_rcvegrbuf) { 1566 ret = -ENOMEM; 1567 goto bail_rhdr; 1568 } 1569 1570 pd->port_subport_cnt = uinfo->spu_subport_cnt; 1571 pd->port_subport_id = uinfo->spu_subport_id; 1572 pd->active_slaves = 1; 1573 set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); 1574 memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports); 1575 memset(pd->subport_rcvhdr_base, 0, size); 1576 memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks * 1577 pd->port_rcvegrbuf_size * 1578 num_subports); 1579 goto bail; 1580 1581bail_rhdr: 1582 vfree(pd->subport_rcvhdr_base); 1583bail_ureg: 1584 vfree(pd->subport_uregbase); 1585 pd->subport_uregbase = NULL; 1586bail: 1587 return ret; 1588} 1589 1590static int try_alloc_port(struct ipath_devdata *dd, int port, 1591 struct file *fp, 1592 const struct ipath_user_info *uinfo) 1593{ 1594 struct ipath_portdata *pd; 1595 int ret; 1596 1597 if (!(pd = dd->ipath_pd[port])) { 1598 void *ptmp; 1599 1600 pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL); 1601 1602 /* 1603 * Allocate memory for use in ipath_tid_update() just once 1604 * at open, not per call. Reduces cost of expected send 1605 * setup. 1606 */ 1607 ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) + 1608 dd->ipath_rcvtidcnt * sizeof(struct page **), 1609 GFP_KERNEL); 1610 if (!pd || !ptmp) { 1611 ipath_dev_err(dd, "Unable to allocate portdata " 1612 "memory, failing open\n"); 1613 ret = -ENOMEM; 1614 kfree(pd); 1615 kfree(ptmp); 1616 goto bail; 1617 } 1618 dd->ipath_pd[port] = pd; 1619 dd->ipath_pd[port]->port_port = port; 1620 dd->ipath_pd[port]->port_dd = dd; 1621 dd->ipath_pd[port]->port_tid_pg_list = ptmp; 1622 init_waitqueue_head(&dd->ipath_pd[port]->port_wait); 1623 } 1624 if (!pd->port_cnt) { 1625 pd->userversion = uinfo->spu_userversion; 1626 init_user_egr_sizes(pd); 1627 if ((ret = init_subports(dd, pd, uinfo)) != 0) 1628 goto bail; 1629 ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n", 1630 current->comm, current->pid, dd->ipath_unit, 1631 port); 1632 pd->port_cnt = 1; 1633 port_fp(fp) = pd; 1634 pd->port_pid = current->pid; 1635 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); 1636 ipath_chg_pioavailkernel(dd, 1637 dd->ipath_pbufsport * (pd->port_port - 1), 1638 dd->ipath_pbufsport, 0); 1639 ipath_stats.sps_ports++; 1640 ret = 0; 1641 } else 1642 ret = -EBUSY; 1643 1644bail: 1645 return ret; 1646} 1647 1648static inline int usable(struct ipath_devdata *dd) 1649{ 1650 return dd && 1651 (dd->ipath_flags & IPATH_PRESENT) && 1652 dd->ipath_kregbase && 1653 dd->ipath_lid && 1654 !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED 1655 | IPATH_LINKUNK)); 1656} 1657 1658static int find_free_port(int unit, struct file *fp, 1659 const struct ipath_user_info *uinfo) 1660{ 1661 struct ipath_devdata *dd = ipath_lookup(unit); 1662 int ret, i; 1663 1664 if (!dd) { 1665 ret = -ENODEV; 1666 goto bail; 1667 } 1668 1669 if (!usable(dd)) { 1670 ret = -ENETDOWN; 1671 goto bail; 1672 } 1673 1674 for (i = 1; i < dd->ipath_cfgports; i++) { 1675 ret = try_alloc_port(dd, i, fp, uinfo); 1676 if (ret != -EBUSY) 1677 goto bail; 1678 } 1679 ret = -EBUSY; 1680 1681bail: 1682 return ret; 1683} 1684 1685static int find_best_unit(struct file *fp, 1686 const struct ipath_user_info *uinfo) 1687{ 1688 int ret = 0, i, prefunit = -1, devmax; 1689 int maxofallports, npresent, nup; 1690 int ndev; 1691 1692 devmax = ipath_count_units(&npresent, &nup, &maxofallports); 1693 1694 /* 1695 * This code is present to allow a knowledgeable person to 1696 * specify the layout of processes to processors before opening 1697 * this driver, and then we'll assign the process to the "closest" 1698 * InfiniPath chip to that processor (we assume reasonable connectivity, 1699 * for now). This code assumes that if affinity has been set 1700 * before this point, that at most one cpu is set; for now this 1701 * is reasonable. I check for both cpus_empty() and cpus_full(), 1702 * in case some kernel variant sets none of the bits when no 1703 * affinity is set. 2.6.11 and 12 kernels have all present 1704 * cpus set. Some day we'll have to fix it up further to handle 1705 * a cpu subset. This algorithm fails for two HT chips connected 1706 * in tunnel fashion. Eventually this needs real topology 1707 * information. There may be some issues with dual core numbering 1708 * as well. This needs more work prior to release. 1709 */ 1710 if (!cpus_empty(current->cpus_allowed) && 1711 !cpus_full(current->cpus_allowed)) { 1712 int ncpus = num_online_cpus(), curcpu = -1, nset = 0; 1713 for (i = 0; i < ncpus; i++) 1714 if (cpu_isset(i, current->cpus_allowed)) { 1715 ipath_cdbg(PROC, "%s[%u] affinity set for " 1716 "cpu %d/%d\n", current->comm, 1717 current->pid, i, ncpus); 1718 curcpu = i; 1719 nset++; 1720 } 1721 if (curcpu != -1 && nset != ncpus) { 1722 if (npresent) { 1723 prefunit = curcpu / (ncpus / npresent); 1724 ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, " 1725 "%d cpus/chip, select unit %d\n", 1726 current->comm, current->pid, 1727 npresent, ncpus, ncpus / npresent, 1728 prefunit); 1729 } 1730 } 1731 } 1732 1733 /* 1734 * user ports start at 1, kernel port is 0 1735 * For now, we do round-robin access across all chips 1736 */ 1737 1738 if (prefunit != -1) 1739 devmax = prefunit + 1; 1740recheck: 1741 for (i = 1; i < maxofallports; i++) { 1742 for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax; 1743 ndev++) { 1744 struct ipath_devdata *dd = ipath_lookup(ndev); 1745 1746 if (!usable(dd)) 1747 continue; /* can't use this unit */ 1748 if (i >= dd->ipath_cfgports) 1749 /* 1750 * Maxed out on users of this unit. Try 1751 * next. 1752 */ 1753 continue; 1754 ret = try_alloc_port(dd, i, fp, uinfo); 1755 if (!ret) 1756 goto done; 1757 } 1758 } 1759 1760 if (npresent) { 1761 if (nup == 0) { 1762 ret = -ENETDOWN; 1763 ipath_dbg("No ports available (none initialized " 1764 "and ready)\n"); 1765 } else { 1766 if (prefunit > 0) { 1767 /* if started above 0, retry from 0 */ 1768 ipath_cdbg(PROC, 1769 "%s[%u] no ports on prefunit " 1770 "%d, clear and re-check\n", 1771 current->comm, current->pid, 1772 prefunit); 1773 devmax = ipath_count_units(NULL, NULL, 1774 NULL); 1775 prefunit = -1; 1776 goto recheck; 1777 } 1778 ret = -EBUSY; 1779 ipath_dbg("No ports available\n"); 1780 } 1781 } else { 1782 ret = -ENXIO; 1783 ipath_dbg("No boards found\n"); 1784 } 1785 1786done: 1787 return ret; 1788} 1789 1790static int find_shared_port(struct file *fp, 1791 const struct ipath_user_info *uinfo) 1792{ 1793 int devmax, ndev, i; 1794 int ret = 0; 1795 1796 devmax = ipath_count_units(NULL, NULL, NULL); 1797 1798 for (ndev = 0; ndev < devmax; ndev++) { 1799 struct ipath_devdata *dd = ipath_lookup(ndev); 1800 1801 if (!usable(dd)) 1802 continue; 1803 for (i = 1; i < dd->ipath_cfgports; i++) { 1804 struct ipath_portdata *pd = dd->ipath_pd[i]; 1805 1806 /* Skip ports which are not yet open */ 1807 if (!pd || !pd->port_cnt) 1808 continue; 1809 /* Skip port if it doesn't match the requested one */ 1810 if (pd->port_subport_id != uinfo->spu_subport_id) 1811 continue; 1812 /* Verify the sharing process matches the master */ 1813 if (pd->port_subport_cnt != uinfo->spu_subport_cnt || 1814 pd->userversion != uinfo->spu_userversion || 1815 pd->port_cnt >= pd->port_subport_cnt) { 1816 ret = -EINVAL; 1817 goto done; 1818 } 1819 port_fp(fp) = pd; 1820 subport_fp(fp) = pd->port_cnt++; 1821 pd->port_subpid[subport_fp(fp)] = current->pid; 1822 tidcursor_fp(fp) = 0; 1823 pd->active_slaves |= 1 << subport_fp(fp); 1824 ipath_cdbg(PROC, 1825 "%s[%u] %u sharing %s[%u] unit:port %u:%u\n", 1826 current->comm, current->pid, 1827 subport_fp(fp), 1828 pd->port_comm, pd->port_pid, 1829 dd->ipath_unit, pd->port_port); 1830 ret = 1; 1831 goto done; 1832 } 1833 } 1834 1835done: 1836 return ret; 1837} 1838 1839static int ipath_open(struct inode *in, struct file *fp) 1840{ 1841 /* The real work is performed later in ipath_assign_port() */ 1842 fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL); 1843 return fp->private_data ? 0 : -ENOMEM; 1844} 1845 1846/* Get port early, so can set affinity prior to memory allocation */ 1847static int ipath_assign_port(struct file *fp, 1848 const struct ipath_user_info *uinfo) 1849{ 1850 int ret; 1851 int i_minor; 1852 unsigned swmajor, swminor; 1853 1854 /* Check to be sure we haven't already initialized this file */ 1855 if (port_fp(fp)) { 1856 ret = -EINVAL; 1857 goto done; 1858 } 1859 1860 /* for now, if major version is different, bail */ 1861 swmajor = uinfo->spu_userversion >> 16; 1862 if (swmajor != IPATH_USER_SWMAJOR) { 1863 ipath_dbg("User major version %d not same as driver " 1864 "major %d\n", uinfo->spu_userversion >> 16, 1865 IPATH_USER_SWMAJOR); 1866 ret = -ENODEV; 1867 goto done; 1868 } 1869 1870 swminor = uinfo->spu_userversion & 0xffff; 1871 if (swminor != IPATH_USER_SWMINOR) 1872 ipath_dbg("User minor version %d not same as driver " 1873 "minor %d\n", swminor, IPATH_USER_SWMINOR); 1874 1875 mutex_lock(&ipath_mutex); 1876 1877 if (ipath_compatible_subports(swmajor, swminor) && 1878 uinfo->spu_subport_cnt && 1879 (ret = find_shared_port(fp, uinfo))) { 1880 if (ret > 0) 1881 ret = 0; 1882 goto done_chk_sdma; 1883 } 1884 1885 i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE; 1886 ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", 1887 (long)fp->f_path.dentry->d_inode->i_rdev, i_minor); 1888 1889 if (i_minor) 1890 ret = find_free_port(i_minor - 1, fp, uinfo); 1891 else 1892 ret = find_best_unit(fp, uinfo); 1893 1894done_chk_sdma: 1895 if (!ret) { 1896 struct ipath_filedata *fd = fp->private_data; 1897 const struct ipath_portdata *pd = fd->pd; 1898 const struct ipath_devdata *dd = pd->port_dd; 1899 1900 fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev, 1901 dd->ipath_unit, 1902 pd->port_port, 1903 fd->subport); 1904 1905 if (!fd->pq) 1906 ret = -ENOMEM; 1907 } 1908 1909 mutex_unlock(&ipath_mutex); 1910 1911done: 1912 return ret; 1913} 1914 1915 1916static int ipath_do_user_init(struct file *fp, 1917 const struct ipath_user_info *uinfo) 1918{ 1919 int ret; 1920 struct ipath_portdata *pd = port_fp(fp); 1921 struct ipath_devdata *dd; 1922 u32 head32; 1923 1924 /* Subports don't need to initialize anything since master did it. */ 1925 if (subport_fp(fp)) { 1926 ret = wait_event_interruptible(pd->port_wait, 1927 !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag)); 1928 goto done; 1929 } 1930 1931 dd = pd->port_dd; 1932 1933 if (uinfo->spu_rcvhdrsize) { 1934 ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize); 1935 if (ret) 1936 goto done; 1937 } 1938 1939 /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ 1940 1941 /* for right now, kernel piobufs are at end, so port 1 is at 0 */ 1942 pd->port_piobufs = dd->ipath_piobufbase + 1943 dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign; 1944 ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n", 1945 pd->port_port, pd->port_piobufs); 1946 1947 /* 1948 * Now allocate the rcvhdr Q and eager TIDs; skip the TID 1949 * array for time being. If pd->port_port > chip-supported, 1950 * we need to do extra stuff here to handle by handling overflow 1951 * through port 0, someday 1952 */ 1953 ret = ipath_create_rcvhdrq(dd, pd); 1954 if (!ret) 1955 ret = ipath_create_user_egr(pd); 1956 if (ret) 1957 goto done; 1958 1959 /* 1960 * set the eager head register for this port to the current values 1961 * of the tail pointers, since we don't know if they were 1962 * updated on last use of the port. 1963 */ 1964 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); 1965 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); 1966 pd->port_lastrcvhdrqtail = -1; 1967 ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n", 1968 pd->port_port, head32); 1969 pd->port_tidcursor = 0; /* start at beginning after open */ 1970 1971 /* initialize poll variables... */ 1972 pd->port_urgent = 0; 1973 pd->port_urgent_poll = 0; 1974 pd->port_hdrqfull_poll = pd->port_hdrqfull; 1975 1976 /* 1977 * Now enable the port for receive. 1978 * For chips that are set to DMA the tail register to memory 1979 * when they change (and when the update bit transitions from 1980 * 0 to 1. So for those chips, we turn it off and then back on. 1981 * This will (very briefly) affect any other open ports, but the 1982 * duration is very short, and therefore isn't an issue. We 1983 * explictly set the in-memory tail copy to 0 beforehand, so we 1984 * don't have to wait to be sure the DMA update has happened 1985 * (chip resets head/tail to 0 on transition to enable). 1986 */ 1987 set_bit(dd->ipath_r_portenable_shift + pd->port_port, 1988 &dd->ipath_rcvctrl); 1989 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) { 1990 if (pd->port_rcvhdrtail_kvaddr) 1991 ipath_clear_rcvhdrtail(pd); 1992 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1993 dd->ipath_rcvctrl & 1994 ~(1ULL << dd->ipath_r_tailupd_shift)); 1995 } 1996 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1997 dd->ipath_rcvctrl); 1998 /* Notify any waiting slaves */ 1999 if (pd->port_subport_cnt) { 2000 clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); 2001 wake_up(&pd->port_wait); 2002 } 2003done: 2004 return ret; 2005} 2006 2007/** 2008 * unlock_exptid - unlock any expected TID entries port still had in use 2009 * @pd: port 2010 * 2011 * We don't actually update the chip here, because we do a bulk update 2012 * below, using ipath_f_clear_tids. 2013 */ 2014static void unlock_expected_tids(struct ipath_portdata *pd) 2015{ 2016 struct ipath_devdata *dd = pd->port_dd; 2017 int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt; 2018 int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt; 2019 2020 ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n", 2021 pd->port_port); 2022 for (i = port_tidbase; i < maxtid; i++) { 2023 struct page *ps = dd->ipath_pageshadow[i]; 2024 2025 if (!ps) 2026 continue; 2027 2028 dd->ipath_pageshadow[i] = NULL; 2029 pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i], 2030 PAGE_SIZE, PCI_DMA_FROMDEVICE); 2031 ipath_release_user_pages_on_close(&ps, 1); 2032 cnt++; 2033 ipath_stats.sps_pageunlocks++; 2034 } 2035 if (cnt) 2036 ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n", 2037 pd->port_port, cnt); 2038 2039 if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks) 2040 ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n", 2041 (unsigned long long) ipath_stats.sps_pagelocks, 2042 (unsigned long long) 2043 ipath_stats.sps_pageunlocks); 2044} 2045 2046static int ipath_close(struct inode *in, struct file *fp) 2047{ 2048 int ret = 0; 2049 struct ipath_filedata *fd; 2050 struct ipath_portdata *pd; 2051 struct ipath_devdata *dd; 2052 unsigned port; 2053 2054 ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", 2055 (long)in->i_rdev, fp->private_data); 2056 2057 mutex_lock(&ipath_mutex); 2058 2059 fd = (struct ipath_filedata *) fp->private_data; 2060 fp->private_data = NULL; 2061 pd = fd->pd; 2062 if (!pd) { 2063 mutex_unlock(&ipath_mutex); 2064 goto bail; 2065 } 2066 2067 dd = pd->port_dd; 2068 2069 /* drain user sdma queue */ 2070 ipath_user_sdma_queue_drain(dd, fd->pq); 2071 ipath_user_sdma_queue_destroy(fd->pq); 2072 2073 if (--pd->port_cnt) { 2074 /* 2075 * XXX If the master closes the port before the slave(s), 2076 * revoke the mmap for the eager receive queue so 2077 * the slave(s) don't wait for receive data forever. 2078 */ 2079 pd->active_slaves &= ~(1 << fd->subport); 2080 pd->port_subpid[fd->subport] = 0; 2081 mutex_unlock(&ipath_mutex); 2082 goto bail; 2083 } 2084 port = pd->port_port; 2085 2086 if (pd->port_hdrqfull) { 2087 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " 2088 "during run\n", pd->port_comm, pd->port_pid, 2089 pd->port_hdrqfull); 2090 pd->port_hdrqfull = 0; 2091 } 2092 2093 if (pd->port_rcvwait_to || pd->port_piowait_to 2094 || pd->port_rcvnowait || pd->port_pionowait) { 2095 ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; " 2096 "%u rcv %u, pio already\n", 2097 pd->port_port, pd->port_rcvwait_to, 2098 pd->port_piowait_to, pd->port_rcvnowait, 2099 pd->port_pionowait); 2100 pd->port_rcvwait_to = pd->port_piowait_to = 2101 pd->port_rcvnowait = pd->port_pionowait = 0; 2102 } 2103 if (pd->port_flag) { 2104 ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n", 2105 pd->port_port, pd->port_flag); 2106 pd->port_flag = 0; 2107 } 2108 2109 if (dd->ipath_kregbase) { 2110 int i; 2111 /* atomically clear receive enable port and intr avail. */ 2112 clear_bit(dd->ipath_r_portenable_shift + port, 2113 &dd->ipath_rcvctrl); 2114 clear_bit(pd->port_port + dd->ipath_r_intravail_shift, 2115 &dd->ipath_rcvctrl); 2116 ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl, 2117 dd->ipath_rcvctrl); 2118 /* and read back from chip to be sure that nothing 2119 * else is in flight when we do the rest */ 2120 (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 2121 2122 /* clean up the pkeys for this port user */ 2123 ipath_clean_part_key(pd, dd); 2124 /* 2125 * be paranoid, and never write 0's to these, just use an 2126 * unused part of the port 0 tail page. Of course, 2127 * rcvhdraddr points to a large chunk of memory, so this 2128 * could still trash things, but at least it won't trash 2129 * page 0, and by disabling the port, it should stop "soon", 2130 * even if a packet or two is in already in flight after we 2131 * disabled the port. 2132 */ 2133 ipath_write_kreg_port(dd, 2134 dd->ipath_kregs->kr_rcvhdrtailaddr, port, 2135 dd->ipath_dummy_hdrq_phys); 2136 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr, 2137 pd->port_port, dd->ipath_dummy_hdrq_phys); 2138 2139 i = dd->ipath_pbufsport * (port - 1); 2140 ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport); 2141 ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1); 2142 2143 dd->ipath_f_clear_tids(dd, pd->port_port); 2144 2145 if (dd->ipath_pageshadow) 2146 unlock_expected_tids(pd); 2147 ipath_stats.sps_ports--; 2148 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", 2149 pd->port_comm, pd->port_pid, 2150 dd->ipath_unit, port); 2151 } 2152 2153 pd->port_pid = 0; 2154 dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ 2155 mutex_unlock(&ipath_mutex); 2156 ipath_free_pddata(dd, pd); /* after releasing the mutex */ 2157 2158bail: 2159 kfree(fd); 2160 return ret; 2161} 2162 2163static int ipath_port_info(struct ipath_portdata *pd, u16 subport, 2164 struct ipath_port_info __user *uinfo) 2165{ 2166 struct ipath_port_info info; 2167 int nup; 2168 int ret; 2169 size_t sz; 2170 2171 (void) ipath_count_units(NULL, &nup, NULL); 2172 info.num_active = nup; 2173 info.unit = pd->port_dd->ipath_unit; 2174 info.port = pd->port_port; 2175 info.subport = subport; 2176 /* Don't return new fields if old library opened the port. */ 2177 if (ipath_supports_subports(pd->userversion >> 16, 2178 pd->userversion & 0xffff)) { 2179 /* Number of user ports available for this device. */ 2180 info.num_ports = pd->port_dd->ipath_cfgports - 1; 2181 info.num_subports = pd->port_subport_cnt; 2182 sz = sizeof(info); 2183 } else 2184 sz = sizeof(info) - 2 * sizeof(u16); 2185 2186 if (copy_to_user(uinfo, &info, sz)) { 2187 ret = -EFAULT; 2188 goto bail; 2189 } 2190 ret = 0; 2191 2192bail: 2193 return ret; 2194} 2195 2196static int ipath_get_slave_info(struct ipath_portdata *pd, 2197 void __user *slave_mask_addr) 2198{ 2199 int ret = 0; 2200 2201 if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32))) 2202 ret = -EFAULT; 2203 return ret; 2204} 2205 2206static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq, 2207 u32 __user *inflightp) 2208{ 2209 const u32 val = ipath_user_sdma_inflight_counter(pq); 2210 2211 if (put_user(val, inflightp)) 2212 return -EFAULT; 2213 2214 return 0; 2215} 2216 2217static int ipath_sdma_get_complete(struct ipath_devdata *dd, 2218 struct ipath_user_sdma_queue *pq, 2219 u32 __user *completep) 2220{ 2221 u32 val; 2222 int err; 2223 2224 err = ipath_user_sdma_make_progress(dd, pq); 2225 if (err < 0) 2226 return err; 2227 2228 val = ipath_user_sdma_complete_counter(pq); 2229 if (put_user(val, completep)) 2230 return -EFAULT; 2231 2232 return 0; 2233} 2234 2235static ssize_t ipath_write(struct file *fp, const char __user *data, 2236 size_t count, loff_t *off) 2237{ 2238 const struct ipath_cmd __user *ucmd; 2239 struct ipath_portdata *pd; 2240 const void __user *src; 2241 size_t consumed, copy; 2242 struct ipath_cmd cmd; 2243 ssize_t ret = 0; 2244 void *dest; 2245 2246 if (count < sizeof(cmd.type)) { 2247 ret = -EINVAL; 2248 goto bail; 2249 } 2250 2251 ucmd = (const struct ipath_cmd __user *) data; 2252 2253 if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) { 2254 ret = -EFAULT; 2255 goto bail; 2256 } 2257 2258 consumed = sizeof(cmd.type); 2259 2260 switch (cmd.type) { 2261 case IPATH_CMD_ASSIGN_PORT: 2262 case __IPATH_CMD_USER_INIT: 2263 case IPATH_CMD_USER_INIT: 2264 copy = sizeof(cmd.cmd.user_info); 2265 dest = &cmd.cmd.user_info; 2266 src = &ucmd->cmd.user_info; 2267 break; 2268 case IPATH_CMD_RECV_CTRL: 2269 copy = sizeof(cmd.cmd.recv_ctrl); 2270 dest = &cmd.cmd.recv_ctrl; 2271 src = &ucmd->cmd.recv_ctrl; 2272 break; 2273 case IPATH_CMD_PORT_INFO: 2274 copy = sizeof(cmd.cmd.port_info); 2275 dest = &cmd.cmd.port_info; 2276 src = &ucmd->cmd.port_info; 2277 break; 2278 case IPATH_CMD_TID_UPDATE: 2279 case IPATH_CMD_TID_FREE: 2280 copy = sizeof(cmd.cmd.tid_info); 2281 dest = &cmd.cmd.tid_info; 2282 src = &ucmd->cmd.tid_info; 2283 break; 2284 case IPATH_CMD_SET_PART_KEY: 2285 copy = sizeof(cmd.cmd.part_key); 2286 dest = &cmd.cmd.part_key; 2287 src = &ucmd->cmd.part_key; 2288 break; 2289 case __IPATH_CMD_SLAVE_INFO: 2290 copy = sizeof(cmd.cmd.slave_mask_addr); 2291 dest = &cmd.cmd.slave_mask_addr; 2292 src = &ucmd->cmd.slave_mask_addr; 2293 break; 2294 case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg 2295 copy = 0; 2296 src = NULL; 2297 dest = NULL; 2298 break; 2299 case IPATH_CMD_POLL_TYPE: 2300 copy = sizeof(cmd.cmd.poll_type); 2301 dest = &cmd.cmd.poll_type; 2302 src = &ucmd->cmd.poll_type; 2303 break; 2304 case IPATH_CMD_ARMLAUNCH_CTRL: 2305 copy = sizeof(cmd.cmd.armlaunch_ctrl); 2306 dest = &cmd.cmd.armlaunch_ctrl; 2307 src = &ucmd->cmd.armlaunch_ctrl; 2308 break; 2309 case IPATH_CMD_SDMA_INFLIGHT: 2310 copy = sizeof(cmd.cmd.sdma_inflight); 2311 dest = &cmd.cmd.sdma_inflight; 2312 src = &ucmd->cmd.sdma_inflight; 2313 break; 2314 case IPATH_CMD_SDMA_COMPLETE: 2315 copy = sizeof(cmd.cmd.sdma_complete); 2316 dest = &cmd.cmd.sdma_complete; 2317 src = &ucmd->cmd.sdma_complete; 2318 break; 2319 default: 2320 ret = -EINVAL; 2321 goto bail; 2322 } 2323 2324 if (copy) { 2325 if ((count - consumed) < copy) { 2326 ret = -EINVAL; 2327 goto bail; 2328 } 2329 2330 if (copy_from_user(dest, src, copy)) { 2331 ret = -EFAULT; 2332 goto bail; 2333 } 2334 2335 consumed += copy; 2336 } 2337 2338 pd = port_fp(fp); 2339 if (!pd && cmd.type != __IPATH_CMD_USER_INIT && 2340 cmd.type != IPATH_CMD_ASSIGN_PORT) { 2341 ret = -EINVAL; 2342 goto bail; 2343 } 2344 2345 switch (cmd.type) { 2346 case IPATH_CMD_ASSIGN_PORT: 2347 ret = ipath_assign_port(fp, &cmd.cmd.user_info); 2348 if (ret) 2349 goto bail; 2350 break; 2351 case __IPATH_CMD_USER_INIT: 2352 /* backwards compatibility, get port first */ 2353 ret = ipath_assign_port(fp, &cmd.cmd.user_info); 2354 if (ret) 2355 goto bail; 2356 /* and fall through to current version. */ 2357 case IPATH_CMD_USER_INIT: 2358 ret = ipath_do_user_init(fp, &cmd.cmd.user_info); 2359 if (ret) 2360 goto bail; 2361 ret = ipath_get_base_info( 2362 fp, (void __user *) (unsigned long) 2363 cmd.cmd.user_info.spu_base_info, 2364 cmd.cmd.user_info.spu_base_info_size); 2365 break; 2366 case IPATH_CMD_RECV_CTRL: 2367 ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl); 2368 break; 2369 case IPATH_CMD_PORT_INFO: 2370 ret = ipath_port_info(pd, subport_fp(fp), 2371 (struct ipath_port_info __user *) 2372 (unsigned long) cmd.cmd.port_info); 2373 break; 2374 case IPATH_CMD_TID_UPDATE: 2375 ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info); 2376 break; 2377 case IPATH_CMD_TID_FREE: 2378 ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info); 2379 break; 2380 case IPATH_CMD_SET_PART_KEY: 2381 ret = ipath_set_part_key(pd, cmd.cmd.part_key); 2382 break; 2383 case __IPATH_CMD_SLAVE_INFO: 2384 ret = ipath_get_slave_info(pd, 2385 (void __user *) (unsigned long) 2386 cmd.cmd.slave_mask_addr); 2387 break; 2388 case IPATH_CMD_PIOAVAILUPD: 2389 ipath_force_pio_avail_update(pd->port_dd); 2390 break; 2391 case IPATH_CMD_POLL_TYPE: 2392 pd->poll_type = cmd.cmd.poll_type; 2393 break; 2394 case IPATH_CMD_ARMLAUNCH_CTRL: 2395 if (cmd.cmd.armlaunch_ctrl) 2396 ipath_enable_armlaunch(pd->port_dd); 2397 else 2398 ipath_disable_armlaunch(pd->port_dd); 2399 break; 2400 case IPATH_CMD_SDMA_INFLIGHT: 2401 ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp), 2402 (u32 __user *) (unsigned long) 2403 cmd.cmd.sdma_inflight); 2404 break; 2405 case IPATH_CMD_SDMA_COMPLETE: 2406 ret = ipath_sdma_get_complete(pd->port_dd, 2407 user_sdma_queue_fp(fp), 2408 (u32 __user *) (unsigned long) 2409 cmd.cmd.sdma_complete); 2410 break; 2411 } 2412 2413 if (ret >= 0) 2414 ret = consumed; 2415 2416bail: 2417 return ret; 2418} 2419 2420static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov, 2421 unsigned long dim, loff_t off) 2422{ 2423 struct file *filp = iocb->ki_filp; 2424 struct ipath_filedata *fp = filp->private_data; 2425 struct ipath_portdata *pd = port_fp(filp); 2426 struct ipath_user_sdma_queue *pq = fp->pq; 2427 2428 if (!dim) 2429 return -EINVAL; 2430 2431 return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim); 2432} 2433 2434static struct class *ipath_class; 2435 2436static int init_cdev(int minor, char *name, const struct file_operations *fops, 2437 struct cdev **cdevp, struct class_device **class_devp) 2438{ 2439 const dev_t dev = MKDEV(IPATH_MAJOR, minor); 2440 struct cdev *cdev = NULL; 2441 struct class_device *class_dev = NULL; 2442 int ret; 2443 2444 cdev = cdev_alloc(); 2445 if (!cdev) { 2446 printk(KERN_ERR IPATH_DRV_NAME 2447 ": Could not allocate cdev for minor %d, %s\n", 2448 minor, name); 2449 ret = -ENOMEM; 2450 goto done; 2451 } 2452 2453 cdev->owner = THIS_MODULE; 2454 cdev->ops = fops; 2455 kobject_set_name(&cdev->kobj, name); 2456 2457 ret = cdev_add(cdev, dev, 1); 2458 if (ret < 0) { 2459 printk(KERN_ERR IPATH_DRV_NAME 2460 ": Could not add cdev for minor %d, %s (err %d)\n", 2461 minor, name, -ret); 2462 goto err_cdev; 2463 } 2464 2465 class_dev = class_device_create(ipath_class, NULL, dev, NULL, name); 2466 2467 if (IS_ERR(class_dev)) { 2468 ret = PTR_ERR(class_dev); 2469 printk(KERN_ERR IPATH_DRV_NAME ": Could not create " 2470 "class_dev for minor %d, %s (err %d)\n", 2471 minor, name, -ret); 2472 goto err_cdev; 2473 } 2474 2475 goto done; 2476 2477err_cdev: 2478 cdev_del(cdev); 2479 cdev = NULL; 2480 2481done: 2482 if (ret >= 0) { 2483 *cdevp = cdev; 2484 *class_devp = class_dev; 2485 } else { 2486 *cdevp = NULL; 2487 *class_devp = NULL; 2488 } 2489 2490 return ret; 2491} 2492 2493int ipath_cdev_init(int minor, char *name, const struct file_operations *fops, 2494 struct cdev **cdevp, struct class_device **class_devp) 2495{ 2496 return init_cdev(minor, name, fops, cdevp, class_devp); 2497} 2498 2499static void cleanup_cdev(struct cdev **cdevp, 2500 struct class_device **class_devp) 2501{ 2502 struct class_device *class_dev = *class_devp; 2503 2504 if (class_dev) { 2505 class_device_unregister(class_dev); 2506 *class_devp = NULL; 2507 } 2508 2509 if (*cdevp) { 2510 cdev_del(*cdevp); 2511 *cdevp = NULL; 2512 } 2513} 2514 2515void ipath_cdev_cleanup(struct cdev **cdevp, 2516 struct class_device **class_devp) 2517{ 2518 cleanup_cdev(cdevp, class_devp); 2519} 2520 2521static struct cdev *wildcard_cdev; 2522static struct class_device *wildcard_class_dev; 2523 2524static const dev_t dev = MKDEV(IPATH_MAJOR, 0); 2525 2526static int user_init(void) 2527{ 2528 int ret; 2529 2530 ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME); 2531 if (ret < 0) { 2532 printk(KERN_ERR IPATH_DRV_NAME ": Could not register " 2533 "chrdev region (err %d)\n", -ret); 2534 goto done; 2535 } 2536 2537 ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME); 2538 2539 if (IS_ERR(ipath_class)) { 2540 ret = PTR_ERR(ipath_class); 2541 printk(KERN_ERR IPATH_DRV_NAME ": Could not create " 2542 "device class (err %d)\n", -ret); 2543 goto bail; 2544 } 2545 2546 goto done; 2547bail: 2548 unregister_chrdev_region(dev, IPATH_NMINORS); 2549done: 2550 return ret; 2551} 2552 2553static void user_cleanup(void) 2554{ 2555 if (ipath_class) { 2556 class_destroy(ipath_class); 2557 ipath_class = NULL; 2558 } 2559 2560 unregister_chrdev_region(dev, IPATH_NMINORS); 2561} 2562 2563static atomic_t user_count = ATOMIC_INIT(0); 2564static atomic_t user_setup = ATOMIC_INIT(0); 2565 2566int ipath_user_add(struct ipath_devdata *dd) 2567{ 2568 char name[10]; 2569 int ret; 2570 2571 if (atomic_inc_return(&user_count) == 1) { 2572 ret = user_init(); 2573 if (ret < 0) { 2574 ipath_dev_err(dd, "Unable to set up user support: " 2575 "error %d\n", -ret); 2576 goto bail; 2577 } 2578 ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev, 2579 &wildcard_class_dev); 2580 if (ret < 0) { 2581 ipath_dev_err(dd, "Could not create wildcard " 2582 "minor: error %d\n", -ret); 2583 goto bail_user; 2584 } 2585 2586 atomic_set(&user_setup, 1); 2587 } 2588 2589 snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit); 2590 2591 ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops, 2592 &dd->user_cdev, &dd->user_class_dev); 2593 if (ret < 0) 2594 ipath_dev_err(dd, "Could not create user minor %d, %s\n", 2595 dd->ipath_unit + 1, name); 2596 2597 goto bail; 2598 2599bail_user: 2600 user_cleanup(); 2601bail: 2602 return ret; 2603} 2604 2605void ipath_user_remove(struct ipath_devdata *dd) 2606{ 2607 cleanup_cdev(&dd->user_cdev, &dd->user_class_dev); 2608 2609 if (atomic_dec_return(&user_count) == 0) { 2610 if (atomic_read(&user_setup) == 0) 2611 goto bail; 2612 2613 cleanup_cdev(&wildcard_cdev, &wildcard_class_dev); 2614 user_cleanup(); 2615 2616 atomic_set(&user_setup, 0); 2617 } 2618bail: 2619 return; 2620} 2621