ipath_file_ops.c revision c76d3d28c31a68f45d6b5acaa4813138dd7883b3
/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/smp_lock.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ipath_common.h"
#include "ipath_user_sdma.h"

static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
			   loff_t *);
static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
			    unsigned long, loff_t);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);

static const struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.aio_write = ipath_writev,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap
};

/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
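/*
 * Illustrative note (not part of the original driver source): user
 * space treats the value from cvt_kvaddr(), like the physical
 * addresses below, purely as an opaque mmap offset, e.g.
 *
 *	p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd,
 *		 (off_t) kinfo.spi_rcvhdr_base);
 *
 * ipath_mmap() below decodes the offset by comparing it against the
 * per-port addresses, so the only requirement is that each offset be
 * unique and page-aligned.
 */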
static int ipath_get_base_info(struct file *fp,
			       void __user *ubase, size_t ubase_size)
{
	struct ipath_portdata *pd = port_fp(fp);
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;
	unsigned subport_cnt;
	int shared, master;
	size_t sz;

	subport_cnt = pd->port_subport_cnt;
	if (!subport_cnt) {
		shared = 0;
		master = 0;
		subport_cnt = 1;
	} else {
		shared = 1;
		master = !subport_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If port sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ipath_cdbg(PROC,
			   "Base size %zu, need %zu (version mismatch?)\n",
			   ubase_size, sz);
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips
	 * that are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
	if (!shared) {
		kinfo->spi_piocnt = pd->port_piocnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_ureg_align * pd->port_port;
	} else if (master) {
		kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
			(pd->port_piocnt % subport_cnt);
		/* Master's PIO buffers are after all the slaves' */
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign *
			(pd->port_piocnt - kinfo->spi_piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign * kinfo->spi_piocnt * slave;
	}

	if (shared) {
		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_ureg_align * pd->port_port;
		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;

		kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
			PAGE_SIZE * subport_fp(fp));

		kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport_fp(fp));
		kinfo->spi_rcvhdr_tailaddr = 0;
		kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
			pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
			subport_fp(fp));

		kinfo->spi_subport_uregbase =
			cvt_kvaddr(pd->subport_uregbase);
		kinfo->spi_subport_rcvegrbuf =
			cvt_kvaddr(pd->subport_rcvegrbuf);
		kinfo->spi_subport_rcvhdr_base =
			cvt_kvaddr(pd->subport_rcvhdr_base);
		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
			   kinfo->spi_port, kinfo->spi_runtime_flags,
			   (unsigned long long) kinfo->spi_subport_uregbase,
			   (unsigned long long) kinfo->spi_subport_rcvegrbuf,
			   (unsigned long long) kinfo->spi_subport_rcvhdr_base);
	}

	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
		dd->ipath_palign;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	/*
	 * user mode PIO buffers are always 2KB, even when 4KB can
	 * be received, and sent via the kernel; this is ibmaxlen
	 * for 2K MTU.
	 */
	kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_subport = subport_fp(fp);
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (master) {
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
	}

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}
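/*
 * Worked example (illustrative numbers, not from the driver): with
 * port_piocnt == 64 and subport_cnt == 3, each slave gets 64 / 3 == 21
 * PIO buffers and the master gets 21 + (64 % 3) == 22, so slave 0 maps
 * at port_piobufs, slave 1 at + 21 * palign, and the master's buffers
 * start at + 42 * palign, directly after the slaves'.
 */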
/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @fp: the ipath device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;
	unsigned subport = subport_fp(fp);

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success?  Likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt) {
		tidcnt = dd->ipath_rcvtidcnt;
		tid = pd->port_tidcursor;
		tidoff = 0;
	} else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		tidoff = dd->ipath_rcvtidcnt - tidcnt;
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		tidoff = tidcnt * (subport - 1);
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
	tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			break;
		}
		tidlist[i] = tid + tidoff;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid + tidoff, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		dd->ipath_physshadow[porttid + tid] = ipath_map_page(
			dd->pcidev, pagep[i], 0, PAGE_SIZE,
			PCI_DMA_FROMDEVICE);
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->ipath_physshadow[porttid + tid];
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
				    physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid],
						    RCVHQ_RCV_TYPE_EXPECTED,
						    dd->ipath_tidinvalid);
				pci_unmap_page(dd->pcidev,
					dd->ipath_physshadow[porttid + tid],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail".  If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		if (!pd->port_subport_cnt)
			pd->port_tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
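/*
 * Worked example (illustrative numbers, not from the driver): with
 * ipath_rcvtidcnt == 512 and port_subport_cnt == 3, each slave manages
 * 512 / 3 == 170 TIDs and the master 170 + (512 % 3) == 172; the
 * master's tidoff is 512 - 172 == 340, i.e. its TIDs follow the two
 * slaves' ranges at offsets 0 and 170.
 */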
/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @subport: the subport
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof tidmap)) {
		ret = -EFAULT;
		goto done;
	}

	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt)
		tidcnt = dd->ipath_rcvtidcnt;
	else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		porttid += dd->ipath_rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		porttid += tidcnt * (subport - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			struct page *p;
			p = dd->ipath_pageshadow[porttid + tid];
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pid_nr(pd->port_pid), tid);
			dd->ipath_f_put_tid(dd, &tidbase[tid],
					    RCVHQ_RCV_TYPE_EXPECTED,
					    dd->ipath_tidinvalid);
			pci_unmap_page(dd->pcidev,
				dd->ipath_physshadow[porttid + tid],
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ipath_release_user_pages(&p, 1);
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
	struct ipath_devdata *dd = pd->port_dd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;
	int ret;

	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
		   pd->port_port, key, dd->ipath_pkeys[0],
		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
		   atomic_read(&dd->ipath_pkeyrefs[3]));

	if (!lkey) {
		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
			   pd->port_port);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it.  (see bug 4331)
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i] && pidx == -1)
			pidx = i;
		if (pd->port_pkeys[i] == key) {
			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
				   "(%x) more than once\n",
				   pd->port_port, key);
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ipath_dbg("All pkeys for port %u already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		if (dd->ipath_pkeys[i] == key) {
			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				pd->port_pkeys[pidx] = key;
				ipath_cdbg(VERBOSE, "p%u set key %x "
					   "matches #%d, count now %d\n",
					   pd->port_port, key, i,
					   atomic_read(pkrefs));
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				ipath_cdbg(VERBOSE, "Lost race, count was "
					   "0, after dec, it's %d\n",
					   atomic_read(pkrefs));
				any++;
			}
		}
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ipath_dbg("port %u, all pkeys already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			u64 pkey;

			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
			pkey =
				(u64) dd->ipath_pkeys[0] |
				((u64) dd->ipath_pkeys[1] << 16) |
				((u64) dd->ipath_pkeys[2] << 32) |
				((u64) dd->ipath_pkeys[3] << 48);
			ipath_cdbg(PROC, "p%u set key %x in #%d, "
				   "portidx %d, new pkey reg %llx\n",
				   pd->port_port, key, i, pidx,
				   (unsigned long long) pkey);
			ipath_write_kreg(
				dd, dd->ipath_kregs->kr_partitionkey, pkey);

			ret = 0;
			goto bail;
		}
	}
	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
		  "can't set %x\n", pd->port_port, key);
	ret = -EBUSY;

bail:
	return ret;
}
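/*
 * Layout of kr_partitionkey implied by the packing above:
 *
 *	bits 15..0	ipath_pkeys[0]
 *	bits 31..16	ipath_pkeys[1]
 *	bits 47..32	ipath_pkeys[2]
 *	bits 63..48	ipath_pkeys[3]
 *
 * with bit 15 of each 16-bit entry being the full-membership bit
 * forced on above.
 */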
/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @subport: the subport
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
			     int start_stop)
{
	struct ipath_devdata *dd = pd->port_dd;

	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
		   start_stop ? "en" : "dis", dd->ipath_unit,
		   pd->port_port, subport);
	if (subport)
		goto bail;
	/* atomically clear receive enable port. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.  This could cause a
		 * problem if software was broken, and did the enable w/o
		 * the disable, but eventually the in-memory copy will be
		 * updated and correct itself, even in the face of software
		 * bugs.
		 */
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		set_bit(dd->ipath_r_portenable_shift + pd->port_port,
			&dd->ipath_rcvctrl);
	} else
		clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
			  &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* now be sure chip saw it before we return */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	if (start_stop) {
		/*
		 * And try to be sure that tail reg update has happened too.
		 * This should in theory interlock with the RXE changes to
		 * the tail register.  Don't assign it to the tail register
		 * in memory copy, since we could overwrite an update by the
		 * chip if we did.
		 */
		ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	}
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}

static void ipath_clean_part_key(struct ipath_portdata *pd,
				 struct ipath_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;

	/* for debugging only */
	oldpkey = (u64) dd->ipath_pkeys[0] |
		((u64) dd->ipath_pkeys[1] << 16) |
		((u64) dd->ipath_pkeys[2] << 32) |
		((u64) dd->ipath_pkeys[3] << 48);

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i])
			continue;
		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
			   pd->port_pkeys[i]);
		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
			/* check for match independent of the global bit */
			if ((dd->ipath_pkeys[j] & 0x7fff) !=
			    (pd->port_pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
				ipath_cdbg(VERBOSE, "p%u clear key "
					   "%x matches #%d\n",
					   pd->port_port,
					   pd->port_pkeys[i], j);
				ipath_stats.sps_pkeys[j] =
					dd->ipath_pkeys[j] = 0;
				pchanged++;
			} else
				ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
					   "but ref still %d\n", pd->port_port,
					   pd->port_pkeys[i], j,
					   atomic_read(&dd->ipath_pkeyrefs[j]));
			break;
		}
		pd->port_pkeys[i] = 0;
	}
	if (pchanged) {
		u64 pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
			   "new pkey reg %llx\n", pd->port_port,
			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
}

/*
 * Initialize the port data with the receive buffer sizes
 * so this can be done while the master port is locked.
 * Otherwise, there is a race with a slave opening the port
 * and seeing these fields uninitialized.
 */
static void init_user_egr_sizes(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned egrperchunk, egrcnt, size;

	/*
	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
	 * physically contiguous memory, advance through it until used up
	 * and then allocate more.  Of course, we need memory to store those
	 * extra pointers, now.  Started out with 256KB, but under heavy
	 * memory pressure (creating large files and then copying them over
	 * NFS while doing lots of MPI jobs), we hit some allocation
	 * failures, even though we can sleep...  (2.6.10) Still get
	 * failures at 64K.  32K is the lowest we can go without wasting
	 * additional memory.
	 */
	size = 0x8000;
	egrperchunk = size / dd->ipath_rcvegrbufsize;
	egrcnt = dd->ipath_rcvegrcnt;
	pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
	pd->port_rcvegrbufs_perchunk = egrperchunk;
	pd->port_rcvegrbuf_size = size;
}
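/*
 * Example (illustrative numbers, not from the driver): with
 * ipath_rcvegrbufsize == 2048, a 32KB chunk holds 16 eager buffers,
 * so ipath_rcvegrcnt == 2048 eager TIDs needs
 * (2048 + 16 - 1) / 16 == 128 chunks.
 */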
/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	int ret;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	egrcnt = dd->ipath_rcvegrcnt;
	/* TID number offset for this port */
	egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
	egrsize = dd->ipath_rcvegrbufsize;
	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

	chunk = pd->port_rcvegrbuf_chunks;
	egrperchunk = pd->port_rcvegrbufs_perchunk;
	size = pd->port_rcvegrbuf_size;
	pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
				     GFP_KERNEL);
	if (!pd->port_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail;
	}
	pd->port_rcvegrbuf_phys =
		kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]),
			GFP_KERNEL);
	if (!pd->port_rcvegrbuf_phys) {
		ret = -ENOMEM;
		goto bail_rcvegrbuf;
	}
	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {

		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
			&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
			gfp_flags);

		if (!pd->port_rcvegrbuf[e]) {
			ret = -ENOMEM;
			goto bail_rcvegrbuf_phys;
		}
	}

	pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
		unsigned i;

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->ipath_f_put_tid(dd, e + egroff +
					    (u64 __iomem *)
					    ((char __iomem *)
					     dd->ipath_kregbase +
					     dd->ipath_rcvegrbase),
					    RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched();	/* don't hog the cpu */
	}

	ret = 0;
	goto bail;

bail_rcvegrbuf_phys:
	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
		     pd->port_rcvegrbuf[e]; e++) {
		dma_free_coherent(&dd->pcidev->dev, size,
				  pd->port_rcvegrbuf[e],
				  pd->port_rcvegrbuf_phys[e]);

	}
	kfree(pd->port_rcvegrbuf_phys);
	pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(pd->port_rcvegrbuf);
	pd->port_rcvegrbuf = NULL;
bail:
	return ret;
}


/* common code for the mappings on dma_alloc_coherent mem */
static int ipath_mmap_mem(struct vm_area_struct *vma,
			  struct ipath_portdata *pd, unsigned len,
			  int write_ok, void *kvaddr, char *what)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		dev_info(&dd->pcidev->dev,
			 "FAIL on %s: len %lx > %x\n", what,
			 vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
			 "bytes r%c failed: %d\n", what, pd->port_port,
			 pfn, len, write_ok?'w':'o', ret);
	else
		ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
			   "r%c\n", what, pd->port_port, pfn, len,
			   write_ok?'w':'o');
bail:
	return ret;
}

static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their port
	 * in the chip.
	 */
	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->ipath_physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
			struct ipath_devdata *dd,
			struct ipath_portdata *pd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible.  This prevents access to previous
	 * process data, and catches users who might try to read the i/o
	 * space due to a bug.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
			 "reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EINVAL;
		goto bail;
	}

	phys = dd->ipath_physaddr + piobufs;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for
	 * when not initially mapped readable, as is normally the case)
	 */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}
static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = pd->port_rcvegrbuf_size;
	total_size = pd->port_rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EINVAL;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}
	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;

	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}

/*
 * ipath_file_vma_fault - handle a VMA page fault.
 */
static int ipath_file_vma_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	return 0;
}

static struct vm_operations_struct ipath_file_vm_ops = {
	.fault = ipath_file_vma_fault,
};

static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct ipath_portdata *pd, unsigned subport)
{
	unsigned long len;
	struct ipath_devdata *dd;
	void *addr;
	size_t size;
	int ret = 0;

	/* If the port is not shared, all addresses should be physical */
	if (!pd->port_subport_cnt)
		goto bail;

	dd = pd->port_dd;
	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;

	/*
	 * Each process has all the subport uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped - as an array for all the processes,
	 * and also separately for this process.
	 */
	if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
		addr = pd->subport_uregbase;
		size = PAGE_SIZE * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
		addr = pd->subport_rcvhdr_base;
		size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
		addr = pd->subport_rcvegrbuf;
		size *= pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
					PAGE_SIZE * subport)) {
		addr = pd->subport_uregbase + PAGE_SIZE * subport;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
					pd->port_rcvhdrq_size * subport)) {
		addr = pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport;
		size = pd->port_rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
					size * subport)) {
		addr = pd->subport_rcvegrbuf + size * subport;
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "Can't map eager buffers as "
				 "writable (flags=%lx)\n", vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writeable
		 * with mprotect.
		 */
		vma->vm_flags &= ~VM_MAYWRITE;
	} else {
		goto bail;
	}
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
		ret = -EINVAL;
		goto bail;
	}

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &ipath_file_vm_ops;
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
	ret = 1;

bail:
	return ret;
}
/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret;

	pd = port_fp(fp);
	if (!pd) {
		ret = -EINVAL;
		goto bail;
	}
	dd = pd->port_dd;

	/*
	 * This is the ipath_do_user_init() code, mapping the shared buffers
	 * into the user process.  The address referred to by vm_pgoff is the
	 * file offset passed via mmap().  For shared ports, this is the
	 * kernel vmalloc() address of the pages to share with the master.
	 * For non-shared or master ports, this is a physical address.
	 * We only do one mmap for each space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr) {
		ret = -EINVAL;
		goto bail;
	}

	ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
		   (unsigned long long) pgaddr, vma->vm_start,
		   vma->vm_end - vma->vm_start, dd->ipath_unit,
		   pd->port_port, subport_fp(fp));

	/*
	 * Physical addresses must fit in 40 bits for our hardware.
	 * Check for kernel virtual addresses first, anything else must
	 * match a HW or memory address.
	 */
	ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto bail;
	}

	ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
	if (!pd->port_subport_cnt) {
		/* port is not shared */
		piocnt = pd->port_piocnt;
		piobufs = pd->port_piobufs;
	} else if (!subport_fp(fp)) {
		/* caller is the master */
		piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
			 (pd->port_piocnt % pd->port_subport_cnt);
		piobufs = pd->port_piobufs +
			dd->ipath_palign * (pd->port_piocnt - piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		/* caller is a slave */
		piocnt = pd->port_piocnt / pd->port_subport_cnt;
		piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
	}

	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
	else if (pgaddr == dd->ipath_pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     (void *) dd->ipath_pioavailregs_dma,
				     "pioavail registers");
	else if (pgaddr == pd->port_rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, pd);
	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; readonly except on HT (so have
		 * to allow writable mapping), multiple pages, contiguous
		 * from an i/o perspective.
		 */
		ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
				     pd->port_rcvhdrq,
				     "rcvhdrq");
	else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     pd->port_rcvhdrtail_kvaddr,
				     "rcvhdrq tail");
	else
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		dev_info(&dd->pcidev->dev,
			 "Failure %d on off %llx len %lx\n",
			 -ret, (unsigned long long)pgaddr,
			 vma->vm_end - vma->vm_start);
bail:
	return ret;
}

static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
{
	unsigned pollflag = 0;

	if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
	    pd->port_hdrqfull != pd->port_hdrqfull_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_hdrqfull_poll = pd->port_hdrqfull;
	}

	return pollflag;
}

static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
				      struct file *fp,
				      struct poll_table_struct *pt)
{
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	if (pd->port_urgent != pd->port_urgent_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_urgent_poll = pd->port_urgent;
	}

	if (!pollflag) {
		/* this saves a spin_lock/unlock in interrupt handler... */
		set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
		/* flush waiting flag so don't miss an event... */
		wmb();
		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}

static unsigned int ipath_poll_next(struct ipath_portdata *pd,
				    struct file *fp,
				    struct poll_table_struct *pt)
{
	u32 head;
	u32 tail;
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
	if (pd->port_rcvhdrtail_kvaddr)
		tail = ipath_get_rcvhdrtail(pd);
	else
		tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

	if (head != tail)
		pollflag |= POLLIN | POLLRDNORM;
	else {
		/* this saves a spin_lock/unlock in interrupt handler */
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		/* flush waiting flag so we don't miss an event */
		wmb();

		set_bit(pd->port_port + dd->ipath_r_intravail_shift,
			&dd->ipath_rcvctrl);

		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);

		if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
			ipath_write_ureg(dd, ur_rcvhdrhead,
					 dd->ipath_rhdrhead_intr_off | head,
					 pd->port_port);

		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}

static unsigned int ipath_poll(struct file *fp,
			       struct poll_table_struct *pt)
{
	struct ipath_portdata *pd;
	unsigned pollflag;

	pd = port_fp(fp);
	if (!pd)
		pollflag = 0;
	else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
		pollflag = ipath_poll_urgent(pd, fp, pt);
	else
		pollflag = ipath_poll_next(pd, fp, pt);

	return pollflag;
}
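/*
 * Usage sketch (hypothetical user code, not part of the driver): a
 * process selects the urgent-packet behavior by having pd->poll_type
 * set beforehand, then simply blocks in poll(2) on the device fd;
 * POLLIN | POLLRDNORM from the handlers above means new packets
 * arrived (head != tail), an urgent packet, or an hdrq overflow,
 * depending on poll_type.
 */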
static int ipath_supports_subports(int user_swmajor, int user_swminor)
{
	/* no subport implementation prior to software version 1.3 */
	return (user_swmajor > 1) || (user_swminor >= 3);
}

static int ipath_compatible_subports(int user_swmajor, int user_swminor)
{
	/* this code is written long-hand for clarity */
	if (IPATH_USER_SWMAJOR != user_swmajor) {
		/* no promise of compatibility if major mismatch */
		return 0;
	}
	if (IPATH_USER_SWMAJOR == 1) {
		switch (IPATH_USER_SWMINOR) {
		case 0:
		case 1:
		case 2:
			/* no subport implementation so cannot be compatible */
			return 0;
		case 3:
			/* 3 is only compatible with itself */
			return user_swminor == 3;
		default:
			/* >= 4 are compatible (or are expected to be) */
			return user_swminor >= 4;
		}
	}
	/* make no promises yet for future major versions */
	return 0;
}
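/*
 * Summary of the checks above for the 1.x series (driver minor is
 * IPATH_USER_SWMINOR, user minor comes from spu_userversion):
 *
 *	driver 1.0-1.2	never compatible (no subport support)
 *	driver 1.3	compatible only with user 1.3
 *	driver 1.4+	compatible with user 1.4+
 */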
static int init_subports(struct ipath_devdata *dd,
			 struct ipath_portdata *pd,
			 const struct ipath_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subports;
	size_t size;

	/*
	 * If the user is requesting zero subports,
	 * skip the subport allocation.
	 */
	if (uinfo->spu_subport_cnt <= 0)
		goto bail;

	/* Self-consistency check for ipath_compatible_subports() */
	if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
	    !ipath_compatible_subports(IPATH_USER_SWMAJOR,
				       IPATH_USER_SWMINOR)) {
		dev_info(&dd->pcidev->dev,
			 "Inconsistent ipath_compatible_subports()\n");
		goto bail;
	}

	/* Check for subport compatibility */
	if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
				       uinfo->spu_userversion & 0xffff)) {
		dev_info(&dd->pcidev->dev,
			 "Mismatched user version (%d.%d) and driver "
			 "version (%d.%d) while port sharing.  Ensure "
			 "that driver and library are from the same "
			 "release.\n",
			 (int) (uinfo->spu_userversion >> 16),
			 (int) (uinfo->spu_userversion & 0xffff),
			 IPATH_USER_SWMAJOR,
			 IPATH_USER_SWMINOR);
		goto bail;
	}
	if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
		ret = -EINVAL;
		goto bail;
	}

	num_subports = uinfo->spu_subport_cnt;
	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
	if (!pd->subport_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_subports;
	pd->subport_rcvhdr_base = vmalloc(size);
	if (!pd->subport_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
					pd->port_rcvegrbuf_size *
					num_subports);
	if (!pd->subport_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	pd->port_subport_cnt = uinfo->spu_subport_cnt;
	pd->port_subport_id = uinfo->spu_subport_id;
	pd->active_slaves = 1;
	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
	memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
	memset(pd->subport_rcvhdr_base, 0, size);
	memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
	       pd->port_rcvegrbuf_size *
	       num_subports);
	goto bail;

bail_rhdr:
	vfree(pd->subport_rcvhdr_base);
bail_ureg:
	vfree(pd->subport_uregbase);
	pd->subport_uregbase = NULL;
bail:
	return ret;
}
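/*
 * Example (illustrative numbers, not from the driver): with
 * ipath_rcvhdrcnt == 64 and ipath_rcvhdrentsize == 16, each subport's
 * shadow rcvhdr queue is ALIGN(64 * 16 * 4, PAGE_SIZE) == 4096 bytes,
 * i.e. exactly one 4KB page per subport.
 */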
static int try_alloc_port(struct ipath_devdata *dd, int port,
			  struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_portdata *pd;
	int ret;

	if (!(pd = dd->ipath_pd[port])) {
		void *ptmp;

		pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

		/*
		 * Allocate memory for use in ipath_tid_update() just once
		 * at open, not per call.  Reduces cost of expected send
		 * setup.
		 */
		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
			       dd->ipath_rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);
		if (!pd || !ptmp) {
			ipath_dev_err(dd, "Unable to allocate portdata "
				      "memory, failing open\n");
			ret = -ENOMEM;
			kfree(pd);
			kfree(ptmp);
			goto bail;
		}
		dd->ipath_pd[port] = pd;
		dd->ipath_pd[port]->port_port = port;
		dd->ipath_pd[port]->port_dd = dd;
		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
	}
	if (!pd->port_cnt) {
		pd->userversion = uinfo->spu_userversion;
		init_user_egr_sizes(pd);
		if ((ret = init_subports(dd, pd, uinfo)) != 0)
			goto bail;
		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
			   current->comm, current->pid, dd->ipath_unit,
			   port);
		pd->port_cnt = 1;
		port_fp(fp) = pd;
		pd->port_pid = get_pid(task_pid(current));
		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
	} else
		ret = -EBUSY;

bail:
	return ret;
}

static inline int usable(struct ipath_devdata *dd)
{
	return dd &&
		(dd->ipath_flags & IPATH_PRESENT) &&
		dd->ipath_kregbase &&
		dd->ipath_lid &&
		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
				     | IPATH_LINKUNK));
}

static int find_free_port(int unit, struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_devdata *dd = ipath_lookup(unit);
	int ret, i;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (!usable(dd)) {
		ret = -ENETDOWN;
		goto bail;
	}

	for (i = 1; i < dd->ipath_cfgports; i++) {
		ret = try_alloc_port(dd, i, fp, uinfo);
		if (ret != -EBUSY)
			goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}
static int find_best_unit(struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	int ret = 0, i, prefunit = -1, devmax;
	int maxofallports, npresent, nup;
	int ndev;

	devmax = ipath_count_units(&npresent, &nup, &maxofallports);

	/*
	 * This code is present to allow a knowledgeable person to
	 * specify the layout of processes to processors before opening
	 * this driver, and then we'll assign the process to the "closest"
	 * InfiniPath chip to that processor (we assume reasonable
	 * connectivity, for now).  This code assumes that if affinity has
	 * been set before this point, at most one cpu is set; for now this
	 * is reasonable.  I check for both cpus_empty() and cpus_full(),
	 * in case some kernel variant sets none of the bits when no
	 * affinity is set.  2.6.11 and 12 kernels have all present
	 * cpus set.  Some day we'll have to fix it up further to handle
	 * a cpu subset.  This algorithm fails for two HT chips connected
	 * in tunnel fashion.  Eventually this needs real topology
	 * information.  There may be some issues with dual core numbering
	 * as well.  This needs more work prior to release.
	 */
	if (!cpus_empty(current->cpus_allowed) &&
	    !cpus_full(current->cpus_allowed)) {
		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
		for (i = 0; i < ncpus; i++)
			if (cpu_isset(i, current->cpus_allowed)) {
				ipath_cdbg(PROC, "%s[%u] affinity set for "
					   "cpu %d/%d\n", current->comm,
					   current->pid, i, ncpus);
				curcpu = i;
				nset++;
			}
		if (curcpu != -1 && nset != ncpus) {
			if (npresent) {
				prefunit = curcpu / (ncpus / npresent);
				ipath_cdbg(PROC, "%s[%u] %d chips, %d cpus, "
					   "%d cpus/chip, select unit %d\n",
					   current->comm, current->pid,
					   npresent, ncpus, ncpus / npresent,
					   prefunit);
			}
		}
	}

	/*
	 * user ports start at 1, kernel port is 0
	 * For now, we do round-robin access across all chips
	 */

	if (prefunit != -1)
		devmax = prefunit + 1;
recheck:
	for (i = 1; i < maxofallports; i++) {
		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
		     ndev++) {
			struct ipath_devdata *dd = ipath_lookup(ndev);

			if (!usable(dd))
				continue;	/* can't use this unit */
			if (i >= dd->ipath_cfgports)
				/*
				 * Maxed out on users of this unit.  Try
				 * next.
				 */
				continue;
			ret = try_alloc_port(dd, i, fp, uinfo);
			if (!ret)
				goto done;
		}
	}

	if (npresent) {
		if (nup == 0) {
			ret = -ENETDOWN;
			ipath_dbg("No ports available (none initialized "
				  "and ready)\n");
		} else {
			if (prefunit > 0) {
				/* if started above 0, retry from 0 */
				ipath_cdbg(PROC,
					   "%s[%u] no ports on prefunit "
					   "%d, clear and re-check\n",
					   current->comm, current->pid,
					   prefunit);
				devmax = ipath_count_units(NULL, NULL,
							   NULL);
				prefunit = -1;
				goto recheck;
			}
			ret = -EBUSY;
			ipath_dbg("No ports available\n");
		}
	} else {
		ret = -ENXIO;
		ipath_dbg("No boards found\n");
	}

done:
	return ret;
}
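/*
 * Example (illustrative numbers, not from the driver): with 2 chips
 * present and 8 online cpus, ncpus / npresent == 4 cpus per chip, so
 * a process bound to cpu 5 prefers unit 5 / 4 == 1.
 */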
static int find_shared_port(struct file *fp,
			    const struct ipath_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = ipath_count_units(NULL, NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct ipath_devdata *dd = ipath_lookup(ndev);

		if (!usable(dd))
			continue;
		for (i = 1; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];

			/* Skip ports which are not yet open */
			if (!pd || !pd->port_cnt)
				continue;
			/* Skip port if it doesn't match the requested one */
			if (pd->port_subport_id != uinfo->spu_subport_id)
				continue;
			/* Verify the sharing process matches the master */
			if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
			    pd->userversion != uinfo->spu_userversion ||
			    pd->port_cnt >= pd->port_subport_cnt) {
				ret = -EINVAL;
				goto done;
			}
			port_fp(fp) = pd;
			subport_fp(fp) = pd->port_cnt++;
			pd->port_subpid[subport_fp(fp)] =
				get_pid(task_pid(current));
			tidcursor_fp(fp) = 0;
			pd->active_slaves |= 1 << subport_fp(fp);
			ipath_cdbg(PROC,
				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
				   current->comm, current->pid,
				   subport_fp(fp),
				   pd->port_comm, pid_nr(pd->port_pid),
				   dd->ipath_unit, pd->port_port);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}

static int ipath_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in ipath_assign_port() */
	cycle_kernel_lock();
	fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
	return fp->private_data ? 0 : -ENOMEM;
}

/* Get port early, so can set affinity prior to memory allocation */
static int ipath_assign_port(struct file *fp,
			     const struct ipath_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor;

	/* Check to be sure we haven't already initialized this file */
	if (port_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != IPATH_USER_SWMAJOR) {
		ipath_dbg("User major version %d not same as driver "
			  "major %d\n", uinfo->spu_userversion >> 16,
			  IPATH_USER_SWMAJOR);
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;
	if (swminor != IPATH_USER_SWMINOR)
		ipath_dbg("User minor version %d not same as driver "
			  "minor %d\n", swminor, IPATH_USER_SWMINOR);

	mutex_lock(&ipath_mutex);

	if (ipath_compatible_subports(swmajor, swminor) &&
	    uinfo->spu_subport_cnt &&
	    (ret = find_shared_port(fp, uinfo))) {
		if (ret > 0)
			ret = 0;
		goto done_chk_sdma;
	}

	i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
		   (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);

	if (i_minor)
		ret = find_free_port(i_minor - 1, fp, uinfo);
	else
		ret = find_best_unit(fp, uinfo);

done_chk_sdma:
	if (!ret) {
		struct ipath_filedata *fd = fp->private_data;
		const struct ipath_portdata *pd = fd->pd;
		const struct ipath_devdata *dd = pd->port_dd;

		fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
						      dd->ipath_unit,
						      pd->port_port,
						      fd->subport);

		if (!fd->pq)
			ret = -ENOMEM;
	}

	mutex_unlock(&ipath_mutex);

done:
	return ret;
}

static int ipath_do_user_init(struct file *fp,
			      const struct ipath_user_info *uinfo)
{
	int ret;
	struct ipath_portdata *pd = port_fp(fp);
	struct ipath_devdata *dd;
	u32 head32;

	/* Subports don't need to initialize anything since master did it. */
	if (subport_fp(fp)) {
		ret = wait_event_interruptible(pd->port_wait,
			!test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
		goto done;
	}

	dd = pd->port_dd;

	if (uinfo->spu_rcvhdrsize) {
		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
		if (ret)
			goto done;
	}

	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */

	/* some ports may get extra buffers, calculate that here */
	if (pd->port_port <= dd->ipath_ports_extrabuf)
		pd->port_piocnt = dd->ipath_pbufsport + 1;
	else
		pd->port_piocnt = dd->ipath_pbufsport;

	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
	if (pd->port_port <= dd->ipath_ports_extrabuf)
		pd->port_pio_base = (dd->ipath_pbufsport + 1)
			* (pd->port_port - 1);
	else
		pd->port_pio_base = dd->ipath_ports_extrabuf +
			dd->ipath_pbufsport * (pd->port_port - 1);
	pd->port_piobufs = dd->ipath_piobufbase +
		pd->port_pio_base * dd->ipath_palign;
	ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
		   " first pio %u\n", pd->port_port, pd->port_piobufs,
		   pd->port_piocnt, pd->port_pio_base);
	ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for the time being.  If pd->port_port exceeds what the
	 * chip supports, we would need extra logic here to handle the
	 * overflow through port 0, someday.
	 */
	ret = ipath_create_rcvhdrq(dd, pd);
	if (!ret)
		ret = ipath_create_user_egr(pd);
	if (ret)
		goto done;

	/*
	 * set the eager head register for this port to the current values
	 * of the tail pointers, since we don't know if they were
	 * updated on last use of the port.
	 */
	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
	pd->port_lastrcvhdrqtail = -1;
	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
		   pd->port_port, head32);
	pd->port_tidcursor = 0; /* start at beginning after open */

	/* initialize poll variables... */
	pd->port_urgent = 0;
	pd->port_urgent_poll = 0;
	pd->port_hdrqfull_poll = pd->port_hdrqfull;

	/*
	 * Now enable the port for receive.
	 * For chips that DMA the tail register to memory when it changes
	 * (and when the update bit transitions from 0 to 1), we turn the
	 * tail update off and then back on.  This will (very briefly)
	 * affect any other open ports, but the duration is very short,
	 * and therefore isn't an issue.  We explicitly set the in-memory
	 * tail copy to 0 beforehand, so we don't have to wait to be sure
	 * the DMA update has happened (the chip resets head/tail to 0 on
	 * the transition to enabled).
	 */
	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
		&dd->ipath_rcvctrl);
	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl &
				 ~(1ULL << dd->ipath_r_tailupd_shift));
	}
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* Notify any waiting slaves */
	if (pd->port_subport_cnt) {
		clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
		wake_up(&pd->port_wait);
	}
done:
	return ret;
}
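
/*
 * Worked example of the PIO buffer carve-up above, with made-up numbers
 * (ipath_pbufsport = 64, ipath_ports_extrabuf = 2, neither taken from any
 * particular chip): port 1 gets piocnt 65 at pio_base 0 (buffers 0..64),
 * port 2 gets 65 at 65 (buffers 65..129), and port 3, past the extras,
 * gets 64 at 2 + 64 * 2 = 130 (buffers 130..193).  Each port's first
 * buffer is then at ipath_piobufbase + pio_base * ipath_palign in chip
 * address space.
 */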

/**
 * unlock_expected_tids - unlock any expected TID entries the port still had in use
 * @pd: port
 *
 * We don't actually update the chip here, because the caller
 * (ipath_close) does a bulk update using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
		   pd->port_port);
	for (i = port_tidbase; i < maxtid; i++) {
		struct page *ps = dd->ipath_pageshadow[i];

		if (!ps)
			continue;

		dd->ipath_pageshadow[i] = NULL;
		pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
			       PAGE_SIZE, PCI_DMA_FROMDEVICE);
		ipath_release_user_pages_on_close(&ps, 1);
		cnt++;
		ipath_stats.sps_pageunlocks++;
	}
	if (cnt)
		ipath_cdbg(VERBOSE, "Port %u unlocked %u expTID entries\n",
			   pd->port_port, cnt);

	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
			   (unsigned long long) ipath_stats.sps_pagelocks,
			   (unsigned long long)
			   ipath_stats.sps_pageunlocks);
}
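
/*
 * Illustration of the shadow-array indexing used above (sizes are
 * hypothetical): expected-TID entries are stored per port in one flat
 * array, so with ipath_rcvtidcnt = 512, port 2 owns indices
 * 2 * 512 = 1024 through 1535 of ipath_pageshadow[] and
 * ipath_physshadow[], which is exactly the [port_tidbase, maxtid)
 * range walked by the loop.
 */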

static int ipath_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct ipath_filedata *fd;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	unsigned port;

	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
		   (long)in->i_rdev, fp->private_data);

	mutex_lock(&ipath_mutex);

	fd = (struct ipath_filedata *) fp->private_data;
	fp->private_data = NULL;
	pd = fd->pd;
	if (!pd) {
		mutex_unlock(&ipath_mutex);
		goto bail;
	}

	dd = pd->port_dd;

	/* drain user sdma queue */
	ipath_user_sdma_queue_drain(dd, fd->pq);
	ipath_user_sdma_queue_destroy(fd->pq);

	if (--pd->port_cnt) {
		/*
		 * XXX If the master closes the port before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		pd->active_slaves &= ~(1 << fd->subport);
		put_pid(pd->port_subpid[fd->subport]);
		pd->port_subpid[fd->subport] = NULL;
		mutex_unlock(&ipath_mutex);
		goto bail;
	}
	port = pd->port_port;

	if (pd->port_hdrqfull) {
		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
			   "during run\n", pd->port_comm, pid_nr(pd->port_pid),
			   pd->port_hdrqfull);
		pd->port_hdrqfull = 0;
	}

	if (pd->port_rcvwait_to || pd->port_piowait_to
	    || pd->port_rcvnowait || pd->port_pionowait) {
		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
			   "%u rcv nowait, %u pio nowait\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
		pd->port_rcvwait_to = pd->port_piowait_to =
			pd->port_rcvnowait = pd->port_pionowait = 0;
	}
	if (pd->port_flag) {
		ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
			   pd->port_port, pd->port_flag);
		pd->port_flag = 0;
	}

	if (dd->ipath_kregbase) {
		/* atomically clear receive enable port and intr avail. */
		clear_bit(dd->ipath_r_portenable_shift + port,
			  &dd->ipath_rcvctrl);
		clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
			  &dd->ipath_rcvctrl);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);
		/* and read back from chip to be sure that nothing
		 * else is in flight when we do the rest */
		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

		/* clean up the pkeys for this port user */
		ipath_clean_part_key(pd, dd);
		/*
		 * be paranoid, and never write 0's to these, just use an
		 * unused part of the port 0 tail page.  Of course,
		 * rcvhdraddr points to a large chunk of memory, so this
		 * could still trash things, but at least it won't trash
		 * page 0, and by disabling the port, it should stop "soon",
		 * even if a packet or two is already in flight after we
		 * disabled the port.
		 */
		ipath_write_kreg_port(dd,
			dd->ipath_kregs->kr_rcvhdrtailaddr, port,
			dd->ipath_dummy_hdrq_phys);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			pd->port_port, dd->ipath_dummy_hdrq_phys);

		ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
		ipath_chg_pioavailkernel(dd, pd->port_pio_base,
					 pd->port_piocnt, 1);

		dd->ipath_f_clear_tids(dd, pd->port_port);

		if (dd->ipath_pageshadow)
			unlock_expected_tids(pd);
		ipath_stats.sps_ports--;
		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
			   pd->port_comm, pid_nr(pd->port_pid),
			   dd->ipath_unit, port);
	}

	put_pid(pd->port_pid);
	pd->port_pid = NULL;
	dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
	mutex_unlock(&ipath_mutex);
	ipath_free_pddata(dd, pd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}

static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
			   struct ipath_port_info __user *uinfo)
{
	struct ipath_port_info info;
	int nup;
	int ret;
	size_t sz;

	(void) ipath_count_units(NULL, &nup, NULL);
	info.num_active = nup;
	info.unit = pd->port_dd->ipath_unit;
	info.port = pd->port_port;
	info.subport = subport;
	/* Don't return new fields if old library opened the port. */
	if (ipath_supports_subports(pd->userversion >> 16,
				    pd->userversion & 0xffff)) {
		/* Number of user ports available for this device. */
		info.num_ports = pd->port_dd->ipath_cfgports - 1;
		info.num_subports = pd->port_subport_cnt;
		sz = sizeof(info);
	} else
		sz = sizeof(info) - 2 * sizeof(u16);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}
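
/*
 * Backward-compatibility note: the 2 * sizeof(u16) trim above assumes
 * num_ports and num_subports are the two trailing u16 fields of struct
 * ipath_port_info, so a library that predates subport support receives
 * only the shorter structure it was compiled against and never sees the
 * new fields.
 */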

static int ipath_get_slave_info(struct ipath_portdata *pd,
				void __user *slave_mask_addr)
{
	int ret = 0;

	if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
		ret = -EFAULT;
	return ret;
}

static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
				   u32 __user *inflightp)
{
	const u32 val = ipath_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}

static int ipath_sdma_get_complete(struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   u32 __user *completep)
{
	u32 val;
	int err;

	err = ipath_user_sdma_make_progress(dd, pq);
	if (err < 0)
		return err;

	val = ipath_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}

static ssize_t ipath_write(struct file *fp, const char __user *data,
			   size_t count, loff_t *off)
{
	const struct ipath_cmd __user *ucmd;
	struct ipath_portdata *pd;
	const void __user *src;
	size_t consumed, copy;
	struct ipath_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct ipath_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
	case __IPATH_CMD_USER_INIT:
	case IPATH_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;
	case IPATH_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;
	case IPATH_CMD_PORT_INFO:
		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;
		src = &ucmd->cmd.port_info;
		break;
	case IPATH_CMD_TID_UPDATE:
	case IPATH_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;
	case IPATH_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;
	case __IPATH_CMD_SLAVE_INFO:
		copy = sizeof(cmd.cmd.slave_mask_addr);
		dest = &cmd.cmd.slave_mask_addr;
		src = &ucmd->cmd.slave_mask_addr;
		break;
	case IPATH_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;
	case IPATH_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;
	case IPATH_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;
	case IPATH_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;
	case IPATH_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}

		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}

		consumed += copy;
	}

	pd = port_fp(fp);
	if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
	    cmd.type != IPATH_CMD_ASSIGN_PORT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;
	case __IPATH_CMD_USER_INIT:
		/* backwards compatibility, get port first */
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		/* and fall through to current version. */
	case IPATH_CMD_USER_INIT:
		ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = ipath_get_base_info(
			fp, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		break;
	case IPATH_CMD_RECV_CTRL:
		ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
		break;
	case IPATH_CMD_PORT_INFO:
		ret = ipath_port_info(pd, subport_fp(fp),
				      (struct ipath_port_info __user *)
				      (unsigned long) cmd.cmd.port_info);
		break;
	case IPATH_CMD_TID_UPDATE:
		ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_TID_FREE:
		ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_SET_PART_KEY:
		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		break;
	case __IPATH_CMD_SLAVE_INFO:
		ret = ipath_get_slave_info(pd,
					   (void __user *) (unsigned long)
					   cmd.cmd.slave_mask_addr);
		break;
	case IPATH_CMD_PIOAVAILUPD:
		ipath_force_pio_avail_update(pd->port_dd);
		break;
	case IPATH_CMD_POLL_TYPE:
		pd->poll_type = cmd.cmd.poll_type;
		break;
	case IPATH_CMD_ARMLAUNCH_CTRL:
		if (cmd.cmd.armlaunch_ctrl)
			ipath_enable_armlaunch(pd->port_dd);
		else
			ipath_disable_armlaunch(pd->port_dd);
		break;
	case IPATH_CMD_SDMA_INFLIGHT:
		ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
					      (u32 __user *) (unsigned long)
					      cmd.cmd.sdma_inflight);
		break;
	case IPATH_CMD_SDMA_COMPLETE:
		ret = ipath_sdma_get_complete(pd->port_dd,
					      user_sdma_queue_fp(fp),
					      (u32 __user *) (unsigned long)
					      cmd.cmd.sdma_complete);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}
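
/*
 * User-space usage sketch (illustrative only; "fd" would come from
 * opening /dev/ipath or /dev/ipathN, and error handling is elided):
 *
 *	struct ipath_cmd c = { .type = IPATH_CMD_PIOAVAILUPD };
 *
 *	if (write(fd, &c, sizeof(c.type)) != sizeof(c.type))
 *		... handle error ...
 *
 * The driver consumes cmd.type plus whatever payload the type selects,
 * and a successful write() returns the number of bytes consumed, which
 * for a payload-free command like IPATH_CMD_PIOAVAILUPD is just
 * sizeof(c.type).
 */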

static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
			    unsigned long dim, loff_t off)
{
	struct file *filp = iocb->ki_filp;
	struct ipath_filedata *fp = filp->private_data;
	struct ipath_portdata *pd = port_fp(filp);
	struct ipath_user_sdma_queue *pq = fp->pq;

	if (!dim)
		return -EINVAL;

	return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
}

static struct class *ipath_class;

static int init_cdev(int minor, char *name, const struct file_operations *fops,
		     struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
	struct cdev *cdev = NULL;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create_drvdata(ipath_class, NULL, dev, NULL, name);

	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	goto done;

err_cdev:
	cdev_del(cdev);
	cdev = NULL;

done:
	if (ret >= 0) {
		*cdevp = cdev;
		*devp = device;
	} else {
		*cdevp = NULL;
		*devp = NULL;
	}

	return ret;
}

int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
		    struct cdev **cdevp, struct device **devp)
{
	return init_cdev(minor, name, fops, cdevp, devp);
}
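
/*
 * Design note (an inference from the code above, not from any comment in
 * the original): both failure paths after cdev_alloc() funnel through
 * err_cdev, where cdev_del() drops the kobject reference taken by
 * cdev_alloc(), so the caller only ever sees a fully constructed
 * cdev/device pair or two NULL pointers.
 */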

static void cleanup_cdev(struct cdev **cdevp,
			 struct device **devp)
{
	struct device *dev = *devp;

	if (dev) {
		device_unregister(dev);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

void ipath_cdev_cleanup(struct cdev **cdevp,
			struct device **devp)
{
	cleanup_cdev(cdevp, devp);
}

static struct cdev *wildcard_cdev;
static struct device *wildcard_dev;

static const dev_t dev = MKDEV(IPATH_MAJOR, 0);

static int user_init(void)
{
	int ret;

	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		goto bail;
	}

	goto done;
bail:
	unregister_chrdev_region(dev, IPATH_NMINORS);
done:
	return ret;
}

static void user_cleanup(void)
{
	if (ipath_class) {
		class_destroy(ipath_class);
		ipath_class = NULL;
	}

	unregister_chrdev_region(dev, IPATH_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);
static atomic_t user_setup = ATOMIC_INIT(0);

int ipath_user_add(struct ipath_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = user_init();
		if (ret < 0) {
			ipath_dev_err(dd, "Unable to set up user support: "
				      "error %d\n", -ret);
			goto bail;
		}
		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
				&wildcard_dev);
		if (ret < 0) {
			ipath_dev_err(dd, "Could not create wildcard "
				      "minor: error %d\n", -ret);
			goto bail_user;
		}

		atomic_set(&user_setup, 1);
	}

	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);

	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
			&dd->user_cdev, &dd->user_dev);
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
			      dd->ipath_unit + 1, name);

	goto bail;

bail_user:
	user_cleanup();
bail:
	return ret;
}
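
/*
 * Minor-number layout sketch (assuming IPATH_USER_MINOR_BASE is 0, as the
 * direct use of "minor" in MKDEV() above suggests, and the conventional
 * udev naming for the device_create() names): minor 0 is the wildcard
 * /dev/ipath, which ipath_assign_port() hands to find_best_unit(), and
 * unit N is /dev/ipathN at minor N + 1, which ipath_assign_port() maps
 * back via i_minor - 1 to open that specific unit through
 * find_free_port().
 */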

void ipath_user_remove(struct ipath_devdata *dd)
{
	cleanup_cdev(&dd->user_cdev, &dd->user_dev);

	if (atomic_dec_return(&user_count) == 0) {
		if (atomic_read(&user_setup) == 0)
			goto bail;

		cleanup_cdev(&wildcard_cdev, &wildcard_dev);
		user_cleanup();

		atomic_set(&user_setup, 0);
	}
bail:
	return;
}