ipath_file_ops.c revision cbe31f02f5b5536f17dd978118e25052af528071
1/* 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34#include <linux/pci.h> 35#include <linux/poll.h> 36#include <linux/cdev.h> 37#include <linux/swap.h> 38#include <linux/vmalloc.h> 39#include <linux/highmem.h> 40#include <linux/io.h> 41#include <linux/jiffies.h> 42#include <linux/smp_lock.h> 43#include <asm/pgtable.h> 44 45#include "ipath_kernel.h" 46#include "ipath_common.h" 47#include "ipath_user_sdma.h" 48 49static int ipath_open(struct inode *, struct file *); 50static int ipath_close(struct inode *, struct file *); 51static ssize_t ipath_write(struct file *, const char __user *, size_t, 52 loff_t *); 53static ssize_t ipath_writev(struct kiocb *, const struct iovec *, 54 unsigned long , loff_t); 55static unsigned int ipath_poll(struct file *, struct poll_table_struct *); 56static int ipath_mmap(struct file *, struct vm_area_struct *); 57 58static const struct file_operations ipath_file_ops = { 59 .owner = THIS_MODULE, 60 .write = ipath_write, 61 .aio_write = ipath_writev, 62 .open = ipath_open, 63 .release = ipath_close, 64 .poll = ipath_poll, 65 .mmap = ipath_mmap 66}; 67 68/* 69 * Convert kernel virtual addresses to physical addresses so they don't 70 * potentially conflict with the chip addresses used as mmap offsets. 71 * It doesn't really matter what mmap offset we use as long as we can 72 * interpret it correctly. 73 */ 74static u64 cvt_kvaddr(void *p) 75{ 76 struct page *page; 77 u64 paddr = 0; 78 79 page = vmalloc_to_page(p); 80 if (page) 81 paddr = page_to_pfn(page) << PAGE_SHIFT; 82 83 return paddr; 84} 85 86static int ipath_get_base_info(struct file *fp, 87 void __user *ubase, size_t ubase_size) 88{ 89 struct ipath_portdata *pd = port_fp(fp); 90 int ret = 0; 91 struct ipath_base_info *kinfo = NULL; 92 struct ipath_devdata *dd = pd->port_dd; 93 unsigned subport_cnt; 94 int shared, master; 95 size_t sz; 96 97 subport_cnt = pd->port_subport_cnt; 98 if (!subport_cnt) { 99 shared = 0; 100 master = 0; 101 subport_cnt = 1; 102 } else { 103 shared = 1; 104 master = !subport_fp(fp); 105 } 106 107 sz = sizeof(*kinfo); 108 /* If port sharing is not requested, allow the old size structure */ 109 if (!shared) 110 sz -= 7 * sizeof(u64); 111 if (ubase_size < sz) { 112 ipath_cdbg(PROC, 113 "Base size %zu, need %zu (version mismatch?)\n", 114 ubase_size, sz); 115 ret = -EINVAL; 116 goto bail; 117 } 118 119 kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL); 120 if (kinfo == NULL) { 121 ret = -ENOMEM; 122 goto bail; 123 } 124 125 ret = dd->ipath_f_get_base_info(pd, kinfo); 126 if (ret < 0) 127 goto bail; 128 129 kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt; 130 kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize; 131 kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt; 132 kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize; 133 /* 134 * have to mmap whole thing 135 */ 136 kinfo->spi_rcv_egrbuftotlen = 137 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; 138 kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk; 139 kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen / 140 pd->port_rcvegrbuf_chunks; 141 kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt; 142 if (master) 143 kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt; 144 /* 145 * for this use, may be ipath_cfgports summed over all chips that 146 * are are configured and present 147 */ 148 kinfo->spi_nports = dd->ipath_cfgports; 149 /* unit (chip/board) our port is on */ 150 kinfo->spi_unit = dd->ipath_unit; 151 /* for now, only a single page */ 152 kinfo->spi_tid_maxsize = PAGE_SIZE; 153 154 /* 155 * Doing this per port, and based on the skip value, etc. This has 156 * to be the actual buffer size, since the protocol code treats it 157 * as an array. 158 * 159 * These have to be set to user addresses in the user code via mmap. 160 * These values are used on return to user code for the mmap target 161 * addresses only. For 32 bit, same 44 bit address problem, so use 162 * the physical address, not virtual. Before 2.6.11, using the 163 * page_address() macro worked, but in 2.6.11, even that returns the 164 * full 64 bit address (upper bits all 1's). So far, using the 165 * physical addresses (or chip offsets, for chip mapping) works, but 166 * no doubt some future kernel release will change that, and we'll be 167 * on to yet another method of dealing with this. 168 */ 169 kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys; 170 kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys; 171 kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys; 172 kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys; 173 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + 174 (void *) dd->ipath_statusp - 175 (void *) dd->ipath_pioavailregs_dma; 176 if (!shared) { 177 kinfo->spi_piocnt = pd->port_piocnt; 178 kinfo->spi_piobufbase = (u64) pd->port_piobufs; 179 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + 180 dd->ipath_ureg_align * pd->port_port; 181 } else if (master) { 182 kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) + 183 (pd->port_piocnt % subport_cnt); 184 /* Master's PIO buffers are after all the slave's */ 185 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 186 dd->ipath_palign * 187 (pd->port_piocnt - kinfo->spi_piocnt); 188 } else { 189 unsigned slave = subport_fp(fp) - 1; 190 191 kinfo->spi_piocnt = pd->port_piocnt / subport_cnt; 192 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 193 dd->ipath_palign * kinfo->spi_piocnt * slave; 194 } 195 196 if (shared) { 197 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + 198 dd->ipath_ureg_align * pd->port_port; 199 kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs; 200 kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base; 201 kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr; 202 203 kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase + 204 PAGE_SIZE * subport_fp(fp)); 205 206 kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base + 207 pd->port_rcvhdrq_size * subport_fp(fp)); 208 kinfo->spi_rcvhdr_tailaddr = 0; 209 kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf + 210 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size * 211 subport_fp(fp)); 212 213 kinfo->spi_subport_uregbase = 214 cvt_kvaddr(pd->subport_uregbase); 215 kinfo->spi_subport_rcvegrbuf = 216 cvt_kvaddr(pd->subport_rcvegrbuf); 217 kinfo->spi_subport_rcvhdr_base = 218 cvt_kvaddr(pd->subport_rcvhdr_base); 219 ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n", 220 kinfo->spi_port, kinfo->spi_runtime_flags, 221 (unsigned long long) kinfo->spi_subport_uregbase, 222 (unsigned long long) kinfo->spi_subport_rcvegrbuf, 223 (unsigned long long) kinfo->spi_subport_rcvhdr_base); 224 } 225 226 /* 227 * All user buffers are 2KB buffers. If we ever support 228 * giving 4KB buffers to user processes, this will need some 229 * work. 230 */ 231 kinfo->spi_pioindex = (kinfo->spi_piobufbase - 232 (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign; 233 kinfo->spi_pioalign = dd->ipath_palign; 234 235 kinfo->spi_qpair = IPATH_KD_QP; 236 /* 237 * user mode PIO buffers are always 2KB, even when 4KB can 238 * be received, and sent via the kernel; this is ibmaxlen 239 * for 2K MTU. 240 */ 241 kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32); 242 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ 243 kinfo->spi_port = pd->port_port; 244 kinfo->spi_subport = subport_fp(fp); 245 kinfo->spi_sw_version = IPATH_KERN_SWVERSION; 246 kinfo->spi_hw_version = dd->ipath_revision; 247 248 if (master) { 249 kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; 250 } 251 252 sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo); 253 if (copy_to_user(ubase, kinfo, sz)) 254 ret = -EFAULT; 255 256bail: 257 kfree(kinfo); 258 return ret; 259} 260 261/** 262 * ipath_tid_update - update a port TID 263 * @pd: the port 264 * @fp: the ipath device file 265 * @ti: the TID information 266 * 267 * The new implementation as of Oct 2004 is that the driver assigns 268 * the tid and returns it to the caller. To make it easier to 269 * catch bugs, and to reduce search time, we keep a cursor for 270 * each port, walking the shadow tid array to find one that's not 271 * in use. 272 * 273 * For now, if we can't allocate the full list, we fail, although 274 * in the long run, we'll allocate as many as we can, and the 275 * caller will deal with that by trying the remaining pages later. 276 * That means that when we fail, we have to mark the tids as not in 277 * use again, in our shadow copy. 278 * 279 * It's up to the caller to free the tids when they are done. 280 * We'll unlock the pages as they free them. 281 * 282 * Also, right now we are locking one page at a time, but since 283 * the intended use of this routine is for a single group of 284 * virtually contiguous pages, that should change to improve 285 * performance. 286 */ 287static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp, 288 const struct ipath_tid_info *ti) 289{ 290 int ret = 0, ntids; 291 u32 tid, porttid, cnt, i, tidcnt, tidoff; 292 u16 *tidlist; 293 struct ipath_devdata *dd = pd->port_dd; 294 u64 physaddr; 295 unsigned long vaddr; 296 u64 __iomem *tidbase; 297 unsigned long tidmap[8]; 298 struct page **pagep = NULL; 299 unsigned subport = subport_fp(fp); 300 301 if (!dd->ipath_pageshadow) { 302 ret = -ENOMEM; 303 goto done; 304 } 305 306 cnt = ti->tidcnt; 307 if (!cnt) { 308 ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n", 309 (unsigned long long) ti->tidlist); 310 /* 311 * Should we treat as success? likely a bug 312 */ 313 ret = -EFAULT; 314 goto done; 315 } 316 porttid = pd->port_port * dd->ipath_rcvtidcnt; 317 if (!pd->port_subport_cnt) { 318 tidcnt = dd->ipath_rcvtidcnt; 319 tid = pd->port_tidcursor; 320 tidoff = 0; 321 } else if (!subport) { 322 tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + 323 (dd->ipath_rcvtidcnt % pd->port_subport_cnt); 324 tidoff = dd->ipath_rcvtidcnt - tidcnt; 325 porttid += tidoff; 326 tid = tidcursor_fp(fp); 327 } else { 328 tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; 329 tidoff = tidcnt * (subport - 1); 330 porttid += tidoff; 331 tid = tidcursor_fp(fp); 332 } 333 if (cnt > tidcnt) { 334 /* make sure it all fits in port_tid_pg_list */ 335 dev_info(&dd->pcidev->dev, "Process tried to allocate %u " 336 "TIDs, only trying max (%u)\n", cnt, tidcnt); 337 cnt = tidcnt; 338 } 339 pagep = &((struct page **) pd->port_tid_pg_list)[tidoff]; 340 tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff]; 341 342 memset(tidmap, 0, sizeof(tidmap)); 343 /* before decrement; chip actual # */ 344 ntids = tidcnt; 345 tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) + 346 dd->ipath_rcvtidbase + 347 porttid * sizeof(*tidbase)); 348 349 ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n", 350 pd->port_port, cnt, tid, tidbase); 351 352 /* virtual address of first page in transfer */ 353 vaddr = ti->tidvaddr; 354 if (!access_ok(VERIFY_WRITE, (void __user *) vaddr, 355 cnt * PAGE_SIZE)) { 356 ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n", 357 (void *)vaddr, cnt); 358 ret = -EFAULT; 359 goto done; 360 } 361 ret = ipath_get_user_pages(vaddr, cnt, pagep); 362 if (ret) { 363 if (ret == -EBUSY) { 364 ipath_dbg("Failed to lock addr %p, %u pages " 365 "(already locked)\n", 366 (void *) vaddr, cnt); 367 /* 368 * for now, continue, and see what happens but with 369 * the new implementation, this should never happen, 370 * unless perhaps the user has mpin'ed the pages 371 * themselves (something we need to test) 372 */ 373 ret = 0; 374 } else { 375 dev_info(&dd->pcidev->dev, 376 "Failed to lock addr %p, %u pages: " 377 "errno %d\n", (void *) vaddr, cnt, -ret); 378 goto done; 379 } 380 } 381 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { 382 for (; ntids--; tid++) { 383 if (tid == tidcnt) 384 tid = 0; 385 if (!dd->ipath_pageshadow[porttid + tid]) 386 break; 387 } 388 if (ntids < 0) { 389 /* 390 * oops, wrapped all the way through their TIDs, 391 * and didn't have enough free; see comments at 392 * start of routine 393 */ 394 ipath_dbg("Not enough free TIDs for %u pages " 395 "(index %d), failing\n", cnt, i); 396 i--; /* last tidlist[i] not filled in */ 397 ret = -ENOMEM; 398 break; 399 } 400 tidlist[i] = tid + tidoff; 401 ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, " 402 "vaddr %lx\n", i, tid + tidoff, vaddr); 403 /* we "know" system pages and TID pages are same size */ 404 dd->ipath_pageshadow[porttid + tid] = pagep[i]; 405 dd->ipath_physshadow[porttid + tid] = ipath_map_page( 406 dd->pcidev, pagep[i], 0, PAGE_SIZE, 407 PCI_DMA_FROMDEVICE); 408 /* 409 * don't need atomic or it's overhead 410 */ 411 __set_bit(tid, tidmap); 412 physaddr = dd->ipath_physshadow[porttid + tid]; 413 ipath_stats.sps_pagelocks++; 414 ipath_cdbg(VERBOSE, 415 "TID %u, vaddr %lx, physaddr %llx pgp %p\n", 416 tid, vaddr, (unsigned long long) physaddr, 417 pagep[i]); 418 dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED, 419 physaddr); 420 /* 421 * don't check this tid in ipath_portshadow, since we 422 * just filled it in; start with the next one. 423 */ 424 tid++; 425 } 426 427 if (ret) { 428 u32 limit; 429 cleanup: 430 /* jump here if copy out of updated info failed... */ 431 ipath_dbg("After failure (ret=%d), undo %d of %d entries\n", 432 -ret, i, cnt); 433 /* same code that's in ipath_free_tid() */ 434 limit = sizeof(tidmap) * BITS_PER_BYTE; 435 if (limit > tidcnt) 436 /* just in case size changes in future */ 437 limit = tidcnt; 438 tid = find_first_bit((const unsigned long *)tidmap, limit); 439 for (; tid < limit; tid++) { 440 if (!test_bit(tid, tidmap)) 441 continue; 442 if (dd->ipath_pageshadow[porttid + tid]) { 443 ipath_cdbg(VERBOSE, "Freeing TID %u\n", 444 tid); 445 dd->ipath_f_put_tid(dd, &tidbase[tid], 446 RCVHQ_RCV_TYPE_EXPECTED, 447 dd->ipath_tidinvalid); 448 pci_unmap_page(dd->pcidev, 449 dd->ipath_physshadow[porttid + tid], 450 PAGE_SIZE, PCI_DMA_FROMDEVICE); 451 dd->ipath_pageshadow[porttid + tid] = NULL; 452 ipath_stats.sps_pageunlocks++; 453 } 454 } 455 ipath_release_user_pages(pagep, cnt); 456 } else { 457 /* 458 * Copy the updated array, with ipath_tid's filled in, back 459 * to user. Since we did the copy in already, this "should 460 * never fail" If it does, we have to clean up... 461 */ 462 if (copy_to_user((void __user *) 463 (unsigned long) ti->tidlist, 464 tidlist, cnt * sizeof(*tidlist))) { 465 ret = -EFAULT; 466 goto cleanup; 467 } 468 if (copy_to_user((void __user *) (unsigned long) ti->tidmap, 469 tidmap, sizeof tidmap)) { 470 ret = -EFAULT; 471 goto cleanup; 472 } 473 if (tid == tidcnt) 474 tid = 0; 475 if (!pd->port_subport_cnt) 476 pd->port_tidcursor = tid; 477 else 478 tidcursor_fp(fp) = tid; 479 } 480 481done: 482 if (ret) 483 ipath_dbg("Failed to map %u TID pages, failing with %d\n", 484 ti->tidcnt, -ret); 485 return ret; 486} 487 488/** 489 * ipath_tid_free - free a port TID 490 * @pd: the port 491 * @subport: the subport 492 * @ti: the TID info 493 * 494 * right now we are unlocking one page at a time, but since 495 * the intended use of this routine is for a single group of 496 * virtually contiguous pages, that should change to improve 497 * performance. We check that the TID is in range for this port 498 * but otherwise don't check validity; if user has an error and 499 * frees the wrong tid, it's only their own data that can thereby 500 * be corrupted. We do check that the TID was in use, for sanity 501 * We always use our idea of the saved address, not the address that 502 * they pass in to us. 503 */ 504 505static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport, 506 const struct ipath_tid_info *ti) 507{ 508 int ret = 0; 509 u32 tid, porttid, cnt, limit, tidcnt; 510 struct ipath_devdata *dd = pd->port_dd; 511 u64 __iomem *tidbase; 512 unsigned long tidmap[8]; 513 514 if (!dd->ipath_pageshadow) { 515 ret = -ENOMEM; 516 goto done; 517 } 518 519 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, 520 sizeof tidmap)) { 521 ret = -EFAULT; 522 goto done; 523 } 524 525 porttid = pd->port_port * dd->ipath_rcvtidcnt; 526 if (!pd->port_subport_cnt) 527 tidcnt = dd->ipath_rcvtidcnt; 528 else if (!subport) { 529 tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + 530 (dd->ipath_rcvtidcnt % pd->port_subport_cnt); 531 porttid += dd->ipath_rcvtidcnt - tidcnt; 532 } else { 533 tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; 534 porttid += tidcnt * (subport - 1); 535 } 536 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + 537 dd->ipath_rcvtidbase + 538 porttid * sizeof(*tidbase)); 539 540 limit = sizeof(tidmap) * BITS_PER_BYTE; 541 if (limit > tidcnt) 542 /* just in case size changes in future */ 543 limit = tidcnt; 544 tid = find_first_bit(tidmap, limit); 545 ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) " 546 "set is %d, porttid %u\n", pd->port_port, ti->tidcnt, 547 limit, tid, porttid); 548 for (cnt = 0; tid < limit; tid++) { 549 /* 550 * small optimization; if we detect a run of 3 or so without 551 * any set, use find_first_bit again. That's mainly to 552 * accelerate the case where we wrapped, so we have some at 553 * the beginning, and some at the end, and a big gap 554 * in the middle. 555 */ 556 if (!test_bit(tid, tidmap)) 557 continue; 558 cnt++; 559 if (dd->ipath_pageshadow[porttid + tid]) { 560 struct page *p; 561 p = dd->ipath_pageshadow[porttid + tid]; 562 dd->ipath_pageshadow[porttid + tid] = NULL; 563 ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n", 564 pid_nr(pd->port_pid), tid); 565 dd->ipath_f_put_tid(dd, &tidbase[tid], 566 RCVHQ_RCV_TYPE_EXPECTED, 567 dd->ipath_tidinvalid); 568 pci_unmap_page(dd->pcidev, 569 dd->ipath_physshadow[porttid + tid], 570 PAGE_SIZE, PCI_DMA_FROMDEVICE); 571 ipath_release_user_pages(&p, 1); 572 ipath_stats.sps_pageunlocks++; 573 } else 574 ipath_dbg("Unused tid %u, ignoring\n", tid); 575 } 576 if (cnt != ti->tidcnt) 577 ipath_dbg("passed in tidcnt %d, only %d bits set in map\n", 578 ti->tidcnt, cnt); 579done: 580 if (ret) 581 ipath_dbg("Failed to unmap %u TID pages, failing with %d\n", 582 ti->tidcnt, -ret); 583 return ret; 584} 585 586/** 587 * ipath_set_part_key - set a partition key 588 * @pd: the port 589 * @key: the key 590 * 591 * We can have up to 4 active at a time (other than the default, which is 592 * always allowed). This is somewhat tricky, since multiple ports may set 593 * the same key, so we reference count them, and clean up at exit. All 4 594 * partition keys are packed into a single infinipath register. It's an 595 * error for a process to set the same pkey multiple times. We provide no 596 * mechanism to de-allocate a pkey at this time, we may eventually need to 597 * do that. I've used the atomic operations, and no locking, and only make 598 * a single pass through what's available. This should be more than 599 * adequate for some time. I'll think about spinlocks or the like if and as 600 * it's necessary. 601 */ 602static int ipath_set_part_key(struct ipath_portdata *pd, u16 key) 603{ 604 struct ipath_devdata *dd = pd->port_dd; 605 int i, any = 0, pidx = -1; 606 u16 lkey = key & 0x7FFF; 607 int ret; 608 609 if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) { 610 /* nothing to do; this key always valid */ 611 ret = 0; 612 goto bail; 613 } 614 615 ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys " 616 "%hx:%x %hx:%x %hx:%x %hx:%x\n", 617 pd->port_port, key, dd->ipath_pkeys[0], 618 atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1], 619 atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2], 620 atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3], 621 atomic_read(&dd->ipath_pkeyrefs[3])); 622 623 if (!lkey) { 624 ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n", 625 pd->port_port); 626 ret = -EINVAL; 627 goto bail; 628 } 629 630 /* 631 * Set the full membership bit, because it has to be 632 * set in the register or the packet, and it seems 633 * cleaner to set in the register than to force all 634 * callers to set it. (see bug 4331) 635 */ 636 key |= 0x8000; 637 638 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { 639 if (!pd->port_pkeys[i] && pidx == -1) 640 pidx = i; 641 if (pd->port_pkeys[i] == key) { 642 ipath_cdbg(VERBOSE, "p%u tries to set same pkey " 643 "(%x) more than once\n", 644 pd->port_port, key); 645 ret = -EEXIST; 646 goto bail; 647 } 648 } 649 if (pidx == -1) { 650 ipath_dbg("All pkeys for port %u already in use, " 651 "can't set %x\n", pd->port_port, key); 652 ret = -EBUSY; 653 goto bail; 654 } 655 for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { 656 if (!dd->ipath_pkeys[i]) { 657 any++; 658 continue; 659 } 660 if (dd->ipath_pkeys[i] == key) { 661 atomic_t *pkrefs = &dd->ipath_pkeyrefs[i]; 662 663 if (atomic_inc_return(pkrefs) > 1) { 664 pd->port_pkeys[pidx] = key; 665 ipath_cdbg(VERBOSE, "p%u set key %x " 666 "matches #%d, count now %d\n", 667 pd->port_port, key, i, 668 atomic_read(pkrefs)); 669 ret = 0; 670 goto bail; 671 } else { 672 /* 673 * lost race, decrement count, catch below 674 */ 675 atomic_dec(pkrefs); 676 ipath_cdbg(VERBOSE, "Lost race, count was " 677 "0, after dec, it's %d\n", 678 atomic_read(pkrefs)); 679 any++; 680 } 681 } 682 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { 683 /* 684 * It makes no sense to have both the limited and 685 * full membership PKEY set at the same time since 686 * the unlimited one will disable the limited one. 687 */ 688 ret = -EEXIST; 689 goto bail; 690 } 691 } 692 if (!any) { 693 ipath_dbg("port %u, all pkeys already in use, " 694 "can't set %x\n", pd->port_port, key); 695 ret = -EBUSY; 696 goto bail; 697 } 698 for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { 699 if (!dd->ipath_pkeys[i] && 700 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { 701 u64 pkey; 702 703 /* for ipathstats, etc. */ 704 ipath_stats.sps_pkeys[i] = lkey; 705 pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key; 706 pkey = 707 (u64) dd->ipath_pkeys[0] | 708 ((u64) dd->ipath_pkeys[1] << 16) | 709 ((u64) dd->ipath_pkeys[2] << 32) | 710 ((u64) dd->ipath_pkeys[3] << 48); 711 ipath_cdbg(PROC, "p%u set key %x in #%d, " 712 "portidx %d, new pkey reg %llx\n", 713 pd->port_port, key, i, pidx, 714 (unsigned long long) pkey); 715 ipath_write_kreg( 716 dd, dd->ipath_kregs->kr_partitionkey, pkey); 717 718 ret = 0; 719 goto bail; 720 } 721 } 722 ipath_dbg("port %u, all pkeys already in use 2nd pass, " 723 "can't set %x\n", pd->port_port, key); 724 ret = -EBUSY; 725 726bail: 727 return ret; 728} 729 730/** 731 * ipath_manage_rcvq - manage a port's receive queue 732 * @pd: the port 733 * @subport: the subport 734 * @start_stop: action to carry out 735 * 736 * start_stop == 0 disables receive on the port, for use in queue 737 * overflow conditions. start_stop==1 re-enables, to be used to 738 * re-init the software copy of the head register 739 */ 740static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, 741 int start_stop) 742{ 743 struct ipath_devdata *dd = pd->port_dd; 744 745 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", 746 start_stop ? "en" : "dis", dd->ipath_unit, 747 pd->port_port, subport); 748 if (subport) 749 goto bail; 750 /* atomically clear receive enable port. */ 751 if (start_stop) { 752 /* 753 * On enable, force in-memory copy of the tail register to 754 * 0, so that protocol code doesn't have to worry about 755 * whether or not the chip has yet updated the in-memory 756 * copy or not on return from the system call. The chip 757 * always resets it's tail register back to 0 on a 758 * transition from disabled to enabled. This could cause a 759 * problem if software was broken, and did the enable w/o 760 * the disable, but eventually the in-memory copy will be 761 * updated and correct itself, even in the face of software 762 * bugs. 763 */ 764 if (pd->port_rcvhdrtail_kvaddr) 765 ipath_clear_rcvhdrtail(pd); 766 set_bit(dd->ipath_r_portenable_shift + pd->port_port, 767 &dd->ipath_rcvctrl); 768 } else 769 clear_bit(dd->ipath_r_portenable_shift + pd->port_port, 770 &dd->ipath_rcvctrl); 771 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 772 dd->ipath_rcvctrl); 773 /* now be sure chip saw it before we return */ 774 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 775 if (start_stop) { 776 /* 777 * And try to be sure that tail reg update has happened too. 778 * This should in theory interlock with the RXE changes to 779 * the tail register. Don't assign it to the tail register 780 * in memory copy, since we could overwrite an update by the 781 * chip if we did. 782 */ 783 ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 784 } 785 /* always; new head should be equal to new tail; see above */ 786bail: 787 return 0; 788} 789 790static void ipath_clean_part_key(struct ipath_portdata *pd, 791 struct ipath_devdata *dd) 792{ 793 int i, j, pchanged = 0; 794 u64 oldpkey; 795 796 /* for debugging only */ 797 oldpkey = (u64) dd->ipath_pkeys[0] | 798 ((u64) dd->ipath_pkeys[1] << 16) | 799 ((u64) dd->ipath_pkeys[2] << 32) | 800 ((u64) dd->ipath_pkeys[3] << 48); 801 802 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { 803 if (!pd->port_pkeys[i]) 804 continue; 805 ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i, 806 pd->port_pkeys[i]); 807 for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) { 808 /* check for match independent of the global bit */ 809 if ((dd->ipath_pkeys[j] & 0x7fff) != 810 (pd->port_pkeys[i] & 0x7fff)) 811 continue; 812 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) { 813 ipath_cdbg(VERBOSE, "p%u clear key " 814 "%x matches #%d\n", 815 pd->port_port, 816 pd->port_pkeys[i], j); 817 ipath_stats.sps_pkeys[j] = 818 dd->ipath_pkeys[j] = 0; 819 pchanged++; 820 } 821 else ipath_cdbg( 822 VERBOSE, "p%u key %x matches #%d, " 823 "but ref still %d\n", pd->port_port, 824 pd->port_pkeys[i], j, 825 atomic_read(&dd->ipath_pkeyrefs[j])); 826 break; 827 } 828 pd->port_pkeys[i] = 0; 829 } 830 if (pchanged) { 831 u64 pkey = (u64) dd->ipath_pkeys[0] | 832 ((u64) dd->ipath_pkeys[1] << 16) | 833 ((u64) dd->ipath_pkeys[2] << 32) | 834 ((u64) dd->ipath_pkeys[3] << 48); 835 ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, " 836 "new pkey reg %llx\n", pd->port_port, 837 (unsigned long long) oldpkey, 838 (unsigned long long) pkey); 839 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, 840 pkey); 841 } 842} 843 844/* 845 * Initialize the port data with the receive buffer sizes 846 * so this can be done while the master port is locked. 847 * Otherwise, there is a race with a slave opening the port 848 * and seeing these fields uninitialized. 849 */ 850static void init_user_egr_sizes(struct ipath_portdata *pd) 851{ 852 struct ipath_devdata *dd = pd->port_dd; 853 unsigned egrperchunk, egrcnt, size; 854 855 /* 856 * to avoid wasting a lot of memory, we allocate 32KB chunks of 857 * physically contiguous memory, advance through it until used up 858 * and then allocate more. Of course, we need memory to store those 859 * extra pointers, now. Started out with 256KB, but under heavy 860 * memory pressure (creating large files and then copying them over 861 * NFS while doing lots of MPI jobs), we hit some allocation 862 * failures, even though we can sleep... (2.6.10) Still get 863 * failures at 64K. 32K is the lowest we can go without wasting 864 * additional memory. 865 */ 866 size = 0x8000; 867 egrperchunk = size / dd->ipath_rcvegrbufsize; 868 egrcnt = dd->ipath_rcvegrcnt; 869 pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk; 870 pd->port_rcvegrbufs_perchunk = egrperchunk; 871 pd->port_rcvegrbuf_size = size; 872} 873 874/** 875 * ipath_create_user_egr - allocate eager TID buffers 876 * @pd: the port to allocate TID buffers for 877 * 878 * This routine is now quite different for user and kernel, because 879 * the kernel uses skb's, for the accelerated network performance 880 * This is the user port version 881 * 882 * Allocate the eager TID buffers and program them into infinipath 883 * They are no longer completely contiguous, we do multiple allocation 884 * calls. 885 */ 886static int ipath_create_user_egr(struct ipath_portdata *pd) 887{ 888 struct ipath_devdata *dd = pd->port_dd; 889 unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; 890 size_t size; 891 int ret; 892 gfp_t gfp_flags; 893 894 /* 895 * GFP_USER, but without GFP_FS, so buffer cache can be 896 * coalesced (we hope); otherwise, even at order 4, 897 * heavy filesystem activity makes these fail, and we can 898 * use compound pages. 899 */ 900 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; 901 902 egrcnt = dd->ipath_rcvegrcnt; 903 /* TID number offset for this port */ 904 egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt; 905 egrsize = dd->ipath_rcvegrbufsize; 906 ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid " 907 "offset %x, egrsize %u\n", egrcnt, egroff, egrsize); 908 909 chunk = pd->port_rcvegrbuf_chunks; 910 egrperchunk = pd->port_rcvegrbufs_perchunk; 911 size = pd->port_rcvegrbuf_size; 912 pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]), 913 GFP_KERNEL); 914 if (!pd->port_rcvegrbuf) { 915 ret = -ENOMEM; 916 goto bail; 917 } 918 pd->port_rcvegrbuf_phys = 919 kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]), 920 GFP_KERNEL); 921 if (!pd->port_rcvegrbuf_phys) { 922 ret = -ENOMEM; 923 goto bail_rcvegrbuf; 924 } 925 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) { 926 927 pd->port_rcvegrbuf[e] = dma_alloc_coherent( 928 &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e], 929 gfp_flags); 930 931 if (!pd->port_rcvegrbuf[e]) { 932 ret = -ENOMEM; 933 goto bail_rcvegrbuf_phys; 934 } 935 } 936 937 pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0]; 938 939 for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) { 940 dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk]; 941 unsigned i; 942 943 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { 944 dd->ipath_f_put_tid(dd, e + egroff + 945 (u64 __iomem *) 946 ((char __iomem *) 947 dd->ipath_kregbase + 948 dd->ipath_rcvegrbase), 949 RCVHQ_RCV_TYPE_EAGER, pa); 950 pa += egrsize; 951 } 952 cond_resched(); /* don't hog the cpu */ 953 } 954 955 ret = 0; 956 goto bail; 957 958bail_rcvegrbuf_phys: 959 for (e = 0; e < pd->port_rcvegrbuf_chunks && 960 pd->port_rcvegrbuf[e]; e++) { 961 dma_free_coherent(&dd->pcidev->dev, size, 962 pd->port_rcvegrbuf[e], 963 pd->port_rcvegrbuf_phys[e]); 964 965 } 966 kfree(pd->port_rcvegrbuf_phys); 967 pd->port_rcvegrbuf_phys = NULL; 968bail_rcvegrbuf: 969 kfree(pd->port_rcvegrbuf); 970 pd->port_rcvegrbuf = NULL; 971bail: 972 return ret; 973} 974 975 976/* common code for the mappings on dma_alloc_coherent mem */ 977static int ipath_mmap_mem(struct vm_area_struct *vma, 978 struct ipath_portdata *pd, unsigned len, int write_ok, 979 void *kvaddr, char *what) 980{ 981 struct ipath_devdata *dd = pd->port_dd; 982 unsigned long pfn; 983 int ret; 984 985 if ((vma->vm_end - vma->vm_start) > len) { 986 dev_info(&dd->pcidev->dev, 987 "FAIL on %s: len %lx > %x\n", what, 988 vma->vm_end - vma->vm_start, len); 989 ret = -EFAULT; 990 goto bail; 991 } 992 993 if (!write_ok) { 994 if (vma->vm_flags & VM_WRITE) { 995 dev_info(&dd->pcidev->dev, 996 "%s must be mapped readonly\n", what); 997 ret = -EPERM; 998 goto bail; 999 } 1000 1001 /* don't allow them to later change with mprotect */ 1002 vma->vm_flags &= ~VM_MAYWRITE; 1003 } 1004 1005 pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT; 1006 ret = remap_pfn_range(vma, vma->vm_start, pfn, 1007 len, vma->vm_page_prot); 1008 if (ret) 1009 dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x " 1010 "bytes r%c failed: %d\n", what, pd->port_port, 1011 pfn, len, write_ok?'w':'o', ret); 1012 else 1013 ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes " 1014 "r%c\n", what, pd->port_port, pfn, len, 1015 write_ok?'w':'o'); 1016bail: 1017 return ret; 1018} 1019 1020static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd, 1021 u64 ureg) 1022{ 1023 unsigned long phys; 1024 int ret; 1025 1026 /* 1027 * This is real hardware, so use io_remap. This is the mechanism 1028 * for the user process to update the head registers for their port 1029 * in the chip. 1030 */ 1031 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { 1032 dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen " 1033 "%lx > PAGE\n", vma->vm_end - vma->vm_start); 1034 ret = -EFAULT; 1035 } else { 1036 phys = dd->ipath_physaddr + ureg; 1037 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1038 1039 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 1040 ret = io_remap_pfn_range(vma, vma->vm_start, 1041 phys >> PAGE_SHIFT, 1042 vma->vm_end - vma->vm_start, 1043 vma->vm_page_prot); 1044 } 1045 return ret; 1046} 1047 1048static int mmap_piobufs(struct vm_area_struct *vma, 1049 struct ipath_devdata *dd, 1050 struct ipath_portdata *pd, 1051 unsigned piobufs, unsigned piocnt) 1052{ 1053 unsigned long phys; 1054 int ret; 1055 1056 /* 1057 * When we map the PIO buffers in the chip, we want to map them as 1058 * writeonly, no read possible. This prevents access to previous 1059 * process data, and catches users who might try to read the i/o 1060 * space due to a bug. 1061 */ 1062 if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) { 1063 dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " 1064 "reqlen %lx > PAGE\n", 1065 vma->vm_end - vma->vm_start); 1066 ret = -EINVAL; 1067 goto bail; 1068 } 1069 1070 phys = dd->ipath_physaddr + piobufs; 1071 1072#if defined(__powerpc__) 1073 /* There isn't a generic way to specify writethrough mappings */ 1074 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; 1075 pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; 1076 pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED; 1077#endif 1078 1079 /* 1080 * don't allow them to later change to readable with mprotect (for when 1081 * not initially mapped readable, as is normally the case) 1082 */ 1083 vma->vm_flags &= ~VM_MAYREAD; 1084 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 1085 1086 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, 1087 vma->vm_end - vma->vm_start, 1088 vma->vm_page_prot); 1089bail: 1090 return ret; 1091} 1092 1093static int mmap_rcvegrbufs(struct vm_area_struct *vma, 1094 struct ipath_portdata *pd) 1095{ 1096 struct ipath_devdata *dd = pd->port_dd; 1097 unsigned long start, size; 1098 size_t total_size, i; 1099 unsigned long pfn; 1100 int ret; 1101 1102 size = pd->port_rcvegrbuf_size; 1103 total_size = pd->port_rcvegrbuf_chunks * size; 1104 if ((vma->vm_end - vma->vm_start) > total_size) { 1105 dev_info(&dd->pcidev->dev, "FAIL on egr bufs: " 1106 "reqlen %lx > actual %lx\n", 1107 vma->vm_end - vma->vm_start, 1108 (unsigned long) total_size); 1109 ret = -EINVAL; 1110 goto bail; 1111 } 1112 1113 if (vma->vm_flags & VM_WRITE) { 1114 dev_info(&dd->pcidev->dev, "Can't map eager buffers as " 1115 "writable (flags=%lx)\n", vma->vm_flags); 1116 ret = -EPERM; 1117 goto bail; 1118 } 1119 /* don't allow them to later change to writeable with mprotect */ 1120 vma->vm_flags &= ~VM_MAYWRITE; 1121 1122 start = vma->vm_start; 1123 1124 for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { 1125 pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT; 1126 ret = remap_pfn_range(vma, start, pfn, size, 1127 vma->vm_page_prot); 1128 if (ret < 0) 1129 goto bail; 1130 } 1131 ret = 0; 1132 1133bail: 1134 return ret; 1135} 1136 1137/* 1138 * ipath_file_vma_fault - handle a VMA page fault. 1139 */ 1140static int ipath_file_vma_fault(struct vm_area_struct *vma, 1141 struct vm_fault *vmf) 1142{ 1143 struct page *page; 1144 1145 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); 1146 if (!page) 1147 return VM_FAULT_SIGBUS; 1148 get_page(page); 1149 vmf->page = page; 1150 1151 return 0; 1152} 1153 1154static struct vm_operations_struct ipath_file_vm_ops = { 1155 .fault = ipath_file_vma_fault, 1156}; 1157 1158static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, 1159 struct ipath_portdata *pd, unsigned subport) 1160{ 1161 unsigned long len; 1162 struct ipath_devdata *dd; 1163 void *addr; 1164 size_t size; 1165 int ret = 0; 1166 1167 /* If the port is not shared, all addresses should be physical */ 1168 if (!pd->port_subport_cnt) 1169 goto bail; 1170 1171 dd = pd->port_dd; 1172 size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; 1173 1174 /* 1175 * Each process has all the subport uregbase, rcvhdrq, and 1176 * rcvegrbufs mmapped - as an array for all the processes, 1177 * and also separately for this process. 1178 */ 1179 if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) { 1180 addr = pd->subport_uregbase; 1181 size = PAGE_SIZE * pd->port_subport_cnt; 1182 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) { 1183 addr = pd->subport_rcvhdr_base; 1184 size = pd->port_rcvhdrq_size * pd->port_subport_cnt; 1185 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) { 1186 addr = pd->subport_rcvegrbuf; 1187 size *= pd->port_subport_cnt; 1188 } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase + 1189 PAGE_SIZE * subport)) { 1190 addr = pd->subport_uregbase + PAGE_SIZE * subport; 1191 size = PAGE_SIZE; 1192 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base + 1193 pd->port_rcvhdrq_size * subport)) { 1194 addr = pd->subport_rcvhdr_base + 1195 pd->port_rcvhdrq_size * subport; 1196 size = pd->port_rcvhdrq_size; 1197 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf + 1198 size * subport)) { 1199 addr = pd->subport_rcvegrbuf + size * subport; 1200 /* rcvegrbufs are read-only on the slave */ 1201 if (vma->vm_flags & VM_WRITE) { 1202 dev_info(&dd->pcidev->dev, 1203 "Can't map eager buffers as " 1204 "writable (flags=%lx)\n", vma->vm_flags); 1205 ret = -EPERM; 1206 goto bail; 1207 } 1208 /* 1209 * Don't allow permission to later change to writeable 1210 * with mprotect. 1211 */ 1212 vma->vm_flags &= ~VM_MAYWRITE; 1213 } else { 1214 goto bail; 1215 } 1216 len = vma->vm_end - vma->vm_start; 1217 if (len > size) { 1218 ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size); 1219 ret = -EINVAL; 1220 goto bail; 1221 } 1222 1223 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; 1224 vma->vm_ops = &ipath_file_vm_ops; 1225 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 1226 ret = 1; 1227 1228bail: 1229 return ret; 1230} 1231 1232/** 1233 * ipath_mmap - mmap various structures into user space 1234 * @fp: the file pointer 1235 * @vma: the VM area 1236 * 1237 * We use this to have a shared buffer between the kernel and the user code 1238 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio 1239 * buffers in the chip. We have the open and close entries so we can bump 1240 * the ref count and keep the driver from being unloaded while still mapped. 1241 */ 1242static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) 1243{ 1244 struct ipath_portdata *pd; 1245 struct ipath_devdata *dd; 1246 u64 pgaddr, ureg; 1247 unsigned piobufs, piocnt; 1248 int ret; 1249 1250 pd = port_fp(fp); 1251 if (!pd) { 1252 ret = -EINVAL; 1253 goto bail; 1254 } 1255 dd = pd->port_dd; 1256 1257 /* 1258 * This is the ipath_do_user_init() code, mapping the shared buffers 1259 * into the user process. The address referred to by vm_pgoff is the 1260 * file offset passed via mmap(). For shared ports, this is the 1261 * kernel vmalloc() address of the pages to share with the master. 1262 * For non-shared or master ports, this is a physical address. 1263 * We only do one mmap for each space mapped. 1264 */ 1265 pgaddr = vma->vm_pgoff << PAGE_SHIFT; 1266 1267 /* 1268 * Check for 0 in case one of the allocations failed, but user 1269 * called mmap anyway. 1270 */ 1271 if (!pgaddr) { 1272 ret = -EINVAL; 1273 goto bail; 1274 } 1275 1276 ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n", 1277 (unsigned long long) pgaddr, vma->vm_start, 1278 vma->vm_end - vma->vm_start, dd->ipath_unit, 1279 pd->port_port, subport_fp(fp)); 1280 1281 /* 1282 * Physical addresses must fit in 40 bits for our hardware. 1283 * Check for kernel virtual addresses first, anything else must 1284 * match a HW or memory address. 1285 */ 1286 ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); 1287 if (ret) { 1288 if (ret > 0) 1289 ret = 0; 1290 goto bail; 1291 } 1292 1293 ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; 1294 if (!pd->port_subport_cnt) { 1295 /* port is not shared */ 1296 piocnt = pd->port_piocnt; 1297 piobufs = pd->port_piobufs; 1298 } else if (!subport_fp(fp)) { 1299 /* caller is the master */ 1300 piocnt = (pd->port_piocnt / pd->port_subport_cnt) + 1301 (pd->port_piocnt % pd->port_subport_cnt); 1302 piobufs = pd->port_piobufs + 1303 dd->ipath_palign * (pd->port_piocnt - piocnt); 1304 } else { 1305 unsigned slave = subport_fp(fp) - 1; 1306 1307 /* caller is a slave */ 1308 piocnt = pd->port_piocnt / pd->port_subport_cnt; 1309 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; 1310 } 1311 1312 if (pgaddr == ureg) 1313 ret = mmap_ureg(vma, dd, ureg); 1314 else if (pgaddr == piobufs) 1315 ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt); 1316 else if (pgaddr == dd->ipath_pioavailregs_phys) 1317 /* in-memory copy of pioavail registers */ 1318 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, 1319 (void *) dd->ipath_pioavailregs_dma, 1320 "pioavail registers"); 1321 else if (pgaddr == pd->port_rcvegr_phys) 1322 ret = mmap_rcvegrbufs(vma, pd); 1323 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) 1324 /* 1325 * The rcvhdrq itself; readonly except on HT (so have 1326 * to allow writable mapping), multiple pages, contiguous 1327 * from an i/o perspective. 1328 */ 1329 ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1, 1330 pd->port_rcvhdrq, 1331 "rcvhdrq"); 1332 else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys) 1333 /* in-memory copy of rcvhdrq tail register */ 1334 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, 1335 pd->port_rcvhdrtail_kvaddr, 1336 "rcvhdrq tail"); 1337 else 1338 ret = -EINVAL; 1339 1340 vma->vm_private_data = NULL; 1341 1342 if (ret < 0) 1343 dev_info(&dd->pcidev->dev, 1344 "Failure %d on off %llx len %lx\n", 1345 -ret, (unsigned long long)pgaddr, 1346 vma->vm_end - vma->vm_start); 1347bail: 1348 return ret; 1349} 1350 1351static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd) 1352{ 1353 unsigned pollflag = 0; 1354 1355 if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) && 1356 pd->port_hdrqfull != pd->port_hdrqfull_poll) { 1357 pollflag |= POLLIN | POLLRDNORM; 1358 pd->port_hdrqfull_poll = pd->port_hdrqfull; 1359 } 1360 1361 return pollflag; 1362} 1363 1364static unsigned int ipath_poll_urgent(struct ipath_portdata *pd, 1365 struct file *fp, 1366 struct poll_table_struct *pt) 1367{ 1368 unsigned pollflag = 0; 1369 struct ipath_devdata *dd; 1370 1371 dd = pd->port_dd; 1372 1373 /* variable access in ipath_poll_hdrqfull() needs this */ 1374 rmb(); 1375 pollflag = ipath_poll_hdrqfull(pd); 1376 1377 if (pd->port_urgent != pd->port_urgent_poll) { 1378 pollflag |= POLLIN | POLLRDNORM; 1379 pd->port_urgent_poll = pd->port_urgent; 1380 } 1381 1382 if (!pollflag) { 1383 /* this saves a spin_lock/unlock in interrupt handler... */ 1384 set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag); 1385 /* flush waiting flag so don't miss an event... */ 1386 wmb(); 1387 poll_wait(fp, &pd->port_wait, pt); 1388 } 1389 1390 return pollflag; 1391} 1392 1393static unsigned int ipath_poll_next(struct ipath_portdata *pd, 1394 struct file *fp, 1395 struct poll_table_struct *pt) 1396{ 1397 u32 head; 1398 u32 tail; 1399 unsigned pollflag = 0; 1400 struct ipath_devdata *dd; 1401 1402 dd = pd->port_dd; 1403 1404 /* variable access in ipath_poll_hdrqfull() needs this */ 1405 rmb(); 1406 pollflag = ipath_poll_hdrqfull(pd); 1407 1408 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); 1409 if (pd->port_rcvhdrtail_kvaddr) 1410 tail = ipath_get_rcvhdrtail(pd); 1411 else 1412 tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 1413 1414 if (head != tail) 1415 pollflag |= POLLIN | POLLRDNORM; 1416 else { 1417 /* this saves a spin_lock/unlock in interrupt handler */ 1418 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); 1419 /* flush waiting flag so we don't miss an event */ 1420 wmb(); 1421 1422 set_bit(pd->port_port + dd->ipath_r_intravail_shift, 1423 &dd->ipath_rcvctrl); 1424 1425 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1426 dd->ipath_rcvctrl); 1427 1428 if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ 1429 ipath_write_ureg(dd, ur_rcvhdrhead, 1430 dd->ipath_rhdrhead_intr_off | head, 1431 pd->port_port); 1432 1433 poll_wait(fp, &pd->port_wait, pt); 1434 } 1435 1436 return pollflag; 1437} 1438 1439static unsigned int ipath_poll(struct file *fp, 1440 struct poll_table_struct *pt) 1441{ 1442 struct ipath_portdata *pd; 1443 unsigned pollflag; 1444 1445 pd = port_fp(fp); 1446 if (!pd) 1447 pollflag = 0; 1448 else if (pd->poll_type & IPATH_POLL_TYPE_URGENT) 1449 pollflag = ipath_poll_urgent(pd, fp, pt); 1450 else 1451 pollflag = ipath_poll_next(pd, fp, pt); 1452 1453 return pollflag; 1454} 1455 1456static int ipath_supports_subports(int user_swmajor, int user_swminor) 1457{ 1458 /* no subport implementation prior to software version 1.3 */ 1459 return (user_swmajor > 1) || (user_swminor >= 3); 1460} 1461 1462static int ipath_compatible_subports(int user_swmajor, int user_swminor) 1463{ 1464 /* this code is written long-hand for clarity */ 1465 if (IPATH_USER_SWMAJOR != user_swmajor) { 1466 /* no promise of compatibility if major mismatch */ 1467 return 0; 1468 } 1469 if (IPATH_USER_SWMAJOR == 1) { 1470 switch (IPATH_USER_SWMINOR) { 1471 case 0: 1472 case 1: 1473 case 2: 1474 /* no subport implementation so cannot be compatible */ 1475 return 0; 1476 case 3: 1477 /* 3 is only compatible with itself */ 1478 return user_swminor == 3; 1479 default: 1480 /* >= 4 are compatible (or are expected to be) */ 1481 return user_swminor >= 4; 1482 } 1483 } 1484 /* make no promises yet for future major versions */ 1485 return 0; 1486} 1487 1488static int init_subports(struct ipath_devdata *dd, 1489 struct ipath_portdata *pd, 1490 const struct ipath_user_info *uinfo) 1491{ 1492 int ret = 0; 1493 unsigned num_subports; 1494 size_t size; 1495 1496 /* 1497 * If the user is requesting zero subports, 1498 * skip the subport allocation. 1499 */ 1500 if (uinfo->spu_subport_cnt <= 0) 1501 goto bail; 1502 1503 /* Self-consistency check for ipath_compatible_subports() */ 1504 if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) && 1505 !ipath_compatible_subports(IPATH_USER_SWMAJOR, 1506 IPATH_USER_SWMINOR)) { 1507 dev_info(&dd->pcidev->dev, 1508 "Inconsistent ipath_compatible_subports()\n"); 1509 goto bail; 1510 } 1511 1512 /* Check for subport compatibility */ 1513 if (!ipath_compatible_subports(uinfo->spu_userversion >> 16, 1514 uinfo->spu_userversion & 0xffff)) { 1515 dev_info(&dd->pcidev->dev, 1516 "Mismatched user version (%d.%d) and driver " 1517 "version (%d.%d) while port sharing. Ensure " 1518 "that driver and library are from the same " 1519 "release.\n", 1520 (int) (uinfo->spu_userversion >> 16), 1521 (int) (uinfo->spu_userversion & 0xffff), 1522 IPATH_USER_SWMAJOR, 1523 IPATH_USER_SWMINOR); 1524 goto bail; 1525 } 1526 if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) { 1527 ret = -EINVAL; 1528 goto bail; 1529 } 1530 1531 num_subports = uinfo->spu_subport_cnt; 1532 pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports); 1533 if (!pd->subport_uregbase) { 1534 ret = -ENOMEM; 1535 goto bail; 1536 } 1537 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ 1538 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * 1539 sizeof(u32), PAGE_SIZE) * num_subports; 1540 pd->subport_rcvhdr_base = vmalloc(size); 1541 if (!pd->subport_rcvhdr_base) { 1542 ret = -ENOMEM; 1543 goto bail_ureg; 1544 } 1545 1546 pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * 1547 pd->port_rcvegrbuf_size * 1548 num_subports); 1549 if (!pd->subport_rcvegrbuf) { 1550 ret = -ENOMEM; 1551 goto bail_rhdr; 1552 } 1553 1554 pd->port_subport_cnt = uinfo->spu_subport_cnt; 1555 pd->port_subport_id = uinfo->spu_subport_id; 1556 pd->active_slaves = 1; 1557 set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); 1558 memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports); 1559 memset(pd->subport_rcvhdr_base, 0, size); 1560 memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks * 1561 pd->port_rcvegrbuf_size * 1562 num_subports); 1563 goto bail; 1564 1565bail_rhdr: 1566 vfree(pd->subport_rcvhdr_base); 1567bail_ureg: 1568 vfree(pd->subport_uregbase); 1569 pd->subport_uregbase = NULL; 1570bail: 1571 return ret; 1572} 1573 1574static int try_alloc_port(struct ipath_devdata *dd, int port, 1575 struct file *fp, 1576 const struct ipath_user_info *uinfo) 1577{ 1578 struct ipath_portdata *pd; 1579 int ret; 1580 1581 if (!(pd = dd->ipath_pd[port])) { 1582 void *ptmp; 1583 1584 pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL); 1585 1586 /* 1587 * Allocate memory for use in ipath_tid_update() just once 1588 * at open, not per call. Reduces cost of expected send 1589 * setup. 1590 */ 1591 ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) + 1592 dd->ipath_rcvtidcnt * sizeof(struct page **), 1593 GFP_KERNEL); 1594 if (!pd || !ptmp) { 1595 ipath_dev_err(dd, "Unable to allocate portdata " 1596 "memory, failing open\n"); 1597 ret = -ENOMEM; 1598 kfree(pd); 1599 kfree(ptmp); 1600 goto bail; 1601 } 1602 dd->ipath_pd[port] = pd; 1603 dd->ipath_pd[port]->port_port = port; 1604 dd->ipath_pd[port]->port_dd = dd; 1605 dd->ipath_pd[port]->port_tid_pg_list = ptmp; 1606 init_waitqueue_head(&dd->ipath_pd[port]->port_wait); 1607 } 1608 if (!pd->port_cnt) { 1609 pd->userversion = uinfo->spu_userversion; 1610 init_user_egr_sizes(pd); 1611 if ((ret = init_subports(dd, pd, uinfo)) != 0) 1612 goto bail; 1613 ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n", 1614 current->comm, current->pid, dd->ipath_unit, 1615 port); 1616 pd->port_cnt = 1; 1617 port_fp(fp) = pd; 1618 pd->port_pid = get_pid(task_pid(current)); 1619 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); 1620 ipath_stats.sps_ports++; 1621 ret = 0; 1622 } else 1623 ret = -EBUSY; 1624 1625bail: 1626 return ret; 1627} 1628 1629static inline int usable(struct ipath_devdata *dd) 1630{ 1631 return dd && 1632 (dd->ipath_flags & IPATH_PRESENT) && 1633 dd->ipath_kregbase && 1634 dd->ipath_lid && 1635 !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED 1636 | IPATH_LINKUNK)); 1637} 1638 1639static int find_free_port(int unit, struct file *fp, 1640 const struct ipath_user_info *uinfo) 1641{ 1642 struct ipath_devdata *dd = ipath_lookup(unit); 1643 int ret, i; 1644 1645 if (!dd) { 1646 ret = -ENODEV; 1647 goto bail; 1648 } 1649 1650 if (!usable(dd)) { 1651 ret = -ENETDOWN; 1652 goto bail; 1653 } 1654 1655 for (i = 1; i < dd->ipath_cfgports; i++) { 1656 ret = try_alloc_port(dd, i, fp, uinfo); 1657 if (ret != -EBUSY) 1658 goto bail; 1659 } 1660 ret = -EBUSY; 1661 1662bail: 1663 return ret; 1664} 1665 1666static int find_best_unit(struct file *fp, 1667 const struct ipath_user_info *uinfo) 1668{ 1669 int ret = 0, i, prefunit = -1, devmax; 1670 int maxofallports, npresent, nup; 1671 int ndev; 1672 1673 devmax = ipath_count_units(&npresent, &nup, &maxofallports); 1674 1675 /* 1676 * This code is present to allow a knowledgeable person to 1677 * specify the layout of processes to processors before opening 1678 * this driver, and then we'll assign the process to the "closest" 1679 * InfiniPath chip to that processor (we assume reasonable connectivity, 1680 * for now). This code assumes that if affinity has been set 1681 * before this point, that at most one cpu is set; for now this 1682 * is reasonable. I check for both cpumask_empty() and cpumask_full(), 1683 * in case some kernel variant sets none of the bits when no 1684 * affinity is set. 2.6.11 and 12 kernels have all present 1685 * cpus set. Some day we'll have to fix it up further to handle 1686 * a cpu subset. This algorithm fails for two HT chips connected 1687 * in tunnel fashion. Eventually this needs real topology 1688 * information. There may be some issues with dual core numbering 1689 * as well. This needs more work prior to release. 1690 */ 1691 if (!cpumask_empty(¤t->cpus_allowed) && 1692 !cpumask_full(¤t->cpus_allowed)) { 1693 int ncpus = num_online_cpus(), curcpu = -1, nset = 0; 1694 for (i = 0; i < ncpus; i++) 1695 if (cpumask_test_cpu(i, ¤t->cpus_allowed)) { 1696 ipath_cdbg(PROC, "%s[%u] affinity set for " 1697 "cpu %d/%d\n", current->comm, 1698 current->pid, i, ncpus); 1699 curcpu = i; 1700 nset++; 1701 } 1702 if (curcpu != -1 && nset != ncpus) { 1703 if (npresent) { 1704 prefunit = curcpu / (ncpus / npresent); 1705 ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, " 1706 "%d cpus/chip, select unit %d\n", 1707 current->comm, current->pid, 1708 npresent, ncpus, ncpus / npresent, 1709 prefunit); 1710 } 1711 } 1712 } 1713 1714 /* 1715 * user ports start at 1, kernel port is 0 1716 * For now, we do round-robin access across all chips 1717 */ 1718 1719 if (prefunit != -1) 1720 devmax = prefunit + 1; 1721recheck: 1722 for (i = 1; i < maxofallports; i++) { 1723 for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax; 1724 ndev++) { 1725 struct ipath_devdata *dd = ipath_lookup(ndev); 1726 1727 if (!usable(dd)) 1728 continue; /* can't use this unit */ 1729 if (i >= dd->ipath_cfgports) 1730 /* 1731 * Maxed out on users of this unit. Try 1732 * next. 1733 */ 1734 continue; 1735 ret = try_alloc_port(dd, i, fp, uinfo); 1736 if (!ret) 1737 goto done; 1738 } 1739 } 1740 1741 if (npresent) { 1742 if (nup == 0) { 1743 ret = -ENETDOWN; 1744 ipath_dbg("No ports available (none initialized " 1745 "and ready)\n"); 1746 } else { 1747 if (prefunit > 0) { 1748 /* if started above 0, retry from 0 */ 1749 ipath_cdbg(PROC, 1750 "%s[%u] no ports on prefunit " 1751 "%d, clear and re-check\n", 1752 current->comm, current->pid, 1753 prefunit); 1754 devmax = ipath_count_units(NULL, NULL, 1755 NULL); 1756 prefunit = -1; 1757 goto recheck; 1758 } 1759 ret = -EBUSY; 1760 ipath_dbg("No ports available\n"); 1761 } 1762 } else { 1763 ret = -ENXIO; 1764 ipath_dbg("No boards found\n"); 1765 } 1766 1767done: 1768 return ret; 1769} 1770 1771static int find_shared_port(struct file *fp, 1772 const struct ipath_user_info *uinfo) 1773{ 1774 int devmax, ndev, i; 1775 int ret = 0; 1776 1777 devmax = ipath_count_units(NULL, NULL, NULL); 1778 1779 for (ndev = 0; ndev < devmax; ndev++) { 1780 struct ipath_devdata *dd = ipath_lookup(ndev); 1781 1782 if (!usable(dd)) 1783 continue; 1784 for (i = 1; i < dd->ipath_cfgports; i++) { 1785 struct ipath_portdata *pd = dd->ipath_pd[i]; 1786 1787 /* Skip ports which are not yet open */ 1788 if (!pd || !pd->port_cnt) 1789 continue; 1790 /* Skip port if it doesn't match the requested one */ 1791 if (pd->port_subport_id != uinfo->spu_subport_id) 1792 continue; 1793 /* Verify the sharing process matches the master */ 1794 if (pd->port_subport_cnt != uinfo->spu_subport_cnt || 1795 pd->userversion != uinfo->spu_userversion || 1796 pd->port_cnt >= pd->port_subport_cnt) { 1797 ret = -EINVAL; 1798 goto done; 1799 } 1800 port_fp(fp) = pd; 1801 subport_fp(fp) = pd->port_cnt++; 1802 pd->port_subpid[subport_fp(fp)] = 1803 get_pid(task_pid(current)); 1804 tidcursor_fp(fp) = 0; 1805 pd->active_slaves |= 1 << subport_fp(fp); 1806 ipath_cdbg(PROC, 1807 "%s[%u] %u sharing %s[%u] unit:port %u:%u\n", 1808 current->comm, current->pid, 1809 subport_fp(fp), 1810 pd->port_comm, pid_nr(pd->port_pid), 1811 dd->ipath_unit, pd->port_port); 1812 ret = 1; 1813 goto done; 1814 } 1815 } 1816 1817done: 1818 return ret; 1819} 1820 1821static int ipath_open(struct inode *in, struct file *fp) 1822{ 1823 /* The real work is performed later in ipath_assign_port() */ 1824 cycle_kernel_lock(); 1825 fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL); 1826 return fp->private_data ? 0 : -ENOMEM; 1827} 1828 1829/* Get port early, so can set affinity prior to memory allocation */ 1830static int ipath_assign_port(struct file *fp, 1831 const struct ipath_user_info *uinfo) 1832{ 1833 int ret; 1834 int i_minor; 1835 unsigned swmajor, swminor; 1836 1837 /* Check to be sure we haven't already initialized this file */ 1838 if (port_fp(fp)) { 1839 ret = -EINVAL; 1840 goto done; 1841 } 1842 1843 /* for now, if major version is different, bail */ 1844 swmajor = uinfo->spu_userversion >> 16; 1845 if (swmajor != IPATH_USER_SWMAJOR) { 1846 ipath_dbg("User major version %d not same as driver " 1847 "major %d\n", uinfo->spu_userversion >> 16, 1848 IPATH_USER_SWMAJOR); 1849 ret = -ENODEV; 1850 goto done; 1851 } 1852 1853 swminor = uinfo->spu_userversion & 0xffff; 1854 if (swminor != IPATH_USER_SWMINOR) 1855 ipath_dbg("User minor version %d not same as driver " 1856 "minor %d\n", swminor, IPATH_USER_SWMINOR); 1857 1858 mutex_lock(&ipath_mutex); 1859 1860 if (ipath_compatible_subports(swmajor, swminor) && 1861 uinfo->spu_subport_cnt && 1862 (ret = find_shared_port(fp, uinfo))) { 1863 if (ret > 0) 1864 ret = 0; 1865 goto done_chk_sdma; 1866 } 1867 1868 i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE; 1869 ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", 1870 (long)fp->f_path.dentry->d_inode->i_rdev, i_minor); 1871 1872 if (i_minor) 1873 ret = find_free_port(i_minor - 1, fp, uinfo); 1874 else 1875 ret = find_best_unit(fp, uinfo); 1876 1877done_chk_sdma: 1878 if (!ret) { 1879 struct ipath_filedata *fd = fp->private_data; 1880 const struct ipath_portdata *pd = fd->pd; 1881 const struct ipath_devdata *dd = pd->port_dd; 1882 1883 fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev, 1884 dd->ipath_unit, 1885 pd->port_port, 1886 fd->subport); 1887 1888 if (!fd->pq) 1889 ret = -ENOMEM; 1890 } 1891 1892 mutex_unlock(&ipath_mutex); 1893 1894done: 1895 return ret; 1896} 1897 1898 1899static int ipath_do_user_init(struct file *fp, 1900 const struct ipath_user_info *uinfo) 1901{ 1902 int ret; 1903 struct ipath_portdata *pd = port_fp(fp); 1904 struct ipath_devdata *dd; 1905 u32 head32; 1906 1907 /* Subports don't need to initialize anything since master did it. */ 1908 if (subport_fp(fp)) { 1909 ret = wait_event_interruptible(pd->port_wait, 1910 !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag)); 1911 goto done; 1912 } 1913 1914 dd = pd->port_dd; 1915 1916 if (uinfo->spu_rcvhdrsize) { 1917 ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize); 1918 if (ret) 1919 goto done; 1920 } 1921 1922 /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ 1923 1924 /* some ports may get extra buffers, calculate that here */ 1925 if (pd->port_port <= dd->ipath_ports_extrabuf) 1926 pd->port_piocnt = dd->ipath_pbufsport + 1; 1927 else 1928 pd->port_piocnt = dd->ipath_pbufsport; 1929 1930 /* for right now, kernel piobufs are at end, so port 1 is at 0 */ 1931 if (pd->port_port <= dd->ipath_ports_extrabuf) 1932 pd->port_pio_base = (dd->ipath_pbufsport + 1) 1933 * (pd->port_port - 1); 1934 else 1935 pd->port_pio_base = dd->ipath_ports_extrabuf + 1936 dd->ipath_pbufsport * (pd->port_port - 1); 1937 pd->port_piobufs = dd->ipath_piobufbase + 1938 pd->port_pio_base * dd->ipath_palign; 1939 ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u," 1940 " first pio %u\n", pd->port_port, pd->port_piobufs, 1941 pd->port_piocnt, pd->port_pio_base); 1942 ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0); 1943 1944 /* 1945 * Now allocate the rcvhdr Q and eager TIDs; skip the TID 1946 * array for time being. If pd->port_port > chip-supported, 1947 * we need to do extra stuff here to handle by handling overflow 1948 * through port 0, someday 1949 */ 1950 ret = ipath_create_rcvhdrq(dd, pd); 1951 if (!ret) 1952 ret = ipath_create_user_egr(pd); 1953 if (ret) 1954 goto done; 1955 1956 /* 1957 * set the eager head register for this port to the current values 1958 * of the tail pointers, since we don't know if they were 1959 * updated on last use of the port. 1960 */ 1961 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); 1962 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); 1963 pd->port_lastrcvhdrqtail = -1; 1964 ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n", 1965 pd->port_port, head32); 1966 pd->port_tidcursor = 0; /* start at beginning after open */ 1967 1968 /* initialize poll variables... */ 1969 pd->port_urgent = 0; 1970 pd->port_urgent_poll = 0; 1971 pd->port_hdrqfull_poll = pd->port_hdrqfull; 1972 1973 /* 1974 * Now enable the port for receive. 1975 * For chips that are set to DMA the tail register to memory 1976 * when they change (and when the update bit transitions from 1977 * 0 to 1. So for those chips, we turn it off and then back on. 1978 * This will (very briefly) affect any other open ports, but the 1979 * duration is very short, and therefore isn't an issue. We 1980 * explictly set the in-memory tail copy to 0 beforehand, so we 1981 * don't have to wait to be sure the DMA update has happened 1982 * (chip resets head/tail to 0 on transition to enable). 1983 */ 1984 set_bit(dd->ipath_r_portenable_shift + pd->port_port, 1985 &dd->ipath_rcvctrl); 1986 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) { 1987 if (pd->port_rcvhdrtail_kvaddr) 1988 ipath_clear_rcvhdrtail(pd); 1989 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1990 dd->ipath_rcvctrl & 1991 ~(1ULL << dd->ipath_r_tailupd_shift)); 1992 } 1993 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1994 dd->ipath_rcvctrl); 1995 /* Notify any waiting slaves */ 1996 if (pd->port_subport_cnt) { 1997 clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); 1998 wake_up(&pd->port_wait); 1999 } 2000done: 2001 return ret; 2002} 2003 2004/** 2005 * unlock_exptid - unlock any expected TID entries port still had in use 2006 * @pd: port 2007 * 2008 * We don't actually update the chip here, because we do a bulk update 2009 * below, using ipath_f_clear_tids. 2010 */ 2011static void unlock_expected_tids(struct ipath_portdata *pd) 2012{ 2013 struct ipath_devdata *dd = pd->port_dd; 2014 int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt; 2015 int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt; 2016 2017 ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n", 2018 pd->port_port); 2019 for (i = port_tidbase; i < maxtid; i++) { 2020 struct page *ps = dd->ipath_pageshadow[i]; 2021 2022 if (!ps) 2023 continue; 2024 2025 dd->ipath_pageshadow[i] = NULL; 2026 pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i], 2027 PAGE_SIZE, PCI_DMA_FROMDEVICE); 2028 ipath_release_user_pages_on_close(&ps, 1); 2029 cnt++; 2030 ipath_stats.sps_pageunlocks++; 2031 } 2032 if (cnt) 2033 ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n", 2034 pd->port_port, cnt); 2035 2036 if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks) 2037 ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n", 2038 (unsigned long long) ipath_stats.sps_pagelocks, 2039 (unsigned long long) 2040 ipath_stats.sps_pageunlocks); 2041} 2042 2043static int ipath_close(struct inode *in, struct file *fp) 2044{ 2045 int ret = 0; 2046 struct ipath_filedata *fd; 2047 struct ipath_portdata *pd; 2048 struct ipath_devdata *dd; 2049 unsigned long flags; 2050 unsigned port; 2051 struct pid *pid; 2052 2053 ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", 2054 (long)in->i_rdev, fp->private_data); 2055 2056 mutex_lock(&ipath_mutex); 2057 2058 fd = (struct ipath_filedata *) fp->private_data; 2059 fp->private_data = NULL; 2060 pd = fd->pd; 2061 if (!pd) { 2062 mutex_unlock(&ipath_mutex); 2063 goto bail; 2064 } 2065 2066 dd = pd->port_dd; 2067 2068 /* drain user sdma queue */ 2069 ipath_user_sdma_queue_drain(dd, fd->pq); 2070 ipath_user_sdma_queue_destroy(fd->pq); 2071 2072 if (--pd->port_cnt) { 2073 /* 2074 * XXX If the master closes the port before the slave(s), 2075 * revoke the mmap for the eager receive queue so 2076 * the slave(s) don't wait for receive data forever. 2077 */ 2078 pd->active_slaves &= ~(1 << fd->subport); 2079 put_pid(pd->port_subpid[fd->subport]); 2080 pd->port_subpid[fd->subport] = NULL; 2081 mutex_unlock(&ipath_mutex); 2082 goto bail; 2083 } 2084 /* early; no interrupt users after this */ 2085 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); 2086 port = pd->port_port; 2087 dd->ipath_pd[port] = NULL; 2088 pid = pd->port_pid; 2089 pd->port_pid = NULL; 2090 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); 2091 2092 if (pd->port_rcvwait_to || pd->port_piowait_to 2093 || pd->port_rcvnowait || pd->port_pionowait) { 2094 ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; " 2095 "%u rcv %u, pio already\n", 2096 pd->port_port, pd->port_rcvwait_to, 2097 pd->port_piowait_to, pd->port_rcvnowait, 2098 pd->port_pionowait); 2099 pd->port_rcvwait_to = pd->port_piowait_to = 2100 pd->port_rcvnowait = pd->port_pionowait = 0; 2101 } 2102 if (pd->port_flag) { 2103 ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n", 2104 pd->port_port, pd->port_flag); 2105 pd->port_flag = 0; 2106 } 2107 2108 if (dd->ipath_kregbase) { 2109 /* atomically clear receive enable port and intr avail. */ 2110 clear_bit(dd->ipath_r_portenable_shift + port, 2111 &dd->ipath_rcvctrl); 2112 clear_bit(pd->port_port + dd->ipath_r_intravail_shift, 2113 &dd->ipath_rcvctrl); 2114 ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl, 2115 dd->ipath_rcvctrl); 2116 /* and read back from chip to be sure that nothing 2117 * else is in flight when we do the rest */ 2118 (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 2119 2120 /* clean up the pkeys for this port user */ 2121 ipath_clean_part_key(pd, dd); 2122 /* 2123 * be paranoid, and never write 0's to these, just use an 2124 * unused part of the port 0 tail page. Of course, 2125 * rcvhdraddr points to a large chunk of memory, so this 2126 * could still trash things, but at least it won't trash 2127 * page 0, and by disabling the port, it should stop "soon", 2128 * even if a packet or two is in already in flight after we 2129 * disabled the port. 2130 */ 2131 ipath_write_kreg_port(dd, 2132 dd->ipath_kregs->kr_rcvhdrtailaddr, port, 2133 dd->ipath_dummy_hdrq_phys); 2134 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr, 2135 pd->port_port, dd->ipath_dummy_hdrq_phys); 2136 2137 ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt); 2138 ipath_chg_pioavailkernel(dd, pd->port_pio_base, 2139 pd->port_piocnt, 1); 2140 2141 dd->ipath_f_clear_tids(dd, pd->port_port); 2142 2143 if (dd->ipath_pageshadow) 2144 unlock_expected_tids(pd); 2145 ipath_stats.sps_ports--; 2146 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", 2147 pd->port_comm, pid_nr(pid), 2148 dd->ipath_unit, port); 2149 } 2150 2151 put_pid(pid); 2152 mutex_unlock(&ipath_mutex); 2153 ipath_free_pddata(dd, pd); /* after releasing the mutex */ 2154 2155bail: 2156 kfree(fd); 2157 return ret; 2158} 2159 2160static int ipath_port_info(struct ipath_portdata *pd, u16 subport, 2161 struct ipath_port_info __user *uinfo) 2162{ 2163 struct ipath_port_info info; 2164 int nup; 2165 int ret; 2166 size_t sz; 2167 2168 (void) ipath_count_units(NULL, &nup, NULL); 2169 info.num_active = nup; 2170 info.unit = pd->port_dd->ipath_unit; 2171 info.port = pd->port_port; 2172 info.subport = subport; 2173 /* Don't return new fields if old library opened the port. */ 2174 if (ipath_supports_subports(pd->userversion >> 16, 2175 pd->userversion & 0xffff)) { 2176 /* Number of user ports available for this device. */ 2177 info.num_ports = pd->port_dd->ipath_cfgports - 1; 2178 info.num_subports = pd->port_subport_cnt; 2179 sz = sizeof(info); 2180 } else 2181 sz = sizeof(info) - 2 * sizeof(u16); 2182 2183 if (copy_to_user(uinfo, &info, sz)) { 2184 ret = -EFAULT; 2185 goto bail; 2186 } 2187 ret = 0; 2188 2189bail: 2190 return ret; 2191} 2192 2193static int ipath_get_slave_info(struct ipath_portdata *pd, 2194 void __user *slave_mask_addr) 2195{ 2196 int ret = 0; 2197 2198 if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32))) 2199 ret = -EFAULT; 2200 return ret; 2201} 2202 2203static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq, 2204 u32 __user *inflightp) 2205{ 2206 const u32 val = ipath_user_sdma_inflight_counter(pq); 2207 2208 if (put_user(val, inflightp)) 2209 return -EFAULT; 2210 2211 return 0; 2212} 2213 2214static int ipath_sdma_get_complete(struct ipath_devdata *dd, 2215 struct ipath_user_sdma_queue *pq, 2216 u32 __user *completep) 2217{ 2218 u32 val; 2219 int err; 2220 2221 err = ipath_user_sdma_make_progress(dd, pq); 2222 if (err < 0) 2223 return err; 2224 2225 val = ipath_user_sdma_complete_counter(pq); 2226 if (put_user(val, completep)) 2227 return -EFAULT; 2228 2229 return 0; 2230} 2231 2232static ssize_t ipath_write(struct file *fp, const char __user *data, 2233 size_t count, loff_t *off) 2234{ 2235 const struct ipath_cmd __user *ucmd; 2236 struct ipath_portdata *pd; 2237 const void __user *src; 2238 size_t consumed, copy; 2239 struct ipath_cmd cmd; 2240 ssize_t ret = 0; 2241 void *dest; 2242 2243 if (count < sizeof(cmd.type)) { 2244 ret = -EINVAL; 2245 goto bail; 2246 } 2247 2248 ucmd = (const struct ipath_cmd __user *) data; 2249 2250 if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) { 2251 ret = -EFAULT; 2252 goto bail; 2253 } 2254 2255 consumed = sizeof(cmd.type); 2256 2257 switch (cmd.type) { 2258 case IPATH_CMD_ASSIGN_PORT: 2259 case __IPATH_CMD_USER_INIT: 2260 case IPATH_CMD_USER_INIT: 2261 copy = sizeof(cmd.cmd.user_info); 2262 dest = &cmd.cmd.user_info; 2263 src = &ucmd->cmd.user_info; 2264 break; 2265 case IPATH_CMD_RECV_CTRL: 2266 copy = sizeof(cmd.cmd.recv_ctrl); 2267 dest = &cmd.cmd.recv_ctrl; 2268 src = &ucmd->cmd.recv_ctrl; 2269 break; 2270 case IPATH_CMD_PORT_INFO: 2271 copy = sizeof(cmd.cmd.port_info); 2272 dest = &cmd.cmd.port_info; 2273 src = &ucmd->cmd.port_info; 2274 break; 2275 case IPATH_CMD_TID_UPDATE: 2276 case IPATH_CMD_TID_FREE: 2277 copy = sizeof(cmd.cmd.tid_info); 2278 dest = &cmd.cmd.tid_info; 2279 src = &ucmd->cmd.tid_info; 2280 break; 2281 case IPATH_CMD_SET_PART_KEY: 2282 copy = sizeof(cmd.cmd.part_key); 2283 dest = &cmd.cmd.part_key; 2284 src = &ucmd->cmd.part_key; 2285 break; 2286 case __IPATH_CMD_SLAVE_INFO: 2287 copy = sizeof(cmd.cmd.slave_mask_addr); 2288 dest = &cmd.cmd.slave_mask_addr; 2289 src = &ucmd->cmd.slave_mask_addr; 2290 break; 2291 case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg 2292 copy = 0; 2293 src = NULL; 2294 dest = NULL; 2295 break; 2296 case IPATH_CMD_POLL_TYPE: 2297 copy = sizeof(cmd.cmd.poll_type); 2298 dest = &cmd.cmd.poll_type; 2299 src = &ucmd->cmd.poll_type; 2300 break; 2301 case IPATH_CMD_ARMLAUNCH_CTRL: 2302 copy = sizeof(cmd.cmd.armlaunch_ctrl); 2303 dest = &cmd.cmd.armlaunch_ctrl; 2304 src = &ucmd->cmd.armlaunch_ctrl; 2305 break; 2306 case IPATH_CMD_SDMA_INFLIGHT: 2307 copy = sizeof(cmd.cmd.sdma_inflight); 2308 dest = &cmd.cmd.sdma_inflight; 2309 src = &ucmd->cmd.sdma_inflight; 2310 break; 2311 case IPATH_CMD_SDMA_COMPLETE: 2312 copy = sizeof(cmd.cmd.sdma_complete); 2313 dest = &cmd.cmd.sdma_complete; 2314 src = &ucmd->cmd.sdma_complete; 2315 break; 2316 default: 2317 ret = -EINVAL; 2318 goto bail; 2319 } 2320 2321 if (copy) { 2322 if ((count - consumed) < copy) { 2323 ret = -EINVAL; 2324 goto bail; 2325 } 2326 2327 if (copy_from_user(dest, src, copy)) { 2328 ret = -EFAULT; 2329 goto bail; 2330 } 2331 2332 consumed += copy; 2333 } 2334 2335 pd = port_fp(fp); 2336 if (!pd && cmd.type != __IPATH_CMD_USER_INIT && 2337 cmd.type != IPATH_CMD_ASSIGN_PORT) { 2338 ret = -EINVAL; 2339 goto bail; 2340 } 2341 2342 switch (cmd.type) { 2343 case IPATH_CMD_ASSIGN_PORT: 2344 ret = ipath_assign_port(fp, &cmd.cmd.user_info); 2345 if (ret) 2346 goto bail; 2347 break; 2348 case __IPATH_CMD_USER_INIT: 2349 /* backwards compatibility, get port first */ 2350 ret = ipath_assign_port(fp, &cmd.cmd.user_info); 2351 if (ret) 2352 goto bail; 2353 /* and fall through to current version. */ 2354 case IPATH_CMD_USER_INIT: 2355 ret = ipath_do_user_init(fp, &cmd.cmd.user_info); 2356 if (ret) 2357 goto bail; 2358 ret = ipath_get_base_info( 2359 fp, (void __user *) (unsigned long) 2360 cmd.cmd.user_info.spu_base_info, 2361 cmd.cmd.user_info.spu_base_info_size); 2362 break; 2363 case IPATH_CMD_RECV_CTRL: 2364 ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl); 2365 break; 2366 case IPATH_CMD_PORT_INFO: 2367 ret = ipath_port_info(pd, subport_fp(fp), 2368 (struct ipath_port_info __user *) 2369 (unsigned long) cmd.cmd.port_info); 2370 break; 2371 case IPATH_CMD_TID_UPDATE: 2372 ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info); 2373 break; 2374 case IPATH_CMD_TID_FREE: 2375 ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info); 2376 break; 2377 case IPATH_CMD_SET_PART_KEY: 2378 ret = ipath_set_part_key(pd, cmd.cmd.part_key); 2379 break; 2380 case __IPATH_CMD_SLAVE_INFO: 2381 ret = ipath_get_slave_info(pd, 2382 (void __user *) (unsigned long) 2383 cmd.cmd.slave_mask_addr); 2384 break; 2385 case IPATH_CMD_PIOAVAILUPD: 2386 ipath_force_pio_avail_update(pd->port_dd); 2387 break; 2388 case IPATH_CMD_POLL_TYPE: 2389 pd->poll_type = cmd.cmd.poll_type; 2390 break; 2391 case IPATH_CMD_ARMLAUNCH_CTRL: 2392 if (cmd.cmd.armlaunch_ctrl) 2393 ipath_enable_armlaunch(pd->port_dd); 2394 else 2395 ipath_disable_armlaunch(pd->port_dd); 2396 break; 2397 case IPATH_CMD_SDMA_INFLIGHT: 2398 ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp), 2399 (u32 __user *) (unsigned long) 2400 cmd.cmd.sdma_inflight); 2401 break; 2402 case IPATH_CMD_SDMA_COMPLETE: 2403 ret = ipath_sdma_get_complete(pd->port_dd, 2404 user_sdma_queue_fp(fp), 2405 (u32 __user *) (unsigned long) 2406 cmd.cmd.sdma_complete); 2407 break; 2408 } 2409 2410 if (ret >= 0) 2411 ret = consumed; 2412 2413bail: 2414 return ret; 2415} 2416 2417static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov, 2418 unsigned long dim, loff_t off) 2419{ 2420 struct file *filp = iocb->ki_filp; 2421 struct ipath_filedata *fp = filp->private_data; 2422 struct ipath_portdata *pd = port_fp(filp); 2423 struct ipath_user_sdma_queue *pq = fp->pq; 2424 2425 if (!dim) 2426 return -EINVAL; 2427 2428 return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim); 2429} 2430 2431static struct class *ipath_class; 2432 2433static int init_cdev(int minor, char *name, const struct file_operations *fops, 2434 struct cdev **cdevp, struct device **devp) 2435{ 2436 const dev_t dev = MKDEV(IPATH_MAJOR, minor); 2437 struct cdev *cdev = NULL; 2438 struct device *device = NULL; 2439 int ret; 2440 2441 cdev = cdev_alloc(); 2442 if (!cdev) { 2443 printk(KERN_ERR IPATH_DRV_NAME 2444 ": Could not allocate cdev for minor %d, %s\n", 2445 minor, name); 2446 ret = -ENOMEM; 2447 goto done; 2448 } 2449 2450 cdev->owner = THIS_MODULE; 2451 cdev->ops = fops; 2452 kobject_set_name(&cdev->kobj, name); 2453 2454 ret = cdev_add(cdev, dev, 1); 2455 if (ret < 0) { 2456 printk(KERN_ERR IPATH_DRV_NAME 2457 ": Could not add cdev for minor %d, %s (err %d)\n", 2458 minor, name, -ret); 2459 goto err_cdev; 2460 } 2461 2462 device = device_create(ipath_class, NULL, dev, NULL, name); 2463 2464 if (IS_ERR(device)) { 2465 ret = PTR_ERR(device); 2466 printk(KERN_ERR IPATH_DRV_NAME ": Could not create " 2467 "device for minor %d, %s (err %d)\n", 2468 minor, name, -ret); 2469 goto err_cdev; 2470 } 2471 2472 goto done; 2473 2474err_cdev: 2475 cdev_del(cdev); 2476 cdev = NULL; 2477 2478done: 2479 if (ret >= 0) { 2480 *cdevp = cdev; 2481 *devp = device; 2482 } else { 2483 *cdevp = NULL; 2484 *devp = NULL; 2485 } 2486 2487 return ret; 2488} 2489 2490int ipath_cdev_init(int minor, char *name, const struct file_operations *fops, 2491 struct cdev **cdevp, struct device **devp) 2492{ 2493 return init_cdev(minor, name, fops, cdevp, devp); 2494} 2495 2496static void cleanup_cdev(struct cdev **cdevp, 2497 struct device **devp) 2498{ 2499 struct device *dev = *devp; 2500 2501 if (dev) { 2502 device_unregister(dev); 2503 *devp = NULL; 2504 } 2505 2506 if (*cdevp) { 2507 cdev_del(*cdevp); 2508 *cdevp = NULL; 2509 } 2510} 2511 2512void ipath_cdev_cleanup(struct cdev **cdevp, 2513 struct device **devp) 2514{ 2515 cleanup_cdev(cdevp, devp); 2516} 2517 2518static struct cdev *wildcard_cdev; 2519static struct device *wildcard_dev; 2520 2521static const dev_t dev = MKDEV(IPATH_MAJOR, 0); 2522 2523static int user_init(void) 2524{ 2525 int ret; 2526 2527 ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME); 2528 if (ret < 0) { 2529 printk(KERN_ERR IPATH_DRV_NAME ": Could not register " 2530 "chrdev region (err %d)\n", -ret); 2531 goto done; 2532 } 2533 2534 ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME); 2535 2536 if (IS_ERR(ipath_class)) { 2537 ret = PTR_ERR(ipath_class); 2538 printk(KERN_ERR IPATH_DRV_NAME ": Could not create " 2539 "device class (err %d)\n", -ret); 2540 goto bail; 2541 } 2542 2543 goto done; 2544bail: 2545 unregister_chrdev_region(dev, IPATH_NMINORS); 2546done: 2547 return ret; 2548} 2549 2550static void user_cleanup(void) 2551{ 2552 if (ipath_class) { 2553 class_destroy(ipath_class); 2554 ipath_class = NULL; 2555 } 2556 2557 unregister_chrdev_region(dev, IPATH_NMINORS); 2558} 2559 2560static atomic_t user_count = ATOMIC_INIT(0); 2561static atomic_t user_setup = ATOMIC_INIT(0); 2562 2563int ipath_user_add(struct ipath_devdata *dd) 2564{ 2565 char name[10]; 2566 int ret; 2567 2568 if (atomic_inc_return(&user_count) == 1) { 2569 ret = user_init(); 2570 if (ret < 0) { 2571 ipath_dev_err(dd, "Unable to set up user support: " 2572 "error %d\n", -ret); 2573 goto bail; 2574 } 2575 ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev, 2576 &wildcard_dev); 2577 if (ret < 0) { 2578 ipath_dev_err(dd, "Could not create wildcard " 2579 "minor: error %d\n", -ret); 2580 goto bail_user; 2581 } 2582 2583 atomic_set(&user_setup, 1); 2584 } 2585 2586 snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit); 2587 2588 ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops, 2589 &dd->user_cdev, &dd->user_dev); 2590 if (ret < 0) 2591 ipath_dev_err(dd, "Could not create user minor %d, %s\n", 2592 dd->ipath_unit + 1, name); 2593 2594 goto bail; 2595 2596bail_user: 2597 user_cleanup(); 2598bail: 2599 return ret; 2600} 2601 2602void ipath_user_remove(struct ipath_devdata *dd) 2603{ 2604 cleanup_cdev(&dd->user_cdev, &dd->user_dev); 2605 2606 if (atomic_dec_return(&user_count) == 0) { 2607 if (atomic_read(&user_setup) == 0) 2608 goto bail; 2609 2610 cleanup_cdev(&wildcard_cdev, &wildcard_dev); 2611 user_cleanup(); 2612 2613 atomic_set(&user_setup, 0); 2614 } 2615bail: 2616 return; 2617} 2618