user_mad.c revision 6c06aec2487f7568cf57471a20f422568f25d551
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2008 Cisco. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/semaphore.h>

#include <asm/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};

/*
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we look up the corresponding struct
 * ib_umad_port by minor in the umad_port[] table while holding the
 * port_lock.  If this lookup succeeds, we take a reference on the
 * ib_umad_port's struct ib_umad_device while still holding the
 * port_lock; if the lookup fails, we fail the open().  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we clear all of its
 * ib_umad_ports from umad_port[] while holding port_lock before
 * dropping the module's reference to the ib_umad_device.  This is
 * always safe because any open() calls will either succeed and obtain
 * a reference before we clear the umad_port[] entries, or fail after
 * we clear the umad_port[] entries.
 */

struct ib_umad_port {
	struct cdev           *cdev;
	struct device         *dev;

	struct cdev           *sm_cdev;
	struct device         *sm_dev;
	struct semaphore       sm_sem;

	struct mutex           file_mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};

struct ib_umad_device {
	int                  start_port, end_port;
	struct kref          ref;
	struct ib_umad_port  port[0];
};

struct ib_umad_file {
	struct mutex         mutex;
	struct ib_umad_port *port;
	struct list_head     recv_list;
	struct list_head     send_list;
	struct list_head     port_list;
	spinlock_t           send_lock;
	wait_queue_head_t    recv_wait;
	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
	int                  agents_dead;
	u8                   use_pkey_index;
	u8                   already_used;
};

struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc  *recv_wc;
	struct list_head        list;
	int                     length;
	struct ib_user_mad      mad;
};

static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);

static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}

static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
				      sizeof (struct ib_user_mad_hdr_old);
}

/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}

static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}

static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}

static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status	    = 0;
	packet->mad.hdr.length	    = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn	    = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid	    = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl	    = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index  = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct ib_ah_attr ah_attr;

		ib_init_ah_from_wc(agent->device, agent->port_num,
				   mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
				   &ah_attr);

		packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
		packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}

static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;

	/* We need enough room to copy the first (or only) MAD segment. */
	recv_buf = &packet->recv_wc->recv_buf;
	if ((packet->length <= sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + sizeof (*recv_buf->mad)))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message.  Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = sizeof (struct ib_mad) - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return hdr_size(file) + packet->length;
}

static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	return size;
}

static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}

static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}

static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}

static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad((struct ib_mad *) hdr)) {
			if (!ib_response_mad((struct ib_mad *) sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}

static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id < 0 ||
	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index	  = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label	  = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit	  = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	} else {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	}

	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah		= ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries	= packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	spin_lock_irq(&file->send_lock);
	ret = is_duplicate(file, packet);
	if (!ret)
		list_add_tail(&packet->list, &file->send_list);
	spin_unlock_irq(&file->send_lock);
	if (ret) {
		ret = -EINVAL;
		goto err_msg;
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}

static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			printk(KERN_WARNING "user_mad: process %s did not enable "
			       "P_Key index support.\n", current->comm);
			printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt "
			       "has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}

static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = 0;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file) {
		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

out:
	mutex_unlock(&port->file_mutex);
	return ret;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);

	kfree(file);
	kref_put(&dev->ref, ib_umad_release_dev);

	return 0;
}

static const struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ib_umad_compat_ioctl,
#endif
	.open		= ib_umad_open,
	.release	= ib_umad_close
};

static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret) {
		up(&port->sm_sem);
		goto fail;
	}

	filp->private_data = port;

	return 0;

fail:
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	return ret;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);

	return ret;
}

static const struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close
};

static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static ssize_t show_abi_version(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_port *port)
{
	spin_lock(&port_lock);
	port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (port->dev_num >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		return -1;
	}
	set_bit(port->dev_num, dev_map);
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	init_MUTEX(&port->sm_sem);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	port->cdev = cdev_alloc();
	if (!port->cdev)
		return -1;
	port->cdev->owner = THIS_MODULE;
	port->cdev->ops   = &umad_fops;
	kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num);
	if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
		goto err_cdev;

	port->dev = device_create_drvdata(umad_class, device->dma_device,
					  port->cdev->dev, port,
					  "umad%d", port->dev_num);
	if (IS_ERR(port->dev))
		goto err_cdev;

	if (device_create_file(port->dev, &dev_attr_ibdev))
		goto err_dev;
	if (device_create_file(port->dev, &dev_attr_port))
		goto err_dev;

	port->sm_cdev = cdev_alloc();
	if (!port->sm_cdev)
		goto err_dev;
	port->sm_cdev->owner = THIS_MODULE;
	port->sm_cdev->ops   = &umad_sm_fops;
	kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num);
	if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
		goto err_sm_cdev;

	port->sm_dev = device_create_drvdata(umad_class, device->dma_device,
					     port->sm_cdev->dev, port,
					     "issm%d", port->dev_num);
	if (IS_ERR(port->sm_dev))
		goto err_sm_cdev;

	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
		goto err_sm_dev;
	if (device_create_file(port->sm_dev, &dev_attr_port))
		goto err_sm_dev;

	spin_lock(&port_lock);
	umad_port[port->dev_num] = port;
	spin_unlock(&port_lock);

	return 0;

err_sm_dev:
	device_destroy(umad_class, port->sm_cdev->dev);

err_sm_cdev:
	cdev_del(port->sm_cdev);

err_dev:
	device_destroy(umad_class, port->cdev->dev);

err_cdev:
	cdev_del(port->cdev);
	clear_bit(port->dev_num, dev_map);

	return -1;
}

static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int already_dead;
	int id;

	dev_set_drvdata(port->dev,    NULL);
	dev_set_drvdata(port->sm_dev, NULL);

	device_destroy(umad_class, port->cdev->dev);
	device_destroy(umad_class, port->sm_cdev->dev);

	cdev_del(port->cdev);
	cdev_del(port->sm_cdev);

	spin_lock(&port_lock);
	umad_port[port->dev_num] = NULL;
	spin_unlock(&port_lock);

	mutex_lock(&port->file_mutex);

	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		already_dead = file->agents_dead;
		file->agents_dead = 1;
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	clear_bit(port->dev_num, dev_map);
}

static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s)
		ib_umad_kill_port(&umad_dev->port[i - s]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
		ib_umad_kill_port(&umad_dev->port[i]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = class_create_file(umad_class, &class_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);