/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $
 */
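
/*
 * ib_umad: userspace access to InfiniBand MADs (management
 * datagrams).  Each IB port is exposed as two character devices:
 * umad%d for sending and receiving MADs through registered agents,
 * and issm%d, whose only job is to set the port's IB_PORT_SM
 * capability bit while it is held open by a subnet manager.
 */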

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/kref.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

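/*
 * Minor numbers are split into two fixed ranges tracked by a single
 * bitmap: minors 0..IB_UMAD_MAX_PORTS-1 are umad devices, and the
 * next IB_UMAD_MAX_PORTS minors are the corresponding issm devices.
 */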
enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};

struct ib_umad_port {
	int                    devnum;
	struct cdev            dev;
	struct class_device    class_dev;

	int                    sm_devnum;
	struct cdev            sm_dev;
	struct class_device    sm_class_dev;
	struct semaphore       sm_sem;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	u8                     port_num;
};

struct ib_umad_device {
	int                  start_port, end_port;
	struct kref          ref;
	struct ib_umad_port  port[0];
};

struct ib_umad_file {
	struct ib_umad_port *port;
	spinlock_t           recv_lock;
	struct list_head     recv_list;
	wait_queue_head_t    recv_wait;
	struct rw_semaphore  agent_mutex;
	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
	struct ib_mr        *mr[IB_UMAD_MAX_AGENTS];
};

struct ib_umad_packet {
	struct ib_ah           *ah;
	struct ib_mad_send_buf *msg;
	struct list_head        list;
	int                     length;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	struct ib_user_mad      mad;
};

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static spinlock_t map_lock;
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);

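/*
 * Queue @packet on the receive list of the file that owns @agent and
 * wake up any reader.  Returns nonzero if the agent is no longer
 * registered with the file, in which case the caller keeps ownership
 * of the packet and must free it.
 */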
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	down_read(&file->agent_mutex);
	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == file->agent[packet->mad.hdr.id]) {
			spin_lock_irq(&file->recv_lock);
			list_add_tail(&packet->list, &file->recv_list);
			spin_unlock_irq(&file->recv_lock);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	up_read(&file->agent_mutex);

	return ret;
}

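/*
 * Send completion handler: release the AH and the send buffer.  If
 * the send timed out waiting for a response, synthesize a packet
 * carrying status ETIMEDOUT and the original MAD header so that
 * userspace learns about the timeout through read().
 */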
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *timeout, *packet =
		(void *) (unsigned long) send_wc->wr_id;

	ib_destroy_ah(packet->msg->send_wr.wr.ud.ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		timeout = kmalloc(sizeof *timeout + sizeof (struct ib_mad_hdr),
				  GFP_KERNEL);
		if (!timeout)
			goto out;

		memset(timeout, 0, sizeof *timeout + sizeof (struct ib_mad_hdr));

		timeout->length = sizeof (struct ib_mad_hdr);
		timeout->mad.hdr.id = packet->mad.hdr.id;
		timeout->mad.hdr.status = ETIMEDOUT;
		memcpy(timeout->mad.data, packet->mad.data,
		       sizeof (struct ib_mad_hdr));

		if (!queue_packet(file, agent, timeout))
			return;
	}
out:
	kfree(packet);
}

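/*
 * Receive completion handler: coalesce the (possibly multi-segment
 * RMPP) receive into a single buffer, fill in the wire address
 * information, and hand the packet to the owning file.  GRH parsing
 * is not implemented yet, so the GRH-derived fields are zeroed.
 */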
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;
	int length;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto out;

	length = mad_recv_wc->mad_len;
	packet = kmalloc(sizeof *packet + length, GFP_KERNEL);
	if (!packet)
		goto out;

	memset(packet, 0, sizeof *packet + length);
	packet->length = length;

	ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);

	packet->mad.hdr.status      = 0;
	packet->mad.hdr.length      = length + sizeof (struct ib_user_mad);
	packet->mad.hdr.qpn         = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid         = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl          = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		/* XXX parse GRH */
		packet->mad.hdr.gid_index     = 0;
		packet->mad.hdr.hop_limit     = 0;
		packet->mad.hdr.traffic_class = 0;
		memset(packet->mad.hdr.gid, 0, 16);
		packet->mad.hdr.flow_label    = 0;
	}

	if (queue_packet(file, agent, packet))
		kfree(packet);

out:
	ib_free_recv_mad(mad_recv_wc);
}

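/*
 * read() delivers one queued MAD per call.  If the buffer is too
 * small for the whole message, only the first segment is copied and
 * -ENOSPC is returned; the packet is requeued so the caller can
 * retry with the length reported in the copied header.
 */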
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
		return -EINVAL;

	spin_lock_irq(&file->recv_lock);

	while (list_empty(&file->recv_list)) {
		spin_unlock_irq(&file->recv_lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		spin_lock_irq(&file->recv_lock);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	spin_unlock_irq(&file->recv_lock);

	if (count < packet->length + sizeof (struct ib_user_mad)) {
		/* Return length needed (and first RMPP segment) if too small */
		if (copy_to_user(buf, &packet->mad,
				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
			ret = -EFAULT;
		else
			ret = -ENOSPC;
	} else if (copy_to_user(buf, &packet->mad,
				packet->length + sizeof (struct ib_user_mad)))
		ret = -EFAULT;
	else
		ret = packet->length + sizeof (struct ib_user_mad);

	if (ret < 0) {
		/* Requeue packet */
		spin_lock_irq(&file->recv_lock);
		list_add(&packet->list, &file->recv_list);
		spin_unlock_irq(&file->recv_lock);
	} else
		kfree(packet);

	return ret;
}

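/*
 * write() consumes one struct ib_user_mad followed by the MAD
 * itself.  The address handle is built from the copied header; for
 * RMPP sends only the MAD header is copied into the send buffer here
 * (the RMPP header stays under the MAD layer's control) and the
 * payload is copied straight from userspace further down.
 */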
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_send_wr *bad_wr;
	struct ib_rmpp_mad *rmpp_mad;
	u8 method;
	__be64 *tid;
	int ret, length, hdr_len, data_len, rmpp_hdr_size;
	int rmpp_active = 0;

	/*
	 * We copy the MAD and RMPP headers unconditionally below, so
	 * insist that the write contains at least that much.
	 */
	if (count < sizeof (struct ib_user_mad) +
		    sizeof (struct ib_mad_hdr) + sizeof (struct ib_rmpp_hdr))
		return -EINVAL;

	length = count - sizeof (struct ib_user_mad);
	packet = kmalloc(sizeof *packet + sizeof (struct ib_mad_hdr) +
			 sizeof (struct ib_rmpp_hdr), GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf,
			   sizeof (struct ib_user_mad) +
			   sizeof (struct ib_mad_hdr) +
			   sizeof (struct ib_rmpp_hdr))) {
		ret = -EFAULT;
		goto err;
	}

	/* hdr.id is unsigned, so only the upper bound needs checking */
	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	packet->length = length;

	down_read(&file->agent_mutex);

	agent = file->agent[packet->mad.hdr.id];
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.flow_label    = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit     = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	packet->ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(packet->ah)) {
		ret = PTR_ERR(packet->ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
		/* RMPP active */
		if (!agent->rmpp_version) {
			ret = -EINVAL;
			goto err_ah;
		}
		/* Validate that management class can support RMPP */
		if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
			hdr_len  = offsetof(struct ib_sa_mad, data);
			data_len = length - hdr_len;
		} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
			   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
			hdr_len  = offsetof(struct ib_vendor_mad, data);
			data_len = length - hdr_len;
		} else {
			ret = -EINVAL;
			goto err_ah;
		}
		rmpp_active = 1;
	} else {
		if (length > sizeof (struct ib_mad)) {
			ret = -EINVAL;
			goto err_ah;
		}
		hdr_len  = offsetof(struct ib_mad, data);
		data_len = length - hdr_len;
	}

	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 0, packet->ah, rmpp_active,
					 hdr_len, data_len,
					 GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->send_wr.wr.ud.timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->send_wr.wr.ud.retries    = packet->mad.hdr.retries;

	/* Override send WR WRID initialized in ib_create_send_mad */
	packet->msg->send_wr.wr_id = (unsigned long) packet;

	if (!rmpp_active) {
		/* Copy message from user into send buffer */
		if (copy_from_user(packet->msg->mad,
				   buf + sizeof (struct ib_user_mad), length)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		rmpp_hdr_size = sizeof (struct ib_mad_hdr) +
				sizeof (struct ib_rmpp_hdr);

		/* Only copy MAD headers (RMPP header in place) */
		memcpy(packet->msg->mad, packet->mad.data,
		       sizeof (struct ib_mad_hdr));

		/* Now, copy rest of message from user into send buffer */
		if (copy_from_user(((struct ib_rmpp_mad *) packet->msg->mad)->data,
				   buf + sizeof (struct ib_user_mad) + rmpp_hdr_size,
				   length - rmpp_hdr_size)) {
			ret = -EFAULT;
			goto err_msg;
		}
	}

	/*
	 * If userspace is generating a request that will generate a
	 * response, we need to make sure the high-order part of the
	 * transaction ID matches the agent being used to send the
	 * MAD.
	 */
	method = packet->msg->mad->mad_hdr.method;

	if (!(method & IB_MGMT_METHOD_RESP)       &&
	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
	    method != IB_MGMT_METHOD_SEND) {
		tid = &packet->msg->mad->mad_hdr.tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
	}

	ret = ib_post_send_mad(agent, &packet->msg->send_wr, &bad_wr);
	if (ret)
		goto err_msg;

	up_read(&file->agent_mutex);

	return sizeof (struct ib_user_mad_hdr) + packet->length;

err_msg:
	ib_free_send_mad(packet->msg);

err_ah:
	ib_destroy_ah(packet->ah);

err_up:
	up_read(&file->agent_mutex);

err:
	kfree(packet);
	return ret;
}

static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

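/*
 * Register a MAD agent for this file.  The chosen slot index is
 * returned through the 'id' field of the userspace request and is
 * the same value that shows up as hdr.id on reads and writes.
 */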
static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent;
	int agent_id;
	int ret;

	down_write(&file->agent_mutex);

	if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!file->agent[agent_id])
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
		memcpy(req.oui,         ureq.oui,         sizeof req.oui);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		goto out;
	}

	file->agent[agent_id] = agent;

	file->mr[agent_id] = ib_get_dma_mr(agent->qp->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(file->mr[agent_id])) {
		ret = PTR_ERR(file->mr[agent_id]);
		goto err;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto err_mr;
	}

	ret = 0;
	goto out;

err_mr:
	ib_dereg_mr(file->mr[agent_id]);

err:
	file->agent[agent_id] = NULL;
	ib_unregister_mad_agent(agent);

out:
	up_write(&file->agent_mutex);
	return ret;
}

static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
{
	u32 id;
	int ret = 0;

	down_write(&file->agent_mutex);

	if (get_user(id, (u32 __user *) arg)) {
		ret = -EFAULT;
		goto out;
	}

	/* id is a u32, so it can't be negative */
	if (id >= IB_UMAD_MAX_AGENTS || !file->agent[id]) {
		ret = -EINVAL;
		goto out;
	}

	ib_dereg_mr(file->mr[id]);
	ib_unregister_mad_agent(file->agent[id]);
	file->agent[id] = NULL;

out:
	up_write(&file->agent_mutex);
	return ret;
}

static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, arg);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, arg);
	default:
		return -ENOIOCTLCMD;
	}
}
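
/*
 * Userspace talks to this driver roughly as follows -- an
 * illustrative sketch only, with error handling omitted; the /dev
 * path is an assumption and depends on how udev names the class
 * device:
 *
 *	int fd = open("/dev/infiniband/umad0", O_RDWR);
 *
 *	struct ib_user_mad_reg_req req = { .qpn = 1 };	// QP1 (GSI)
 *	ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req);
 *
 *	umad->hdr.id = req.id;	// slot index filled in by the kernel
 *	write(fd, umad, sizeof (struct ib_user_mad) + 256);
 *	read(fd, umad, sizeof (struct ib_user_mad) + 256);
 *
 *	ioctl(fd, IB_USER_MAD_UNREGISTER_AGENT, &req.id);
 */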

static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port =
		container_of(inode->i_cdev, struct ib_umad_port, dev);
	struct ib_umad_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	memset(file, 0, sizeof *file);

	spin_lock_init(&file->recv_lock);
	init_rwsem(&file->agent_mutex);
	INIT_LIST_HEAD(&file->recv_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	return 0;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet, *tmp;
	int i;

	for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
		if (file->agent[i]) {
			ib_dereg_mr(file->mr[i]);
			ib_unregister_mad_agent(file->agent[i]);
		}

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
		kfree(packet);

	kfree(file);

	return 0;
}

static struct file_operations umad_fops = {
	.owner          = THIS_MODULE,
	.read           = ib_umad_read,
	.write          = ib_umad_write,
	.poll           = ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
	.compat_ioctl   = ib_umad_ioctl,
	.open           = ib_umad_open,
	.release        = ib_umad_close
};

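/*
 * The issm device doesn't carry any MAD traffic; opening it just
 * claims the port's SM capability bit (one opener at a time, via
 * sm_sem) and closing it releases the bit again.
 */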
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port =
		container_of(inode->i_cdev, struct ib_umad_port, sm_dev);
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem))
			return -EAGAIN;
	} else {
		if (down_interruptible(&port->sm_sem))
			return -ERESTARTSYS;
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret) {
		up(&port->sm_sem);
		return ret;
	}

	filp->private_data = port;

	return 0;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret;

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	up(&port->sm_sem);

	return ret;
}

static struct file_operations umad_sm_fops = {
	.owner   = THIS_MODULE,
	.open    = ib_umad_sm_open,
	.release = ib_umad_sm_close
};

static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};

static ssize_t show_dev(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (class_dev == &port->class_dev)
		return print_dev_t(buf, port->dev.dev);
	else
		return print_dev_t(buf, port->sm_dev.dev);
}
static CLASS_DEVICE_ATTR(dev, S_IRUGO, show_dev, NULL);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	return sprintf(buf, "%d\n", port->port_num);
}
static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}

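/*
 * Both class devices for a port share this release function, so
 * work out which one is going away before deleting its cdev and
 * returning its minor to the bitmap.
 */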
static void ib_umad_release_port(struct class_device *class_dev)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (class_dev == &port->class_dev) {
		cdev_del(&port->dev);
		clear_bit(port->devnum, dev_map);
	} else {
		cdev_del(&port->sm_dev);
		clear_bit(port->sm_devnum, dev_map);
	}

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
}

static struct class umad_class = {
	.name    = "infiniband_mad",
	.release = ib_umad_release_port
};

static ssize_t show_abi_version(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

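/*
 * Allocate minors for one port and create its umad and issm
 * character and class devices.  Returns 0 on success or -1 on
 * failure; callers only test for nonzero.
 */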
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_port *port)
{
	spin_lock(&map_lock);
	port->devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (port->devnum >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&map_lock);
		return -1;
	}
	port->sm_devnum = find_next_zero_bit(dev_map, IB_UMAD_MAX_PORTS * 2, IB_UMAD_MAX_PORTS);
	if (port->sm_devnum >= IB_UMAD_MAX_PORTS * 2) {
		spin_unlock(&map_lock);
		return -1;
	}
	set_bit(port->devnum, dev_map);
	set_bit(port->sm_devnum, dev_map);
	spin_unlock(&map_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	init_MUTEX(&port->sm_sem);

	cdev_init(&port->dev, &umad_fops);
	port->dev.owner = THIS_MODULE;
	kobject_set_name(&port->dev.kobj, "umad%d", port->devnum);
	if (cdev_add(&port->dev, base_dev + port->devnum, 1))
		return -1;

	port->class_dev.class = &umad_class;
	port->class_dev.dev   = device->dma_device;

	snprintf(port->class_dev.class_id, BUS_ID_SIZE, "umad%d", port->devnum);

	if (class_device_register(&port->class_dev))
		goto err_cdev;

	class_set_devdata(&port->class_dev, port);
	kref_get(&port->umad_dev->ref);

	if (class_device_create_file(&port->class_dev, &class_device_attr_dev))
		goto err_class;
	if (class_device_create_file(&port->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&port->class_dev, &class_device_attr_port))
		goto err_class;

	cdev_init(&port->sm_dev, &umad_sm_fops);
	port->sm_dev.owner = THIS_MODULE;
	kobject_set_name(&port->sm_dev.kobj, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS);
	if (cdev_add(&port->sm_dev, base_dev + port->sm_devnum, 1))
		return -1;

	port->sm_class_dev.class = &umad_class;
	port->sm_class_dev.dev   = device->dma_device;

	snprintf(port->sm_class_dev.class_id, BUS_ID_SIZE, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS);

	if (class_device_register(&port->sm_class_dev))
		goto err_sm_cdev;

	class_set_devdata(&port->sm_class_dev, port);
	kref_get(&port->umad_dev->ref);

	if (class_device_create_file(&port->sm_class_dev, &class_device_attr_dev))
		goto err_sm_class;
	if (class_device_create_file(&port->sm_class_dev, &class_device_attr_ibdev))
		goto err_sm_class;
	if (class_device_create_file(&port->sm_class_dev, &class_device_attr_port))
		goto err_sm_class;

	return 0;

err_sm_class:
	class_device_unregister(&port->sm_class_dev);

err_sm_cdev:
	cdev_del(&port->sm_dev);

err_class:
	class_device_unregister(&port->class_dev);

err_cdev:
	cdev_del(&port->dev);
	clear_bit(port->devnum, dev_map);
	clear_bit(port->sm_devnum, dev_map);

	return -1;
}

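/*
 * Device-added hook: a switch has a single management port 0, while
 * a CA or router exposes ports 1..phys_port_cnt; create umad/issm
 * devices for each.
 */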
static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kmalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	memset(umad_dev, 0, sizeof *umad_dev +
	       (e - s + 1) * sizeof (struct ib_umad_port));

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s) {
		class_device_unregister(&umad_dev->port[i - s].class_dev);
		class_device_unregister(&umad_dev->port[i - s].sm_class_dev);
	}

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) {
		class_device_unregister(&umad_dev->port[i].class_dev);
		class_device_unregister(&umad_dev->port[i].sm_class_dev);
	}

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static int __init ib_umad_init(void)
{
	int ret;

	spin_lock_init(&map_lock);

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	ret = class_register(&umad_class);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = class_create_file(&umad_class, &class_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_unregister(&umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_unregister(&umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);