c2_provider.c revision ee30cb5b0b65392843cc3beaba48160ee4a3764e
/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/if_arp.h>
#include <linux/vmalloc.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include "c2.h"
#include "c2_provider.h"
#include "c2_user.h"

static int c2_query_device(struct ib_device *ibdev,
			   struct ib_device_attr *props)
{
	struct c2_dev *c2dev = to_c2dev(ibdev);

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	*props = c2dev->props;
	return 0;
}

static int c2_query_port(struct ib_device *ibdev,
			 u8 port, struct ib_port_attr *props)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	props->max_mtu = IB_MTU_4096;
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = IB_PORT_ACTIVE;
	props->phys_state = 0;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->qkey_viol_cntr = 0;
	props->active_width = 1;
	props->active_speed = 1;

	return 0;
}

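/*
 * Note: the AMSO1100 is an iWARP RNIC, so IB subnet-management
 * concepts (LID, SM LID/SL, P_Key tables) do not really apply; the
 * port is described with a single GID/P_Key entry and is always
 * reported ACTIVE with fixed width/speed.
 */
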
static int c2_modify_port(struct ib_device *ibdev,
			  u8 port, int port_modify_mask,
			  struct ib_port_modify *props)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return 0;
}

static int c2_query_pkey(struct ib_device *ibdev,
			 u8 port, u16 index, u16 *pkey)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	*pkey = 0;
	return 0;
}

static int c2_query_gid(struct ib_device *ibdev, u8 port,
			int index, union ib_gid *gid)
{
	struct c2_dev *c2dev = to_c2dev(ibdev);

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);

	return 0;
}

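/*
 * Example (illustrative): the GID is just the pseudo netdev's 6-byte
 * MAC address zero-padded to 16 bytes, so a MAC of 00:07:e9:09:5a:4b
 * would yield the GID 0007:e909:5a4b:0000:0000:0000:0000:0000.
 */
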
/* Allocate the user context data structure. This keeps track
 * of all objects associated with a particular user-mode client.
 */
static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
					     struct ib_udata *udata)
{
	struct c2_ucontext *context;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	return &context->ibucontext;
}

static int c2_dealloc_ucontext(struct ib_ucontext *context)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	kfree(context);
	return 0;
}

static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return -ENOSYS;
}

static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct c2_pd *pd;
	int err;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
			c2_pd_free(to_c2dev(ibdev), pd);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int c2_dealloc_pd(struct ib_pd *pd)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
	kfree(pd);

	return 0;
}

static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return ERR_PTR(-ENOSYS);
}

static int c2_ah_destroy(struct ib_ah *ah)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return -ENOSYS;
}

static void c2_add_ref(struct ib_qp *ibqp)
{
	struct c2_qp *qp;

	BUG_ON(!ibqp);
	qp = to_c2qp(ibqp);
	atomic_inc(&qp->refcount);
}

static void c2_rem_ref(struct ib_qp *ibqp)
{
	struct c2_qp *qp;

	BUG_ON(!ibqp);
	qp = to_c2qp(ibqp);
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
{
	struct c2_dev *c2dev = to_c2dev(device);
	struct c2_qp *qp;

	qp = c2_find_qpn(c2dev, qpn);
	pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
		 __FUNCTION__, qp, qpn, device,
		 (qp ? atomic_read(&qp->refcount) : 0));

	return (qp ? &qp->ibqp : NULL);
}

static struct ib_qp *c2_create_qp(struct ib_pd *pd,
				  struct ib_qp_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct c2_qp *qp;
	int err;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			pr_debug("%s: Unable to allocate QP\n", __FUNCTION__);
			return ERR_PTR(-ENOMEM);
		}
		spin_lock_init(&qp->lock);
		if (pd->uobject) {
			/* userspace specific */
		}

		err = c2_alloc_qp(to_c2dev(pd->device),
				  to_c2pd(pd), init_attr, qp);

		if (err && pd->uobject) {
			/* userspace specific */
		}

		break;
	default:
		pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__,
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	return &qp->ibqp;
}

static int c2_destroy_qp(struct ib_qp *ib_qp)
{
	struct c2_qp *qp = to_c2qp(ib_qp);

	pr_debug("%s:%u qp=%p, qp->state=%d\n",
		 __FUNCTION__, __LINE__, ib_qp, qp->state);
	c2_free_qp(to_c2dev(ib_qp->device), qp);
	kfree(qp);
	return 0;
}

static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct c2_cq *cq;
	int err;

	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__);
		return ERR_PTR(-ENOMEM);
	}

	err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
	if (err) {
		pr_debug("%s: error initializing CQ\n", __FUNCTION__);
		kfree(cq);
		return ERR_PTR(err);
	}

	return &cq->ibcq;
}

static int c2_destroy_cq(struct ib_cq *ib_cq)
{
	struct c2_cq *cq = to_c2cq(ib_cq);

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	c2_free_cq(to_c2dev(ib_cq->device), cq);
	kfree(cq);

	return 0;
}

static inline u32 c2_convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
	    (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
	    (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
	    C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
}

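/*
 * Example (illustrative): an MR registered with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE converts to
 * C2_ACF_LOCAL_WRITE | C2_ACF_REMOTE_WRITE | C2_ACF_LOCAL_READ |
 * C2_ACF_WINDOW_BIND; local read and window binding are always
 * granted by this adapter-specific conversion.
 */
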
static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf, int acc, u64 *iova_start)
{
	struct c2_mr *mr;
	u64 *page_list;
	u32 total_len;
	int err, i, j, k, page_shift, pbl_depth;

	pbl_depth = 0;
	total_len = 0;

	page_shift = PAGE_SHIFT;
	/*
	 * If there is only 1 buffer we assume this could
	 * be a map of all of physical memory...use a 32k page_shift.
	 */
	if (num_phys_buf == 1)
		page_shift += 3;
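		/*
		 * e.g. with 4 KiB pages (PAGE_SHIFT == 12) this yields a
		 * 1 << 15 == 32 KiB page size, matching the comment above.
		 */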

	for (i = 0; i < num_phys_buf; i++) {

		if (buffer_list[i].addr & ~PAGE_MASK) {
			pr_debug("Unaligned Memory Buffer: 0x%llx\n",
				 (unsigned long long) buffer_list[i].addr);
			return ERR_PTR(-EINVAL);
		}

		if (!buffer_list[i].size) {
			pr_debug("Invalid Buffer Size\n");
			return ERR_PTR(-EINVAL);
		}

		total_len += buffer_list[i].size;
		pbl_depth += ALIGN(buffer_list[i].size,
				   (1 << page_shift)) >> page_shift;
	}

	page_list = vmalloc(sizeof(u64) * pbl_depth);
	if (!page_list) {
		pr_debug("couldn't vmalloc page_list of size %zu\n",
			 (sizeof(u64) * pbl_depth));
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0, j = 0; i < num_phys_buf; i++) {

		int naddrs;

		naddrs = ALIGN(buffer_list[i].size,
			       (1 << page_shift)) >> page_shift;
		for (k = 0; k < naddrs; k++)
			page_list[j++] = (buffer_list[i].addr +
					  (k << page_shift));
	}

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		vfree(page_list);
		return ERR_PTR(-ENOMEM);
	}

	mr->pd = to_c2pd(ib_pd);
	pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
		 "*iova_start %llx, first pa %llx, last pa %llx\n",
		 __FUNCTION__, page_shift, pbl_depth, total_len,
		 (unsigned long long) *iova_start,
		 (unsigned long long) page_list[0],
		 (unsigned long long) page_list[pbl_depth-1]);
	err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
					 (1 << page_shift), pbl_depth,
					 total_len, 0, iova_start,
					 c2_convert_access(acc), mr);
	vfree(page_list);
	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	return &mr->ibmr;
}

static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva = 0;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	/* AMSO1100 limit */
	bl.size = 0xffffffff;
	bl.addr = 0;
	return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
}

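/*
 * Note: the "DMA MR" above is emulated as a single physical MR of
 * length 0xffffffff starting at address 0, the largest region the
 * AMSO1100 can express; physical memory above 4 GiB would therefore
 * not be covered by this registration.
 */
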
static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
				    int acc, struct ib_udata *udata)
{
	u64 *pages;
	u64 kva = 0;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c2_pd *c2pd = to_c2pd(pd);
	struct c2_mr *c2mr;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	shift = ffs(region->page_size) - 1;

	c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
	if (!c2mr)
		return ERR_PTR(-ENOMEM);
	c2mr->pd = c2pd;

	n = 0;
	list_for_each_entry(chunk, &region->chunk_list, list)
		n += chunk->nents;

	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err;
	}

	i = 0;
	list_for_each_entry(chunk, &region->chunk_list, list) {
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] =
					sg_dma_address(&chunk->page_list[j]) +
					(region->page_size * k);
			}
		}
	}

	kva = (u64) region->virt_base;
	err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
					 pages,
					 region->page_size,
					 i,
					 region->length,
					 region->offset,
					 &kva,
					 c2_convert_access(acc),
					 c2mr);
	kfree(pages);
	if (err) {
		kfree(c2mr);
		return ERR_PTR(err);
	}
	return &c2mr->ibmr;

err:
	kfree(c2mr);
	return ERR_PTR(err);
}

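/*
 * Sketch of what c2_reg_user_mr() builds, assuming a 4 KiB page size:
 * each DMA-mapped scatterlist entry is exploded into per-page bus
 * addresses, so an 8 KiB entry at bus address B contributes B and
 * B + 4096 to pages[], giving the adapter one flat page list for the
 * whole user region.
 */
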
static int c2_dereg_mr(struct ib_mr *ib_mr)
{
	struct c2_mr *mr = to_c2mr(ib_mr);
	int err;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
	if (err)
		pr_debug("c2_stag_dealloc failed: %d\n", err);
	else
		kfree(mr);

	return err;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return sprintf(buf, "%x\n", dev->props.hw_ver);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return sprintf(buf, "%x.%x.%x\n",
		       (int) (dev->props.fw_ver >> 32),
		       (int) (dev->props.fw_ver >> 16) & 0xffff,
		       (int) (dev->props.fw_ver & 0xffff));
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return sprintf(buf, "AMSO1100\n");
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
}

static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct class_device_attribute *c2_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type,
	&class_device_attr_board_id
};

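/*
 * These attributes appear under the device's infiniband class
 * directory. Illustrative usage, assuming the device registered as
 * "amso0" (see the "amso%d" name template in c2_register_device()):
 *
 *	$ cat /sys/class/infiniband/amso0/fw_ver
 *	1.0.4
 *
 * The value shown is a made-up example; the real string is formatted
 * from dev->props.fw_ver by show_fw_ver() above.
 */
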
static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	int err;

	err = c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
			   attr_mask);

	return err;
}

static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return -ENOSYS;
}

static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return -ENOSYS;
}

static int c2_process_mad(struct ib_device *ibdev,
			  int mad_flags,
			  u8 port_num,
			  struct ib_wc *in_wc,
			  struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return -ENOSYS;
}

static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	/* Request a connection */
	return c2_llp_connect(cm_id, iw_param);
}

static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	/* Accept the new connection */
	return c2_llp_accept(cm_id, iw_param);
}

static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	err = c2_llp_reject(cm_id, pdata, pdata_len);
	return err;
}

static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
{
	int err;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	err = c2_llp_service_create(cm_id, backlog);
	pr_debug("%s:%u err=%d\n", __FUNCTION__, __LINE__, err);
	return err;
}

static int c2_service_destroy(struct iw_cm_id *cm_id)
{
	int err;

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);

	err = c2_llp_service_destroy(cm_id);

	return err;
}

static int c2_pseudo_up(struct net_device *netdev)
{
	struct in_device *ind;
	struct c2_dev *c2dev = netdev->priv;

	ind = in_dev_get(netdev);
	if (!ind)
		return 0;

	pr_debug("adding...\n");
	for_ifa(ind) {
#ifdef DEBUG
		u8 *ip = (u8 *) &ifa->ifa_address;

		pr_debug("%s: %d.%d.%d.%d\n",
			 ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
#endif
		c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
	}
	endfor_ifa(ind);
	in_dev_put(ind);

	return 0;
}

static int c2_pseudo_down(struct net_device *netdev)
{
	struct in_device *ind;
	struct c2_dev *c2dev = netdev->priv;

	ind = in_dev_get(netdev);
	if (!ind)
		return 0;

	pr_debug("deleting...\n");
	for_ifa(ind) {
#ifdef DEBUG
		u8 *ip = (u8 *) &ifa->ifa_address;

		pr_debug("%s: %d.%d.%d.%d\n",
			 ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
#endif
		c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
	}
	endfor_ifa(ind);
	in_dev_put(ind);

	return 0;
}

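/*
 * Note: the pseudo "iwX" netdev exists only to give the RNIC an IPv4
 * identity for iWARP connection management; bringing it up or down
 * mirrors its addresses into (or out of) the adapter via
 * c2_add_addr()/c2_del_addr(), and it never carries real traffic
 * (c2_pseudo_xmit_frame() below simply drops the skb).
 */
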
static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
{
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/* TODO: Tell the RNIC about the new RDMA interface MTU */
	return ret;
}

static void setup(struct net_device *netdev)
{
	SET_MODULE_OWNER(netdev);
	netdev->open = c2_pseudo_up;
	netdev->stop = c2_pseudo_down;
	netdev->hard_start_xmit = c2_pseudo_xmit_frame;
	netdev->get_stats = NULL;
	netdev->tx_timeout = NULL;
	netdev->set_mac_address = NULL;
	netdev->change_mtu = c2_pseudo_change_mtu;
	netdev->watchdog_timeo = 0;
	netdev->type = ARPHRD_ETHER;
	netdev->mtu = 1500;
	netdev->hard_header_len = ETH_HLEN;
	netdev->addr_len = ETH_ALEN;
	netdev->tx_queue_len = 0;
	netdev->flags |= IFF_NOARP;
}

static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
{
	char name[IFNAMSIZ];
	struct net_device *netdev;

	/* change ethxxx to iwxxx */
	snprintf(name, sizeof(name), "iw%s", &c2dev->netdev->name[3]);
	netdev = alloc_netdev(sizeof(*netdev), name, setup);
	if (!netdev) {
		printk(KERN_ERR PFX "%s - etherdev alloc failed\n",
			__FUNCTION__);
		return NULL;
	}

	netdev->priv = c2dev;

	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

	memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);

	/* Print out the MAC address */
	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n",
		 netdev->name,
		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

#if 0
	/* Disable network packets */
	netif_stop_queue(netdev);
#endif
	return netdev;
}

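/*
 * Example (illustrative): if the underlying Ethernet interface came
 * up as "eth2", the pseudo RDMA interface above is named "iw2"; the
 * renaming assumes the netdev name carries a three-character prefix.
 */
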
int c2_register_device(struct c2_dev *dev)
{
	int ret;
	int i;

	/* Register pseudo network device */
	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
	if (!dev->pseudo_netdev)
		return -ENOMEM;

	ret = register_netdev(dev->pseudo_netdev);
	if (ret) {
		printk(KERN_ERR PFX
			"Unable to register netdev, ret = %d\n", ret);
		free_netdev(dev->pseudo_netdev);
		return ret;
	}

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);

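	/*
	 * Each bit above advertises one uverbs command that userspace
	 * (libibverbs) may issue against this device; commands whose
	 * bits are clear are refused by the uverbs layer before they
	 * ever reach the driver.
	 */
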
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.dma_device = &dev->pcidev->dev;
	dev->ibdev.class_dev.dev = &dev->pcidev->dev;
	dev->ibdev.query_device = c2_query_device;
	dev->ibdev.query_port = c2_query_port;
	dev->ibdev.modify_port = c2_modify_port;
	dev->ibdev.query_pkey = c2_query_pkey;
	dev->ibdev.query_gid = c2_query_gid;
	dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
	dev->ibdev.mmap = c2_mmap_uar;
	dev->ibdev.alloc_pd = c2_alloc_pd;
	dev->ibdev.dealloc_pd = c2_dealloc_pd;
	dev->ibdev.create_ah = c2_ah_create;
	dev->ibdev.destroy_ah = c2_ah_destroy;
	dev->ibdev.create_qp = c2_create_qp;
	dev->ibdev.modify_qp = c2_modify_qp;
	dev->ibdev.destroy_qp = c2_destroy_qp;
	dev->ibdev.create_cq = c2_create_cq;
	dev->ibdev.destroy_cq = c2_destroy_cq;
	dev->ibdev.poll_cq = c2_poll_cq;
	dev->ibdev.get_dma_mr = c2_get_dma_mr;
	dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
	dev->ibdev.reg_user_mr = c2_reg_user_mr;
	dev->ibdev.dereg_mr = c2_dereg_mr;

	dev->ibdev.alloc_fmr = NULL;
	dev->ibdev.unmap_fmr = NULL;
	dev->ibdev.dealloc_fmr = NULL;
	dev->ibdev.map_phys_fmr = NULL;

	dev->ibdev.attach_mcast = c2_multicast_attach;
	dev->ibdev.detach_mcast = c2_multicast_detach;
	dev->ibdev.process_mad = c2_process_mad;

	dev->ibdev.req_notify_cq = c2_arm_cq;
	dev->ibdev.post_send = c2_post_send;
	dev->ibdev.post_recv = c2_post_receive;

	dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (dev->ibdev.iwcm == NULL) {
		ret = -ENOMEM;
		goto out_free_netdev;
	}
	dev->ibdev.iwcm->add_ref = c2_add_ref;
	dev->ibdev.iwcm->rem_ref = c2_rem_ref;
	dev->ibdev.iwcm->get_qp = c2_get_qp;
	dev->ibdev.iwcm->connect = c2_connect;
	dev->ibdev.iwcm->accept = c2_accept;
	dev->ibdev.iwcm->reject = c2_reject;
	dev->ibdev.iwcm->create_listen = c2_service_create;
	dev->ibdev.iwcm->destroy_listen = c2_service_destroy;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto out_free_iwcm;

	for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ibdev.class_dev,
					       c2_class_attributes[i]);
		if (ret)
			goto out_unregister_ibdev;
	}

	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	return 0;

out_unregister_ibdev:
	ib_unregister_device(&dev->ibdev);
out_free_iwcm:
	kfree(dev->ibdev.iwcm);
out_free_netdev:
	unregister_netdev(dev->pseudo_netdev);
	free_netdev(dev->pseudo_netdev);
	return ret;
}

void c2_unregister_device(struct c2_dev *dev)
{
	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
	unregister_netdev(dev->pseudo_netdev);
	free_netdev(dev->pseudo_netdev);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
}