usnic_ib_qp_grp.c revision f809309a251a13bd97cc189c3fa428782aab9716
/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX	0

const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}

int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}

int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;

	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the set of RQ indices.
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}

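/*
 * Enable every RQ in the QP group's RQ res chunk on the forwarding
 * device, unwinding any already-enabled RQs on failure. Caller must
 * hold qp_grp->lock.
 */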
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}

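/*
 * Disable every RQ in the QP group's RQ res chunk on the forwarding
 * device. Errors are logged but do not stop the loop; the last failing
 * status is returned. Caller must hold qp_grp->lock.
 */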
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
			PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}

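/*
 * Fill out a filter action that steers matching packets to the QP
 * group's default RQ.
 */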
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}

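/*
 * Reserve a transport port, install a usNIC filter for it on the
 * forwarding device, and wrap the result in a qp_grp_flow handle.
 * On failure the flow and the port reservation are unwound.
 */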
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}

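/*
 * Build a flow from a caller-supplied UDP socket: take a reference on
 * the socket, read back its bound address and port, and install a UDP
 * filter for that address/port on the forwarding device.
 */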
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP\n", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}

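/*
 * Dispatch flow creation by transport type and, on success, link the
 * new flow into the QP group's flow list and expose it in debugfs.
 */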
static struct usnic_ib_qp_grp_flow*
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}

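/*
 * Undo create_and_add_flow(): remove the flow from debugfs and the
 * QP group's flow list, then release the transport-specific state.
 */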
static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;

	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}

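/*
 * Drive the QP group through the IB QP state machine. Supported
 * transitions:
 *
 *   RESET -> RESET:        no-op
 *   RESET -> INIT:         optionally create a flow from the transport
 *                          spec passed via *data
 *   INIT  -> INIT:         add another flow (a transport spec is
 *                          required)
 *   INIT  -> RTR:          enable the RQs on the forwarding device
 *   RTR   -> RTS:          no-op for now
 *   RTR/RTS -> INIT:       disable the RQs
 *   INIT -> RESET:         release all flows
 *   RTR/RTS/ERR -> RESET:  disable the RQs and release all flows
 *   RESET/INIT/RTR/RTS -> ERR: tear down as for RESET and raise
 *                          IB_EVENT_QP_FATAL
 *
 * Any other transition fails with -EINVAL. qp_grp->lock is taken
 * internally; do not call with it held.
 */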
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s\n",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s\n",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}

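/*
 * Acquire one vNIC resource chunk per entry of *res_spec and return
 * them as a NULL-terminated array, releasing everything acquired so
 * far on failure.
 */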
static struct usnic_vnic_res_chunk**
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
					PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}

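/*
 * Bind a QP group to a VF and PD. The first QP group on a VF attaches
 * the VF's PCI device to the PD's uiom domain; subsequent bindings
 * only take a reference. Caller must hold vf->lock.
 */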
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}

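/*
 * Derive the QP group id (used as the IB QP number) from a flow: the
 * reserved port for custom RoCE, or the socket's bound UDP port for
 * IPv4/UDP.
 */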
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short to int cast works for little
		 * and big endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}

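/*
 * Allocate and initialize a QP group: verify the resource spec meets
 * the transport's minimum, acquire vNIC resources, bind the group to
 * the VF and PD, create the default flow from transport_spec, and
 * derive the QP number from that flow. Caller must hold vf->lock.
 */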
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp) {
		usnic_err("Unable to alloc qp_grp - Out of memory\n");
		return NULL;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		usnic_err("Unable to alloc res for %d with err %d\n",
				qp_grp->grp_id, err);
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}

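/*
 * Tear down a QP group created by usnic_ib_qp_grp_create(). The group
 * must already be in the RESET state. Caller must hold vf->lock.
 */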
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}

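/*
 * Return the QP group's resource chunk of the given type, or
 * ERR_PTR(-EINVAL) if the group holds no chunk of that type.
 */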
struct usnic_vnic_res_chunk*
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}