/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT	5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)
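/*
 * Illustrative sketch only (simplified from what a completion path such
 * as qib_cq_enter() can do with the sentinel): test whether notification
 * is armed, and disarm it before scheduling the completion callback.
 *
 *	if (cq->notify == IB_CQ_NEXT_COMP ||
 *	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
 *		cq->notify = IB_CQ_NONE;
 *		... schedule the completion handler ...
 *	}
 */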

#define IB_SEQ_NAK	(3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND			0x20
#define QIB_FLUSH_RECV			0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG		cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK		(1 << 31)
#define IB_BTH_SOLICITED	(1 << 23)
#define IB_BTH_MIG_REQ		(1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION		6
#define IB_GRH_VERSION_MASK	0xF
#define IB_GRH_VERSION_SHIFT	28
#define IB_GRH_TCLASS_MASK	0xFF
#define IB_GRH_TCLASS_SHIFT	20
#define IB_GRH_FLOW_MASK	0xFFFFF
#define IB_GRH_FLOW_SHIFT	0
#define IB_GRH_NEXT_HDR		0x1B

#define IB_DEFAULT_GID_PREFIX	cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __packed;

struct ib_atomic_eth {
	__be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __packed;

struct qib_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __packed;

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct qib_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct qib_other_headers oth;
		} l;
		struct qib_other_headers oth;
	} u;
} __packed;

struct qib_pio_header {
	__le32 pbc[2];
	struct qib_ib_header hdr;
} __packed;

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
	struct list_head list;
	struct qib_qp *qp;
};

struct qib_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct qib_pd {
	struct ib_pd ibpd;
	int user;               /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
	u32 head;               /* index of next entry to fill */
	u32 tail;               /* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
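/*
 * A sketch of the producer side of this ring (simplified; the real
 * qib_cq_enter() also handles the mmap'ed user-space queue, overflow
 * reporting, and locking):
 *
 *	u32 head = wc->head;
 *	u32 next = head + 1;
 *
 *	if (next >= cq->ibcq.cqe + 1)
 *		next = 0;		// the queue holds cqe + 1 entries
 *	if (unlikely(next == wc->tail))
 *		... overflow: no room for this completion ...
 *	wc->kqueue[head] = *entry;
 *	wc->head = next;		// publish after the entry is written
 */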

/*
 * The completion queue structure.
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct kthread_work comptask;
	struct qib_devdata *dd;
	spinlock_t lock; /* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct qib_cq_wc *queue;
	struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
	void *vaddr;
	size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ     (PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
	struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;           /* number of qib_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	u8  page_shift;         /* 0 - non-uniform/non-power-of-2 sizes */
	u8  lkey_published;     /* in global table */
	struct completion comp; /* complete when refcount goes to zero */
	struct rcu_head list;
	atomic_t refcount;
	struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
	struct qib_mregion *mr;
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};
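/*
 * A sketch of how the copy routines advance such a cursor after
 * consuming "len" bytes (bounds checks and the move to the next SGE
 * omitted; this is the mr->map[m]->segs[n] walk described above):
 *
 *	sge->vaddr += len;
 *	sge->length -= len;
 *	sge->sge_length -= len;
 *	if (sge->sge_length == 0) {
 *		... advance to the next SGE in the list ...
 *	} else if (sge->length == 0) {
 *		if (++sge->n >= QIB_SEGSZ) {
 *			sge->m++;	// next qib_segarray
 *			sge->n = 0;
 *		}
 *		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
 *		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
 *	}
 */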

/* Memory region */
struct qib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
	struct ib_send_wr wr;   /* don't use wr.sg_list */
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct qib_rwqe wq[0];
};

struct qib_rq {
	struct qib_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	spinlock_t lock /* protect changes in this struct */
		____cacheline_aligned_in_smp;
};

struct qib_srq {
	struct ib_srq ibsrq;
	struct qib_rq rq;
	struct qib_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct qib_sge_state {
	struct qib_sge *sg_list;      /* next SGE to be used if any */
	struct qib_sge sge;   /* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};
/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct qib_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct qib_sge rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order; both locks are held together only in modify_qp() or when
 * changing the QP state.
 */
struct qib_qp {
	struct ib_qp ibqp;
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct qib_qp __rcu *next;            /* link list for QPN hash table */
	struct qib_swqe *s_wq;  /* send work queue */
	struct qib_mmap_info *ip;
	struct qib_ib_header *s_hdr;     /* next packet header to send */
	unsigned long timeout_jiffies;  /* computed from timeout */

	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 pmtu;		/* decoded from path_mtu */
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

	u8 state;               /* QP state */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */

	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct qib_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_psn;              /* expected rcv packet sequence number */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */

	struct list_head rspwait;       /* link for waiting to respond */

	struct qib_sge_state r_sge;     /* current receive data */
	struct qib_rq r_rq;             /* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct qib_sge_state *s_cur_sge;
	u32 s_flags;
	struct qib_verbs_txreq *s_tx;
	struct qib_swqe *s_wqe;
	struct qib_sge_state s_sge;     /* current send request data */
	struct qib_mregion *s_rdma_mr;
	atomic_t s_dma_busy;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_head;             /* new entries added here */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_ssn;              /* SSN of tail entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct qib_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct list_head iowait;        /* link for wait PIO buf */

	struct work_struct s_work;

	wait_queue_head_t wait_dma;

	struct qib_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};
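/*
 * A minimal sketch of the lock ordering documented above struct qib_qp
 * (illustrative only; the real state-change paths also flush queues and
 * update dependent fields):
 *
 *	spin_lock_irq(&qp->r_rq.lock);
 *	spin_lock(&qp->s_lock);
 *	qp->state = new_state;
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock_irq(&qp->r_rq.lock);
 */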

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1
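/*
 * These are bit numbers for the atomic bitops, e.g. (illustrative):
 *
 *	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
 *	...
 *	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
 *		... generate a receive completion for qp->r_wr_id ...
 */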

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs signal completion only when
 *                       explicitly requested
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_RESP_PENDING - a responder packet (RC ACK or RDMA read response)
 *                      needs to be sent
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for the send DMA queue to drain before generating
 *                  the next send completion entry (one not sent via send DMA)
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR	0x0001
#define QIB_S_BUSY		0x0002
#define QIB_S_TIMER		0x0004
#define QIB_S_RESP_PENDING	0x0008
#define QIB_S_ACK_PENDING	0x0010
#define QIB_S_WAIT_FENCE	0x0020
#define QIB_S_WAIT_RDMAR	0x0040
#define QIB_S_WAIT_RNR		0x0080
#define QIB_S_WAIT_SSN_CREDIT	0x0100
#define QIB_S_WAIT_DMA		0x0200
#define QIB_S_WAIT_PIO		0x0400
#define QIB_S_WAIT_TX		0x0800
#define QIB_S_WAIT_DMA_DESC	0x1000
#define QIB_S_WAIT_KMEM		0x2000
#define QIB_S_WAIT_PSN		0x4000
#define QIB_S_WAIT_ACK		0x8000
#define QIB_S_SEND_ONE		0x10000
#define QIB_S_UNLIMITED_CREDIT	0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
					      unsigned n)
{
	return (struct qib_swqe *)((char *)qp->s_wq +
				     (sizeof(struct qib_swqe) +
				      qp->s_max_sge *
				      sizeof(struct qib_sge)) * n);
}
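/*
 * Usage sketch (illustrative): walk the posted but not yet completed
 * send WQEs, wrapping at the queue size:
 *
 *	u32 i = qp->s_last;
 *
 *	while (i != qp->s_head) {
 *		struct qib_swqe *wqe = get_swqe_ptr(qp, i);
 *
 *		... examine wqe->wr, wqe->psn, etc. ...
 *		if (++i >= qp->s_size)
 *			i = 0;
 *	}
 */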

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
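/*
 * Usage sketch (illustrative; the real consumer, qib_get_rwqe(), also
 * takes r_rq.lock and handles SRQ limit events):
 *
 *	struct qib_rwq *wq = rq->wq;
 *	u32 tail = wq->tail;
 *
 *	if (tail == wq->head)
 *		... no receive WQE is posted ...
 *	struct qib_rwqe *wqe = get_rwqe_ptr(rq, tail);
 *
 *	if (++tail >= rq->size)
 *		tail = 0;
 *	wq->tail = tail;
 *	... consume wqe->wr_id and wqe->sg_list[0..wqe->num_sge) ...
 */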

/*
 * QPN-map pages start out as NULL; they are allocated on first use and
 * never freed. This way, large bitmaps are not allocated unless large
 * numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct qib_qpn_table {
	spinlock_t lock; /* protect changes in this struct */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u16 mask;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};
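/*
 * A sketch of the lazy allocation scheme (simplified from the QPN
 * allocator; BITS_PER_PAGE below stands for PAGE_SIZE * BITS_PER_BYTE):
 *
 *	struct qpn_map *map = &qpt->map[qpn / BITS_PER_PAGE];
 *
 *	if (unlikely(!map->page))
 *		... allocate a zeroed page and install it in map->page ...
 *	if (!test_and_set_bit(qpn % BITS_PER_PAGE, map->page))
 *		... qpn was free and is now reserved ...
 */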

struct qib_lkey_table {
	spinlock_t lock; /* protect changes in this struct */
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
	u32 max;                /* size of the table */
	struct qib_mregion __rcu **table;
};

struct qib_opcode_stats {
	u64 n_packets;          /* number of packets */
	u64 n_bytes;            /* total number of bytes */
};

struct qib_opcode_stats_perctx {
	struct qib_opcode_stats stats[128];
};

struct qib_pma_counters {
	u64 n_unicast_xmit;     /* total unicast packets sent */
	u64 n_unicast_rcv;      /* total unicast packets received */
	u64 n_multicast_xmit;   /* total multicast packets sent */
	u64 n_multicast_rcv;    /* total multicast packets received */
};

struct qib_ibport {
	struct qib_qp __rcu *qp0;
	struct qib_qp __rcu *qp1;
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct qib_ah *sm_ah;
	struct qib_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;
	__be64 guids[QIB_GUIDS_PER_PORT - 1];	/* writable GUIDs */
	u64 tid;		/* TID for traps */
	struct qib_pma_counters __percpu *pmastats;
	u64 z_unicast_xmit;     /* starting count for PMA */
	u64 z_unicast_rcv;      /* starting count for PMA */
	u64 z_multicast_xmit;   /* starting count for PMA */
	u64 z_multicast_rcv;    /* starting count for PMA */
	u64 z_symbol_error_counter;             /* starting count for PMA */
	u64 z_link_error_recovery_counter;      /* starting count for PMA */
	u64 z_link_downed_counter;              /* starting count for PMA */
	u64 z_port_rcv_errors;                  /* starting count for PMA */
	u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
	u64 z_port_xmit_discards;               /* starting count for PMA */
	u64 z_port_xmit_data;                   /* starting count for PMA */
	u64 z_port_rcv_data;                    /* starting count for PMA */
	u64 z_port_xmit_packets;                /* starting count for PMA */
	u64 z_port_rcv_packets;                 /* starting count for PMA */
	u32 z_local_link_integrity_errors;      /* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
	u32 z_vl15_dropped;                     /* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_rc_delayed_comp;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_loop_pkts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_rc_timeouts;
	u32 n_dmawait;
	u32 n_unaligned;
	u32 n_rc_dupreq;
	u32 n_rc_seqnak;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 sm_lid;
	u16 repress_traps;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;
	u8 sl_to_vl[16];
};

struct qib_ibdev {
	struct ib_device ibdev;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	struct qib_mregion __rcu *dma_mr;

	/* QP numbers are shared by all IB ports */
	struct qib_qpn_table qpn_table;
	struct qib_lkey_table lk_table;
	struct list_head piowait;       /* list for wait PIO buf */
	struct list_head dmawait;	/* list for wait DMA */
	struct list_head txwait;        /* list for wait qib_verbs_txreq */
	struct list_head memwait;       /* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_qp __rcu **qp_table;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;
	/* list of QPs waiting for RNR timer */
	spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
	u32 qp_table_size; /* size of the hash table */
	u32 qp_rnd; /* random bytes for hash */
	spinlock_t qpt_lock;

	u32 n_piowait;
	u32 n_txwait;

	u32 n_pds_allocated;    /* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;    /* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;    /* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;    /* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;   /* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
	/* per HCA debugfs */
	struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member.
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
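/*
 * Worked example: 0x8001 (full member) and 0x0001 (limited member)
 * match since the low 15 bits agree and one has the membership bit set:
 *
 *	qib_pkey_ok(0x8001, 0x0001)	// non-zero: match
 *	qib_pkey_ok(0x0001, 0x0001)	// 0: both are limited members
 *	qib_pkey_ok(0x8001, 0x8002)	// 0: low 15 bits differ
 */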

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
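/*
 * The shift discards the high 8 bits of the difference so the comparison
 * wraps within the 24-bit PSN space, e.g. (illustrative):
 *
 *	qib_cmp24(5, 3) > 0			// 5 follows 3
 *	qib_cmp24(0x000001, 0xFFFFFF) > 0	// 1 follows 0xFFFFFF (wrap)
 *	qib_cmp24(0x1000003, 0x3) == 0		// bits above 23 are ignored
 */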

struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);

int qib_qp_iter_next(struct qib_qp_iter *iter);

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);

#endif

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
		  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);

void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

int qib_cq_init(struct qib_devdata *dd);

void qib_cq_exit(struct qib_devdata *dd);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
				struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

/* Hold a reference on a memory region. */
static inline void qib_get_mr(struct qib_mregion *mr)
{
	atomic_inc(&mr->refcount);
}

void mr_rcu_callback(struct rcu_head *list);

/* Drop a reference; free the region via RCU when the count hits zero. */
static inline void qib_put_mr(struct qib_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		call_rcu(&mr->list, mr_rcu_callback);
}

/* Drop the MR references held by an SGE state once a copy is finished. */
static inline void qib_put_ss(struct qib_sge_state *ss)
{
	while (ss->num_sge) {
		qib_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
					   struct ib_ucontext *context,
					   void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are the HCA-independent IB PhysPortState values returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif                          /* QIB_VERBS_H */