/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

#include "ipath_kernel.h"

#define IPATH_MAX_RDMA_ATOMIC	4

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
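
/*
 * Sizing sketch (illustrative, assuming 4 KiB pages): each QPN bitmap
 * page (see struct qpn_map below) holds PAGE_SIZE * BITS_PER_BYTE =
 * 32768 QPN bits, so covering all QPN_MAX = 2^24 QPNs takes
 * 2^24 / 4096 / 8 = 512 map entries.
 */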

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

/* AETH NAK opcode values */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

/* Flags for checking QP state (see ib_ipath_state_ops[]) */
#define IPATH_POST_SEND_OK		0x01
#define IPATH_POST_RECV_OK		0x02
#define IPATH_PROCESS_RECV_OK		0x04
#define IPATH_PROCESS_SEND_OK		0x08
#define IPATH_PROCESS_NEXT_SEND_OK	0x10
#define IPATH_FLUSH_SEND		0x20
#define IPATH_FLUSH_RECV		0x40
#define IPATH_PROCESS_OR_FLUSH_SEND \
	(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));
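
/*
 * Receive-side selection sketch (illustrative): where the transport
 * header starts depends on whether a GRH is present, so the *_rcv()
 * routines declared below typically locate it as
 *
 *	struct ipath_other_headers *ohdr;
 *
 *	ohdr = has_grh ? &hdr->u.l.oth : &hdr->u.oth;
 */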

struct ipath_pio_header {
	__le32 pbc[2];
	struct ipath_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;
	struct ipath_qp *qp;
};

struct ipath_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

/*
 * This structure is used by ipath_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct ipath_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct ipath_cq_wc {
	u32 head;		/* index of next entry to fill */
	u32 tail;		/* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
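
/*
 * Consumer sketch (illustrative; locking and the user-mapped uqueue
 * case omitted): head == tail means the queue is empty, and both
 * indices wrap since the arrays hold ibcq.cqe + 1 entries:
 *
 *	if (wc->tail != wc->head) {
 *		*entry = wc->kqueue[wc->tail];
 *		if (wc->tail >= cq->ibcq.cqe)
 *			wc->tail = 0;
 *		else
 *			wc->tail++;
 *	}
 */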

/*
 * The completion queue structure.
 */
struct ipath_cq {
	struct ib_cq ibcq;
	struct tasklet_struct comptask;
	spinlock_t lock;
	u8 notify;
	u8 triggered;
	struct ipath_cq_wc *queue;
	struct ipath_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct ipath_seg {
	void *vaddr;
	size_t length;
};

/* The number of ipath_segs that fit in a page. */
#define IPATH_SEGSZ     (PAGE_SIZE / sizeof (struct ipath_seg))

struct ipath_segarray {
	struct ipath_seg segs[IPATH_SEGSZ];
};

struct ipath_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of ipath_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	struct ipath_segarray *map[0];	/* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct ipath_sge {
	struct ipath_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};
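
/*
 * Walk sketch (illustrative): when the current segment is exhausted,
 * the (m, n) cursor advances to the next ipath_seg, stepping to the
 * next map page after IPATH_SEGSZ entries:
 *
 *	if (++sge->n >= IPATH_SEGSZ) {
 *		sge->m++;
 *		sge->n = 0;
 *	}
 *	sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
 *	sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
 */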

/* Memory region */
struct ipath_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ipath_mregion mr;	/* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct ipath_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct ipath_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct ipath_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct ipath_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receive processing pulls requests from here */
	struct ipath_rwqe wq[0];
};

struct ipath_rq {
	struct ipath_rwq *wq;
	spinlock_t lock;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
};

struct ipath_srq {
	struct ib_srq ibsrq;
	struct ipath_rq rq;
	struct ipath_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct ipath_sge_state {
	struct ipath_sge *sg_list;      /* next SGE to be used if any */
	struct ipath_sge sge;   /* progress state for the current SGE */
	u8 num_sge;
	u8 static_rate;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct ipath_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	union {
		struct ipath_sge_state rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by taking both r_rq.lock and s_lock,
 * in that order, which only happens in modify_qp() or when changing
 * the QP state.
 */
struct ipath_qp {
	struct ib_qp ibqp;
	struct ipath_qp *next;		/* linked list for QPN hash table */
	struct ipath_qp *timer_next;	/* linked list for ipath_ib_timer() */
	struct ipath_qp *pio_next;	/* link for ipath_ib_piobufavail() */
	struct list_head piowait;	/* link for waiting for a PIO buffer */
	struct list_head timerwait;	/* link for waiting for timeouts */
	struct ib_ah_attr remote_ah_attr;
	struct ipath_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	wait_queue_head_t wait_dma;
	struct tasklet_struct s_task;
	struct ipath_mmap_info *ip;
	struct ipath_sge_state *s_cur_sge;
	struct ipath_verbs_txreq *s_tx;
	struct ipath_sge_state s_sge;	/* current send request data */
	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
	struct ipath_sge_state s_ack_rdma_sge;
	struct ipath_sge_state s_rdma_read_sge;
	struct ipath_sge_state r_sge;	/* current receive data */
	spinlock_t s_lock;
	atomic_t s_dma_busy;
	u16 s_pkt_delay;
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	unsigned long r_aflags;
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 r_flags;
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 qp_access_flags;
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_flags;
	u8 s_dmult;
	u8 s_draining;
	u8 timeout;		/* Timeout for this QP */
	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_last;		/* last un-ACK'ed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct ipath_swqe *s_wq;	/* send work queue */
	struct ipath_swqe *s_wqe;
	struct ipath_sge *r_ud_sg_list;
	struct ipath_rq r_rq;		/* receive work queue */
	struct ipath_sge r_sg_list[0];	/* verified SGEs */
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define IPATH_R_WRID_VALID	0

/*
 * Bit definitions for r_flags.
 */
#define IPATH_R_REUSE_SGE	0x01
#define IPATH_R_RDMAR_SEQ	0x02

/*
 * Bit definitions for s_flags.
 *
 * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
 *			   to complete before processing the next SWQE
 * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
 *			   to complete before processing the next SWQE
 * IPATH_S_WAITING - waiting for an RNR timeout or a send buffer to
 *		     become available
 * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process the next SWQE
 * IPATH_S_WAIT_DMA - waiting for the send DMA queue to drain before
 *		      generating the next send completion entry for a send
 *		      not done via send DMA
 */
#define IPATH_S_SIGNAL_REQ_WR	0x01
#define IPATH_S_FENCE_PENDING	0x02
#define IPATH_S_RDMAR_PENDING	0x04
#define IPATH_S_ACK_PENDING	0x08
#define IPATH_S_BUSY		0x10
#define IPATH_S_WAITING		0x20
#define IPATH_S_WAIT_SSN_CREDIT	0x40
#define IPATH_S_WAIT_DMA	0x80

#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
	IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)

#define IPATH_PSN_CREDIT	512

/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
 * struct ipath_qp.s_wq.  This function does the array index computation.
 */
static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
					      unsigned n)
{
	return (struct ipath_swqe *)((char *)qp->s_wq +
				     (sizeof(struct ipath_swqe) +
				      qp->s_max_sge *
				      sizeof(struct ipath_sge)) * n);
}

/*
 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
 * struct ipath_rwq.wq.  This function does the array index computation.
 */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
					      unsigned n)
{
	return (struct ipath_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct ipath_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
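
/*
 * Usage sketch (illustrative): since the entries are variable sized,
 * the pointer must be recomputed for each index when walking a queue:
 *
 *	u32 n = rq->wq->tail;
 *
 *	while (n != rq->wq->head) {
 *		struct ipath_rwqe *wqe = get_rwqe_ptr(rq, n);
 *
 *		... process wqe ...
 *		if (++n >= rq->size)
 *			n = 0;
 *	}
 */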

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	atomic_t n_free;
	void *page;
};

struct ipath_qp_table {
	spinlock_t lock;
	u32 last;		/* last QP number allocated */
	u32 max;		/* size of the hash table */
	u32 nmaps;		/* size of the map table */
	struct ipath_qp **table;
	/* bit map of free numbers */
	struct qpn_map map[QPNMAP_ENTRIES];
};
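
/*
 * Allocation sketch (illustrative; the actual constants live in the .c
 * code): with BITS_PER_PAGE assumed to be PAGE_SIZE * BITS_PER_BYTE, a
 * QPN selects a bitmap page and bit as
 *
 *	map = &qpt->map[qpn / BITS_PER_PAGE];
 *	bit = qpn % BITS_PER_PAGE;
 *
 * where map->page is allocated on first use and never freed.
 */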

struct ipath_lkey_table {
	spinlock_t lock;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct ipath_mregion **table;
};

struct ipath_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct ipath_ibdev {
	struct ib_device ibdev;
	struct ipath_devdata *dd;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;
	u32 mmap_offset;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
	u8 mkeyprot;
	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;

	/* The following fields are really per port. */
	struct ipath_qp_table qp_table;
	struct ipath_lkey_table lk_table;
	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
	struct list_head piowait;	/* list of QPs waiting for a PIO buffer */
	struct list_head txreq_free;
	void *txreq_bufs;
	/* list of QPs waiting for RNR timer */
	struct list_head rnrwait;
	spinlock_t pending_lock;
	__be64 sys_image_guid;	/* in network order */
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;

	u32 n_pds_allocated;	/* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;	/* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

	u64 ipath_sword;	/* total dwords sent (sample result) */
	u64 ipath_rword;	/* total dwords received (sample result) */
	u64 ipath_spkts;	/* total packets sent (sample result) */
	u64 ipath_rpkts;	/* total packets received (sample result) */
	/* # of ticks no data sent (sample result) */
	u64 ipath_xmit_wait;
	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
	u64 z_symbol_error_counter;		/* starting count for PMA */
	u64 z_link_error_recovery_counter;	/* starting count for PMA */
	u64 z_link_downed_counter;		/* starting count for PMA */
	u64 z_port_rcv_errors;			/* starting count for PMA */
	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
	u64 z_port_xmit_discards;		/* starting count for PMA */
	u64 z_port_xmit_data;			/* starting count for PMA */
	u64 z_port_rcv_data;			/* starting count for PMA */
	u64 z_port_xmit_packets;		/* starting count for PMA */
	u64 z_port_rcv_packets;			/* starting count for PMA */
	u32 z_pkey_violations;			/* starting count for PMA */
	u32 z_local_link_integrity_errors;	/* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
	u32 z_vl15_dropped;			/* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_timeouts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_wqe_errs;
	u32 n_rdma_dup_busy;
	u32 n_piowait;
	u32 n_unaligned;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 pending_index;	/* which pending queue is active */
	u8 pma_sample_status;
	u8 subnet_timeout;
	u8 vl_high_limit;
	struct ipath_opcode_stats opstats[128];
};

struct ipath_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

struct ipath_verbs_txreq {
	struct ipath_qp         *qp;
	struct ipath_swqe       *wqe;
	u32                      map_len;
	u32                      len;
	struct ipath_sge_state  *ss;
	struct ipath_pio_header  hdr;
	struct ipath_sdma_txreq  txreq;
};

static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ipath_mr, ibmr);
}

static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ipath_pd, ibpd);
}

static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ipath_ah, ibah);
}

static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ipath_cq, ibcq);
}

static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ipath_srq, ibsrq);
}

static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ipath_qp, ibqp);
}

static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ipath_ibdev, ibdev);
}

/*
 * This must be called with s_lock held.
 */
static inline void ipath_schedule_send(struct ipath_qp *qp)
{
	if (qp->s_flags & IPATH_S_ANY_WAIT)
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
	if (!(qp->s_flags & IPATH_S_BUSY))
		tasklet_hi_schedule(&qp->s_task);
}
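
/*
 * Caller sketch (illustrative): the s_lock requirement means calls take
 * the form
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	... clear the blocking condition, update s_flags, etc. ...
 *	ipath_schedule_send(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */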

int ipath_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int ipath_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
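
/*
 * Example (illustrative): shifting left by 8 drops bits above the low
 * 24 and moves the sign of the 24-bit difference into bit 31, so
 * comparisons stay correct across PSN wraparound, e.g.
 * ipath_cmp24(0, 0xffffff) > 0, i.e. PSN 0 follows PSN 0xffffff.
 */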

struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);

int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait);

int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs);

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_mcast_tree_empty(void);

__be32 ipath_compute_aeth(struct ipath_qp *qp);

struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);

struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);

int ipath_destroy_qp(struct ib_qp *ibqp);

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata);

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);

int ipath_init_qp_table(struct ipath_ibdev *idev, int size);

void ipath_get_credit(struct ipath_qp *qp, u32 aeth);

unsigned ipath_ib_rate_to_mult(enum ib_rate rate);

int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);

void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);

void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_restart_rc(struct ipath_qp *qp, u32 psn);

void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
		     struct ipath_mregion *mr);

void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);

int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);

int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr);

struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata);

int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata);

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int ipath_destroy_srq(struct ib_srq *ibsrq);

void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq,
			enum ib_cq_notify_flags notify_flags);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata);

int ipath_dereg_mr(struct ib_mr *ibmr);

struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr);

int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova);

int ipath_unmap_fmr(struct list_head *fmr_list);

int ipath_dealloc_fmr(struct ib_fmr *ibfmr);

void ipath_release_mmap_info(struct kref *ref);

struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj);

void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj);

int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

void ipath_insert_rnr_queue(struct ipath_qp *qp);

int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
		   u32 *lengthp, struct ipath_sge_state *ss);

int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords);

void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
			   struct ipath_other_headers *ohdr,
			   u32 bth0, u32 bth2);

void ipath_do_send(unsigned long data);

void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
			 enum ib_wc_status status);

int ipath_make_rc_req(struct ipath_qp *qp);

int ipath_make_uc_req(struct ipath_qp *qp);

int ipath_make_ud_req(struct ipath_qp *qp);

int ipath_register_ib_device(struct ipath_devdata *);

void ipath_unregister_ib_device(struct ipath_ibdev *);

void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);

int ipath_ib_piobufavail(struct ipath_ibdev *);

unsigned ipath_get_npkeys(struct ipath_devdata *);

u32 ipath_get_cr_errpkey(struct ipath_devdata *);

unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);

extern const enum ib_wc_opcode ib_ipath_wc_opcode[];

/*
 * Below converts HCA-specific LinkTrainingState to IB PhysPortState
 * values.
 */
extern const u8 ipath_cvt_physportstate[];
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6

extern const int ib_ipath_state_ops[];

extern unsigned int ib_ipath_lkey_table_size;

extern unsigned int ib_ipath_max_cqes;

extern unsigned int ib_ipath_max_cqs;

extern unsigned int ib_ipath_max_qp_wrs;

extern unsigned int ib_ipath_max_qps;

extern unsigned int ib_ipath_max_sges;

extern unsigned int ib_ipath_max_mcast_grps;

extern unsigned int ib_ipath_max_mcast_qp_attached;

extern unsigned int ib_ipath_max_srqs;

extern unsigned int ib_ipath_max_srq_sges;

extern unsigned int ib_ipath_max_srq_wrs;

extern const u32 ib_ipath_rnr_table[];

extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;

#endif				/* IPATH_VERBS_H */