ehca_classes.h revision 19f4282149147b4a3e8c670373dc73ddd5d5facc
/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Struct definition for eHCA internal structures
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__

struct ehca_module;
struct ehca_qp;
struct ehca_cq;
struct ehca_eq;
struct ehca_mr;
struct ehca_mw;
struct ehca_pd;
struct ehca_av;

#include <linux/wait.h>
#include <linux/mutex.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#ifdef CONFIG_PPC64
#include "ehca_classes_pSeries.h"
#endif
#include "ipz_pt_fn.h"
#include "ehca_qes.h"
#include "ehca_irq.h"

#define EHCA_EQE_CACHE_SIZE 20
#define EHCA_MAX_NUM_QUEUES 0xffff

struct ehca_eqe_cache_entry {
	struct ehca_eqe *eqe;
	struct ehca_cq *cq;
};

struct ehca_eq {
	u32 length;
	struct ipz_queue ipz_queue;
	struct ipz_eq_handle ipz_eq_handle;
	struct work_struct work;
	struct h_galpas galpas;
	int is_initialized;
	struct ehca_pfeq pf;
	spinlock_t spinlock;
	struct tasklet_struct interrupt_task;
	u32 ist;
	spinlock_t irq_spinlock;
	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};

struct ehca_sma_attr {
	u16 lid, lmc, sm_sl, sm_lid;
	u16 pkey_tbl_len, pkeys[16];
};

struct ehca_sport {
	struct ib_cq *ibcq_aqp1;
	struct ib_qp *ibqp_sqp[2];
	/* lock to serialize modify_qp() calls for sqp in normal
	 * and irq path (when event PORT_ACTIVE is received first time)
	 */
	spinlock_t mod_sqp_lock;
	enum ib_port_state port_state;
	struct ehca_sma_attr saved_attr;
	u32 pma_qp_nr;
};
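/*
 * Illustrative sketch (kept out of the build, not part of the driver API):
 * per the comment above, both the normal modify_qp() path and the
 * PORT_ACTIVE interrupt path are expected to take mod_sqp_lock before
 * touching ibqp_sqp[].  The function name example_touch_sqp() is
 * hypothetical.
 */
#if 0
static void example_touch_sqp(struct ehca_sport *sport)
{
	unsigned long flags;

	spin_lock_irqsave(&sport->mod_sqp_lock, flags);
	/* ... inspect or modify sport->ibqp_sqp[] here ... */
	spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
}
#endif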
#define HCA_CAP_MR_PGSIZE_4K  0x80000000
#define HCA_CAP_MR_PGSIZE_64K 0x40000000
#define HCA_CAP_MR_PGSIZE_1M  0x20000000
#define HCA_CAP_MR_PGSIZE_16M 0x10000000

struct ehca_shca {
	struct ib_device ib_device;
	struct of_device *ofdev;
	u8 num_ports;
	int hw_level;
	struct list_head shca_list;
	struct ipz_adapter_handle ipz_hca_handle;
	struct ehca_sport sport[2];
	struct ehca_eq eq;
	struct ehca_eq neq;
	struct ehca_mr *maxmr;
	struct ehca_pd *pd;
	struct h_galpas galpas;
	struct mutex modify_mutex;
	u64 hca_cap;
	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
	u32 hca_cap_mr_pgsize;
	int max_mtu;
	int max_num_qps;
	int max_num_cqs;
	atomic_t num_cqs;
	atomic_t num_qps;
};
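/*
 * Illustrative sketch (kept out of the build, not part of the driver API):
 * assuming the HCA_CAP_MR_PGSIZE_* bits above map onto hca_cap_mr_pgsize as
 * described by the struct comment, a caller could pick the largest MR page
 * size the HCA supports like this.  The helper name is hypothetical.
 */
#if 0
static inline u64 example_max_mr_pgsize(struct ehca_shca *shca)
{
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
		return 16 * 1024 * 1024;
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_1M)
		return 1024 * 1024;
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_64K)
		return 64 * 1024;
	return 4096;	/* 4K support is assumed as the baseline */
}
#endif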
struct ehca_pd {
	struct ib_pd ib_pd;
	struct ipz_pd fw_pd;
	/* small queue mgmt */
	struct mutex lock;
	struct list_head free[2];
	struct list_head full[2];
};

enum ehca_ext_qp_type {
	EQPT_NORMAL  = 0,
	EQPT_LLQP    = 1,
	EQPT_SRQBASE = 2,
	EQPT_SRQ     = 3,
};

/* struct to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm {
	int mask;
	struct ib_qp_attr attr;
};

#define EHCA_MOD_QP_PARM_MAX 4

#define QMAP_IDX_MASK 0xFFFFULL

/* struct for tracking if cqes have been reported to the application */
struct ehca_qmap_entry {
	u16 app_wr_id;
	u16 reported;
};

struct ehca_queue_map {
	struct ehca_qmap_entry *map;
	unsigned int entries;
	unsigned int tail;
	unsigned int left_to_poll;
};
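/*
 * Illustrative sketch (kept out of the build, not part of the driver API):
 * one way the low 16 bits of a 64-bit work request id can be swapped for a
 * queue-map index via QMAP_IDX_MASK, with the application's original bits
 * parked in ehca_qmap_entry.app_wr_id.  Both helper names are hypothetical.
 */
#if 0
static inline u64 example_replace_wr_id(u64 wr_id, u16 idx)
{
	return (wr_id & ~QMAP_IDX_MASK) | (idx & QMAP_IDX_MASK);
}

static inline u16 example_get_app_wr_id(u64 wr_id)
{
	return wr_id & QMAP_IDX_MASK;
}
#endif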
struct ehca_qp {
	union {
		struct ib_qp ib_qp;
		struct ib_srq ib_srq;
	};
	u32 qp_type;
	enum ehca_ext_qp_type ext_type;
	enum ib_qp_state state;
	struct ipz_queue ipz_squeue;
	struct ehca_queue_map sq_map;
	struct ipz_queue ipz_rqueue;
	struct ehca_queue_map rq_map;
	struct h_galpas galpas;
	u32 qkey;
	u32 real_qp_num;
	u32 token;
	spinlock_t spinlock_s;
	spinlock_t spinlock_r;
	u32 sq_max_inline_data_size;
	struct ipz_qp_handle ipz_qp_handle;
	struct ehca_pfqp pf;
	struct ib_qp_init_attr init_attr;
	struct ehca_cq *send_cq;
	struct ehca_cq *recv_cq;
	unsigned int sqerr_purgeflag;
	struct hlist_node list_entries;
	/* array to cache modify_qp()'s parms for GSI/SMI qp */
	struct ehca_mod_qp_parm *mod_qp_parm;
	int mod_qp_parm_idx;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_squeue;
	u32 mm_count_rqueue;
	u32 mm_count_galpa;
	/* unsolicited ack circumvention */
	int unsol_ack_circ;
	int mtu_shift;
	u32 message_count;
	u32 packet_count;
	atomic_t nr_events;	/* events seen */
	wait_queue_head_t wait_completion;
	int mig_armed;
	struct list_head sq_err_node;
	struct list_head rq_err_node;
};

#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)

/* must be power of 2 */
#define QP_HASHTAB_LEN 8

struct ehca_cq {
	struct ib_cq ib_cq;
	struct ipz_queue ipz_queue;
	struct h_galpas galpas;
	spinlock_t spinlock;
	u32 cq_number;
	u32 token;
	u32 nr_of_entries;
	struct ipz_cq_handle ipz_cq_handle;
	struct ehca_pfcq pf;
	spinlock_t cb_lock;
	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
	struct list_head entry;
	u32 nr_callbacks;	/* #events assigned to cpu by scaling code */
	atomic_t nr_events;	/* #events seen */
	wait_queue_head_t wait_completion;
	spinlock_t task_lock;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_queue;
	u32 mm_count_galpa;
	struct list_head sqp_err_list;
	struct list_head rqp_err_list;
};
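/*
 * Illustrative sketch (kept out of the build, not part of the driver API):
 * because QP_HASHTAB_LEN must be a power of 2, a QP number can be mapped to
 * a qp_hashtab bucket with a mask instead of a modulo.  The helper name is
 * hypothetical; actual lookup goes through ehca_cq_get_qp() declared at the
 * end of this header.
 */
#if 0
static inline unsigned int example_qp_hash_bucket(u32 qp_num)
{
	return qp_num & (QP_HASHTAB_LEN - 1);
}
#endif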
enum ehca_mr_flag {
	EHCA_MR_FLAG_FMR = 0x80000000,	 /* FMR, created with ehca_alloc_fmr */
	EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
};

struct ehca_mr {
	union {
		struct ib_mr ib_mr;	/* must always be first in ehca_mr */
		struct ib_fmr ib_fmr;	/* must always be first in ehca_mr */
	} ib;
	struct ib_umem *umem;
	spinlock_t mrlock;

	enum ehca_mr_flag flags;
	u32 num_kpages;		/* number of kernel pages */
	u32 num_hwpages;	/* number of hw pages to form MR */
	u64 hwpage_size;	/* hw page size used for this MR */
	int acl;		/* ACL (stored here for usage in reregister) */
	u64 *start;		/* virtual start address (stored here for */
				/* usage in reregister) */
	u64 size;		/* size (stored here for usage in reregister) */
	u32 fmr_page_size;	/* page size for FMR */
	u32 fmr_max_pages;	/* max pages for FMR */
	u32 fmr_max_maps;	/* max outstanding maps for FMR */
	u32 fmr_map_cnt;	/* map counter for FMR */
	/* fw specific data */
	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
	struct h_galpas galpas;
};

struct ehca_mw {
	struct ib_mw ib_mw;	/* gen2 mw, must always be first in ehca_mw */
	spinlock_t mwlock;

	u8 never_bound;		/* indication MW was never bound */
	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
	struct h_galpas galpas;
};

enum ehca_mr_pgi_type {
	EHCA_MR_PGI_PHYS = 1,	/* type of ehca_reg_phys_mr,
				 * ehca_rereg_phys_mr,
				 * ehca_reg_internal_maxmr */
	EHCA_MR_PGI_USER = 2,	/* type of ehca_reg_user_mr */
	EHCA_MR_PGI_FMR  = 3	/* type of ehca_map_phys_fmr */
};

struct ehca_mr_pginfo {
	enum ehca_mr_pgi_type type;
	u64 num_kpages;
	u64 kpage_cnt;
	u64 hwpage_size;	/* hw page size used for this MR */
	u64 num_hwpages;	/* number of hw pages */
	u64 hwpage_cnt;		/* counter for hw pages */
	u64 next_hwpage;	/* next hw page in buffer/chunk/listelem */

	union {
		struct {	/* type EHCA_MR_PGI_PHYS section */
			int num_phys_buf;
			struct ib_phys_buf *phys_buf_array;
			u64 next_buf;
		} phy;
		struct {	/* type EHCA_MR_PGI_USER section */
			struct ib_umem *region;
			struct ib_umem_chunk *next_chunk;
			u64 next_nmap;
		} usr;
		struct {	/* type EHCA_MR_PGI_FMR section */
			u64 fmr_pgsize;
			u64 *page_list;
			u64 next_listelem;
		} fmr;
	} u;
};

/* output parameters for MR/FMR hipz calls */
struct ehca_mr_hipzout_parms {
	struct ipz_mrmw_handle handle;
	u32 lkey;
	u32 rkey;
	u64 len;
	u64 vaddr;
	u32 acl;
};

/* output parameters for MW hipz calls */
struct ehca_mw_hipzout_parms {
	struct ipz_mrmw_handle handle;
	u32 rkey;
};

struct ehca_av {
	struct ib_ah ib_ah;
	struct ehca_ud_av av;
};

struct ehca_ucontext {
	struct ib_ucontext ib_ucontext;
};

int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
void ehca_cleanup_cq_cache(void);
int ehca_init_qp_cache(void);
void ehca_cleanup_qp_cache(void);
int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
int ehca_init_small_qp_cache(void);
void ehca_cleanup_small_qp_cache(void);

extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;

extern int ehca_static_rate;
extern int ehca_port_act_time;
extern int ehca_use_hp_mr;
extern int ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
extern int ehca_max_cq;
extern int ehca_max_qp;

struct ipzu_queue_resp {
	u32 qe_size;      /* queue entry size */
	u32 act_nr_of_sg;
	u32 queue_length; /* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;
	u32 offset;       /* save offset within a page for small_qp */
};

struct ehca_create_cq_resp {
	u32 cq_number;
	u32 token;
	struct ipzu_queue_resp ipz_queue;
	u32 fw_handle_ofs;
	u32 dummy;
};

struct ehca_create_qp_resp {
	u32 qp_num;
	u32 token;
	u32 qp_type;
	u32 ext_type;
	u32 qkey;
	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
	u32 real_qp_num;
	u32 fw_handle_ofs;
	u32 dummy;
	struct ipzu_queue_resp ipz_squeue;
	struct ipzu_queue_resp ipz_rqueue;
};

struct ehca_alloc_cq_parms {
	u32 nr_cqe;
	u32 act_nr_of_entries;
	u32 act_pages;
	struct ipz_eq_handle eq_handle;
};

enum ehca_service_type {
	ST_RC = 0,
	ST_UC = 1,
	ST_RD = 2,
	ST_UD = 3,
};

enum ehca_ll_comp_flags {
	LLQP_SEND_COMP = 0x20,
	LLQP_RECV_COMP = 0x40,
	LLQP_COMP_MASK = 0x60,
};

struct ehca_alloc_queue_parms {
	/* input parameters */
	int max_wr;
	int max_sge;
	int page_size;
	int is_small;

	/* output parameters */
	u16 act_nr_wqes;
	u8 act_nr_sges;
	u32 queue_size;	/* bytes for small queues, pages otherwise */
};

struct ehca_alloc_qp_parms {
	struct ehca_alloc_queue_parms squeue;
	struct ehca_alloc_queue_parms rqueue;

	/* input parameters */
	enum ehca_service_type servicetype;
	int qp_storage;
	int sigtype;
	enum ehca_ext_qp_type ext_type;
	enum ehca_ll_comp_flags ll_comp_flags;
	int ud_av_l_key_ctl;

	u32 token;
	struct ipz_eq_handle eq_handle;
	struct ipz_pd pd;
	struct ipz_cq_handle send_cq_handle, recv_cq_handle;

	u32 srq_qpn, srq_token, srq_limit;

	/* output parameters */
	u32 real_qp_num;
	struct ipz_qp_handle qp_handle;
	struct h_galpas galpas;
};

int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);

#endif