Lines matching refs:ep — identifier cross-reference listing: each entry below is prefixed with its line number in the source file, and only lines containing "ep" are shown, so intervening context is elided.

25 * bnx2i_get_cid_num - get cid from ep
26 * @ep: endpoint pointer
30 static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
34 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
35 cid = ep->ep_cid;
37 cid = GET_CID_NUM(ep->ep_cid);
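The split above is the per-device CID mapping: 57710-family chips use ep_cid directly, while 5706/5708/5709 derive the CID number through GET_CID_NUM(). A minimal user-space sketch, assuming GET_CID_NUM() is a right-shift by 7 (the same ">> 7" the non-57710 paths apply to ep_cid for context_id later in this listing):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumption: GET_CID_NUM() strips the low 7 bits, matching the
     * "ep_cid >> 7" used for context_id on non-57710 chips below. */
    #define GET_CID_NUM(cid_addr)   ((cid_addr) >> 7)

    static uint32_t get_cid_num(uint32_t ep_cid, int is_57710)
    {
            return is_57710 ? ep_cid : GET_CID_NUM(ep_cid);
    }

    int main(void)
    {
            uint32_t cid = 0x2380;          /* hypothetical ep_cid */

            printf("57710: %u  570x: %u\n",
                   get_cid_num(cid, 1), get_cid_num(cid, 0));
            return 0;
    }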
131 * @ep: endpoint (transport identifier) structure
138 int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
146 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
153 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
160 num_active_cmds = atomic_read(&ep->num_active_cmds);
164 next_index = num_active_cmds >> ep->ec_shift;
170 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
171 if (cq_index > ep->qp.cqe_size * 2)
172 cq_index -= ep->qp.cqe_size * 2;
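These arm-CQ lines compute where the chip should next raise a completion event: one event per 2^ec_shift outstanding commands, projected forward from the expected CQE sequence number and wrapped into the sequence-number space, which spans twice the CQ size so producer and consumer laps stay distinguishable. A standalone sketch of the arithmetic; the zero guards are assumptions, since those lines contain no "ep" and are elided from this listing:

    #include <stdint.h>
    #include <stdio.h>

    /* Pick the CQE index at which the chip should raise the next event:
     * one event per 2^ec_shift outstanding commands, wrapped into the
     * sequence-number space of 2 * cq_size entries. */
    static uint32_t next_arm_index(uint32_t exp_seq_sn, uint32_t active_cmds,
                                   uint32_t ec_shift, uint32_t cq_size)
    {
            uint32_t next_index = active_cmds >> ec_shift;
            uint32_t cq_index;

            if (!next_index)                /* assumed guard (elided line) */
                    next_index = 1;
            cq_index = exp_seq_sn + next_index - 1;
            if (cq_index > cq_size * 2)
                    cq_index -= cq_size * 2;
            if (!cq_index)                  /* assumed guard (elided line) */
                    cq_index = 1;
            return cq_index;
    }

    int main(void)
    {
            /* 40 commands in flight, coalescing every 2^2 = 4 of them */
            printf("arm at CQE %u\n", next_arm_index(250, 40, 2, 128));
            return 0;
    }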
195 if (!bnx2i_conn->ep->qp.rqe_left)
198 bnx2i_conn->ep->qp.rqe_left--;
199 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
200 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
201 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
202 bnx2i_conn->ep->qp.rq_cons_idx = 0;
204 bnx2i_conn->ep->qp.rq_cons_qe++;
205 bnx2i_conn->ep->qp.rq_cons_idx++;
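The RQ consume path above is a plain linear ring: check that an entry is available, copy it out, then either wrap the consumer pointer back to the first entry or advance it. A self-contained consumer sketch over an array-backed ring (sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define RQ_SIZE 16                      /* illustrative ring depth */
    #define RQE_LEN 256                     /* illustrative entry size */

    struct rq {
            char            qe[RQ_SIZE][RQE_LEN];
            char            *cons_qe;       /* current consumer entry */
            unsigned int    cons_idx;
            unsigned int    rqe_left;
    };

    static int rq_consume(struct rq *rq, void *dst, size_t len)
    {
            if (!rq->rqe_left)
                    return -1;              /* nothing to consume */
            rq->rqe_left--;
            memcpy(dst, rq->cons_qe, len);
            if (rq->cons_qe == rq->qe[RQ_SIZE - 1]) {
                    rq->cons_qe = rq->qe[0];        /* wrap to first entry */
                    rq->cons_idx = 0;
            } else {
                    rq->cons_qe += RQE_LEN;
                    rq->cons_idx++;
            }
            return 0;
    }

    int main(void)
    {
            struct rq rq = { .rqe_left = RQ_SIZE };
            char buf[RQE_LEN];

            rq.cons_qe = rq.qe[0];
            while (!rq_consume(&rq, buf, sizeof(buf)))
                    ;                       /* drain the whole ring */
            printf("drained, cons_idx=%u\n", rq.cons_idx);
            return 0;
    }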
220 writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
234 u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
235 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
237 ep->qp.rqe_left += count;
238 ep->qp.rq_prod_idx &= 0x7FFF;
239 ep->qp.rq_prod_idx += count;
241 if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
242 ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
244 ep->qp.rq_prod_idx |= 0x8000;
246 ep->qp.rq_prod_idx |= hi_bit;
248 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
249 rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
250 rq_db->prod_idx = ep->qp.rq_prod_idx;
253 writew(ep->qp.rq_prod_idx,
254 ep->qp.ctx_base + CNIC_RECV_DOORBELL);
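The producer update above keeps the hardware-visible RQ index in the low 15 bits and uses bit 15 as a phase flag that flips once per lap, so the chip can tell a full ring from an empty one. Line 243 contains no "ep" and is elided from the listing; the sketch below assumes it is an `if (!hi_bit)` guard around the `|= 0x8000`:

    #include <stdint.h>
    #include <stdio.h>

    /* Advance a 15-bit RQ producer index whose bit 15 is a phase flag
     * toggled once per lap around the ring. */
    static uint16_t rq_prod_advance(uint16_t prod_idx, uint16_t count,
                                    uint16_t max_rqes)
    {
            uint16_t hi_bit = prod_idx & 0x8000;

            prod_idx &= 0x7FFF;
            prod_idx += count;
            if (prod_idx > max_rqes) {
                    prod_idx %= max_rqes;
                    if (!hi_bit)            /* assumed guard on elided line */
                            prod_idx |= 0x8000;
            } else {
                    prod_idx |= hi_bit;     /* no wrap: keep current phase */
            }
            return prod_idx;
    }

    int main(void)
    {
            uint16_t idx = 0x8000;  /* initial value, as in the alloc path */

            idx = rq_prod_advance(idx, 20, 16);
            printf("prod_idx = 0x%04x\n", idx);     /* 0x0004: wrapped,
                                                       phase flipped to 0 */
            return 0;
    }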
272 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
274 atomic_inc(&ep->num_active_cmds);
276 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
277 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
278 sq_db->prod_idx = ep->qp.sq_prod_idx;
281 writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
300 if (bnx2i_conn->ep->qp.sq_prod_qe ==
301 bnx2i_conn->ep->qp.sq_last_qe)
302 bnx2i_conn->ep->qp.sq_prod_qe =
303 bnx2i_conn->ep->qp.sq_first_qe;
305 bnx2i_conn->ep->qp.sq_prod_qe++;
307 if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
308 bnx2i_conn->ep->qp.sq_last_qe)
309 bnx2i_conn->ep->qp.sq_prod_qe += count;
311 tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
312 bnx2i_conn->ep->qp.sq_prod_qe;
313 bnx2i_conn->ep->qp.sq_prod_qe =
314 &bnx2i_conn->ep->qp.sq_first_qe[count -
318 bnx2i_conn->ep->qp.sq_prod_idx += count;
320 bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
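The SQ-advance lines above (presumably the shared ring-doorbell helper) handle three producer cases: a single-WQE step with wrap-to-first, a multi-WQE step that fits before the ring end, and a multi-WQE step that spills past it. An index-based sketch; the truncated `sq_first_qe[count -` expression is assumed to complete as `count - (tmp_cnt + 1)`:

    #include <stdio.h>

    #define SQ_SIZE 64                      /* illustrative ring depth */

    /* Advance the SQ producer over a ring of SQ_SIZE entries, mirroring
     * the three cases in the listing: single step with wrap, in-bounds
     * block step, and a block step spilling past the ring end. */
    static unsigned int sq_prod_advance(unsigned int prod, unsigned int count)
    {
            const unsigned int last = SQ_SIZE - 1;

            if (count == 1)
                    return (prod == last) ? 0 : prod + 1;
            if (prod + count <= last)
                    return prod + count;
            /* spill: tmp_cnt entries remain to the ring end, the rest
             * wrap into the start of the ring */
            return count - ((last - prod) + 1);
    }

    int main(void)
    {
            printf("%u\n", sq_prod_advance(60, 8)); /* spills: lands on 4 */
            return 0;
    }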
343 bnx2i_conn->ep->qp.sq_prod_qe;
402 bnx2i_conn->ep->qp.sq_prod_qe;
473 text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
519 bnx2i_conn->ep->qp.sq_prod_qe;
543 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
550 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
558 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
613 bnx2i_conn->ep->qp.sq_prod_qe;
632 bnx2i_conn->ep->state = EP_STATE_LOGOUT_SENT;
660 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
661 update_wqe->context_id = bnx2i_conn->ep->ep_cid;
663 update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
703 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
705 if (ep->state == EP_STATE_OFLD_START) {
707 ep->state = EP_STATE_OFLD_FAILED;
708 } else if (ep->state == EP_STATE_DISCONN_START) {
710 ep->state = EP_STATE_DISCONN_TIMEDOUT;
711 } else if (ep->state == EP_STATE_CLEANUP_START) {
713 ep->state = EP_STATE_CLEANUP_FAILED;
716 wake_up_interruptible(&ep->ofld_wait);
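ep_ofld_timer is the shared timeout for the three wait points in the endpoint life cycle: each arm converts the in-progress state to its failure state, then wakes whoever sleeps on ofld_wait. A user-space analog of the state mapping (the enum is an illustrative subset):

    #include <stdio.h>

    enum ep_state {                         /* illustrative subset */
            EP_STATE_OFLD_START,
            EP_STATE_OFLD_FAILED,
            EP_STATE_DISCONN_START,
            EP_STATE_DISCONN_TIMEDOUT,
            EP_STATE_CLEANUP_START,
            EP_STATE_CLEANUP_FAILED,
    };

    /* Map each in-progress state to its timeout outcome, as the timer
     * callback does before waking the ofld_wait sleeper. */
    static enum ep_state ofld_timeout(enum ep_state state)
    {
            switch (state) {
            case EP_STATE_OFLD_START:
                    return EP_STATE_OFLD_FAILED;
            case EP_STATE_DISCONN_START:
                    return EP_STATE_DISCONN_TIMEDOUT;
            case EP_STATE_CLEANUP_START:
                    return EP_STATE_CLEANUP_FAILED;
            default:
                    return state;           /* nothing pending: unchanged */
            }
    }

    int main(void)
    {
            printf("%d\n", ofld_timeout(EP_STATE_DISCONN_START));
            return 0;
    }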
747 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
761 * @ep: endpoint (transport identifier) structure
766 int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
778 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
779 conn_cleanup.context_id = ep->ep_cid;
781 conn_cleanup.context_id = (ep->ep_cid >> 7);
783 conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
796 * @ep: endpoint (transport identifier) structure
801 struct bnx2i_endpoint *ep)
815 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
817 dma_addr = ep->qp.sq_pgtbl_phys;
821 dma_addr = ep->qp.cq_pgtbl_phys;
829 dma_addr = ep->qp.rq_pgtbl_phys;
833 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
838 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
856 * @ep: endpoint (transport identifier) structure
861 struct bnx2i_endpoint *ep)
876 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
878 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
882 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
890 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
894 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
898 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
907 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
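On the 5771x path the start of each page-table page is reserved for a doorbell structure, so both the DMA address handed to the chip and the CPU pointer used to fill the table are advanced by ISCSI_SQ/CQ/RQ_DB_SIZE. A pointer-arithmetic sketch with a placeholder 16-byte header (the real sizes are defined elsewhere in the driver):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DB_SIZE    16       /* placeholder for ISCSI_*_DB_SIZE */
    #define PGTBL_SIZE 4096

    int main(void)
    {
            /* one page-table page: doorbell header, then the PTE array */
            uint8_t  *pgtbl_virt = calloc(1, PGTBL_SIZE);
            uint64_t  pgtbl_phys = 0x7f000000ULL;   /* stand-in DMA address */

            /* the chip is told the table starts after the DB header ... */
            uint64_t  dma_addr = pgtbl_phys + DB_SIZE;
            /* ... and the driver fills PTEs from the matching CPU address */
            uint32_t *ptbl = (uint32_t *)(pgtbl_virt + DB_SIZE);

            printf("table phys 0x%llx, first PTE slot %p\n",
                   (unsigned long long)dma_addr, (void *)ptbl);
            free(pgtbl_virt);
            return 0;
    }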
925 * @ep: endpoint (transport identifier) structure
929 int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
934 rc = bnx2i_5771x_send_conn_ofld_req(hba, ep);
936 rc = bnx2i_570x_send_conn_ofld_req(hba, ep);
944 * @ep: endpoint (transport identifier) structure
950 static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
957 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
963 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
964 num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
965 page = ep->qp.sq_phys;
968 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
970 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
991 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
992 num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
993 page = ep->qp.rq_phys;
996 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
998 ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
1019 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
1020 num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
1021 page = ep->qp.cq_phys;
1024 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
1026 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
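setup_qp_page_tables fills the three tables with 64-bit DMA page addresses split into two 32-bit words. Only the table bases match "ep" above, but the two branches differ in word order; a sketch assuming the 57710 writes the low word first (little-endian PTEs, past the DB header) while the older 5706/5708/5709 write the high word first at the table base:

    #include <stdint.h>
    #include <stdio.h>

    /* Split 64-bit page addresses into 32-bit PTE words; the word order
     * is what differs between the device families. */
    static void fill_pgtbl(uint32_t *ptbl, uint64_t page, int num_pages,
                           int low_word_first, uint64_t page_size)
    {
            while (num_pages--) {
                    if (low_word_first) {   /* 57710 style */
                            *ptbl++ = (uint32_t)page;
                            *ptbl++ = (uint32_t)(page >> 32);
                    } else {                /* 5706/5708/5709 style (assumed) */
                            *ptbl++ = (uint32_t)(page >> 32);
                            *ptbl++ = (uint32_t)page;
                    }
                    page += page_size;
            }
    }

    int main(void)
    {
            uint32_t tbl[8];

            fill_pgtbl(tbl, 0x123400000ULL, 4, 1, 4096);
            printf("PTE0 = %08x %08x\n", tbl[0], tbl[1]);
            return 0;
    }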
1051 * @ep: endpoint (transport identifier) structure
1058 int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1062 ep->hba = hba;
1063 ep->conn = NULL;
1064 ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
1067 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
1068 ep->qp.sq_mem_size =
1069 (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1070 ep->qp.sq_pgtbl_size =
1071 (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
1072 ep->qp.sq_pgtbl_size =
1073 (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1075 ep->qp.sq_pgtbl_virt =
1076 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
1077 &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
1078 if (!ep->qp.sq_pgtbl_virt) {
1080 ep->qp.sq_pgtbl_size);
1085 ep->qp.sq_virt =
1086 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1087 &ep->qp.sq_phys, GFP_KERNEL);
1088 if (!ep->qp.sq_virt) {
1090 ep->qp.sq_mem_size);
1094 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
1095 ep->qp.sq_first_qe = ep->qp.sq_virt;
1096 ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
1097 ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
1098 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
1099 ep->qp.sq_prod_idx = 0;
1100 ep->qp.sq_cons_idx = 0;
1101 ep->qp.sqe_left = hba->max_sqes;
1104 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
1105 ep->qp.cq_mem_size =
1106 (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1107 ep->qp.cq_pgtbl_size =
1108 (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
1109 ep->qp.cq_pgtbl_size =
1110 (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1112 ep->qp.cq_pgtbl_virt =
1113 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1114 &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
1115 if (!ep->qp.cq_pgtbl_virt) {
1117 ep->qp.cq_pgtbl_size);
1122 ep->qp.cq_virt =
1123 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1124 &ep->qp.cq_phys, GFP_KERNEL);
1125 if (!ep->qp.cq_virt) {
1127 ep->qp.cq_mem_size);
1130 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
1132 ep->qp.cq_first_qe = ep->qp.cq_virt;
1133 ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
1134 ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
1135 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
1136 ep->qp.cq_prod_idx = 0;
1137 ep->qp.cq_cons_idx = 0;
1138 ep->qp.cqe_left = hba->max_cqes;
1139 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1140 ep->qp.cqe_size = hba->max_cqes;
1143 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
1147 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
1148 ep->qp.rq_mem_size =
1149 (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1150 ep->qp.rq_pgtbl_size =
1151 (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
1152 ep->qp.rq_pgtbl_size =
1153 (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1155 ep->qp.rq_pgtbl_virt =
1156 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1157 &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
1158 if (!ep->qp.rq_pgtbl_virt) {
1160 ep->qp.rq_pgtbl_size);
1165 ep->qp.rq_virt =
1166 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1167 &ep->qp.rq_phys, GFP_KERNEL);
1168 if (!ep->qp.rq_virt) {
1170 ep->qp.rq_mem_size);
1174 ep->qp.rq_first_qe = ep->qp.rq_virt;
1175 ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
1176 ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
1177 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
1178 ep->qp.rq_prod_idx = 0x8000;
1179 ep->qp.rq_cons_idx = 0;
1180 ep->qp.rqe_left = hba->max_rqes;
1182 setup_qp_page_tables(ep);
1187 bnx2i_free_qp_resc(hba, ep);
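Every region in the alloc path above is sized the same way: round the queue memory up to whole CNIC pages, allow one pointer-sized page-table entry per page, and round the table up as well. A sketch of that sizing, assuming CNIC_PAGE_MASK is ~(CNIC_PAGE_SIZE - 1):

    #include <stdio.h>

    #define CNIC_PAGE_SIZE 4096UL
    #define CNIC_PAGE_MASK (~(CNIC_PAGE_SIZE - 1))  /* assumed definition */

    static unsigned long page_align(unsigned long sz)
    {
            return (sz + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
    }

    int main(void)
    {
            unsigned long max_sqes = 128, wqe_size = 256;   /* illustrative */
            unsigned long mem  = page_align(max_sqes * wqe_size);
            unsigned long ptbl = page_align((mem / CNIC_PAGE_SIZE) *
                                            sizeof(void *));

            printf("queue %lu bytes, page table %lu bytes\n", mem, ptbl);
            return 0;
    }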
1196 * @ep: endpoint (transport identifier) structure
1200 void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1202 if (ep->qp.ctx_base) {
1203 iounmap(ep->qp.ctx_base);
1204 ep->qp.ctx_base = NULL;
1207 if (ep->qp.sq_pgtbl_virt) {
1208 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
1209 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
1210 ep->qp.sq_pgtbl_virt = NULL;
1211 ep->qp.sq_pgtbl_phys = 0;
1213 if (ep->qp.sq_virt) {
1214 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1215 ep->qp.sq_virt, ep->qp.sq_phys);
1216 ep->qp.sq_virt = NULL;
1217 ep->qp.sq_phys = 0;
1221 if (ep->qp.rq_pgtbl_virt) {
1222 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1223 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
1224 ep->qp.rq_pgtbl_virt = NULL;
1225 ep->qp.rq_pgtbl_phys = 0;
1227 if (ep->qp.rq_virt) {
1228 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1229 ep->qp.rq_virt, ep->qp.rq_phys);
1230 ep->qp.rq_virt = NULL;
1231 ep->qp.rq_phys = 0;
1235 if (ep->qp.cq_pgtbl_virt) {
1236 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1237 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
1238 ep->qp.cq_pgtbl_virt = NULL;
1239 ep->qp.cq_pgtbl_phys = 0;
1241 if (ep->qp.cq_virt) {
1242 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1243 ep->qp.cq_virt, ep->qp.cq_phys);
1244 ep->qp.cq_virt = NULL;
1245 ep->qp.cq_phys = 0;
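Note that the free path null-checks each region and resets both the CPU pointer and the DMA handle after releasing it, which is what lets the alloc function above call bnx2i_free_qp_resc() from its error path without tracking how far it got. A user-space sketch of that idempotent-release pattern (malloc/free standing in for dma_alloc/free_coherent):

    #include <stdlib.h>

    struct region {
            void            *virt;
            unsigned long   phys;   /* stand-in for a dma_addr_t handle */
    };

    /* Release a region once; later calls become harmless no-ops because
     * both handles are reset, exactly as the driver's free path does. */
    static void region_free(struct region *r)
    {
            if (!r->virt)
                    return;
            free(r->virt);          /* dma_free_coherent() in the driver */
            r->virt = NULL;
            r->phys = 0;
    }

    int main(void)
    {
            struct region r = { malloc(64), 0x1000 };

            region_free(&r);
            region_free(&r);        /* safe: already released */
            return 0;
    }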
1650 bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
1986 if (bnx2i_conn->ep == NULL)
1989 qp = &bnx2i_conn->ep->qp;
2069 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
2075 atomic_dec(&bnx2i_conn->ep->num_active_cmds);
2121 if (!bnx2i_conn->ep) {
2122 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
2127 nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep,
2130 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
2154 if (!conn->ep) {
2155 printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
2161 conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
2163 conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
2165 wake_up_interruptible(&conn->ep->ofld_wait);
2392 bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
2393 bnx2i_conn->ep->ep_cid);
2415 struct bnx2i_endpoint *ep;
2417 ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
2418 if (!ep) {
2424 if (hba != ep->hba) {
2431 ep->state = EP_STATE_CLEANUP_FAILED;
2433 ep->state = EP_STATE_CLEANUP_CMPL;
2434 wake_up_interruptible(&ep->ofld_wait);
2450 struct bnx2i_endpoint *ep;
2453 ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
2454 if (!ep) {
2459 if (hba != ep->hba) {
2465 ep->state = EP_STATE_OFLD_FAILED;
2478 ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
2484 ep->state = EP_STATE_OFLD_COMPL;
2486 cid_num = bnx2i_get_cid_num(ep);
2487 ep->ep_cid = cid_addr;
2488 ep->qp.ctx_base = NULL;
2490 wake_up_interruptible(&ep->ofld_wait);
2596 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2598 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
2599 ep->state = EP_STATE_CONNECT_FAILED;
2601 ep->state = EP_STATE_CONNECT_COMPL;
2603 ep->state = EP_STATE_CONNECT_FAILED;
2605 wake_up_interruptible(&ep->ofld_wait);
2618 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2620 ep->state = EP_STATE_DISCONN_COMPL;
2621 wake_up_interruptible(&ep->ofld_wait);
2634 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2636 ep->state = EP_STATE_DISCONN_COMPL;
2637 wake_up_interruptible(&ep->ofld_wait);
2651 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2653 ep->state = EP_STATE_TCP_FIN_RCVD;
2654 if (ep->conn)
2655 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2668 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2669 u32 old_state = ep->state;
2671 ep->state = EP_STATE_TCP_RST_RCVD;
2673 wake_up_interruptible(&ep->ofld_wait);
2675 if (ep->conn)
2676 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
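All the cm_* upcalls above share one shape: record the outcome in ep->state, then wake the task sleeping on ep->ofld_wait (and, for FIN/RST, queue the connection for recovery). A user-space analog using a mutex and condition variable in place of the kernel wait queue (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  ofld_wait = PTHREAD_COND_INITIALIZER;
    static int state;               /* 0 = operation still in progress */

    /* callback side: publish the outcome, then wake the waiter */
    static void *cm_connect_cmpl(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            state = 1;              /* e.g. EP_STATE_CONNECT_COMPL */
            pthread_cond_signal(&ofld_wait);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, cm_connect_cmpl, NULL);
            pthread_mutex_lock(&lock);      /* waiter side */
            while (!state)
                    pthread_cond_wait(&ofld_wait, &lock);
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            printf("state = %d\n", state);
            return 0;
    }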
2723 * @ep: bnx2i endpoint
2729 int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2738 cid_num = bnx2i_get_cid_num(ep);
2740 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2741 reg_base = pci_resource_start(ep->hba->pcidev,
2744 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2748 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
2749 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
2750 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
2763 ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
2765 if (!ep->qp.ctx_base)
2769 bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
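bnx2i_map_ep_dbell_regs maps a small per-connection doorbell window: on 57710 a slice of the doorbell PCI BAR indexed by CID number, on older chips an offset inside the already-mapped register space (with a 5709 binary mail-queue special case). A sketch of the 57710-style offset only; the 8-byte stride is a placeholder for (1 << BNX2X_DB_SHIFT):

    #include <stdint.h>
    #include <stdio.h>

    #define DB_SHIFT 3              /* placeholder for BNX2X_DB_SHIFT */

    /* Each connection owns a (1 << DB_SHIFT)-byte doorbell cell inside
     * the doorbell BAR, indexed by its CID number. */
    static uint64_t dbell_offset(uint32_t cid_num)
    {
            return (uint64_t)(1 << DB_SHIFT) * cid_num;
    }

    int main(void)
    {
            uint64_t bar_base = 0xf0000000ULL;      /* stand-in BAR start */
            uint32_t cid_num  = 17;

            /* the driver then ioremaps a 4-byte window at this address */
            printf("map 0x%llx\n",
                   (unsigned long long)(bar_base + dbell_offset(cid_num)));
            return 0;
    }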