Lines matching refs:hba

19 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
21 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
23 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
25 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
26 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
29 int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
41 stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
42 stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
46 if (hba->cnic && hba->cnic->submit_kwqes)
47 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
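The pair at lines 41-42 is the file's recurring idiom: a 64-bit dma_addr_t is split into 32-bit lo/hi halves before being handed to firmware. A minimal user-space sketch of the split, with uint64_t standing in for dma_addr_t:

    /* Split a 64-bit bus address into the lo/hi words the KWQE wants. */
    #include <inttypes.h>
    #include <stdio.h>

    static void split_dma_addr(uint64_t dma, uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t)dma;         /* low 32 bits  */
        *hi = (uint32_t)(dma >> 32); /* high 32 bits */
    }

    int main(void)
    {
        uint32_t lo, hi;

        split_dma_addr(0x123456789abcdef0ULL, &lo, &hi);
        /* prints lo=0x9abcdef0 hi=0x12345678 */
        printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", lo, hi);
        return 0;
    }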
55 * @hba: adapter structure pointer
61 int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
70 if (!hba->cnic) {
71 printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
81 fcoe_init1.num_tasks = hba->max_tasks;
86 fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
87 fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
88 fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
90 (u32) ((u64) hba->task_ctx_bd_dma >> 32);
108 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
110 ((u64) hba->hash_tbl_pbl_dma >> 32);
112 fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
114 ((u64) hba->t2_hash_tbl_dma >> 32);
116 fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
118 ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
140 if (hba->cnic && hba->cnic->submit_kwqes)
141 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
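Lines 46-47 and 140-141 (and every send path below) guard the indirect call: the KWQE array is only submitted when both the cnic handle and its submit_kwqes op exist. A sketch of that guard with an illustrative stand-in struct, not the real struct cnic_dev; the -ENODEV fallback here is an assumption, the driver's actual default may differ:

    #include <errno.h>

    /* Illustrative stand-in for the cnic device and its op table. */
    struct fake_cnic {
        int (*submit_kwqes)(struct fake_cnic *dev, void *wqes[], int num);
    };

    static int send_kwqes(struct fake_cnic *cnic, void *wqes[], int num)
    {
        int rc = -ENODEV; /* assumed fallback when the op is missing */

        if (cnic && cnic->submit_kwqes)
            rc = cnic->submit_kwqes(cnic, wqes, num);
        return rc;
    }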
145 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
159 if (hba->cnic && hba->cnic->submit_kwqes)
160 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
176 struct bnx2fc_hba *hba = interface->hba;
343 if (hba->cnic && hba->cnic->submit_kwqes)
344 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
361 struct bnx2fc_hba *hba = interface->hba;
414 if (hba->cnic && hba->cnic->submit_kwqes)
415 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
430 struct bnx2fc_hba *hba = interface->hba;
478 if (hba->cnic && hba->cnic->submit_kwqes)
479 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
490 int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
508 if (hba->cnic && hba->cnic->submit_kwqes)
509 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
514 static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
518 spin_lock_bh(&hba->hba_lock);
519 list_for_each_entry(blport, &hba->vports, list) {
521 spin_unlock_bh(&hba->hba_lock);
525 spin_unlock_bh(&hba->hba_lock);
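is_valid_lport (lines 514-525) walks hba->vports under hba_lock and drops the lock as soon as a match is found. A user-space sketch of the same locked-lookup shape, with a pthread mutex and a hand-rolled singly linked list standing in for spin_lock_bh and list_for_each_entry:

    #include <pthread.h>
    #include <stdbool.h>

    struct blport { struct blport *next; void *lport; };

    static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct blport *vports; /* head of the registered-lport list */

    static bool is_valid_lport(void *lport)
    {
        struct blport *b;

        pthread_mutex_lock(&hba_lock);
        for (b = vports; b; b = b->next) {
            if (b->lport == lport) {
                pthread_mutex_unlock(&hba_lock); /* unlock on the hit... */
                return true;
            }
        }
        pthread_mutex_unlock(&hba_lock);         /* ...or after the miss */
        return false;
    }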
535 struct bnx2fc_hba *hba;
541 hba = unsol_els->hba;
542 if (is_valid_lport(hba, lport))
614 unsol_els->hba = interface->hba;
637 struct bnx2fc_hba *hba = interface->hba;
705 if (xid > hba->max_xid) {
714 hba->task_ctx[task_idx];
717 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
823 if (xid > hba->max_xid) {
842 interface->hba->task_ctx[task_idx];
844 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
878 struct bnx2fc_hba *hba = interface->hba;
888 if (xid >= hba->max_tasks) {
895 task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
902 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
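Lines 705-717 and 888-902 resolve a task xid into a page of hba->task_ctx plus a slot within that page. A sketch of the mapping; TASKS_PER_PAGE is an illustrative constant standing in for the driver's BNX2FC_TASKS_PER_PAGE:

    #include <stddef.h>
    #include <stdint.h>

    #define TASKS_PER_PAGE 64 /* illustrative; really PAGE_SIZE / entry size */

    /* task_ctx_pages[] holds one DMA-coherent page of entries per slot. */
    static void *task_entry(void **task_ctx_pages, uint16_t xid,
                            size_t entry_sz)
    {
        unsigned int page = xid / TASKS_PER_PAGE;
        unsigned int slot = xid % TASKS_PER_PAGE;
        char *base = task_ctx_pages[page];

        return base + slot * entry_sz;
    }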
1092 * @hba: adapter structure pointer
1097 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
1101 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
1114 * @hba: adapter structure pointer
1120 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1131 tgt = hba->tgt_ofld_list[conn_id];
1140 if (hba != interface->hba) {
1168 * @hba: adapter structure pointer
1174 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1184 tgt = hba->tgt_ofld_list[conn_id];
1202 if (hba != interface->hba) {
1215 static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1223 tgt = hba->tgt_ofld_list[conn_id];
1248 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1255 tgt = hba->tgt_ofld_list[conn_id];
1277 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1305 * @hba: adapter structure pointer
1314 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1323 bnx2fc_fastpath_notification(hba, kcqe);
1327 bnx2fc_process_ofld_cmpl(hba, kcqe);
1331 bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1337 bnx2fc_init_failure(hba,
1340 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1341 bnx2fc_get_link_state(hba);
1343 (u8)hba->pcidev->bus->number);
1355 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1356 wake_up_interruptible(&hba->destroy_wait);
1360 bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1364 bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1371 complete(&hba->stat_req_done);
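Around lines 1340-1371 the KCQE handler signals waiters: set_bit plus wake_up_interruptible on destroy_wait for the destroy completion, complete() on stat_req_done for statistics. A user-space sketch of that handshake, with a flag and a condition variable standing in for the kernel wait queue:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t destroy_wait = PTHREAD_COND_INITIALIZER;
    static bool destroy_cmpl; /* plays the role of BNX2FC_FLAG_DESTROY_CMPL */

    static void on_destroy_kcqe(void) /* KCQE handler side */
    {
        pthread_mutex_lock(&lock);
        destroy_cmpl = true;
        pthread_cond_signal(&destroy_wait);
        pthread_mutex_unlock(&lock);
    }

    static void wait_for_destroy(void) /* caller blocked during teardown */
    {
        pthread_mutex_lock(&lock);
        while (!destroy_cmpl)
            pthread_cond_wait(&destroy_wait, &lock);
        pthread_mutex_unlock(&lock);
    }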
1421 struct bnx2fc_hba *hba = interface->hba;
1423 reg_base = pci_resource_start(hba->pcidev,
1532 interface->hba->task_ctx[orig_task_idx];
1838 * @hba: pointer to adapter structure
1844 int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1858 hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1860 &hba->task_ctx_bd_dma,
1862 if (!hba->task_ctx_bd_tbl) {
1867 memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1873 task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1874 hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1876 if (!hba->task_ctx) {
1885 hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1887 if (!hba->task_ctx_dma) {
1893 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1896 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1898 &hba->task_ctx_dma[i],
1900 if (!hba->task_ctx[i]) {
1905 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1906 addr = (u64)hba->task_ctx_dma[i];
1915 if (hba->task_ctx[i]) {
1917 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1918 hba->task_ctx[i], hba->task_ctx_dma[i]);
1919 hba->task_ctx[i] = NULL;
1923 kfree(hba->task_ctx_dma);
1924 hba->task_ctx_dma = NULL;
1926 kfree(hba->task_ctx);
1927 hba->task_ctx = NULL;
1929 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1930 hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1931 hba->task_ctx_bd_tbl = NULL;
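bnx2fc_setup_task_ctx (lines 1844-1931) allocates one DMA page per task-context chunk and, when any allocation fails, frees everything already allocated in reverse before returning. A user-space sketch of the allocate-or-unwind loop, with calloc/free standing in for dma_alloc_coherent/dma_free_coherent:

    #include <stdlib.h>

    static void **alloc_pages_or_unwind(int n, size_t page_sz)
    {
        void **pages = calloc(n, sizeof(*pages));
        int i;

        if (!pages)
            return NULL;
        for (i = 0; i < n; i++) {
            pages[i] = calloc(1, page_sz); /* zeroed, like the memset(..., 0) */
            if (!pages[i])
                goto out_free;
        }
        return pages;

    out_free:
        while (--i >= 0) /* unwind only what was actually allocated */
            free(pages[i]);
        free(pages);
        return NULL;
    }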
1936 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1941 if (hba->task_ctx_bd_tbl) {
1942 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1943 hba->task_ctx_bd_tbl,
1944 hba->task_ctx_bd_dma);
1945 hba->task_ctx_bd_tbl = NULL;
1948 task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1949 if (hba->task_ctx) {
1951 if (hba->task_ctx[i]) {
1952 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1953 hba->task_ctx[i],
1954 hba->task_ctx_dma[i]);
1955 hba->task_ctx[i] = NULL;
1958 kfree(hba->task_ctx);
1959 hba->task_ctx = NULL;
1962 kfree(hba->task_ctx_dma);
1963 hba->task_ctx_dma = NULL;
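Both this free path (lines 1936-1963) and bnx2fc_free_fw_resc below (lines 2163 onward) release each resource only if its pointer is non-NULL and then clear the pointer, so repeated or partial teardown is harmless. The idiom in miniature, with an illustrative struct:

    #include <stdlib.h>

    struct ctx { void *buf; };

    static void teardown(struct ctx *c)
    {
        if (c->buf) {
            free(c->buf);
            c->buf = NULL; /* a second teardown() call now skips the free */
        }
    }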
1966 static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1972 if (hba->hash_tbl_segments) {
1974 pbl = hba->hash_tbl_pbl;
1976 segment_count = hba->hash_tbl_segment_count;
1984 dma_free_coherent(&hba->pcidev->dev,
1986 hba->hash_tbl_segments[i],
1991 kfree(hba->hash_tbl_segments);
1992 hba->hash_tbl_segments = NULL;
1995 if (hba->hash_tbl_pbl) {
1996 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1997 hba->hash_tbl_pbl,
1998 hba->hash_tbl_pbl_dma);
1999 hba->hash_tbl_pbl = NULL;
2003 static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2018 hba->hash_tbl_segment_count = segment_count;
2020 segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
2021 hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
2022 if (!hba->hash_tbl_segments) {
2034 hba->hash_tbl_segments[i] =
2035 dma_alloc_coherent(&hba->pcidev->dev,
2039 if (!hba->hash_tbl_segments[i]) {
2043 memset(hba->hash_tbl_segments[i], 0,
2047 hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
2049 &hba->hash_tbl_pbl_dma,
2051 if (!hba->hash_tbl_pbl) {
2055 memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
2057 pbl = hba->hash_tbl_pbl;
2065 pbl = hba->hash_tbl_pbl;
2081 if (hba->hash_tbl_segments[i])
2082 dma_free_coherent(&hba->pcidev->dev,
2084 hba->hash_tbl_segments[i],
2091 kfree(hba->hash_tbl_segments);
2092 hba->hash_tbl_segments = NULL;
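The hash-table setup (lines 2047-2065) publishes each segment's DMA address into a page-buffer list (PBL) as consecutive lo/hi words, which is how the firmware walks the segments. A sketch of that fill, again with uint64_t standing in for dma_addr_t:

    #include <stdint.h>

    static void fill_pbl(uint32_t *pbl, const uint64_t *seg_dma, int nsegs)
    {
        int i;

        for (i = 0; i < nsegs; i++) {
            *pbl++ = (uint32_t)seg_dma[i];         /* low word first */
            *pbl++ = (uint32_t)(seg_dma[i] >> 32); /* then high word */
        }
    }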
2099 * @hba: Pointer to adapter structure
2102 int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2108 if (bnx2fc_allocate_hash_table(hba))
2112 hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2113 &hba->t2_hash_tbl_ptr_dma,
2115 if (!hba->t2_hash_tbl_ptr) {
2117 bnx2fc_free_fw_resc(hba);
2120 memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
2124 hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2125 &hba->t2_hash_tbl_dma,
2127 if (!hba->t2_hash_tbl) {
2129 bnx2fc_free_fw_resc(hba);
2132 memset(hba->t2_hash_tbl, 0x00, mem_size);
2134 addr = (unsigned long) hba->t2_hash_tbl_dma +
2136 hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
2137 hba->t2_hash_tbl[i].next.hi = addr >> 32;
2140 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2141 PAGE_SIZE, &hba->dummy_buf_dma,
2143 if (!hba->dummy_buffer) {
2145 bnx2fc_free_fw_resc(hba);
2149 hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2151 &hba->stats_buf_dma,
2153 if (!hba->stats_buffer) {
2155 bnx2fc_free_fw_resc(hba);
2158 memset(hba->stats_buffer, 0x00, PAGE_SIZE);
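Lines 2134-2137 chain the t2 hash-table entries: entry i's "next" regpair holds the DMA address of entry i+1. A sketch of the chaining; the entry struct is a stand-in for the real fcoe_t2_hash_table_entry:

    #include <stdint.h>

    struct regpair { uint32_t lo; uint32_t hi; };
    struct t2_entry { struct regpair next; /* ...hash payload... */ };

    static void chain_t2(struct t2_entry *tbl, uint64_t tbl_dma, int n)
    {
        int i;

        for (i = 0; i < n - 1; i++) {
            uint64_t next = tbl_dma + (i + 1) * sizeof(*tbl);

            tbl[i].next.lo = (uint32_t)next;
            tbl[i].next.hi = (uint32_t)(next >> 32);
        }
    }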
2163 void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
2167 if (hba->stats_buffer) {
2168 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2169 hba->stats_buffer, hba->stats_buf_dma);
2170 hba->stats_buffer = NULL;
2173 if (hba->dummy_buffer) {
2174 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2175 hba->dummy_buffer, hba->dummy_buf_dma);
2176 hba->dummy_buffer = NULL;
2179 if (hba->t2_hash_tbl_ptr) {
2181 dma_free_coherent(&hba->pcidev->dev, mem_size,
2182 hba->t2_hash_tbl_ptr,
2183 hba->t2_hash_tbl_ptr_dma);
2184 hba->t2_hash_tbl_ptr = NULL;
2187 if (hba->t2_hash_tbl) {
2190 dma_free_coherent(&hba->pcidev->dev, mem_size,
2191 hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2192 hba->t2_hash_tbl = NULL;
2194 bnx2fc_free_hash_table(hba);