1 /* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
2  * This file contains the code for the low-level functions that interact
3  * with the 57712 FCoE firmware.
4  *
5  * Copyright (c) 2008 - 2010 Broadcom Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
12  */
13
14 #include "bnx2fc.h"
15
16 DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
17
18 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
19                                         struct fcoe_kcqe *new_cqe_kcqe);
20 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
21                                         struct fcoe_kcqe *ofld_kcqe);
22 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
23                                                 struct fcoe_kcqe *ofld_kcqe);
24 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
25 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
26                                         struct fcoe_kcqe *destroy_kcqe);
27
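/*
 * bnx2fc_send_stat_req - requests statistics from the FCoE firmware
 *
 * Builds a single FCOE_KWQE_OPCODE_STAT work request pointing at the
 * pre-allocated statistics buffer (hba->stats_buf_dma) and submits it
 * through the cnic interface.
 */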
28 int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
29 {
30         struct fcoe_kwqe_stat stat_req;
31         struct kwqe *kwqe_arr[2];
32         int num_kwqes = 1;
33         int rc = 0;
34
35         memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
36         stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
37         stat_req.hdr.flags =
38                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
39
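        /*
         * Illustrative note: the 64-bit statistics buffer DMA address is
         * handed to the firmware as two 32-bit halves, e.g.
         * 0x0000001234567890 is split into addr_lo = 0x34567890 and
         * addr_hi = 0x00000012.
         */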
40         stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
41         stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
42
43         kwqe_arr[0] = (struct kwqe *) &stat_req;
44
45         if (hba->cnic && hba->cnic->submit_kwqes)
46                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
47
48         return rc;
49 }
50
51 /**
52  * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
53  *
54  * @hba:        adapter structure pointer
55  *
56  * Send down the FCoE firmware init KWQEs, which initiate the initial handshake
57  *      with the f/w.
58  *
59  */
60 int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
61 {
62         struct fcoe_kwqe_init1 fcoe_init1;
63         struct fcoe_kwqe_init2 fcoe_init2;
64         struct fcoe_kwqe_init3 fcoe_init3;
65         struct kwqe *kwqe_arr[3];
66         int num_kwqes = 3;
67         int rc = 0;
68
69         if (!hba->cnic) {
70                 printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
71                 return -ENODEV;
72         }
73
74         /* fill init1 KWQE */
75         memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
76         fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
77         fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
78                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
79
80         fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
81         fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
82         fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
83         fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
84         fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
85         fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
86         fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
87         fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
88         fcoe_init1.task_list_pbl_addr_hi =
89                                 (u32) ((u64) hba->task_ctx_bd_dma >> 32);
90         fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
91
92         fcoe_init1.flags = (PAGE_SHIFT <<
93                                 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
94
95         fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
96
97         /* fill init2 KWQE */
98         memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
99         fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
100         fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
101                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
102
103         fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
104         fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
105
106
107         fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
108         fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
109                                            ((u64) hba->hash_tbl_pbl_dma >> 32);
110
111         fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
112         fcoe_init2.t2_hash_tbl_addr_hi = (u32)
113                                           ((u64) hba->t2_hash_tbl_dma >> 32);
114
115         fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
116         fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
117                                         ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
118
119         fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
120
121         /* fill init3 KWQE */
122         memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
123         fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
124         fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
125                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
126         fcoe_init3.error_bit_map_lo = 0xffffffff;
127         fcoe_init3.error_bit_map_hi = 0xffffffff;
128
129         fcoe_init3.perf_config = 1;
130
131         kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
132         kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
133         kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
134
135         if (hba->cnic && hba->cnic->submit_kwqes)
136                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
137
138         return rc;
139 }
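
/*
 * bnx2fc_send_fw_fcoe_destroy_msg - tears down the FCoE function in firmware
 *
 * Counterpart of the init handshake above: submits a single
 * FCOE_KWQE_OPCODE_DESTROY work request through the cnic interface.
 */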
140 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
141 {
142         struct fcoe_kwqe_destroy fcoe_destroy;
143         struct kwqe *kwqe_arr[2];
144         int num_kwqes = 1;
145         int rc = -1;
146
147         /* fill destroy KWQE */
148         memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
149         fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
150         fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
151                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
152         kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
153
154         if (hba->cnic && hba->cnic->submit_kwqes)
155                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
156         return rc;
157 }
158
159 /**
160  * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
161  *
162  * @port:               port structure pointer
163  * @tgt:                bnx2fc_rport structure pointer
164  */
165 int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
166                                         struct bnx2fc_rport *tgt)
167 {
168         struct fc_lport *lport = port->lport;
169         struct bnx2fc_interface *interface = port->priv;
170         struct bnx2fc_hba *hba = interface->hba;
171         struct kwqe *kwqe_arr[4];
172         struct fcoe_kwqe_conn_offload1 ofld_req1;
173         struct fcoe_kwqe_conn_offload2 ofld_req2;
174         struct fcoe_kwqe_conn_offload3 ofld_req3;
175         struct fcoe_kwqe_conn_offload4 ofld_req4;
176         struct fc_rport_priv *rdata = tgt->rdata;
177         struct fc_rport *rport = tgt->rport;
178         int num_kwqes = 4;
179         u32 port_id;
180         int rc = 0;
181         u16 conn_id;
182
183         /* Initialize offload request 1 structure */
184         memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
185
186         ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
187         ofld_req1.hdr.flags =
188                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
189
190
191         conn_id = (u16)tgt->fcoe_conn_id;
192         ofld_req1.fcoe_conn_id = conn_id;
193
194
195         ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
196         ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
197
198         ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
199         ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
200
201         ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
202         ofld_req1.rq_first_pbe_addr_hi =
203                                 (u32)((u64) tgt->rq_dma >> 32);
204
205         ofld_req1.rq_prod = 0x8000;
206
207         /* Initialize offload request 2 structure */
208         memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
209
210         ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
211         ofld_req2.hdr.flags =
212                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
213
214         ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
215
216         ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
217         ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
218
219         ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
220         ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
221
222         ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
223         ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
224
225         /* Initialize offload request 3 structure */
226         memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
227
228         ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
229         ofld_req3.hdr.flags =
230                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
231
232         ofld_req3.vlan_tag = interface->vlan_id <<
233                                 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
234         ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
235
236         port_id = fc_host_port_id(lport->host);
237         if (port_id == 0) {
238                 BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
239                 return -EINVAL;
240         }
241
242         /*
243          * Store the s_id of the initiator for later reference. It is used
244          * by the disable/destroy requests during linkdown processing,
245          * because the port_id is reset to 0 when the lport is reset.
246          */
247         tgt->sid = port_id;
248         ofld_req3.s_id[0] = (port_id & 0x000000FF);
249         ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
250         ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
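        /*
         * Illustrative example: a 24-bit FC_ID of 0x012345 is packed as
         * s_id[0] = 0x45, s_id[1] = 0x23, s_id[2] = 0x01; the d_id below
         * uses the same byte layout.
         */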
251
252         port_id = rport->port_id;
253         ofld_req3.d_id[0] = (port_id & 0x000000FF);
254         ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
255         ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
256
257         ofld_req3.tx_total_conc_seqs = rdata->max_seq;
258
259         ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
260         ofld_req3.rx_max_fc_pay_len  = lport->mfs;
261
262         ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
263         ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
264         ofld_req3.rx_open_seqs_exch_c3 = 1;
265
266         ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
267         ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
268
269         /* set mul_n_port_ids supported flag to 0, until it is supported */
270         ofld_req3.flags = 0;
271         /*
272         ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
273                             FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
274         */
275         /* Info from PLOGI response */
276         ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
277                              FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
278
279         ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
280                              FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
281
282         /*
283          * Info from PRLI response, this info is used for sequence level error
284          * recovery support
285          */
286         if (tgt->dev_type == TYPE_TAPE) {
287                 ofld_req3.flags |= 1 <<
288                                     FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
289                 ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
290                                     ? 1 : 0) <<
291                                     FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
292         }
293
294         /* vlan flag */
295         ofld_req3.flags |= (interface->vlan_enabled <<
296                             FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
297
298         /* C2_VALID and ACK flags are not set as they are not supported */
299
300
301         /* Initialize offload request 4 structure */
302         memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
303         ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
304         ofld_req4.hdr.flags =
305                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
306
307         ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
308
309
310         ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
311                                                         /* local mac */
312         ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
313         ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
314         ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
315         ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
316         ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
317         ofld_req4.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
318                                                         /* fcf mac */
319         ofld_req4.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
320         ofld_req4.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
321         ofld_req4.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
322         ofld_req4.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
323         ofld_req4.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
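        /*
         * Note: the MAC bytes are laid out in reverse order here, with
         * byte 5 of the kernel-order address in *_lo[0] and byte 0 in
         * *_hi[1], for both the local and FCF addresses.
         */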
324
325         ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
326         ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
327
328         ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
329         ofld_req4.confq_pbl_base_addr_hi =
330                                         (u32)((u64) tgt->confq_pbl_dma >> 32);
331
332         kwqe_arr[0] = (struct kwqe *) &ofld_req1;
333         kwqe_arr[1] = (struct kwqe *) &ofld_req2;
334         kwqe_arr[2] = (struct kwqe *) &ofld_req3;
335         kwqe_arr[3] = (struct kwqe *) &ofld_req4;
336
337         if (hba->cnic && hba->cnic->submit_kwqes)
338                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
339
340         return rc;
341 }
342
343 /**
344  * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
345  *
346  * @port:               port structure pointer
347  * @tgt:                bnx2fc_rport structure pointer
348  */
349 static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
350                                         struct bnx2fc_rport *tgt)
351 {
352         struct kwqe *kwqe_arr[2];
353         struct bnx2fc_interface *interface = port->priv;
354         struct bnx2fc_hba *hba = interface->hba;
355         struct fcoe_kwqe_conn_enable_disable enbl_req;
356         struct fc_lport *lport = port->lport;
357         struct fc_rport *rport = tgt->rport;
358         int num_kwqes = 1;
359         int rc = 0;
360         u32 port_id;
361
362         memset(&enbl_req, 0x00,
363                sizeof(struct fcoe_kwqe_conn_enable_disable));
364         enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
365         enbl_req.hdr.flags =
366                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
367
368         enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
369                                                         /* local mac */
370         enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
371         enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
372         enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
373         enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
374         enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
375         memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
376
377         enbl_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
378         enbl_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
379         enbl_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
380         enbl_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
381         enbl_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
382         enbl_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
383
384         port_id = fc_host_port_id(lport->host);
385         if (port_id != tgt->sid) {
386                 printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
387                                 "sid = 0x%x\n", port_id, tgt->sid);
388                 port_id = tgt->sid;
389         }
390         enbl_req.s_id[0] = (port_id & 0x000000FF);
391         enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
392         enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
393
394         port_id = rport->port_id;
395         enbl_req.d_id[0] = (port_id & 0x000000FF);
396         enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
397         enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
398         enbl_req.vlan_tag = interface->vlan_id <<
399                                 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
400         enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
401         enbl_req.vlan_flag = interface->vlan_enabled;
402         enbl_req.context_id = tgt->context_id;
403         enbl_req.conn_id = tgt->fcoe_conn_id;
404
405         kwqe_arr[0] = (struct kwqe *) &enbl_req;
406
407         if (hba->cnic && hba->cnic->submit_kwqes)
408                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
409         return rc;
410 }
411
412 /**
413  * bnx2fc_send_session_disable_req - initiates FCoE Session disable
414  *
415  * @port:               port structure pointer
416  * @tgt:                bnx2fc_rport structure pointer
417  */
418 int bnx2fc_send_session_disable_req(struct fcoe_port *port,
419                                     struct bnx2fc_rport *tgt)
420 {
421         struct bnx2fc_interface *interface = port->priv;
422         struct bnx2fc_hba *hba = interface->hba;
423         struct fcoe_kwqe_conn_enable_disable disable_req;
424         struct kwqe *kwqe_arr[2];
425         struct fc_rport *rport = tgt->rport;
426         int num_kwqes = 1;
427         int rc = 0;
428         u32 port_id;
429
430         memset(&disable_req, 0x00,
431                sizeof(struct fcoe_kwqe_conn_enable_disable));
432         disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
433         disable_req.hdr.flags =
434                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
435
436         disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
437         disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
438         disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
439         disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
440         disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
441         disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
442
443         disable_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
444         disable_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
445         disable_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
446         disable_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
447         disable_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
448         disable_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
449
450         port_id = tgt->sid;
451         disable_req.s_id[0] = (port_id & 0x000000FF);
452         disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
453         disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
454
455
456         port_id = rport->port_id;
457         disable_req.d_id[0] = (port_id & 0x000000FF);
458         disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
459         disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
460         disable_req.context_id = tgt->context_id;
461         disable_req.conn_id = tgt->fcoe_conn_id;
462         disable_req.vlan_tag = interface->vlan_id <<
463                                 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
464         disable_req.vlan_tag |=
465                         3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
466         disable_req.vlan_flag = interface->vlan_enabled;
467
468         kwqe_arr[0] = (struct kwqe *) &disable_req;
469
470         if (hba->cnic && hba->cnic->submit_kwqes)
471                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
472
473         return rc;
474 }
475
476 /**
477  * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
478  *
479  * @hba:                adapter structure pointer
480  * @tgt:                bnx2fc_rport structure pointer
481  */
482 int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
483                                         struct bnx2fc_rport *tgt)
484 {
485         struct fcoe_kwqe_conn_destroy destroy_req;
486         struct kwqe *kwqe_arr[2];
487         int num_kwqes = 1;
488         int rc = 0;
489
490         memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
491         destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
492         destroy_req.hdr.flags =
493                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
494
495         destroy_req.context_id = tgt->context_id;
496         destroy_req.conn_id = tgt->fcoe_conn_id;
497
498         kwqe_arr[0] = (struct kwqe *) &destroy_req;
499
500         if (hba->cnic && hba->cnic->submit_kwqes)
501                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
502
503         return rc;
504 }
505
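/*
 * is_valid_lport - checks whether @lport is still registered with @hba
 *
 * Walks hba->vports under hba_lock and returns true if @lport is found.
 * Used by bnx2fc_unsol_els_work() before handing a frame to fc_exch_recv().
 */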
506 static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
507 {
508         struct bnx2fc_lport *blport;
509
510         spin_lock_bh(&hba->hba_lock);
511         list_for_each_entry(blport, &hba->vports, list) {
512                 if (blport->lport == lport) {
513                         spin_unlock_bh(&hba->hba_lock);
514                         return true;
515                 }
516         }
517         spin_unlock_bh(&hba->hba_lock);
518         return false;
519
520 }
521
522
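/*
 * bnx2fc_unsol_els_work - deferred delivery of an unsolicited frame to libfc
 *
 * Runs from bnx2fc_wq; delivers the frame via fc_exch_recv() only if the
 * originating lport is still valid, then frees the work item.
 */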
523 static void bnx2fc_unsol_els_work(struct work_struct *work)
524 {
525         struct bnx2fc_unsol_els *unsol_els;
526         struct fc_lport *lport;
527         struct bnx2fc_hba *hba;
528         struct fc_frame *fp;
529
530         unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
531         lport = unsol_els->lport;
532         fp = unsol_els->fp;
533         hba = unsol_els->hba;
534         if (is_valid_lport(hba, lport))
535                 fc_exch_recv(lport, fp);
536         kfree(unsol_els);
537 }
538
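/*
 * bnx2fc_process_l2_frame_compl - hands an unsolicited FC frame to libfc
 *
 * Copies the raw frame from the RQ buffer into a freshly allocated
 * fc_frame, restores the OX_ID when one is supplied, recomputes the FC CRC
 * for ELS request/response frames and queues the frame for delivery
 * through bnx2fc_unsol_els_work().
 */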
539 void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
540                                    unsigned char *buf,
541                                    u32 frame_len, u16 l2_oxid)
542 {
543         struct fcoe_port *port = tgt->port;
544         struct fc_lport *lport = port->lport;
545         struct bnx2fc_interface *interface = port->priv;
546         struct bnx2fc_unsol_els *unsol_els;
547         struct fc_frame_header *fh;
548         struct fc_frame *fp;
549         struct sk_buff *skb;
550         u32 payload_len;
551         u32 crc;
552         u8 op;
553
554
555         unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
556         if (!unsol_els) {
557                 BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
558                 return;
559         }
560
561         BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
562                 l2_oxid, frame_len);
563
564         payload_len = frame_len - sizeof(struct fc_frame_header);
565
566         fp = fc_frame_alloc(lport, payload_len);
567         if (!fp) {
568                 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
569                 kfree(unsol_els);
570                 return;
571         }
572
573         fh = (struct fc_frame_header *) fc_frame_header_get(fp);
574         /* Copy FC Frame header and payload into the frame */
575         memcpy(fh, buf, frame_len);
576
577         if (l2_oxid != FC_XID_UNKNOWN)
578                 fh->fh_ox_id = htons(l2_oxid);
579
580         skb = fp_skb(fp);
581
582         if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
583             (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
584
585                 if (fh->fh_type == FC_TYPE_ELS) {
586                         op = fc_frame_payload_op(fp);
587                         if ((op == ELS_TEST) || (op == ELS_ESTC) ||
588                             (op == ELS_FAN) || (op == ELS_CSU)) {
589                                 /*
590                                  * No need to reply for these
591                                  * ELS requests
592                                  */
593                                 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
594                                 kfree_skb(skb);
595                                 kfree(unsol_els);
596                                 return;
597                         }
598                 }
599                 crc = fcoe_fc_crc(fp);
600                 fc_frame_init(fp);
601                 fr_dev(fp) = lport;
602                 fr_sof(fp) = FC_SOF_I3;
603                 fr_eof(fp) = FC_EOF_T;
604                 fr_crc(fp) = cpu_to_le32(~crc);
605                 unsol_els->lport = lport;
606                 unsol_els->hba = interface->hba;
607                 unsol_els->fp = fp;
608                 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
609                 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
610         } else {
611                 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
612                 kfree_skb(skb);
613                 kfree(unsol_els);
614         }
615 }
616
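/*
 * bnx2fc_process_unsol_compl - dispatches unsolicited CQE subtypes
 *
 * Unsolicited frames are gathered from one or more RQ entries and passed
 * to bnx2fc_process_l2_frame_compl(); error and warning detection CQEs
 * consume a single RQ entry, and an error on an outstanding SCSI command
 * triggers ABTS (or cleanup if ABTS cannot be issued).
 */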
617 static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
618 {
619         u8 num_rq;
620         struct fcoe_err_report_entry *err_entry;
621         unsigned char *rq_data;
622         unsigned char *buf = NULL, *buf1;
623         int i;
624         u16 xid;
625         u32 frame_len, len;
626         struct bnx2fc_cmd *io_req = NULL;
627         struct fcoe_task_ctx_entry *task, *task_page;
628         struct bnx2fc_interface *interface = tgt->port->priv;
629         struct bnx2fc_hba *hba = interface->hba;
630         int task_idx, index;
631         int rc = 0;
632
633
634         BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
635         switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
636         case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
637                 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
638                              FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
639
640                 num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
641
642                 spin_lock_bh(&tgt->tgt_lock);
643                 rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
644                 spin_unlock_bh(&tgt->tgt_lock);
645
646                 if (rq_data) {
647                         buf = rq_data;
648                 } else {
649                         buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
650                                               GFP_ATOMIC);
651
652                         if (!buf1) {
653                                 BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
654                                 break;
655                         }
656
657                         for (i = 0; i < num_rq; i++) {
658                                 spin_lock_bh(&tgt->tgt_lock);
659                                 rq_data = (unsigned char *)
660                                            bnx2fc_get_next_rqe(tgt, 1);
661                                 spin_unlock_bh(&tgt->tgt_lock);
662                                 len = BNX2FC_RQ_BUF_SZ;
663                                 memcpy(buf1, rq_data, len);
664                                 buf1 += len;
665                         }
666                 }
667                 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
668                                               FC_XID_UNKNOWN);
669
670                 if (buf != rq_data)
671                         kfree(buf);
672                 spin_lock_bh(&tgt->tgt_lock);
673                 bnx2fc_return_rqe(tgt, num_rq);
674                 spin_unlock_bh(&tgt->tgt_lock);
675                 break;
676
677         case FCOE_ERROR_DETECTION_CQE_TYPE:
678                 /*
679                  * In case of an error reporting CQE, a single RQ entry
680                  * is consumed.
681                  */
682                 spin_lock_bh(&tgt->tgt_lock);
683                 num_rq = 1;
684                 err_entry = (struct fcoe_err_report_entry *)
685                              bnx2fc_get_next_rqe(tgt, 1);
686                 xid = err_entry->fc_hdr.ox_id;
687                 BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
688                 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
689                         err_entry->data.err_warn_bitmap_hi,
690                         err_entry->data.err_warn_bitmap_lo);
691                 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
692                         err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
693
694                 bnx2fc_return_rqe(tgt, 1);
695
696                 if (xid > BNX2FC_MAX_XID) {
697                         BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
698                                    xid);
699                         spin_unlock_bh(&tgt->tgt_lock);
700                         break;
701                 }
702
703                 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
704                 index = xid % BNX2FC_TASKS_PER_PAGE;
705                 task_page = (struct fcoe_task_ctx_entry *)
706                                         hba->task_ctx[task_idx];
707                 task = &(task_page[index]);
708
709                 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
710                 if (!io_req) {
711                         spin_unlock_bh(&tgt->tgt_lock);
712                         break;
713                 }
714
715                 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
716                         printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
717                         spin_unlock_bh(&tgt->tgt_lock);
718                         break;
719                 }
720
721                 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
722                                        &io_req->req_flags)) {
723                         BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
724                                             "progress.. ignore unsol err\n");
725                         spin_unlock_bh(&tgt->tgt_lock);
726                         break;
727                 }
728
729                 /*
730                  * If ABTS is already in progress, and FW error is
731                  * received after that, do not cancel the timeout_work
732                  * and let the error recovery continue by explicitly
733                  * logging out the target, when the ABTS eventually
734                  * times out.
735                  */
736                 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
737                                       &io_req->req_flags)) {
738                         /*
739                          * Cancel the timeout_work, as we received IO
740                          * completion with FW error.
741                          */
742                         if (cancel_delayed_work(&io_req->timeout_work))
743                                 kref_put(&io_req->refcount,
744                                          bnx2fc_cmd_release); /* timer hold */
745
746                         rc = bnx2fc_initiate_abts(io_req);
747                         if (rc != SUCCESS) {
748                                 BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
749                                         "failed. issue cleanup\n");
750                                 rc = bnx2fc_initiate_cleanup(io_req);
751                                 BUG_ON(rc);
752                         }
753                 } else
754                         printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
755                                             "in ABTS processing\n", xid);
756                 spin_unlock_bh(&tgt->tgt_lock);
757                 break;
758
759         case FCOE_WARNING_DETECTION_CQE_TYPE:
760                 /*
761                  * In case of a warning reporting CQE, a single RQ entry
762                  * is consumed.
763                  */
764                 spin_lock_bh(&tgt->tgt_lock);
765                 num_rq = 1;
766                 err_entry = (struct fcoe_err_report_entry *)
767                              bnx2fc_get_next_rqe(tgt, 1);
768                 xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
769                 BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
770                 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
771                         err_entry->data.err_warn_bitmap_hi,
772                         err_entry->data.err_warn_bitmap_lo);
773                 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
774                         err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
775
776                 bnx2fc_return_rqe(tgt, 1);
777                 spin_unlock_bh(&tgt->tgt_lock);
778                 break;
779
780         default:
781                 printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
782                 break;
783         }
784 }
785
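/*
 * bnx2fc_process_cq_compl - completes the task referenced by a CQE
 *
 * Looks up the task context and io_req from the XID carried in the work
 * queue entry and dispatches to the SCSI, task-management, ABTS, ELS,
 * cleanup or sequence-cleanup completion handler according to the command
 * type and rx_state.
 */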
786 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
787 {
788         struct fcoe_task_ctx_entry *task;
789         struct fcoe_task_ctx_entry *task_page;
790         struct fcoe_port *port = tgt->port;
791         struct bnx2fc_interface *interface = port->priv;
792         struct bnx2fc_hba *hba = interface->hba;
793         struct bnx2fc_cmd *io_req;
794         int task_idx, index;
795         u16 xid;
796         u8  cmd_type;
797         u8 rx_state = 0;
798         u8 num_rq;
799
800         spin_lock_bh(&tgt->tgt_lock);
801         xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
802         if (xid >= BNX2FC_MAX_TASKS) {
803                 printk(KERN_ERR PFX "ERROR:xid out of range\n");
804                 spin_unlock_bh(&tgt->tgt_lock);
805                 return;
806         }
807         task_idx = xid / BNX2FC_TASKS_PER_PAGE;
808         index = xid % BNX2FC_TASKS_PER_PAGE;
809         task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
810         task = &(task_page[index]);
811
812         num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
813                    FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
814                    FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
815
816         io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
817
818         if (io_req == NULL) {
819                 printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
820                 spin_unlock_bh(&tgt->tgt_lock);
821                 return;
822         }
823
824         /* Timestamp IO completion time */
825         cmd_type = io_req->cmd_type;
826
827         rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
828                     FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
829                     FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
830
831         /* Process other IO completion types */
832         switch (cmd_type) {
833         case BNX2FC_SCSI_CMD:
834                 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
835                         bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
836                         spin_unlock_bh(&tgt->tgt_lock);
837                         return;
838                 }
839
840                 if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
841                         bnx2fc_process_abts_compl(io_req, task, num_rq);
842                 else if (rx_state ==
843                          FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
844                         bnx2fc_process_cleanup_compl(io_req, task, num_rq);
845                 else
846                         printk(KERN_ERR PFX "Invalid rx state - %d\n",
847                                 rx_state);
848                 break;
849
850         case BNX2FC_TASK_MGMT_CMD:
851                 BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
852                 bnx2fc_process_tm_compl(io_req, task, num_rq);
853                 break;
854
855         case BNX2FC_ABTS:
856                 /*
857                  * ABTS request received by firmware. ABTS response
858                  * will be delivered to the task belonging to the IO
859                  * that was aborted
860                  */
861                 BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
862                 kref_put(&io_req->refcount, bnx2fc_cmd_release);
863                 break;
864
865         case BNX2FC_ELS:
866                 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
867                         bnx2fc_process_els_compl(io_req, task, num_rq);
868                 else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
869                         bnx2fc_process_abts_compl(io_req, task, num_rq);
870                 else if (rx_state ==
871                          FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
872                         bnx2fc_process_cleanup_compl(io_req, task, num_rq);
873                 else
874                         printk(KERN_ERR PFX "Invalid rx state =  %d\n",
875                                 rx_state);
876                 break;
877
878         case BNX2FC_CLEANUP:
879                 BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
880                 kref_put(&io_req->refcount, bnx2fc_cmd_release);
881                 break;
882
883         case BNX2FC_SEQ_CLEANUP:
884                 BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
885                               io_req->xid);
886                 bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
887                 kref_put(&io_req->refcount, bnx2fc_cmd_release);
888                 break;
889
890         default:
891                 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
892                 break;
893         }
894         spin_unlock_bh(&tgt->tgt_lock);
895 }
896
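/*
 * bnx2fc_arm_cq - re-arms the connection's completion queue
 *
 * Publishes the current CQ consumer index and toggle bit through the
 * rx doorbell mapped at tgt->ctx_base.
 */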
897 void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
898 {
899         struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
900         u32 msg;
901
902         wmb();
903         rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
904                         FCOE_CQE_TOGGLE_BIT_SHIFT);
905         msg = *((u32 *)rx_db);
906         writel(cpu_to_le32(msg), tgt->ctx_base);
907         mmiowb();
908
909 }
910
911 struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
912 {
913         struct bnx2fc_work *work;
914         work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
915         if (!work)
916                 return NULL;
917
918         INIT_LIST_HEAD(&work->list);
919         work->tgt = tgt;
920         work->wqe = wqe;
921         return work;
922 }
923
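/*
 * bnx2fc_process_new_cqes - drains newly arrived entries from the CQ
 *
 * Walks the CQ while the toggle bit matches, handing unsolicited CQEs to
 * bnx2fc_process_unsol_compl() and queueing task completions to the
 * per-CPU I/O threads (falling back to inline processing when no thread
 * is available), then re-arms the CQ and credits the freed SQEs.
 */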
924 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
925 {
926         struct fcoe_cqe *cq;
927         u32 cq_cons;
928         struct fcoe_cqe *cqe;
929         u32 num_free_sqes = 0;
930         u16 wqe;
931
932         /*
933          * cq_lock is a low contention lock used to protect
934          * the CQ data structure from being freed up during
935          * the upload operation
936          */
937         spin_lock_bh(&tgt->cq_lock);
938
939         if (!tgt->cq) {
940                 printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
941                 spin_unlock_bh(&tgt->cq_lock);
942                 return 0;
943         }
944         cq = tgt->cq;
945         cq_cons = tgt->cq_cons_idx;
946         cqe = &cq[cq_cons];
947
948         while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
949                (tgt->cq_curr_toggle_bit <<
950                FCOE_CQE_TOGGLE_BIT_SHIFT)) {
951
952                 /* new entry on the cq */
953                 if (wqe & FCOE_CQE_CQE_TYPE) {
954                         /* Unsolicited event notification */
955                         bnx2fc_process_unsol_compl(tgt, wqe);
956                 } else {
957                         /* Pending work request completion */
958                         struct bnx2fc_work *work = NULL;
959                         struct bnx2fc_percpu_s *fps = NULL;
960                         unsigned int cpu = wqe % num_possible_cpus();
961
962                         fps = &per_cpu(bnx2fc_percpu, cpu);
963                         spin_lock_bh(&fps->fp_work_lock);
964                         if (unlikely(!fps->iothread))
965                                 goto unlock;
966
967                         work = bnx2fc_alloc_work(tgt, wqe);
968                         if (work)
969                                 list_add_tail(&work->list,
970                                               &fps->work_list);
971 unlock:
972                         spin_unlock_bh(&fps->fp_work_lock);
973
974                         /* Pending work request completion */
975                         if (fps->iothread && work)
976                                 wake_up_process(fps->iothread);
977                         else
978                                 bnx2fc_process_cq_compl(tgt, wqe);
979                 }
980                 cqe++;
981                 tgt->cq_cons_idx++;
982                 num_free_sqes++;
983
984                 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
985                         tgt->cq_cons_idx = 0;
986                         cqe = cq;
987                         tgt->cq_curr_toggle_bit =
988                                 1 - tgt->cq_curr_toggle_bit;
989                 }
990         }
991         bnx2fc_arm_cq(tgt);
992         atomic_add(num_free_sqes, &tgt->free_sqes);
993         spin_unlock_bh(&tgt->cq_lock);
994         return 0;
995 }
996
997 /**
998  * bnx2fc_fastpath_notification - process global event queue (KCQ)
999  *
1000  * @hba:                adapter structure pointer
1001  * @new_cqe_kcqe:       pointer to newly DMA'd KCQ entry
1002  *
1003  * Fast path event notification handler
1004  */
1005 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
1006                                         struct fcoe_kcqe *new_cqe_kcqe)
1007 {
1008         u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
1009         struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
1010
1011         if (!tgt) {
1012                 printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
1013                 return;
1014         }
1015
1016         bnx2fc_process_new_cqes(tgt);
1017 }
1018
1019 /**
1020  * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
1021  *
1022  * @hba:        adapter structure pointer
1023  * @ofld_kcqe:  connection offload kcqe pointer
1024  *
1025  * handle session offload completion, enable the session if offload is
1026  * successful.
1027  */
1028 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1029                                         struct fcoe_kcqe *ofld_kcqe)
1030 {
1031         struct bnx2fc_rport             *tgt;
1032         struct fcoe_port                *port;
1033         struct bnx2fc_interface         *interface;
1034         u32                             conn_id;
1035         u32                             context_id;
1036         int                             rc;
1037
1038         conn_id = ofld_kcqe->fcoe_conn_id;
1039         context_id = ofld_kcqe->fcoe_conn_context_id;
1040         tgt = hba->tgt_ofld_list[conn_id];
1041         if (!tgt) {
1042                 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
1043                 return;
1044         }
1045         BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1046                 ofld_kcqe->fcoe_conn_context_id);
1047         port = tgt->port;
1048         interface = tgt->port->priv;
1049         if (hba != interface->hba) {
1050                 printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1051                 goto ofld_cmpl_err;
1052         }
1053         /*
1054          * cnic has allocated a context_id for this session; use this
1055          * while enabling the session.
1056          */
1057         tgt->context_id = context_id;
1058         if (ofld_kcqe->completion_status) {
1059                 if (ofld_kcqe->completion_status ==
1060                                 FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
1061                         printk(KERN_ERR PFX "unable to allocate FCoE context "
1062                                 "resources\n");
1063                         set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1064                 }
1065                 goto ofld_cmpl_err;
1066         } else {
1067
1068                 /* now enable the session */
1069                 rc = bnx2fc_send_session_enable_req(port, tgt);
1070                 if (rc) {
1071                         printk(KERN_ERR PFX "enable session failed\n");
1072                         goto ofld_cmpl_err;
1073                 }
1074         }
1075         return;
1076 ofld_cmpl_err:
1077         set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1078         wake_up_interruptible(&tgt->ofld_wait);
1079 }
1080
1081 /**
1082  * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1083  *
1084  * @hba:        adapter structure pointer
1085  * @ofld_kcqe:  connection offload kcqe pointer
1086  *
1087  * handle session enable completion, mark the rport as ready
1088  */
1089
1090 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1091                                                 struct fcoe_kcqe *ofld_kcqe)
1092 {
1093         struct bnx2fc_rport             *tgt;
1094         struct bnx2fc_interface         *interface;
1095         u32                             conn_id;
1096         u32                             context_id;
1097
1098         context_id = ofld_kcqe->fcoe_conn_context_id;
1099         conn_id = ofld_kcqe->fcoe_conn_id;
1100         tgt = hba->tgt_ofld_list[conn_id];
1101         if (!tgt) {
1102                 printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1103                 return;
1104         }
1105
1106         BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1107                 ofld_kcqe->fcoe_conn_context_id);
1108
1109         /*
1110          * context_id should be the same for this target during offload
1111          * and enable
1112          */
1113         if (tgt->context_id != context_id) {
1114                 printk(KERN_ERR PFX "context id mis-match\n");
1115                 return;
1116         }
1117         interface = tgt->port->priv;
1118         if (hba != interface->hba) {
1119                 printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1120                 goto enbl_cmpl_err;
1121         }
1122         if (ofld_kcqe->completion_status)
1123                 goto enbl_cmpl_err;
1124         else {
1125                 /* enable successful - rport ready for issuing IOs */
1126                 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1127                 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1128                 wake_up_interruptible(&tgt->ofld_wait);
1129         }
1130         return;
1131
1132 enbl_cmpl_err:
1133         set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1134         wake_up_interruptible(&tgt->ofld_wait);
1135 }
1136
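/*
 * bnx2fc_process_conn_disable_cmpl - handles session disable completion
 *
 * On success, clears the OFFLOADED flag, marks the target DISABLED and
 * wakes up the waiter blocked on upld_wait.
 */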
1137 static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1138                                         struct fcoe_kcqe *disable_kcqe)
1139 {
1140
1141         struct bnx2fc_rport             *tgt;
1142         u32                             conn_id;
1143
1144         conn_id = disable_kcqe->fcoe_conn_id;
1145         tgt = hba->tgt_ofld_list[conn_id];
1146         if (!tgt) {
1147                 printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1148                 return;
1149         }
1150
1151         BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1152
1153         if (disable_kcqe->completion_status) {
1154                 printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1155                         disable_kcqe->completion_status);
1156                 return;
1157         } else {
1158                 /* disable successful */
1159                 BNX2FC_TGT_DBG(tgt, "disable successful\n");
1160                 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1161                 set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1162                 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1163                 wake_up_interruptible(&tgt->upld_wait);
1164         }
1165 }
1166
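/*
 * bnx2fc_process_conn_destroy_cmpl - handles session destroy completion
 *
 * On success, marks the target DESTROYED and wakes up the waiter blocked
 * on upld_wait so that connection teardown can finish.
 */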
1167 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1168                                         struct fcoe_kcqe *destroy_kcqe)
1169 {
1170         struct bnx2fc_rport             *tgt;
1171         u32                             conn_id;
1172
1173         conn_id = destroy_kcqe->fcoe_conn_id;
1174         tgt = hba->tgt_ofld_list[conn_id];
1175         if (!tgt) {
1176                 printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1177                 return;
1178         }
1179
1180         BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1181
1182         if (destroy_kcqe->completion_status) {
1183                 printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1184                         destroy_kcqe->completion_status);
1185                 return;
1186         } else {
1187                 /* destroy successful */
1188                 BNX2FC_TGT_DBG(tgt, "upload successful\n");
1189                 clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1190                 set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1191                 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1192                 wake_up_interruptible(&tgt->upld_wait);
1193         }
1194 }
1195
1196 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1197 {
1198         switch (err_code) {
1199         case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1200                 printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1201                 break;
1202
1203         case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1204                 printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1205                 break;
1206
1207         case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1208                 printk(KERN_ERR PFX "init_failure due to NIC error\n");
1209                 break;
1210         case FCOE_KCQE_COMPLETION_STATUS_ERROR:
1211                 printk(KERN_ERR PFX "init failure due to compl status err\n");
1212                 break;
1213         case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1214                 printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1215                 break;
1216         default:
1217                 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1218         }
1219 }
1220
1221 /**
1222  * bnx2fc_indicate_kcqe - process KCQE
1223  *
1224  * @context:    adapter structure pointer
1225  * @kcq:        kcqe pointer array
1226  * @num_cqe:    Number of completion queue elements
1227  *
1228  * Generic KCQ event handler
1229  */
1230 void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1231                                         u32 num_cqe)
1232 {
1233         struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1234         int i = 0;
1235         struct fcoe_kcqe *kcqe = NULL;
1236
1237         while (i < num_cqe) {
1238                 kcqe = (struct fcoe_kcqe *) kcq[i++];
1239
1240                 switch (kcqe->op_code) {
1241                 case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1242                         bnx2fc_fastpath_notification(hba, kcqe);
1243                         break;
1244
1245                 case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1246                         bnx2fc_process_ofld_cmpl(hba, kcqe);
1247                         break;
1248
1249                 case FCOE_KCQE_OPCODE_ENABLE_CONN:
1250                         bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1251                         break;
1252
1253                 case FCOE_KCQE_OPCODE_INIT_FUNC:
1254                         if (kcqe->completion_status !=
1255                                         FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1256                                 bnx2fc_init_failure(hba,
1257                                                 kcqe->completion_status);
1258                         } else {
1259                                 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1260                                 bnx2fc_get_link_state(hba);
1261                                 printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1262                                         (u8)hba->pcidev->bus->number);
1263                         }
1264                         break;
1265
1266                 case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1267                         if (kcqe->completion_status !=
1268                                         FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1269
1270                                 printk(KERN_ERR PFX "DESTROY failed\n");
1271                         } else {
1272                                 printk(KERN_ERR PFX "DESTROY success\n");
1273                         }
1274                         set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1275                         wake_up_interruptible(&hba->destroy_wait);
1276                         break;
1277
1278                 case FCOE_KCQE_OPCODE_DISABLE_CONN:
1279                         bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1280                         break;
1281
1282                 case FCOE_KCQE_OPCODE_DESTROY_CONN:
1283                         bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1284                         break;
1285
1286                 case FCOE_KCQE_OPCODE_STAT_FUNC:
1287                         if (kcqe->completion_status !=
1288                             FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1289                                 printk(KERN_ERR PFX "STAT failed\n");
1290                         complete(&hba->stat_req_done);
1291                         break;
1292
1293                 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1294                         /* fall thru */
1295                 default:
1296                         printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1297                                                                 kcqe->op_code);
1298                 }
1299         }
1300 }
1301
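/*
 * bnx2fc_add_2_sq - posts a work request for @xid on the connection's SQ
 *
 * Writes the XID and current toggle bit into the next SQE and advances
 * the producer index, flipping the toggle bit when the ring wraps.
 */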
1302 void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1303 {
1304         struct fcoe_sqe *sqe;
1305
1306         sqe = &tgt->sq[tgt->sq_prod_idx];
1307
1308         /* Fill SQ WQE */
1309         sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1310         sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1311
1312         /* Advance SQ Prod Idx */
1313         if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1314                 tgt->sq_prod_idx = 0;
1315                 tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1316         }
1317 }
1318
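/*
 * bnx2fc_ring_doorbell - notifies the chip of newly posted SQ entries
 *
 * Publishes the SQ producer index (with the toggle bit in bit 15) through
 * the connection's mapped doorbell page.
 */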
1319 void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1320 {
1321         struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1322         u32 msg;
1323
1324         wmb();
1325         sq_db->prod = tgt->sq_prod_idx |
1326                                 (tgt->sq_curr_toggle_bit << 15);
1327         msg = *((u32 *)sq_db);
1328         writel(cpu_to_le32(msg), tgt->ctx_base);
1329         mmiowb();
1330
1331 }
1332
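/*
 * bnx2fc_map_doorbell - ioremaps the per-connection doorbell register
 *
 * The doorbell lives in the 577xx doorbell BAR at an offset derived from
 * the connection's context_id.
 */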
1333 int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1334 {
1335         u32 context_id = tgt->context_id;
1336         struct fcoe_port *port = tgt->port;
1337         u32 reg_off;
1338         resource_size_t reg_base;
1339         struct bnx2fc_interface *interface = port->priv;
1340         struct bnx2fc_hba *hba = interface->hba;
1341
1342         reg_base = pci_resource_start(hba->pcidev,
1343                                         BNX2X_DOORBELL_PCI_BAR);
1344         reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
1345                         (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
1346         tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1347         if (!tgt->ctx_base)
1348                 return -ENOMEM;
1349         return 0;
1350 }
1351
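/**
 * bnx2fc_get_next_rqe - get a pointer to the next RQ entries
 *
 * @tgt:        bnx2fc_rport structure pointer
 * @num_items:  number of RQ entries to consume
 *
 * Returns the buffer at the current RQ consumer index and advances the
 * index by @num_items (wrapping at the end of the ring), or NULL if the
 * request would cross the end of the ring.
 */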
1352 char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1353 {
1354         char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1355
1356         if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1357                 return NULL;
1358
1359         tgt->rq_cons_idx += num_items;
1360
1361         if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1362                 tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1363
1364         return buf;
1365 }
1366
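/**
 * bnx2fc_return_rqe - return consumed RQ entries to the firmware
 *
 * @tgt:        bnx2fc_rport structure pointer
 * @num_items:  number of RQ entries being returned
 *
 * Advances the RQ producer index, handling ring wrap-around, and updates
 * the connection DB copy so the firmware can reuse the buffers.
 */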
1367 void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1368 {
1369         /* return the rq buffer */
1370         u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1371         if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1372                 /* Wrap around RQ */
1373                 next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1374         }
1375         tgt->rq_prod_idx = next_prod_idx;
1376         tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1377 }
1378
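/**
 * bnx2fc_init_seq_cleanup_task - initialize a sequence cleanup task context
 *
 * @seq_clnp_req:       sequence cleanup request
 * @task:               task context entry to initialize
 * @orig_io_req:        original I/O whose sequence is being cleaned up
 * @offset:             relative offset into the original transfer
 *
 * Walks the original I/O's BD table to find the entry containing @offset
 * and programs the cleanup task's SGL (and, for reads, the expected
 * receive offsets) so the exchange can be resumed from that point.
 */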
1379 void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1380                                   struct fcoe_task_ctx_entry *task,
1381                                   struct bnx2fc_cmd *orig_io_req,
1382                                   u32 offset)
1383 {
1384         struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1385         struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1386         struct bnx2fc_interface *interface = tgt->port->priv;
1387         struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1388         struct fcoe_task_ctx_entry *orig_task;
1389         struct fcoe_task_ctx_entry *task_page;
1390         struct fcoe_ext_mul_sges_ctx *sgl;
1391         u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1392         u8 orig_task_type;
1393         u16 orig_xid = orig_io_req->xid;
1394         u32 context_id = tgt->context_id;
1395         u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1396         u32 orig_offset = offset;
1397         int bd_count;
1398         int orig_task_idx, index;
1399         int i;
1400
1401         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1402
1403         if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1404                 orig_task_type = FCOE_TASK_TYPE_WRITE;
1405         else
1406                 orig_task_type = FCOE_TASK_TYPE_READ;
1407
1408         /* Tx flags */
1409         task->txwr_rxrd.const_ctx.tx_flags =
1410                                 FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1411                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1412         /* init flags */
1413         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1414                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1415         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1416                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1417         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1418                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1421
1422         task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1423
1424         task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1425         task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1426
1427         bd_count = orig_io_req->bd_tbl->bd_valid;
1428
1429         /* obtain the appropriate bd entry from relative offset */
1430         for (i = 0; i < bd_count; i++) {
1431                 if (offset < bd[i].buf_len)
1432                         break;
1433                 offset -= bd[i].buf_len;
1434         }
1435         phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1436
1437         if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1438                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1439                                 (u32)phys_addr;
1440                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1441                                 (u32)((u64)phys_addr >> 32);
1442                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1443                                 bd_count;
1444                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1445                                 offset; /* adjusted offset */
1446                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1447         } else {
1448                 orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1449                 index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1450
1451                 task_page = (struct fcoe_task_ctx_entry *)
1452                              interface->hba->task_ctx[orig_task_idx];
1453                 orig_task = &(task_page[index]);
1454
1455                 /* Multiple SGEs were used for this IO */
1456                 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1457                 sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1458                 sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1459                 sgl->mul_sgl.sgl_size = bd_count;
1460                 sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
1461                 sgl->mul_sgl.cur_sge_idx = i;
1462
1463                 memset(&task->rxwr_only.rx_seq_ctx, 0,
1464                        sizeof(struct fcoe_rx_seq_ctx));
1465                 task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1466                 task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1467         }
1468 }
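
/**
 * bnx2fc_init_cleanup_task - initialize an exchange cleanup task context
 *
 * @io_req:     cleanup request
 * @task:       task context entry to initialize
 * @orig_xid:   xid of the task being cleaned up
 */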
1469 void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1470                               struct fcoe_task_ctx_entry *task,
1471                               u16 orig_xid)
1472 {
1473         u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1474         struct bnx2fc_rport *tgt = io_req->tgt;
1475         u32 context_id = tgt->context_id;
1476
1477         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1478
1479         /* Tx Write Rx Read */
1480         /* init flags */
1481         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1482                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1483         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1484                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1485         task->txwr_rxrd.const_ctx.init_flags |=
1486                                 FCOE_TASK_DEV_TYPE_DISK <<
1487                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1488         task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1489
1490         /* Tx flags */
1491         task->txwr_rxrd.const_ctx.tx_flags =
1492                                 FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1493                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1494
1495         /* Rx Write Tx Read */
1496         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1497                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1498         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1499                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1500 }
1501
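/**
 * bnx2fc_init_mp_task - initialize a middle path task context
 *
 * @io_req:     middle path request (task management, ELS or ABTS)
 * @task:       task context entry to initialize
 *
 * Sets up the request/response SGLs from the mp_req buffers and copies
 * the FC header into the tx frame area of the task context.
 */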
1502 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1503                                 struct fcoe_task_ctx_entry *task)
1504 {
1505         struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1506         struct bnx2fc_rport *tgt = io_req->tgt;
1507         struct fc_frame_header *fc_hdr;
1508         struct fcoe_ext_mul_sges_ctx *sgl;
1509         u8 task_type = 0;
1510         u64 *hdr;
1511         u64 temp_hdr[3];
1512         u32 context_id;
1513
1514
1515         /* Obtain task_type */
1516         if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1517             (io_req->cmd_type == BNX2FC_ELS)) {
1518                 task_type = FCOE_TASK_TYPE_MIDPATH;
1519         } else if (io_req->cmd_type == BNX2FC_ABTS) {
1520                 task_type = FCOE_TASK_TYPE_ABTS;
1521         }
1522
1523         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1524
1525         /* Setup the task from io_req for easy reference */
1526         io_req->task = task;
1527
1528         BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1529                 io_req->cmd_type, task_type);
1530
1531         /* Tx only */
1532         if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1533             (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1534                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1535                                 (u32)mp_req->mp_req_bd_dma;
1536                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1537                                 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1538                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1539         }
1540
1541         /* Tx Write Rx Read */
1542         /* init flags */
1543         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1544                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1545         task->txwr_rxrd.const_ctx.init_flags |=
1546                                 FCOE_TASK_DEV_TYPE_DISK <<
1547                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1548         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1549                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1550
1551         /* tx flags */
1552         task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1553                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1554
1555         /* Rx Write Tx Read */
1556         task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1557
1558         /* rx flags */
1559         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1560                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1561
1562         context_id = tgt->context_id;
1563         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1564                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1565
1566         fc_hdr = &(mp_req->req_fc_hdr);
1567         if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1568                 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1569                 fc_hdr->fh_rx_id = cpu_to_be16(0xffff);
1570                 task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1571         } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1572                 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1573         }
1574
1575         /* Fill FC Header into middle path buffer */
1576         hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1577         memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1578         hdr[0] = cpu_to_be64(temp_hdr[0]);
1579         hdr[1] = cpu_to_be64(temp_hdr[1]);
1580         hdr[2] = cpu_to_be64(temp_hdr[2]);
1581
1582         /* Rx Only */
1583         if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1584                 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1585
1586                 sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1587                 sgl->mul_sgl.cur_sge_addr.hi =
1588                                 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1589                 sgl->mul_sgl.sgl_size = 1;
1590         }
1591 }
1592
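/**
 * bnx2fc_init_task - initialize a SCSI read/write task context
 *
 * @io_req:     SCSI command request
 * @task:       task context entry to initialize
 *
 * Builds the FCP_CMND IU and programs the tx/rx SGLs; for reads with
 * one or two BDs the cached SGE is used instead of the full SGL.
 */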
1593 void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1594                              struct fcoe_task_ctx_entry *task)
1595 {
1596         u8 task_type;
1597         struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1598         struct io_bdt *bd_tbl = io_req->bd_tbl;
1599         struct bnx2fc_rport *tgt = io_req->tgt;
1600         struct fcoe_cached_sge_ctx *cached_sge;
1601         struct fcoe_ext_mul_sges_ctx *sgl;
1602         u64 *fcp_cmnd;
1603         u64 tmp_fcp_cmnd[4];
1604         u32 context_id;
1605         int cnt, i;
1606         int bd_count;
1607
1608         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1609
1610         /* Setup the task from io_req for easy reference */
1611         io_req->task = task;
1612
1613         if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1614                 task_type = FCOE_TASK_TYPE_WRITE;
1615         else
1616                 task_type = FCOE_TASK_TYPE_READ;
1617
1618         /* Tx only */
1619         if (task_type == FCOE_TASK_TYPE_WRITE) {
1620                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1621                                 (u32)bd_tbl->bd_tbl_dma;
1622                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1623                                 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1624                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1625                                 bd_tbl->bd_valid;
1626         }
1627
1628         /*Tx Write Rx Read */
1629         /* Init state to NORMAL */
1630         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1631                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1632         task->txwr_rxrd.const_ctx.init_flags |=
1633                                 FCOE_TASK_DEV_TYPE_DISK <<
1634                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1635         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1636                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1637         /* tx flags */
1638         task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1639                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1640
1641         /* Set initial seq counter */
1642         task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1643
1644         /* Fill FCP_CMND IU */
1645         fcp_cmnd = (u64 *)
1646                     task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1647         bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1648
1649         /* swap fcp_cmnd */
1650         cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1651
1652         for (i = 0; i < cnt; i++) {
1653                 *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1654                 fcp_cmnd++;
1655         }
1656
1657         /* Rx Write Tx Read */
1658         task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1659
1660         context_id = tgt->context_id;
1661         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1662                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1663
1664         /* rx flags */
1665         /* Set state to "waiting for the first packet" */
1666         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1667                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1668
1669         task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1670
1671         /* Rx Only */
1672         cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1673         sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1674         bd_count = bd_tbl->bd_valid;
1675         if (task_type == FCOE_TASK_TYPE_READ) {
1676                 if (bd_count == 1) {
1677
1678                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1679
1680                         cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1681                         cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1682                         cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1683                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1684                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1685                 } else if (bd_count == 2) {
1686                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1687
1688                         cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1689                         cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1690                         cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1691
1692                         fcoe_bd_tbl++;
1693                         cached_sge->second_buf_addr.lo =
1694                                                  fcoe_bd_tbl->buf_addr_lo;
1695                         cached_sge->second_buf_addr.hi =
1696                                                 fcoe_bd_tbl->buf_addr_hi;
1697                         cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1698                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1699                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1700                 } else {
1701
1702                         sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1703                         sgl->mul_sgl.cur_sge_addr.hi =
1704                                         (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1705                         sgl->mul_sgl.sgl_size = bd_count;
1706                 }
1707         }
1708 }
1709
1710 /**
1711  * bnx2fc_setup_task_ctx - allocate and map task context
1712  *
1713  * @hba:        pointer to adapter structure
1714  *
1715  * Allocate memory for the task contexts and the associated BD table to be
1716  * used by the firmware.
1717  *
1718  */
1719 int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1720 {
1721         int rc = 0;
1722         struct regpair *task_ctx_bdt;
1723         dma_addr_t addr;
1724         int i;
1725
1726         /*
1727          * Allocate task context bd table. A page size of bd table
1728          * can map 256 buffers. Each buffer contains 32 task context
1729          * entries. Hence the limit with one page is 8192 task context
1730          * entries.
1731          */
1732         hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1733                                                   PAGE_SIZE,
1734                                                   &hba->task_ctx_bd_dma,
1735                                                   GFP_KERNEL);
1736         if (!hba->task_ctx_bd_tbl) {
1737                 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1738                 rc = -ENOMEM;
1739                 goto out;
1740         }
1741         memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1742
1743         /*
1744          * Allocate task_ctx which is an array of pointers pointing to
1745          * a page containing 32 task contexts
1746          */
1747         hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
1748                                  GFP_KERNEL);
1749         if (!hba->task_ctx) {
1750                 printk(KERN_ERR PFX "unable to allocate task context array\n");
1751                 rc = -ENOMEM;
1752                 goto out1;
1753         }
1754
1755         /*
1756          * Allocate task_ctx_dma which is an array of dma addresses
1757          */
1758         hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
1759                                         sizeof(dma_addr_t)), GFP_KERNEL);
1760         if (!hba->task_ctx_dma) {
1761                 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1762                 rc = -ENOMEM;
1763                 goto out2;
1764         }
1765
1766         task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1767         for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1768
1769                 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1770                                                       PAGE_SIZE,
1771                                                       &hba->task_ctx_dma[i],
1772                                                       GFP_KERNEL);
1773                 if (!hba->task_ctx[i]) {
1774                         printk(KERN_ERR PFX "unable to alloc task context\n");
1775                         rc = -ENOMEM;
1776                         goto out3;
1777                 }
1778                 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1779                 addr = (u64)hba->task_ctx_dma[i];
1780                 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1781                 task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1782                 task_ctx_bdt++;
1783         }
1784         return 0;
1785
1786 out3:
1787         for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1788                 if (hba->task_ctx[i]) {
1789
1790                         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1791                                 hba->task_ctx[i], hba->task_ctx_dma[i]);
1792                         hba->task_ctx[i] = NULL;
1793                 }
1794         }
1795
1796         kfree(hba->task_ctx_dma);
1797         hba->task_ctx_dma = NULL;
1798 out2:
1799         kfree(hba->task_ctx);
1800         hba->task_ctx = NULL;
1801 out1:
1802         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1803                         hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1804         hba->task_ctx_bd_tbl = NULL;
1805 out:
1806         return rc;
1807 }
1808
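/**
 * bnx2fc_free_task_ctx - free task context resources
 *
 * @hba:        pointer to adapter structure
 *
 * Frees the per-page task context buffers, the BD table pointing to them
 * and the bookkeeping arrays allocated by bnx2fc_setup_task_ctx().
 */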
1809 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1810 {
1811         int i;
1812
1813         if (hba->task_ctx_bd_tbl) {
1814                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1815                                     hba->task_ctx_bd_tbl,
1816                                     hba->task_ctx_bd_dma);
1817                 hba->task_ctx_bd_tbl = NULL;
1818         }
1819
1820         if (hba->task_ctx) {
1821                 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1822                         if (hba->task_ctx[i]) {
1823                                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1824                                                     hba->task_ctx[i],
1825                                                     hba->task_ctx_dma[i]);
1826                                 hba->task_ctx[i] = NULL;
1827                         }
1828                 }
1829                 kfree(hba->task_ctx);
1830                 hba->task_ctx = NULL;
1831         }
1832
1833         kfree(hba->task_ctx_dma);
1834         hba->task_ctx_dma = NULL;
1835 }
1836
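/**
 * bnx2fc_free_hash_table - free the session hash table
 *
 * @hba:        pointer to adapter structure
 *
 * Recovers each segment's DMA address from the hash table PBL, frees the
 * segments and then frees the PBL page itself.
 */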
1837 static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1838 {
1839         int i;
1840         int segment_count;
1841         int hash_table_size;
1842         u32 *pbl;
1843
1844         segment_count = hba->hash_tbl_segment_count;
1845         hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1846                 sizeof(struct fcoe_hash_table_entry);
1847
1848         pbl = hba->hash_tbl_pbl;
1849         for (i = 0; i < segment_count; ++i) {
1850                 dma_addr_t dma_address;
1851
1852                 dma_address = le32_to_cpu(*pbl);
1853                 ++pbl;
1854                 dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1855                 ++pbl;
1856                 dma_free_coherent(&hba->pcidev->dev,
1857                                   BNX2FC_HASH_TBL_CHUNK_SIZE,
1858                                   hba->hash_tbl_segments[i],
1859                                   dma_address);
1860
1861         }
1862
        kfree(hba->hash_tbl_segments);
        hba->hash_tbl_segments = NULL;

1863         if (hba->hash_tbl_pbl) {
1864                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1865                                     hba->hash_tbl_pbl,
1866                                     hba->hash_tbl_pbl_dma);
1867                 hba->hash_tbl_pbl = NULL;
1868         }
1869 }
1870
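/**
 * bnx2fc_allocate_hash_table - allocate the session hash table
 *
 * @hba:        pointer to adapter structure
 *
 * Allocates the hash table in BNX2FC_HASH_TBL_CHUNK_SIZE segments and a
 * page buffer list (PBL) holding the DMA address of each segment.
 */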
1871 static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
1872 {
1873         int i;
1874         int hash_table_size;
1875         int segment_count;
1876         int segment_array_size;
1877         int dma_segment_array_size;
1878         dma_addr_t *dma_segment_array;
1879         u32 *pbl;
1880
1881         hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1882                 sizeof(struct fcoe_hash_table_entry);
1883
1884         segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
1885         segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
1886         hba->hash_tbl_segment_count = segment_count;
1887
1888         segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
1889         hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
1890         if (!hba->hash_tbl_segments) {
1891                 printk(KERN_ERR PFX "hash table pointers alloc failed\n");
1892                 return -ENOMEM;
1893         }
1894         dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
1895         dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
1896         if (!dma_segment_array) {
1897                 printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
                kfree(hba->hash_tbl_segments);
                hba->hash_tbl_segments = NULL;
1898                 return -ENOMEM;
1899         }
1900
1901         for (i = 0; i < segment_count; ++i) {
1902                 hba->hash_tbl_segments[i] =
1903                         dma_alloc_coherent(&hba->pcidev->dev,
1904                                            BNX2FC_HASH_TBL_CHUNK_SIZE,
1905                                            &dma_segment_array[i],
1906                                            GFP_KERNEL);
1907                 if (!hba->hash_tbl_segments[i]) {
1908                         printk(KERN_ERR PFX "hash segment alloc failed\n");
1909                         while (--i >= 0) {
1910                                 dma_free_coherent(&hba->pcidev->dev,
1911                                                     BNX2FC_HASH_TBL_CHUNK_SIZE,
1912                                                     hba->hash_tbl_segments[i],
1913                                                     dma_segment_array[i]);
1914                                 hba->hash_tbl_segments[i] = NULL;
1915                         }
                        kfree(hba->hash_tbl_segments);
                        hba->hash_tbl_segments = NULL;
1916                         kfree(dma_segment_array);
1917                         return -ENOMEM;
1918                 }
1919                 memset(hba->hash_tbl_segments[i], 0,
1920                        BNX2FC_HASH_TBL_CHUNK_SIZE);
1921         }
1922
1923         hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
1924                                                PAGE_SIZE,
1925                                                &hba->hash_tbl_pbl_dma,
1926                                                GFP_KERNEL);
1927         if (!hba->hash_tbl_pbl) {
1928                 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
                for (i = 0; i < segment_count; ++i)
                        dma_free_coherent(&hba->pcidev->dev,
                                          BNX2FC_HASH_TBL_CHUNK_SIZE,
                                          hba->hash_tbl_segments[i],
                                          dma_segment_array[i]);
                kfree(hba->hash_tbl_segments);
                hba->hash_tbl_segments = NULL;
1929                 kfree(dma_segment_array);
1930                 return -ENOMEM;
1931         }
1932         memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
1933
1934         pbl = hba->hash_tbl_pbl;
1935         for (i = 0; i < segment_count; ++i) {
1936                 u64 paddr = dma_segment_array[i];
1937                 *pbl = cpu_to_le32((u32) paddr);
1938                 ++pbl;
1939                 *pbl = cpu_to_le32((u32) (paddr >> 32));
1940                 ++pbl;
1941         }
1942         pbl = hba->hash_tbl_pbl;
1943         i = 0;
1944         while (*pbl && *(pbl + 1)) {
1945                 u32 lo;
1946                 u32 hi;
1947                 lo = *pbl;
1948                 ++pbl;
1949                 hi = *pbl;
1950                 ++pbl;
1951                 ++i;
1952         }
1953         kfree(dma_segment_array);
1954         return 0;
1955 }
1956
1957 /**
1958  * bnx2fc_setup_fw_resc - Allocate and map hash tables, dummy and stats buffers
1959  *
1960  * @hba:        Pointer to adapter structure
1961  *
1962  */
1963 int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
1964 {
1965         u64 addr;
1966         u32 mem_size;
1967         int i;
1968
1969         if (bnx2fc_allocate_hash_table(hba))
1970                 return -ENOMEM;
1971
1972         mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1973         hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1974                                                   &hba->t2_hash_tbl_ptr_dma,
1975                                                   GFP_KERNEL);
1976         if (!hba->t2_hash_tbl_ptr) {
1977                 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
1978                 bnx2fc_free_fw_resc(hba);
1979                 return -ENOMEM;
1980         }
1981         memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
1982
1983         mem_size = BNX2FC_NUM_MAX_SESS *
1984                                 sizeof(struct fcoe_t2_hash_table_entry);
1985         hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1986                                               &hba->t2_hash_tbl_dma,
1987                                               GFP_KERNEL);
1988         if (!hba->t2_hash_tbl) {
1989                 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
1990                 bnx2fc_free_fw_resc(hba);
1991                 return -ENOMEM;
1992         }
1993         memset(hba->t2_hash_tbl, 0x00, mem_size);
1994         for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
1995                 addr = (u64) hba->t2_hash_tbl_dma +
1996                          ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
1997                 hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
1998                 hba->t2_hash_tbl[i].next.hi = addr >> 32;
1999         }
2000
2001         hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2002                                                PAGE_SIZE, &hba->dummy_buf_dma,
2003                                                GFP_KERNEL);
2004         if (!hba->dummy_buffer) {
2005                 printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
2006                 bnx2fc_free_fw_resc(hba);
2007                 return -ENOMEM;
2008         }
2009
2010         hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2011                                                PAGE_SIZE,
2012                                                &hba->stats_buf_dma,
2013                                                GFP_KERNEL);
2014         if (!hba->stats_buffer) {
2015                 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2016                 bnx2fc_free_fw_resc(hba);
2017                 return -ENOMEM;
2018         }
2019         memset(hba->stats_buffer, 0x00, PAGE_SIZE);
2020
2021         return 0;
2022 }
2023
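/**
 * bnx2fc_free_fw_resc - free firmware resources
 *
 * @hba:        pointer to adapter structure
 *
 * Frees the stats and dummy buffers, the T2 hash tables and the session
 * hash table allocated by bnx2fc_setup_fw_resc().
 */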
2024 void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
2025 {
2026         u32 mem_size;
2027
2028         if (hba->stats_buffer) {
2029                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2030                                   hba->stats_buffer, hba->stats_buf_dma);
2031                 hba->stats_buffer = NULL;
2032         }
2033
2034         if (hba->dummy_buffer) {
2035                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2036                                   hba->dummy_buffer, hba->dummy_buf_dma);
2037                 hba->dummy_buffer = NULL;
2038         }
2039
2040         if (hba->t2_hash_tbl_ptr) {
2041                 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2042                 dma_free_coherent(&hba->pcidev->dev, mem_size,
2043                                     hba->t2_hash_tbl_ptr,
2044                                     hba->t2_hash_tbl_ptr_dma);
2045                 hba->t2_hash_tbl_ptr = NULL;
2046         }
2047
2048         if (hba->t2_hash_tbl) {
2049                 mem_size = BNX2FC_NUM_MAX_SESS *
2050                             sizeof(struct fcoe_t2_hash_table_entry);
2051                 dma_free_coherent(&hba->pcidev->dev, mem_size,
2052                                     hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2053                 hba->t2_hash_tbl = NULL;
2054         }
2055         bnx2fc_free_hash_table(hba);
2056 }