bna: Added flash sub-module and ethtool eeprom entry points.
[linux-2.6.git] / drivers / net / ethernet / brocade / bna / bna_enet.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include "bna.h"
19
20 static inline int
21 ethport_can_be_up(struct bna_ethport *ethport)
22 {
23         int ready = 0;
24         if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
25                 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
26                          (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
27                          (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
28         else
29                 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
30                          (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
31                          !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
32         return ready;
33 }
34
35 #define ethport_is_up ethport_can_be_up
36
/* Events driving the ethport state machine */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,	/* FW ack'd the up request */
	ETHPORT_E_FWRESP_DOWN		= 7,	/* FW completed the down request */
	ETHPORT_E_FWRESP_UP_FAIL	= 8,	/* FW rejected the up request */
};

/* Events driving the enet state machine */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,	/* all child objects stopped */
};

/* Events driving the ioceth state machine */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,	/* attr query response arrived */
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
68
/* Copy one hardware statistics block from the DMA-visible area
 * (hw_stats_kva) into the software copy (hw_stats), converting each
 * 64-bit counter from firmware big-endian to CPU byte order.
 * Expects 'bna', 'stats_src', 'stats_dst', 'count' and 'i' to be in
 * scope at the expansion site.
 *
 * Fix: dropped the stray line-continuation backslash that followed
 * "} while (0)" — it spliced the macro into whatever source line came
 * next, which only worked by accident because a blank line followed.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
77
78 /*
79  * FW response handlers
80  */
81
82 static void
83 bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
84                                 struct bfi_msgq_mhdr *msghdr)
85 {
86         ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
87
88         if (ethport_can_be_up(ethport))
89                 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
90 }
91
92 static void
93 bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
94                                 struct bfi_msgq_mhdr *msghdr)
95 {
96         int ethport_up = ethport_is_up(ethport);
97
98         ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
99
100         if (ethport_up)
101                 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
102 }
103
/* Firmware response to a port admin up/down request. The original
 * request is still held in bfi_enet_cmd.admin_req, so its ->enable
 * field tells us whether this completes an enable or a disable.
 */
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Enable failed: clear PORT_ENABLED before
			 * reporting the failure to the state machine.
			 */
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		/* Port is now down; report link-down to the driver layer */
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}

/* Firmware response to a diagnostic loopback enable/disable request.
 * Mirrors bna_bfi_ethport_admin_rsp(), but on an enable failure clears
 * ADMIN_UP rather than PORT_ENABLED, and reports no link change.
 */
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
153
154 static void
155 bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
156 {
157         bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
158 }
159
/* Firmware response carrying the ENET attributes (resource limits). */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* Note: rsp->max_cfg supplies both the txq and rxp limits */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
181
/* Firmware response to a statistics-get request. Copies the fixed HW
 * stats blocks, then scatters the per-function Rxf/Txf stats into the
 * SW area according to the enet masks recorded in the original request.
 * Finally clears the busy flag and notifies the driver layer.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	/* Fixed blocks: each copy also converts from FW big-endian */
	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	/* rlb is copied using the rad stats layout — presumably the two
	 * blocks share a format; confirm against the bfi_enet definitions.
	 */
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	/* The FW packs stats only for functions set in the mask, so
	 * stats_src advances only on those iterations; other slots are
	 * zeroed.
	 */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	/* Txf stats follow the Rxf stats in the same packed FW buffer */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
235
236 static void
237 bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
238                         struct bfi_msgq_mhdr *msghdr)
239 {
240         ethport->link_status = BNA_LINK_UP;
241
242         /* Dispatch events */
243         ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
244 }
245
246 static void
247 bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
248                                 struct bfi_msgq_mhdr *msghdr)
249 {
250         ethport->link_status = BNA_LINK_DOWN;
251
252         /* Dispatch events */
253         ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
254 }
255
256 static void
257 bna_err_handler(struct bna *bna, u32 intr_status)
258 {
259         if (BNA_IS_HALT_INTR(bna, intr_status))
260                 bna_halt_clear(bna);
261
262         bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
263 }
264
265 void
266 bna_mbox_handler(struct bna *bna, u32 intr_status)
267 {
268         if (BNA_IS_ERR_INTR(bna, intr_status)) {
269                 bna_err_handler(bna, intr_status);
270                 return;
271         }
272         if (BNA_IS_MBOX_INTR(bna, intr_status))
273                 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
274 }
275
/* Central dispatcher for firmware message-queue responses and AENs.
 * Routes each message to the owning object (rx, tx, ethport, enet,
 * ioceth or stats module) based on msg_id; rx/tx objects are resolved
 * from the enet_id carried in the header and may be NULL if the object
 * has already gone away, in which case the message is dropped.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* All rxf configuration responses share one handler */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	/* Ethport / enet / ioceth responses go to singleton objects */
	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	/* Asynchronous event notifications from the firmware */
	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
380
381 /**
382  * ETHPORT
383  */
/* Invoke and clear the pending ethport stop callback, if any.
 * The callback pointer is cleared before the call so the callback may
 * safely re-arm it.
 */
#define call_ethport_stop_cbfn(_ethport)                                \
do {                                                                    \
	if ((_ethport)->stop_cbfn) {                                    \
		void (*cbfn)(struct bna_enet *);                        \
		cbfn = (_ethport)->stop_cbfn;                           \
		(_ethport)->stop_cbfn = NULL;                           \
		cbfn(&(_ethport)->bna->enet);                           \
	}                                                               \
} while (0)

/* Invoke and clear the pending admin-up callback with the given
 * completion status, if one is registered.
 */
#define call_ethport_adminup_cbfn(ethport, status)                      \
do {                                                                    \
	if ((ethport)->adminup_cbfn) {                                  \
		void (*cbfn)(struct bnad *, enum bna_cb_status);        \
		cbfn = (ethport)->adminup_cbfn;                         \
		(ethport)->adminup_cbfn = NULL;                         \
		cbfn((ethport)->bna->bnad, status);                     \
	}                                                               \
} while (0)
403
/* Build and post a firmware request to administratively enable the port. */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

/* Build and post a firmware request to administratively disable the port.
 * NOTE(review): this uses the same BFI_ENET_H2I_PORT_ADMIN_UP_REQ opcode
 * as the up path; the direction appears to be carried solely in ->enable
 * — confirm against the BFI message definitions.
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
437
/* Build and post a firmware request to enable diagnostic loopback.
 * NOTE(review): INTERNAL loopback selects OPMODE_EXT and the other
 * (cable/external) type selects OPMODE_CBL — this mapping looks
 * inverted; confirm against the firmware interface definitions.
 */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

/* Build and post a firmware request to disable diagnostic loopback. */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
475
476 static void
477 bna_bfi_ethport_up(struct bna_ethport *ethport)
478 {
479         if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
480                 bna_bfi_ethport_admin_up(ethport);
481         else
482                 bna_bfi_ethport_lpbk_up(ethport);
483 }
484
485 static void
486 bna_bfi_ethport_down(struct bna_ethport *ethport)
487 {
488         if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
489                 bna_bfi_ethport_admin_down(ethport);
490         else
491                 bna_bfi_ethport_lpbk_down(ethport);
492 }
493
/* Ethport state machine states:
 * stopped        - not started or failed
 * down           - started, waiting for the up-conditions
 * up_resp_wait   - FW up request posted, awaiting response
 * down_resp_wait - FW down request pending/awaiting response
 * up             - port is up
 * last_resp_wait - stopping, draining the final FW response
 */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
506
/* stopped entry: deliver any pending stop callback */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

/* stopped: only START moves us forward; STOP completes immediately */
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
539
/* down entry: nothing to do */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

/* down: UP posts the firmware up request; STOP/FAIL return to stopped */
static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
567
/* up_resp_wait entry: the up request was already posted on transition */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

/* up_resp_wait: a firmware up request is in flight */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
611
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

/* down_resp_wait: waiting for the pending FW response before going down */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
653
/* up entry: nothing to do */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

/* up: STOP or DOWN posts the firmware down request first */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
682
/* last_resp_wait entry: nothing to do */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

/* last_resp_wait: stop was requested; drain the final FW response */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
719
720 static void
721 bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
722 {
723         ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
724         ethport->bna = bna;
725
726         ethport->link_status = BNA_LINK_DOWN;
727         ethport->link_cbfn = bnad_cb_ethport_link_status;
728
729         ethport->rx_started_count = 0;
730
731         ethport->stop_cbfn = NULL;
732         ethport->adminup_cbfn = NULL;
733
734         bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
735 }
736
737 static void
738 bna_ethport_uninit(struct bna_ethport *ethport)
739 {
740         ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
741         ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
742
743         ethport->bna = NULL;
744 }
745
/* Kick the ethport state machine out of the stopped state. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

/* Ethport stop completion: count this child down on the enet's
 * child-stop wait counter.
 */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

/* Request an ethport stop; completion is reported through stop_cbfn. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
764
/* Handle an IOC failure: restore the enabled flag, report link-down to
 * the driver if needed, then fail the state machine.
 */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
777
/* Should be called only when ethport is disabled */
/* Rx object started: on the first one, mark RX_STARTED and bring the
 * port up if the remaining conditions hold.
 */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

/* Rx object stopped: on the last one, clear RX_STARTED and take the
 * port down if it was up. The up-state is sampled before the counter
 * and flag change.
 */
void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}
806
807 /**
808  * ENET
809  */
810 #define bna_enet_chld_start(enet)                                       \
811 do {                                                                    \
812         enum bna_tx_type tx_type =                                      \
813                 ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
814                 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;                   \
815         enum bna_rx_type rx_type =                                      \
816                 ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
817                 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
818         bna_ethport_start(&(enet)->bna->ethport);                       \
819         bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);                \
820         bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);                \
821 } while (0)
822
823 #define bna_enet_chld_stop(enet)                                        \
824 do {                                                                    \
825         enum bna_tx_type tx_type =                                      \
826                 ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
827                 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;                   \
828         enum bna_rx_type rx_type =                                      \
829                 ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
830                 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
831         bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
832         bfa_wc_up(&(enet)->chld_stop_wc);                               \
833         bna_ethport_stop(&(enet)->bna->ethport);                        \
834         bfa_wc_up(&(enet)->chld_stop_wc);                               \
835         bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);                 \
836         bfa_wc_up(&(enet)->chld_stop_wc);                               \
837         bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);                 \
838         bfa_wc_wait(&(enet)->chld_stop_wc);                             \
839 } while (0)
840
/*
 * Propagate an IOC/enet failure to all children.  No wait counter here:
 * failure paths are synchronous and children clean up immediately.
 */
#define bna_enet_chld_fail(enet)                                        \
do {                                                                    \
        bna_ethport_fail(&(enet)->bna->ethport);                        \
        bna_tx_mod_fail(&(enet)->bna->tx_mod);                          \
        bna_rx_mod_fail(&(enet)->bna->rx_mod);                          \
} while (0)
847
/*
 * Restart only the Rx path (used after an MTU reconfiguration, which
 * requires Rx to be bounced).  Rx type follows the enet type.
 */
#define bna_enet_rx_start(enet)                                         \
do {                                                                    \
        enum bna_rx_type rx_type =                                      \
                ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
                BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
        bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);                \
} while (0)
855
/*
 * Stop only the Rx path and wait for it; completion is reported through
 * bna_enet_cb_chld_stopped() (ENET_E_CHLD_STOPPED).  bfa_wc_up() is
 * called before the stop is issued so the callback cannot fire early.
 */
#define bna_enet_rx_stop(enet)                                          \
do {                                                                    \
        enum bna_rx_type rx_type =                                      \
                ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
                BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
        bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
        bfa_wc_up(&(enet)->chld_stop_wc);                               \
        bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);                 \
        bfa_wc_wait(&(enet)->chld_stop_wc);                             \
} while (0)
866
/*
 * Invoke and clear the pending stop callback, if any.  The callback
 * pointer is snapshotted and NULLed *before* the call so a callback
 * that re-enters the enet FSM does not see (or re-trigger) itself.
 */
#define call_enet_stop_cbfn(enet)                                       \
do {                                                                    \
        if ((enet)->stop_cbfn) {                                        \
                void (*cbfn)(void *);                                   \
                void *cbarg;                                            \
                cbfn = (enet)->stop_cbfn;                               \
                cbarg = (enet)->stop_cbarg;                             \
                (enet)->stop_cbfn = NULL;                               \
                (enet)->stop_cbarg = NULL;                              \
                cbfn(cbarg);                                            \
        }                                                               \
} while (0)
879
/*
 * Invoke and clear the pending pause-config callback, if any.
 * Cleared before the call to guard against re-entry (same pattern as
 * call_enet_stop_cbfn).
 */
#define call_enet_pause_cbfn(enet)                                      \
do {                                                                    \
        if ((enet)->pause_cbfn) {                                       \
                void (*cbfn)(struct bnad *);                            \
                cbfn = (enet)->pause_cbfn;                              \
                (enet)->pause_cbfn = NULL;                              \
                cbfn((enet)->bna->bnad);                                \
        }                                                               \
} while (0)
889
/*
 * Invoke and clear the pending MTU-config callback, if any.
 * Cleared before the call to guard against re-entry.
 */
#define call_enet_mtu_cbfn(enet)                                        \
do {                                                                    \
        if ((enet)->mtu_cbfn) {                                         \
                void (*cbfn)(struct bnad *);                            \
                cbfn = (enet)->mtu_cbfn;                                \
                (enet)->mtu_cbfn = NULL;                                \
                cbfn((enet)->bna->bnad);                                \
        }                                                               \
} while (0)
899
900 static void bna_enet_cb_chld_stopped(void *arg);
901 static void bna_bfi_pause_set(struct bna_enet *enet);
902
903 bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
904                         enum bna_enet_event);
905 bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
906                         enum bna_enet_event);
907 bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
908                         enum bna_enet_event);
909 bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
910                         enum bna_enet_event);
911 bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
912                         enum bna_enet_event);
913 bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
914                         enum bna_enet_event);
915 bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
916                         enum bna_enet_event);
917
/* Stopped-state entry: flush any pause/MTU/stop callbacks left pending. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
        call_enet_pause_cbfn(enet);
        call_enet_mtu_cbfn(enet);
        call_enet_stop_cbfn(enet);
}
925
/*
 * Stopped state: the enet is idle.  Config events are acknowledged
 * immediately (nothing to program); STOP completes at once.
 */
static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_START:
                bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
                break;

        case ENET_E_STOP:
                /* Already stopped; just complete the caller's stop. */
                call_enet_stop_cbfn(enet);
                break;

        case ENET_E_FAIL:
                /* No-op */
                break;

        case ENET_E_PAUSE_CFG:
                /* Config is cached; ack the caller right away. */
                call_enet_pause_cbfn(enet);
                break;

        case ENET_E_MTU_CFG:
                call_enet_mtu_cbfn(enet);
                break;

        case ENET_E_CHLD_STOPPED:
                /**
                 * This event is received due to Ethport, Tx and Rx objects
                 * failing
                 */
                /* No-op */
                break;

        default:
                bfa_sm_fault(event);
        }
}
962
/* Entry: push the cached pause config to firmware before starting. */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
        bna_bfi_pause_set(enet);
}
968
/*
 * Waiting for the initial pause-set firmware response.  If the pause
 * config changed again while waiting (PAUSE_CHANGED flag), re-issue the
 * request on response; otherwise go to started and bring up children.
 */
static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
                                enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_STOP:
                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
                break;

        case ENET_E_FAIL:
                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                break;

        case ENET_E_PAUSE_CFG:
                /* Remember to re-send once the outstanding request completes */
                enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
                break;

        case ENET_E_MTU_CFG:
                /* No-op */
                break;

        case ENET_E_FWRESP_PAUSE:
                if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
                        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                        bna_bfi_pause_set(enet);
                } else {
                        bfa_fsm_set_state(enet, bna_enet_sm_started);
                        bna_enet_chld_start(enet);
                }
                break;

        default:
                bfa_sm_fault(event);
        }
}
1006
/* Entry: a stop arrived mid-request; drop any queued pause change. */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}
1012
/*
 * Draining the final outstanding firmware response before stopping.
 * Either the response or an IOC failure takes us to stopped.
 */
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
                                enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_FAIL:
        case ENET_E_FWRESP_PAUSE:
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1027
/* Started-state entry: ack any pause/MTU config that just completed. */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
        /**
         * NOTE: Do not call bna_enet_chld_start() here, since it will be
         * inadvertently called during cfg_wait->started transition as well
         */
        call_enet_pause_cbfn(enet);
        call_enet_mtu_cbfn(enet);
}
1038
/*
 * Started state: children are running.  Pause config goes straight to
 * firmware; an MTU change requires bouncing the Rx path first.
 */
static void
bna_enet_sm_started(struct bna_enet *enet,
                        enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_STOP:
                bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
                break;

        case ENET_E_FAIL:
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                bna_enet_chld_fail(enet);
                break;

        case ENET_E_PAUSE_CFG:
                bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
                bna_bfi_pause_set(enet);
                break;

        case ENET_E_MTU_CFG:
                /* Rx must be stopped before the new MTU takes effect */
                bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
                bna_enet_rx_stop(enet);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1067
/* Nothing to do on entry; the triggering action was issued by the caller. */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}
1072
/*
 * Waiting for a config operation to finish (pause fw response, or Rx
 * stop for MTU).  Further config requests are latched in flags and
 * replayed one at a time when the current operation completes; pause
 * takes priority over MTU.
 */
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
                        enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_STOP:
                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
                bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
                break;

        case ENET_E_FAIL:
                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                bna_enet_chld_fail(enet);
                break;

        case ENET_E_PAUSE_CFG:
                enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
                break;

        case ENET_E_MTU_CFG:
                enet->flags |= BNA_ENET_F_MTU_CHANGED;
                break;

        case ENET_E_CHLD_STOPPED:
                /* Rx was bounced for the MTU change; bring it back up */
                bna_enet_rx_start(enet);
                /* Fall through */
        case ENET_E_FWRESP_PAUSE:
                if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
                        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                        bna_bfi_pause_set(enet);
                } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
                        enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
                        bna_enet_rx_stop(enet);
                } else {
                        bfa_fsm_set_state(enet, bna_enet_sm_started);
                }
                break;

        default:
                bfa_sm_fault(event);
        }
}
1118
/* Entry: stopping cancels any config changes queued behind the
 * in-flight operation. */
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
        enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}
1125
/*
 * A stop arrived while a config operation was in flight: wait for that
 * operation (fw pause response or Rx stop) to finish, then proceed to
 * stopping the children.
 */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
                                enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_FAIL:
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                bna_enet_chld_fail(enet);
                break;

        case ENET_E_FWRESP_PAUSE:
        case ENET_E_CHLD_STOPPED:
                bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1145
/* Entry: issue the stop to all children and arm the wait counter. */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
        bna_enet_chld_stop(enet);
}
1151
/* Waiting for ethport/Tx/Rx to report stopped (or for an IOC failure). */
static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
                                enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_FAIL:
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                bna_enet_chld_fail(enet);
                break;

        case ENET_E_CHLD_STOPPED:
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1170
/*
 * Build a SET_PAUSE request from the cached pause config and post it to
 * the firmware message queue.  Completion arrives later as
 * ENET_E_FWRESP_PAUSE via the msgq response handler.
 */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
        struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

        bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
        /* num_entries is carried big-endian on the wire */
        pause_req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
        pause_req->tx_pause = enet->pause_config.tx_pause;
        pause_req->rx_pause = enet->pause_config.rx_pause;

        bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
        bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1187
1188 static void
1189 bna_enet_cb_chld_stopped(void *arg)
1190 {
1191         struct bna_enet *enet = (struct bna_enet *)arg;
1192
1193         bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1194 }
1195
1196 static void
1197 bna_enet_init(struct bna_enet *enet, struct bna *bna)
1198 {
1199         enet->bna = bna;
1200         enet->flags = 0;
1201         enet->mtu = 0;
1202         enet->type = BNA_ENET_T_REGULAR;
1203
1204         enet->stop_cbfn = NULL;
1205         enet->stop_cbarg = NULL;
1206
1207         enet->pause_cbfn = NULL;
1208
1209         enet->mtu_cbfn = NULL;
1210
1211         bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1212 }
1213
/* Tear down the enet object; the FSM must already be stopped. */
static void
bna_enet_uninit(struct bna_enet *enet)
{
        enet->flags = 0;

        enet->bna = NULL;
}
1221
1222 static void
1223 bna_enet_start(struct bna_enet *enet)
1224 {
1225         enet->flags |= BNA_ENET_F_IOCETH_READY;
1226         if (enet->flags & BNA_ENET_F_ENABLED)
1227                 bfa_fsm_send_event(enet, ENET_E_START);
1228 }
1229
1230 static void
1231 bna_ioceth_cb_enet_stopped(void *arg)
1232 {
1233         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1234
1235         bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1236 }
1237
1238 static void
1239 bna_enet_stop(struct bna_enet *enet)
1240 {
1241         enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
1242         enet->stop_cbarg = &enet->bna->ioceth;
1243
1244         enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1245         bfa_fsm_send_event(enet, ENET_E_STOP);
1246 }
1247
/* IOC went down: mark not-ready and fail the enet FSM. */
static void
bna_enet_fail(struct bna_enet *enet)
{
        enet->flags &= ~BNA_ENET_F_IOCETH_READY;
        bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1254
/* Tx module reports stopped; drop one count off the child-stop waiter. */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
        bfa_wc_down(&enet->chld_stop_wc);
}
1260
/* Rx module reports stopped; drop one count off the child-stop waiter. */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
        bfa_wc_down(&enet->chld_stop_wc);
}
1266
/* Return the currently configured MTU (0 until bna_enet_mtu_set()). */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
        return enet->mtu;
}
1272
/*
 * Administratively enable the enet.  Only acts from the stopped state;
 * the FSM is started immediately if the IOC is already ready, otherwise
 * bna_enet_start() will do it when readiness arrives.
 */
void
bna_enet_enable(struct bna_enet *enet)
{
        /* NOTE(review): this compares with a (bfa_sm_t) cast while
         * bna_ioceth_enable() uses (bfa_fsm_t) for the same kind of
         * check -- presumably compatible typedefs; confirm in bfa_cs.h.
         */
        if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
                return;

        enet->flags |= BNA_ENET_F_ENABLED;

        if (enet->flags & BNA_ENET_F_IOCETH_READY)
                bfa_fsm_send_event(enet, ENET_E_START);
}
1284
1285 void
1286 bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
1287                  void (*cbfn)(void *))
1288 {
1289         if (type == BNA_SOFT_CLEANUP) {
1290                 (*cbfn)(enet->bna->bnad);
1291                 return;
1292         }
1293
1294         enet->stop_cbfn = cbfn;
1295         enet->stop_cbarg = enet->bna->bnad;
1296
1297         enet->flags &= ~BNA_ENET_F_ENABLED;
1298
1299         bfa_fsm_send_event(enet, ENET_E_STOP);
1300 }
1301
1302 void
1303 bna_enet_pause_config(struct bna_enet *enet,
1304                       struct bna_pause_config *pause_config,
1305                       void (*cbfn)(struct bnad *))
1306 {
1307         enet->pause_config = *pause_config;
1308
1309         enet->pause_cbfn = cbfn;
1310
1311         bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
1312 }
1313
1314 void
1315 bna_enet_mtu_set(struct bna_enet *enet, int mtu,
1316                  void (*cbfn)(struct bnad *))
1317 {
1318         enet->mtu = mtu;
1319
1320         enet->mtu_cbfn = cbfn;
1321
1322         bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
1323 }
1324
/* Fetch the adapter's factory (permanent) MAC address from the IOC. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
        *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
1330
1331 /**
1332  * IOCETH
1333  */
/*
 * Enable mailbox interrupts at both driver and hardware level.
 * intr_status is read but discarded -- presumably the read clears any
 * stale pending status before interrupts are re-armed; TODO confirm
 * against bna_intr_status_get().
 */
#define enable_mbox_intr(_ioceth)                                       \
do {                                                                    \
        u32 intr_status;                                                \
        bna_intr_status_get((_ioceth)->bna, intr_status);               \
        bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);                 \
        bna_mbox_intr_enable((_ioceth)->bna);                           \
} while (0)
1341
/* Disable mailbox interrupts; reverse order of enable_mbox_intr. */
#define disable_mbox_intr(_ioceth)                                      \
do {                                                                    \
        bna_mbox_intr_disable((_ioceth)->bna);                          \
        bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);                \
} while (0)
1347
/*
 * Invoke and clear the pending ioceth stop callback, if any.  Pointer
 * is snapshotted and NULLed before the call to guard against re-entry.
 */
#define call_ioceth_stop_cbfn(_ioceth)                                  \
do {                                                                    \
        if ((_ioceth)->stop_cbfn) {                                     \
                void (*cbfn)(struct bnad *);                            \
                struct bnad *cbarg;                                     \
                cbfn = (_ioceth)->stop_cbfn;                            \
                cbarg = (_ioceth)->stop_cbarg;                          \
                (_ioceth)->stop_cbfn = NULL;                            \
                (_ioceth)->stop_cbarg = NULL;                           \
                cbfn(cbarg);                                            \
        }                                                               \
} while (0)
1360
/*
 * Stats module lifecycle helpers.  ioc_ready gates stats requests to
 * firmware; on IOC failure the busy flags are also cleared so a pending
 * stats get/clear cannot wedge the module.
 */
#define bna_stats_mod_uninit(_stats_mod)                                \
do {                                                                    \
} while (0)

#define bna_stats_mod_start(_stats_mod)                                 \
do {                                                                    \
        (_stats_mod)->ioc_ready = true;                                 \
} while (0)

#define bna_stats_mod_stop(_stats_mod)                                  \
do {                                                                    \
        (_stats_mod)->ioc_ready = false;                                \
} while (0)

#define bna_stats_mod_fail(_stats_mod)                                  \
do {                                                                    \
        (_stats_mod)->ioc_ready = false;                                \
        (_stats_mod)->stats_get_busy = false;                           \
        (_stats_mod)->stats_clr_busy = false;                           \
} while (0)
1381
1382 static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
1383
1384 bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
1385                         enum bna_ioceth_event);
1386 bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
1387                         enum bna_ioceth_event);
1388 bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
1389                         enum bna_ioceth_event);
1390 bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
1391                         enum bna_ioceth_event);
1392 bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
1393                         enum bna_ioceth_event);
1394 bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
1395                         enum bna_ioceth_event);
1396 bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
1397                         enum bna_ioceth_event);
1398 bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
1399                         enum bna_ioceth_event);
1400
/* Stopped-state entry: complete any pending ioceth stop request. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
        call_ioceth_stop_cbfn(ioceth);
}
1406
/*
 * Stopped state: IOC is down.  ENABLE kicks off IOC bring-up; IOC
 * reset/failure events can still arrive here from the IOC layer.
 */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
                        enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_ENABLE:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
                bfa_nw_ioc_enable(&ioceth->ioc);
                break;

        case IOCETH_E_DISABLE:
                /* Re-enter stopped so the stop callback fires */
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
                break;

        case IOCETH_E_IOC_RESET:
                enable_mbox_intr(ioceth);
                break;

        case IOCETH_E_IOC_FAILED:
                disable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1434
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
        /**
         * Do not call bfa_nw_ioc_enable() here. It must be called in the
         * previous state due to failed -> ioc_ready_wait transition.
         */
}
1443
/* Waiting for the IOC to report ready after bfa_nw_ioc_enable(). */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_DISABLE:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        case IOCETH_E_IOC_RESET:
                enable_mbox_intr(ioceth);
                break;

        case IOCETH_E_IOC_FAILED:
                disable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
                break;

        case IOCETH_E_IOC_READY:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1471
/* Entry: query the firmware for enet attributes. */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
        bna_bfi_attr_get(ioceth);
}
1477
/* Waiting for the GET_ATTR firmware response before going ready. */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_DISABLE:
                /* Must drain the outstanding attr request first */
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
                break;

        case IOCETH_E_IOC_FAILED:
                disable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
                break;

        case IOCETH_E_ENET_ATTR_RESP:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1500
/* Ready-state entry: start enet and stats, then notify the netdev layer. */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
        bna_enet_start(&ioceth->bna->enet);
        bna_stats_mod_start(&ioceth->bna->stats_mod);
        bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
1508
/* Ready state: fully operational; handle disable and IOC failure. */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_DISABLE:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
                break;

        case IOCETH_E_IOC_FAILED:
                disable_mbox_intr(ioceth);
                bna_enet_fail(&ioceth->bna->enet);
                bna_stats_mod_fail(&ioceth->bna->stats_mod);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1528
/* Nothing to do on entry; just waiting out the last firmware response. */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
1533
/*
 * Disable arrived while the attr request was outstanding: wait for the
 * response (or IOC failure), then start the IOC disable sequence.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_IOC_FAILED:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                disable_mbox_intr(ioceth);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        case IOCETH_E_ENET_ATTR_RESP:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1554
/* Entry: stop stats first, then ask the enet to stop (async). */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
        bna_stats_mod_stop(&ioceth->bna->stats_mod);
        bna_enet_stop(&ioceth->bna->enet);
}
1561
/* Waiting for the enet to stop before disabling the IOC. */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_IOC_FAILED:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                disable_mbox_intr(ioceth);
                bna_enet_fail(&ioceth->bna->enet);
                bna_stats_mod_fail(&ioceth->bna->stats_mod);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        case IOCETH_E_ENET_STOPPED:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1584
/* Nothing to do on entry; bfa_nw_ioc_disable() was issued by the caller. */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
1589
/* Waiting for the IOC to confirm it has disabled. */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_IOC_DISABLED:
                disable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
                break;

        case IOCETH_E_ENET_STOPPED:
                /* This event is received due to enet failing */
                /* No-op */
                break;

        default:
                bfa_sm_fault(event);
        }
}
1609
/* Failed-state entry: let the netdev layer know the IOC is down. */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
        bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
1615
/*
 * Failed state: IOC heartbeat lost or enable failed.  An IOC reset
 * retries the bring-up; repeated failures are absorbed silently.
 */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
                        enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_DISABLE:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        case IOCETH_E_IOC_RESET:
                enable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
                break;

        case IOCETH_E_IOC_FAILED:
                break;

        default:
                bfa_sm_fault(event);
        }
}
1638
/*
 * Post a GET_ATTR request to firmware; the response comes back as
 * IOCETH_E_ENET_ATTR_RESP through the msgq response handler.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
        struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

        bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
        /* num_entries is carried big-endian on the wire */
        attr_req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
        bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_attr_req), &attr_req->mh);
        bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1652
1653 /* IOC callback functions */
1654
1655 static void
1656 bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1657 {
1658         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1659
1660         if (error)
1661                 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1662         else
1663                 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1664 }
1665
/* IOC disable completion, forwarded into the ioceth FSM. */
static void
bna_cb_ioceth_disable(void *arg)
{
        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
}
1673
/* IOC heartbeat failure, forwarded into the ioceth FSM. */
static void
bna_cb_ioceth_hbfail(void *arg)
{
        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}
1681
/* IOC reset notification, forwarded into the ioceth FSM. */
static void
bna_cb_ioceth_reset(void *arg)
{
        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}
1689
1690 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
1691         bna_cb_ioceth_enable,
1692         bna_cb_ioceth_disable,
1693         bna_cb_ioceth_hbfail,
1694         bna_cb_ioceth_reset
1695 };
1696
/*
 * Seed the attribute block with compile-time defaults; real values are
 * filled in when the GET_ATTR firmware response arrives
 * (fw_query_complete flips to true at that point).
 */
static void bna_attr_init(struct bna_ioceth *ioceth)
{
        ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
        ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
        ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
        ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
        ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
        ioceth->attr.fw_query_complete = false;
}
1706
/*
 * Initialize the ioceth object: attach the IOC, carve the pre-allocated
 * DMA/kernel memory (described by res_info) between the IOC, CEE,
 * flash and message-queue sub-modules, and park the FSM in stopped.
 * kva/dma are advanced sequentially, so the claim order here must match
 * the sizes accounted for when the resources were allocated.
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
                struct bna_res_info *res_info)
{
        u64 dma;
        u8 *kva;

        ioceth->bna = bna;

        /**
         * Attach IOC and claim:
         *      1. DMA memory for IOC attributes
         *      2. Kernel memory for FW trace
         */
        bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
        bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

        BNA_GET_DMA_ADDR(
                &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
        kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
        bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

        /* NOTE(review): this FWTRC kva is never passed to anything before
         * being overwritten below -- possibly a missing debug-memclaim
         * call or dead code; verify against bfa_nw_ioc API.
         */
        kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;

        /**
         * Attach common modules (Diag, SFP, CEE, Port) and claim respective
         * DMA memory.
         */
        BNA_GET_DMA_ADDR(
                &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
        kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
        bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
        bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
        kva += bfa_nw_cee_meminfo();
        dma += bfa_nw_cee_meminfo();

        bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
        bfa_nw_flash_memclaim(&bna->flash, kva, dma);
        kva += bfa_nw_flash_meminfo();
        dma += bfa_nw_flash_meminfo();

        bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
        bfa_msgq_memclaim(&bna->msgq, kva, dma);
        /* All BFI_MC_ENET firmware responses funnel through this handler */
        bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
        kva += bfa_msgq_meminfo();
        dma += bfa_msgq_meminfo();

        ioceth->stop_cbfn = NULL;
        ioceth->stop_cbarg = NULL;

        bna_attr_init(ioceth);

        bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1761
/* Undo bna_ioceth_init(): detach from the common IOC, then drop the
 * back-pointer to the parent bna object.
 */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1769
1770 void
1771 bna_ioceth_enable(struct bna_ioceth *ioceth)
1772 {
1773         if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
1774                 bnad_cb_ioceth_ready(ioceth->bna->bnad);
1775                 return;
1776         }
1777
1778         if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
1779                 bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
1780 }
1781
1782 void
1783 bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
1784 {
1785         if (type == BNA_SOFT_CLEANUP) {
1786                 bnad_cb_ioceth_disabled(ioceth->bna->bnad);
1787                 return;
1788         }
1789
1790         ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
1791         ioceth->stop_cbarg = ioceth->bna->bnad;
1792
1793         bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
1794 }
1795
1796 static void
1797 bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1798                   struct bna_res_info *res_info)
1799 {
1800         int i;
1801
1802         ucam_mod->ucmac = (struct bna_mac *)
1803         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1804
1805         INIT_LIST_HEAD(&ucam_mod->free_q);
1806         for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1807                 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1808                 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1809         }
1810
1811         ucam_mod->bna = bna;
1812 }
1813
1814 static void
1815 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1816 {
1817         struct list_head *qe;
1818         int i = 0;
1819
1820         list_for_each(qe, &ucam_mod->free_q)
1821                 i++;
1822
1823         ucam_mod->bna = NULL;
1824 }
1825
1826 static void
1827 bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1828                   struct bna_res_info *res_info)
1829 {
1830         int i;
1831
1832         mcam_mod->mcmac = (struct bna_mac *)
1833         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1834
1835         INIT_LIST_HEAD(&mcam_mod->free_q);
1836         for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1837                 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1838                 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1839         }
1840
1841         mcam_mod->mchandle = (struct bna_mcam_handle *)
1842         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1843
1844         INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1845         for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1846                 bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1847                 list_add_tail(&mcam_mod->mchandle[i].qe,
1848                                 &mcam_mod->free_handle_q);
1849         }
1850
1851         mcam_mod->bna = bna;
1852 }
1853
1854 static void
1855 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1856 {
1857         struct list_head *qe;
1858         int i;
1859
1860         i = 0;
1861         list_for_each(qe, &mcam_mod->free_q) i++;
1862
1863         i = 0;
1864         list_for_each(qe, &mcam_mod->free_handle_q) i++;
1865
1866         mcam_mod->bna = NULL;
1867 }
1868
/*
 * Post a STATS_GET request to the firmware over the message queue.  Marks
 * the stats module busy first (callers check stats_get_busy, see
 * bna_hw_stats_get(); presumably the msgq response handler clears it -
 * not visible here).  The firmware DMAs the statistics into the host
 * buffer whose address was recorded in bna->stats.hw_stats_dma.
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	/* Request all stat groups for every active Tx and Rx object */
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1890
1891 void
1892 bna_res_req(struct bna_res_info *res_info)
1893 {
1894         /* DMA memory for COMMON_MODULE */
1895         res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
1896         res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1897         res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
1898         res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
1899                                 (bfa_nw_cee_meminfo() +
1900                                  bfa_nw_flash_meminfo() +
1901                                  bfa_msgq_meminfo()), PAGE_SIZE);
1902
1903         /* DMA memory for retrieving IOC attributes */
1904         res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
1905         res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1906         res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
1907         res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
1908                                 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
1909
1910         /* Virtual memory for retreiving fw_trc */
1911         res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
1912         res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1913         res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
1914         res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
1915
1916         /* DMA memory for retreiving stats */
1917         res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
1918         res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1919         res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
1920         res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
1921                                 ALIGN(sizeof(struct bfi_enet_stats),
1922                                         PAGE_SIZE);
1923 }
1924
1925 void
1926 bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
1927 {
1928         struct bna_attr *attr = &bna->ioceth.attr;
1929
1930         /* Virtual memory for Tx objects - stored by Tx module */
1931         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
1932         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
1933                 BNA_MEM_T_KVA;
1934         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
1935         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
1936                 attr->num_txq * sizeof(struct bna_tx);
1937
1938         /* Virtual memory for TxQ - stored by Tx module */
1939         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
1940         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
1941                 BNA_MEM_T_KVA;
1942         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
1943         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
1944                 attr->num_txq * sizeof(struct bna_txq);
1945
1946         /* Virtual memory for Rx objects - stored by Rx module */
1947         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
1948         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
1949                 BNA_MEM_T_KVA;
1950         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
1951         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
1952                 attr->num_rxp * sizeof(struct bna_rx);
1953
1954         /* Virtual memory for RxPath - stored by Rx module */
1955         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
1956         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
1957                 BNA_MEM_T_KVA;
1958         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
1959         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
1960                 attr->num_rxp * sizeof(struct bna_rxp);
1961
1962         /* Virtual memory for RxQ - stored by Rx module */
1963         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
1964         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
1965                 BNA_MEM_T_KVA;
1966         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
1967         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
1968                 (attr->num_rxp * 2) * sizeof(struct bna_rxq);
1969
1970         /* Virtual memory for Unicast MAC address - stored by ucam module */
1971         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
1972         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
1973                 BNA_MEM_T_KVA;
1974         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
1975         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
1976                 attr->num_ucmac * sizeof(struct bna_mac);
1977
1978         /* Virtual memory for Multicast MAC address - stored by mcam module */
1979         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
1980         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
1981                 BNA_MEM_T_KVA;
1982         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
1983         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
1984                 attr->num_mcmac * sizeof(struct bna_mac);
1985
1986         /* Virtual memory for Multicast handle - stored by mcam module */
1987         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
1988         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
1989                 BNA_MEM_T_KVA;
1990         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
1991         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
1992                 attr->num_mcmac * sizeof(struct bna_mcam_handle);
1993 }
1994
/*
 * Top-level BNA initialization: record the driver (bnad) and PCI handles,
 * wire up the stats DMA buffer allocated per bna_res_req(), compute
 * register addresses, then initialize the ioceth (which attaches the IOC,
 * CEE, flash and msgq sub-modules), enet and ethport objects.  Tx/Rx/CAM
 * modules are initialized later by bna_mod_init().
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	/* Stash both the kernel virtual address and the DMA address of the
	 * statistics buffer; the firmware DMAs into it (bna_bfi_stats_get).
	 */
	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also attaches and claims memory for cee, flash and msgq -
	 * see bna_ioceth_init()
	 */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
2017
2018 void
2019 bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
2020 {
2021         bna_tx_mod_init(&bna->tx_mod, bna, res_info);
2022
2023         bna_rx_mod_init(&bna->rx_mod, bna, res_info);
2024
2025         bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
2026
2027         bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
2028
2029         bna->default_mode_rid = BFI_INVALID_RID;
2030         bna->promisc_rid = BFI_INVALID_RID;
2031
2032         bna->mod_flags |= BNA_MOD_F_INIT_DONE;
2033 }
2034
/*
 * Full teardown, in reverse order of initialization.  The Tx/Rx/CAM
 * modules are only uninitialized if bna_mod_init() ran to completion
 * (tracked via BNA_MOD_F_INIT_DONE); the base objects set up by
 * bna_init() are always torn down.
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	/* Detach from the IOC last - earlier teardown may still need it */
	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2054
2055 int
2056 bna_num_txq_set(struct bna *bna, int num_txq)
2057 {
2058         if (bna->ioceth.attr.fw_query_complete &&
2059                 (num_txq <= bna->ioceth.attr.num_txq)) {
2060                 bna->ioceth.attr.num_txq = num_txq;
2061                 return BNA_CB_SUCCESS;
2062         }
2063
2064         return BNA_CB_FAIL;
2065 }
2066
2067 int
2068 bna_num_rxp_set(struct bna *bna, int num_rxp)
2069 {
2070         if (bna->ioceth.attr.fw_query_complete &&
2071                 (num_rxp <= bna->ioceth.attr.num_rxp)) {
2072                 bna->ioceth.attr.num_rxp = num_rxp;
2073                 return BNA_CB_SUCCESS;
2074         }
2075
2076         return BNA_CB_FAIL;
2077 }
2078
2079 struct bna_mac *
2080 bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2081 {
2082         struct list_head *qe;
2083
2084         if (list_empty(&ucam_mod->free_q))
2085                 return NULL;
2086
2087         bfa_q_deq(&ucam_mod->free_q, &qe);
2088
2089         return (struct bna_mac *)qe;
2090 }
2091
/* Return a unicast MAC entry to the free pool (inverse of _mac_get()). */
void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
2097
2098 struct bna_mac *
2099 bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
2100 {
2101         struct list_head *qe;
2102
2103         if (list_empty(&mcam_mod->free_q))
2104                 return NULL;
2105
2106         bfa_q_deq(&mcam_mod->free_q, &qe);
2107
2108         return (struct bna_mac *)qe;
2109 }
2110
/* Return a multicast MAC entry to the free pool (inverse of _mac_get()). */
void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
2116
2117 struct bna_mcam_handle *
2118 bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2119 {
2120         struct list_head *qe;
2121
2122         if (list_empty(&mcam_mod->free_handle_q))
2123                 return NULL;
2124
2125         bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2126
2127         return (struct bna_mcam_handle *)qe;
2128 }
2129
/* Return a multicast handle to the free pool (inverse of _handle_get()). */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2136
2137 void
2138 bna_hw_stats_get(struct bna *bna)
2139 {
2140         if (!bna->stats_mod.ioc_ready) {
2141                 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2142                 return;
2143         }
2144         if (bna->stats_mod.stats_get_busy) {
2145                 bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2146                 return;
2147         }
2148
2149         bna_bfi_stats_get(bna);
2150 }