/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
22 ethport_can_be_up(struct bna_ethport *ethport)
25 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
26 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
27 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
28 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
30 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
31 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
32 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
36 #define ethport_is_up ethport_can_be_up
/* Events consumed by the ethport state machine */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};

/* Events consumed by the enet state machine */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};

/* Events consumed by the ioceth state machine */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
/*
 * Copy one big-endian HW stats sub-block from the DMA area into the
 * byte-swapped SW copy.  Relies on caller-scope locals: count, i,
 * stats_src, stats_dst.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)								\

/*
 * FW response handlers
 */
84 bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
85 struct bfi_msgq_mhdr *msghdr)
87 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
89 if (ethport_can_be_up(ethport))
90 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
94 bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
95 struct bfi_msgq_mhdr *msghdr)
97 int ethport_up = ethport_is_up(ethport);
99 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
102 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
106 bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
107 struct bfi_msgq_mhdr *msghdr)
109 struct bfi_enet_enable_req *admin_req =
110 ðport->bfi_enet_cmd.admin_req;
111 struct bfi_enet_rsp *rsp =
112 container_of(msghdr, struct bfi_enet_rsp, mh);
114 switch (admin_req->enable) {
115 case BNA_STATUS_T_ENABLED:
116 if (rsp->error == BFI_ENET_CMD_OK)
117 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
119 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
120 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
124 case BNA_STATUS_T_DISABLED:
125 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
126 ethport->link_status = BNA_LINK_DOWN;
127 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
133 bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
134 struct bfi_msgq_mhdr *msghdr)
136 struct bfi_enet_diag_lb_req *diag_lb_req =
137 ðport->bfi_enet_cmd.lpbk_req;
138 struct bfi_enet_rsp *rsp =
139 container_of(msghdr, struct bfi_enet_rsp, mh);
141 switch (diag_lb_req->enable) {
142 case BNA_STATUS_T_ENABLED:
143 if (rsp->error == BFI_ENET_CMD_OK)
144 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
146 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
147 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
151 case BNA_STATUS_T_DISABLED:
152 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
158 bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
160 bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
164 bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
165 struct bfi_msgq_mhdr *msghdr)
167 struct bfi_enet_attr_rsp *rsp =
168 container_of(msghdr, struct bfi_enet_attr_rsp, mh);
171 * Store only if not set earlier, since BNAD can override the HW
174 if (!ioceth->attr.fw_query_complete) {
175 ioceth->attr.num_txq = ntohl(rsp->max_cfg);
176 ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
177 ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
178 ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
179 ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
180 ioceth->attr.fw_query_complete = true;
183 bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
187 bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
189 struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
192 u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
193 u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
197 bna_stats_copy(mac, mac);
198 bna_stats_copy(bpc, bpc);
199 bna_stats_copy(rad, rad);
200 bna_stats_copy(rlb, rad);
201 bna_stats_copy(fc_rx, fc_rx);
202 bna_stats_copy(fc_tx, fc_tx);
204 stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);
206 /* Copy Rxf stats to SW area, scatter them while copying */
207 for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
208 stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
209 memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
210 if (rx_enet_mask & ((u32)(1 << i))) {
212 count = sizeof(struct bfi_enet_stats_rxf) /
214 for (k = 0; k < count; k++) {
215 stats_dst[k] = be64_to_cpu(*stats_src);
221 /* Copy Txf stats to SW area, scatter them while copying */
222 for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
223 stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
224 memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
225 if (tx_enet_mask & ((u32)(1 << i))) {
227 count = sizeof(struct bfi_enet_stats_txf) /
229 for (k = 0; k < count; k++) {
230 stats_dst[k] = be64_to_cpu(*stats_src);
236 bna->stats_mod.stats_get_busy = false;
237 bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
241 bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
242 struct bfi_msgq_mhdr *msghdr)
244 ethport->link_status = BNA_LINK_UP;
246 /* Dispatch events */
247 ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
251 bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
252 struct bfi_msgq_mhdr *msghdr)
254 ethport->link_status = BNA_LINK_DOWN;
256 /* Dispatch events */
257 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
261 bna_err_handler(struct bna *bna, u32 intr_status)
263 if (BNA_IS_HALT_INTR(bna, intr_status))
266 bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
270 bna_mbox_handler(struct bna *bna, u32 intr_status)
272 if (BNA_IS_ERR_INTR(bna, intr_status)) {
273 bna_err_handler(bna, intr_status);
276 if (BNA_IS_MBOX_INTR(bna, intr_status))
277 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
281 bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
283 struct bna *bna = (struct bna *)arg;
287 switch (msghdr->msg_id) {
288 case BFI_ENET_I2H_RX_CFG_SET_RSP:
289 bna_rx_from_rid(bna, msghdr->enet_id, rx);
291 bna_bfi_rx_enet_start_rsp(rx, msghdr);
294 case BFI_ENET_I2H_RX_CFG_CLR_RSP:
295 bna_rx_from_rid(bna, msghdr->enet_id, rx);
297 bna_bfi_rx_enet_stop_rsp(rx, msghdr);
300 case BFI_ENET_I2H_RIT_CFG_RSP:
301 case BFI_ENET_I2H_RSS_CFG_RSP:
302 case BFI_ENET_I2H_RSS_ENABLE_RSP:
303 case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
304 case BFI_ENET_I2H_RX_DEFAULT_RSP:
305 case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
306 case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
307 case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
308 case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
309 case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
310 case BFI_ENET_I2H_RX_VLAN_SET_RSP:
311 case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
312 bna_rx_from_rid(bna, msghdr->enet_id, rx);
314 bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
317 case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
318 bna_rx_from_rid(bna, msghdr->enet_id, rx);
320 bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
323 case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
324 bna_rx_from_rid(bna, msghdr->enet_id, rx);
326 bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
329 case BFI_ENET_I2H_TX_CFG_SET_RSP:
330 bna_tx_from_rid(bna, msghdr->enet_id, tx);
332 bna_bfi_tx_enet_start_rsp(tx, msghdr);
335 case BFI_ENET_I2H_TX_CFG_CLR_RSP:
336 bna_tx_from_rid(bna, msghdr->enet_id, tx);
338 bna_bfi_tx_enet_stop_rsp(tx, msghdr);
341 case BFI_ENET_I2H_PORT_ADMIN_RSP:
342 bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
345 case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
346 bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
349 case BFI_ENET_I2H_SET_PAUSE_RSP:
350 bna_bfi_pause_set_rsp(&bna->enet, msghdr);
353 case BFI_ENET_I2H_GET_ATTR_RSP:
354 bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
357 case BFI_ENET_I2H_STATS_GET_RSP:
358 bna_bfi_stats_get_rsp(bna, msghdr);
361 case BFI_ENET_I2H_STATS_CLR_RSP:
365 case BFI_ENET_I2H_LINK_UP_AEN:
366 bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
369 case BFI_ENET_I2H_LINK_DOWN_AEN:
370 bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
373 case BFI_ENET_I2H_PORT_ENABLE_AEN:
374 bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
377 case BFI_ENET_I2H_PORT_DISABLE_AEN:
378 bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
381 case BFI_ENET_I2H_BW_UPDATE_AEN:
382 bna_bfi_bw_update_aen(&bna->tx_mod);
/* Invoke-once stop callback: clear before calling so re-entry is safe */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

/* Invoke-once admin-up completion callback with the given status */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
413 bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
415 struct bfi_enet_enable_req *admin_up_req =
416 ðport->bfi_enet_cmd.admin_req;
418 bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
419 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
420 admin_up_req->mh.num_entries = htons(
421 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
422 admin_up_req->enable = BNA_STATUS_T_ENABLED;
424 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
425 sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
426 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
430 bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
432 struct bfi_enet_enable_req *admin_down_req =
433 ðport->bfi_enet_cmd.admin_req;
435 bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
436 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
437 admin_down_req->mh.num_entries = htons(
438 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
439 admin_down_req->enable = BNA_STATUS_T_DISABLED;
441 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
442 sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
443 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
447 bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
449 struct bfi_enet_diag_lb_req *lpbk_up_req =
450 ðport->bfi_enet_cmd.lpbk_req;
452 bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
453 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
454 lpbk_up_req->mh.num_entries = htons(
455 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
456 lpbk_up_req->mode = (ethport->bna->enet.type ==
457 BNA_ENET_T_LOOPBACK_INTERNAL) ?
458 BFI_ENET_DIAG_LB_OPMODE_EXT :
459 BFI_ENET_DIAG_LB_OPMODE_CBL;
460 lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
462 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
463 sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
464 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
468 bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
470 struct bfi_enet_diag_lb_req *lpbk_down_req =
471 ðport->bfi_enet_cmd.lpbk_req;
473 bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
474 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
475 lpbk_down_req->mh.num_entries = htons(
476 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
477 lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
479 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
480 sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
481 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
485 bna_bfi_ethport_up(struct bna_ethport *ethport)
487 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
488 bna_bfi_ethport_admin_up(ethport);
490 bna_bfi_ethport_lpbk_up(ethport);
494 bna_bfi_ethport_down(struct bna_ethport *ethport)
496 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
497 bna_bfi_ethport_admin_down(ethport);
499 bna_bfi_ethport_lpbk_down(ethport);
502 bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
503 enum bna_ethport_event);
504 bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
505 enum bna_ethport_event);
506 bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
507 enum bna_ethport_event);
508 bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
509 enum bna_ethport_event);
510 bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
511 enum bna_ethport_event);
512 bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
513 enum bna_ethport_event);
516 bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
518 call_ethport_stop_cbfn(ethport);
522 bna_ethport_sm_stopped(struct bna_ethport *ethport,
523 enum bna_ethport_event event)
526 case ETHPORT_E_START:
527 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
531 call_ethport_stop_cbfn(ethport);
539 /* This event is received due to Rx objects failing */
549 bna_ethport_sm_down_entry(struct bna_ethport *ethport)
554 bna_ethport_sm_down(struct bna_ethport *ethport,
555 enum bna_ethport_event event)
559 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
563 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
567 bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
568 bna_bfi_ethport_up(ethport);
577 bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
582 bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
583 enum bna_ethport_event event)
587 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
591 call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
592 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
596 call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
597 bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
600 case ETHPORT_E_FWRESP_UP_OK:
601 call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
602 bfa_fsm_set_state(ethport, bna_ethport_sm_up);
605 case ETHPORT_E_FWRESP_UP_FAIL:
606 call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
607 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
610 case ETHPORT_E_FWRESP_DOWN:
611 /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
612 bna_bfi_ethport_up(ethport);
621 bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
624 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
625 * mbox due to up_resp_wait -> down_resp_wait transition on event
631 bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
632 enum bna_ethport_event event)
636 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
640 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
644 bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
647 case ETHPORT_E_FWRESP_UP_OK:
648 /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
649 bna_bfi_ethport_down(ethport);
652 case ETHPORT_E_FWRESP_UP_FAIL:
653 case ETHPORT_E_FWRESP_DOWN:
654 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
663 bna_ethport_sm_up_entry(struct bna_ethport *ethport)
668 bna_ethport_sm_up(struct bna_ethport *ethport,
669 enum bna_ethport_event event)
673 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
674 bna_bfi_ethport_down(ethport);
678 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
682 bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
683 bna_bfi_ethport_down(ethport);
692 bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
697 bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
698 enum bna_ethport_event event)
702 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
707 * This event is received due to Rx objects stopping in
708 * parallel to ethport
713 case ETHPORT_E_FWRESP_UP_OK:
714 /* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
715 bna_bfi_ethport_down(ethport);
718 case ETHPORT_E_FWRESP_UP_FAIL:
719 case ETHPORT_E_FWRESP_DOWN:
720 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
729 bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
731 ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
734 ethport->link_status = BNA_LINK_DOWN;
735 ethport->link_cbfn = bnad_cb_ethport_link_status;
737 ethport->rx_started_count = 0;
739 ethport->stop_cbfn = NULL;
740 ethport->adminup_cbfn = NULL;
742 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
746 bna_ethport_uninit(struct bna_ethport *ethport)
748 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
749 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
755 bna_ethport_start(struct bna_ethport *ethport)
757 bfa_fsm_send_event(ethport, ETHPORT_E_START);
761 bna_enet_cb_ethport_stopped(struct bna_enet *enet)
763 bfa_wc_down(&enet->chld_stop_wc);
767 bna_ethport_stop(struct bna_ethport *ethport)
769 ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
770 bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
774 bna_ethport_fail(struct bna_ethport *ethport)
776 /* Reset the physical port status to enabled */
777 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
779 if (ethport->link_status != BNA_LINK_DOWN) {
780 ethport->link_status = BNA_LINK_DOWN;
781 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
783 bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
786 /* Should be called only when ethport is disabled */
788 bna_ethport_cb_rx_started(struct bna_ethport *ethport)
790 ethport->rx_started_count++;
792 if (ethport->rx_started_count == 1) {
793 ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
795 if (ethport_can_be_up(ethport))
796 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
801 bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
803 int ethport_up = ethport_is_up(ethport);
805 ethport->rx_started_count--;
807 if (ethport->rx_started_count == 0) {
808 ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
811 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
/* Start ethport, Tx and Rx children with a type matching the enet */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/*
 * Stop all children; the wait counter is raised once per child before
 * its stop is issued so completion fires only after all have stopped.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

/* Propagate an IOC failure to all children */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

/* Restart only the Rx path (used after an MTU reconfiguration) */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop only the Rx path, waiting for completion via the wait counter */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
/* Invoke-once enet stop callback with its saved argument */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Invoke-once pause-config completion callback */
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

/* Invoke-once MTU-config completion callback */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
907 static void bna_enet_cb_chld_stopped(void *arg);
908 static void bna_bfi_pause_set(struct bna_enet *enet);
910 bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
911 enum bna_enet_event);
912 bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
913 enum bna_enet_event);
914 bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
915 enum bna_enet_event);
916 bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
917 enum bna_enet_event);
918 bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
919 enum bna_enet_event);
920 bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
921 enum bna_enet_event);
922 bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
923 enum bna_enet_event);
926 bna_enet_sm_stopped_entry(struct bna_enet *enet)
928 call_enet_pause_cbfn(enet);
929 call_enet_mtu_cbfn(enet);
930 call_enet_stop_cbfn(enet);
934 bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
938 bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
942 call_enet_stop_cbfn(enet);
949 case ENET_E_PAUSE_CFG:
950 call_enet_pause_cbfn(enet);
954 call_enet_mtu_cbfn(enet);
957 case ENET_E_CHLD_STOPPED:
959 * This event is received due to Ethport, Tx and Rx objects
971 bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
973 bna_bfi_pause_set(enet);
977 bna_enet_sm_pause_init_wait(struct bna_enet *enet,
978 enum bna_enet_event event)
982 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
983 bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
987 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
988 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
991 case ENET_E_PAUSE_CFG:
992 enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
999 case ENET_E_FWRESP_PAUSE:
1000 if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
1001 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1002 bna_bfi_pause_set(enet);
1004 bfa_fsm_set_state(enet, bna_enet_sm_started);
1005 bna_enet_chld_start(enet);
1010 bfa_sm_fault(event);
1015 bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
1017 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1021 bna_enet_sm_last_resp_wait(struct bna_enet *enet,
1022 enum bna_enet_event event)
1026 case ENET_E_FWRESP_PAUSE:
1027 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1031 bfa_sm_fault(event);
1036 bna_enet_sm_started_entry(struct bna_enet *enet)
1039 * NOTE: Do not call bna_enet_chld_start() here, since it will be
1040 * inadvertently called during cfg_wait->started transition as well
1042 call_enet_pause_cbfn(enet);
1043 call_enet_mtu_cbfn(enet);
1047 bna_enet_sm_started(struct bna_enet *enet,
1048 enum bna_enet_event event)
1052 bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
1056 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1057 bna_enet_chld_fail(enet);
1060 case ENET_E_PAUSE_CFG:
1061 bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
1062 bna_bfi_pause_set(enet);
1065 case ENET_E_MTU_CFG:
1066 bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
1067 bna_enet_rx_stop(enet);
1071 bfa_sm_fault(event);
1076 bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
1081 bna_enet_sm_cfg_wait(struct bna_enet *enet,
1082 enum bna_enet_event event)
1086 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1087 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1088 bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
1092 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1093 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1094 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1095 bna_enet_chld_fail(enet);
1098 case ENET_E_PAUSE_CFG:
1099 enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
1102 case ENET_E_MTU_CFG:
1103 enet->flags |= BNA_ENET_F_MTU_CHANGED;
1106 case ENET_E_CHLD_STOPPED:
1107 bna_enet_rx_start(enet);
1109 case ENET_E_FWRESP_PAUSE:
1110 if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
1111 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1112 bna_bfi_pause_set(enet);
1113 } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
1114 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1115 bna_enet_rx_stop(enet);
1117 bfa_fsm_set_state(enet, bna_enet_sm_started);
1122 bfa_sm_fault(event);
1127 bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
1129 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1130 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1134 bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
1135 enum bna_enet_event event)
1139 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1140 bna_enet_chld_fail(enet);
1143 case ENET_E_FWRESP_PAUSE:
1144 case ENET_E_CHLD_STOPPED:
1145 bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
1149 bfa_sm_fault(event);
1154 bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
1156 bna_enet_chld_stop(enet);
1160 bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
1161 enum bna_enet_event event)
1165 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1166 bna_enet_chld_fail(enet);
1169 case ENET_E_CHLD_STOPPED:
1170 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1174 bfa_sm_fault(event);
1179 bna_bfi_pause_set(struct bna_enet *enet)
1181 struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;
1183 bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
1184 BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
1185 pause_req->mh.num_entries = htons(
1186 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
1187 pause_req->tx_pause = enet->pause_config.tx_pause;
1188 pause_req->rx_pause = enet->pause_config.rx_pause;
1190 bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
1191 sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
1192 bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
1196 bna_enet_cb_chld_stopped(void *arg)
1198 struct bna_enet *enet = (struct bna_enet *)arg;
1200 bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1204 bna_enet_init(struct bna_enet *enet, struct bna *bna)
1209 enet->type = BNA_ENET_T_REGULAR;
1211 enet->stop_cbfn = NULL;
1212 enet->stop_cbarg = NULL;
1214 enet->pause_cbfn = NULL;
1216 enet->mtu_cbfn = NULL;
1218 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1222 bna_enet_uninit(struct bna_enet *enet)
1230 bna_enet_start(struct bna_enet *enet)
1232 enet->flags |= BNA_ENET_F_IOCETH_READY;
1233 if (enet->flags & BNA_ENET_F_ENABLED)
1234 bfa_fsm_send_event(enet, ENET_E_START);
1238 bna_ioceth_cb_enet_stopped(void *arg)
1240 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1242 bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1246 bna_enet_stop(struct bna_enet *enet)
1248 enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
1249 enet->stop_cbarg = &enet->bna->ioceth;
1251 enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1252 bfa_fsm_send_event(enet, ENET_E_STOP);
1256 bna_enet_fail(struct bna_enet *enet)
1258 enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1259 bfa_fsm_send_event(enet, ENET_E_FAIL);
1263 bna_enet_cb_tx_stopped(struct bna_enet *enet)
1265 bfa_wc_down(&enet->chld_stop_wc);
1269 bna_enet_cb_rx_stopped(struct bna_enet *enet)
1271 bfa_wc_down(&enet->chld_stop_wc);
1275 bna_enet_mtu_get(struct bna_enet *enet)
1281 bna_enet_enable(struct bna_enet *enet)
1283 if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
1286 enet->flags |= BNA_ENET_F_ENABLED;
1288 if (enet->flags & BNA_ENET_F_IOCETH_READY)
1289 bfa_fsm_send_event(enet, ENET_E_START);
1293 bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
1294 void (*cbfn)(void *))
1296 if (type == BNA_SOFT_CLEANUP) {
1297 (*cbfn)(enet->bna->bnad);
1301 enet->stop_cbfn = cbfn;
1302 enet->stop_cbarg = enet->bna->bnad;
1304 enet->flags &= ~BNA_ENET_F_ENABLED;
1306 bfa_fsm_send_event(enet, ENET_E_STOP);
1310 bna_enet_pause_config(struct bna_enet *enet,
1311 struct bna_pause_config *pause_config,
1312 void (*cbfn)(struct bnad *))
1314 enet->pause_config = *pause_config;
1316 enet->pause_cbfn = cbfn;
1318 bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
1322 bna_enet_mtu_set(struct bna_enet *enet, int mtu,
1323 void (*cbfn)(struct bnad *))
1327 enet->mtu_cbfn = cbfn;
1329 bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
1333 bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
1335 bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
/* IOCETH */

/* Re-arm mailbox interrupts after clearing any stale status */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/* Invoke-once ioceth stop callback with its saved argument */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

/* On IOC failure also clear any in-flight stats operations */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
1388 static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
1390 bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
1391 enum bna_ioceth_event);
1392 bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
1393 enum bna_ioceth_event);
1394 bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
1395 enum bna_ioceth_event);
1396 bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
1397 enum bna_ioceth_event);
1398 bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
1399 enum bna_ioceth_event);
1400 bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
1401 enum bna_ioceth_event);
1402 bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
1403 enum bna_ioceth_event);
1404 bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
1405 enum bna_ioceth_event);
1408 bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
1410 call_ioceth_stop_cbfn(ioceth);
1414 bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
1415 enum bna_ioceth_event event)
1418 case IOCETH_E_ENABLE:
1419 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
1420 bfa_nw_ioc_enable(&ioceth->ioc);
1423 case IOCETH_E_DISABLE:
1424 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1427 case IOCETH_E_IOC_RESET:
1428 enable_mbox_intr(ioceth);
1431 case IOCETH_E_IOC_FAILED:
1432 disable_mbox_intr(ioceth);
1433 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1437 bfa_sm_fault(event);
1442 bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
1445 * Do not call bfa_nw_ioc_enable() here. It must be called in the
1446 * previous state due to failed -> ioc_ready_wait transition.
1451 bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
1452 enum bna_ioceth_event event)
1455 case IOCETH_E_DISABLE:
1456 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1457 bfa_nw_ioc_disable(&ioceth->ioc);
1460 case IOCETH_E_IOC_RESET:
1461 enable_mbox_intr(ioceth);
1464 case IOCETH_E_IOC_FAILED:
1465 disable_mbox_intr(ioceth);
1466 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1469 case IOCETH_E_IOC_READY:
1470 bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
1474 bfa_sm_fault(event);
1479 bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
1481 bna_bfi_attr_get(ioceth);
/* Waiting for the FW response to the attribute query posted on entry. */
1485 bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
1486 enum bna_ioceth_event event)
1489 case IOCETH_E_DISABLE:
/* attr request is in flight: wait for its response before disabling */
1490 bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
1493 case IOCETH_E_IOC_FAILED:
1494 disable_mbox_intr(ioceth);
1495 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1498 case IOCETH_E_ENET_ATTR_RESP:
1499 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
1503 bfa_sm_fault(event);
/* Entry action for "ready": start enet and stats, then notify bnad. */
1508 bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
1510 bna_enet_start(&ioceth->bna->enet);
1511 bna_stats_mod_start(&ioceth->bna->stats_mod);
1512 bnad_cb_ioceth_ready(ioceth->bna->bnad);
/*
 * Fully operational.  DISABLE stops the children first; IOC_FAILED
 * propagates the failure into enet and the stats module.
 */
1516 bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
1519 case IOCETH_E_DISABLE:
1520 bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
1523 case IOCETH_E_IOC_FAILED:
1524 disable_mbox_intr(ioceth);
1525 bna_enet_fail(&ioceth->bna->enet);
1526 bna_stats_mod_fail(&ioceth->bna->stats_mod);
1527 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1531 bfa_sm_fault(event);
1536 bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
/*
 * Disable was requested while the attr query was outstanding; once the
 * response (or an IOC failure) arrives, disable the IOC.
 */
1541 bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
1542 enum bna_ioceth_event event)
1545 case IOCETH_E_IOC_FAILED:
1546 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1547 disable_mbox_intr(ioceth);
1548 bfa_nw_ioc_disable(&ioceth->ioc);
1551 case IOCETH_E_ENET_ATTR_RESP:
1552 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1553 bfa_nw_ioc_disable(&ioceth->ioc);
1557 bfa_sm_fault(event);
/* Entry action for "enet_stop_wait": stop stats first, then enet. */
1562 bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
1564 bna_stats_mod_stop(&ioceth->bna->stats_mod);
1565 bna_enet_stop(&ioceth->bna->enet);
/* Waiting for enet to stop before disabling the IOC. */
1569 bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
1570 enum bna_ioceth_event event)
1573 case IOCETH_E_IOC_FAILED:
1574 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1575 disable_mbox_intr(ioceth);
1576 bna_enet_fail(&ioceth->bna->enet);
1577 bna_stats_mod_fail(&ioceth->bna->stats_mod);
1578 bfa_nw_ioc_disable(&ioceth->ioc);
1581 case IOCETH_E_ENET_STOPPED:
1582 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1583 bfa_nw_ioc_disable(&ioceth->ioc);
1587 bfa_sm_fault(event);
1592 bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
/* Waiting for IOC disable completion before declaring "stopped". */
1597 bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
1598 enum bna_ioceth_event event)
1601 case IOCETH_E_IOC_DISABLED:
1602 disable_mbox_intr(ioceth);
1603 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1606 case IOCETH_E_ENET_STOPPED:
1607 /* This event is received due to enet failing */
1612 bfa_sm_fault(event);
/* Entry action for "failed": report the failure to bnad. */
1617 bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
1619 bnad_cb_ioceth_failed(ioceth->bna->bnad);
/*
 * IOC failed.  IOC_RESET re-arms bring-up; a repeated IOC_FAILED while
 * already in this state is absorbed.
 */
1623 bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
1624 enum bna_ioceth_event event)
1627 case IOCETH_E_DISABLE:
1628 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1629 bfa_nw_ioc_disable(&ioceth->ioc);
1632 case IOCETH_E_IOC_RESET:
1633 enable_mbox_intr(ioceth);
/* NOTE(review): per ioc_ready_wait_entry's comment the IOC enable must
 * already be in effect before this transition — presumably the IOC
 * re-enables itself across a reset; confirm against bfa_nw_ioc code. */
1634 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
1637 case IOCETH_E_IOC_FAILED:
1641 bfa_sm_fault(event);
/* Build and post a BFI_ENET_H2I_GET_ATTR_REQ command on the FW msgq. */
1646 bna_bfi_attr_get(struct bna_ioceth *ioceth)
1648 struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;
1650 bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
1651 BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
1652 attr_req->mh.num_entries = htons(
1653 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
1654 bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
1655 sizeof(struct bfi_enet_attr_req), &attr_req->mh);
1656 bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
1659 /* IOC callback functions */
1662 bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1664 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1667 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1669 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1673 bna_cb_ioceth_disable(void *arg)
1675 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1677 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1681 bna_cb_ioceth_hbfail(void *arg)
1683 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1685 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1689 bna_cb_ioceth_reset(void *arg)
1691 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1693 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
/*
 * Callback table handed to bfa_nw_ioc_attach().  Positional initializers:
 * order must match the field order of struct bfa_ioc_cbfn.
 * NOTE(review): bna_cb_ioceth_reset is defined above and presumably
 * completes this table as the reset callback — the tail of the
 * initializer is not visible here; verify against bfa_ioc.h.
 */
1696 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
1697 bna_cb_ioceth_enable,
1698 bna_cb_ioceth_disable,
1699 bna_cb_ioceth_hbfail,
1703 static void bna_attr_init(struct bna_ioceth *ioceth)
1705 ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
1706 ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
1707 ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
1708 ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
1709 ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
1710 ioceth->attr.fw_query_complete = false;
/*
 * bna_ioceth_init - attach the IOC plus common modules (CEE, flash,
 * msgq), carving each one's memory out of the pre-allocated ATTR /
 * FWTRC / COM resource chunks, then park the FSM in "stopped".
 * kva/dma walk forward through the shared COM chunk, so the claim
 * order below must match the sizing order in bna_res_req().
 */
1714 bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
1715 struct bna_res_info *res_info)
1723 * Attach IOC and claim:
1724 * 1. DMA memory for IOC attributes
1725 * 2. Kernel memory for FW trace
1727 bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
1728 bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);
1731 &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
1732 kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
1733 bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
1735 kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1736 bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
1739 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1743 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1744 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1745 bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
1746 bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1747 kva += bfa_nw_cee_meminfo();
1748 dma += bfa_nw_cee_meminfo();
1750 bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
1751 bfa_nw_flash_memclaim(&bna->flash, kva, dma);
1752 kva += bfa_nw_flash_meminfo();
1753 dma += bfa_nw_flash_meminfo();
1755 bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
1756 bfa_msgq_memclaim(&bna->msgq, kva, dma);
1757 bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
1758 kva += bfa_msgq_meminfo();
1759 dma += bfa_msgq_meminfo();
1761 ioceth->stop_cbfn = NULL;
1762 ioceth->stop_cbarg = NULL;
1764 bna_attr_init(ioceth);
1766 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
/* Detach the IOC; counterpart of bna_ioceth_init(). */
1770 bna_ioceth_uninit(struct bna_ioceth *ioceth)
1772 bfa_nw_ioc_detach(&ioceth->ioc);
/*
 * bna_ioceth_enable - if already in the "ready" state just re-notify
 * bnad; if "stopped", kick off the enable sequence.  Calls made from
 * any intermediate state are ignored.
 */
1778 bna_ioceth_enable(struct bna_ioceth *ioceth)
1780 if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
1781 bnad_cb_ioceth_ready(ioceth->bna->bnad);
1785 if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
1786 bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
/*
 * bna_ioceth_disable - BNA_SOFT_CLEANUP only acknowledges bnad without
 * touching hardware; otherwise register the completion callback and
 * drive the FSM through the disable path.
 */
1790 bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
1792 if (type == BNA_SOFT_CLEANUP) {
1793 bnad_cb_ioceth_disabled(ioceth->bna->bnad);
1797 ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
1798 ioceth->stop_cbarg = ioceth->bna->bnad;
1800 bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
1804 bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1805 struct bna_res_info *res_info)
1809 ucam_mod->ucmac = (struct bna_mac *)
1810 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1812 INIT_LIST_HEAD(&ucam_mod->free_q);
1813 for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1814 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1815 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1818 /* A separate queue to allow synchronous setting of a list of MACs */
1819 INIT_LIST_HEAD(&ucam_mod->del_q);
1820 for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
1821 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1822 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
1825 ucam_mod->bna = bna;
1829 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1831 struct list_head *qe;
1835 list_for_each(qe, &ucam_mod->free_q)
1839 list_for_each(qe, &ucam_mod->del_q)
1842 ucam_mod->bna = NULL;
1846 bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1847 struct bna_res_info *res_info)
1851 mcam_mod->mcmac = (struct bna_mac *)
1852 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1854 INIT_LIST_HEAD(&mcam_mod->free_q);
1855 for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1856 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1857 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1860 mcam_mod->mchandle = (struct bna_mcam_handle *)
1861 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1863 INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1864 for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1865 bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1866 list_add_tail(&mcam_mod->mchandle[i].qe,
1867 &mcam_mod->free_handle_q);
1870 /* A separate queue to allow synchronous setting of a list of MACs */
1871 INIT_LIST_HEAD(&mcam_mod->del_q);
1872 for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
1873 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1874 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
1877 mcam_mod->bna = bna;
1881 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1883 struct list_head *qe;
1887 list_for_each(qe, &mcam_mod->free_q) i++;
1890 list_for_each(qe, &mcam_mod->del_q) i++;
1893 list_for_each(qe, &mcam_mod->free_handle_q) i++;
1895 mcam_mod->bna = NULL;
/*
 * Build and post a BFI_ENET_H2I_STATS_GET_REQ asking FW to DMA all
 * stats for every active Tx/Rx ID into the host stats buffer.  Sets
 * stats_get_busy, which stays set until the response is processed.
 */
1899 bna_bfi_stats_get(struct bna *bna)
1901 struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
1903 bna->stats_mod.stats_get_busy = true;
1905 bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
1906 BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
1907 stats_req->mh.num_entries = htons(
1908 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
1909 stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
1910 stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
1911 stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
1912 stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
1913 stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;
1915 bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
1916 sizeof(struct bfi_enet_stats_req), &stats_req->mh);
1917 bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
/*
 * bna_res_req - describe the fixed (attribute-independent) memory
 * resources the driver must allocate before bna_init(): the shared
 * COM chunk, IOC attributes, FW trace buffer and the stats DMA block.
 */
1921 bna_res_req(struct bna_res_info *res_info)
1923 /* DMA memory for COMMON_MODULE */
1924 res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
1925 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1926 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
1927 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
1928 (bfa_nw_cee_meminfo() +
1929 bfa_nw_flash_meminfo() +
1930 bfa_msgq_meminfo()), PAGE_SIZE);
1932 /* DMA memory for retrieving IOC attributes */
1933 res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
1934 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1935 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
1936 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
1937 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
1939 /* Virtual memory for retrieving fw_trc */
1940 res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
1941 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1942 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
1943 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
1945 /* DMA memory for retrieving stats */
1946 res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
1947 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1948 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
1949 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
1950 ALIGN(sizeof(struct bfi_enet_stats),
/*
 * bna_mod_res_req - describe the per-module (attribute-dependent)
 * memory resources; sizes are derived from bna->ioceth.attr, so this
 * must run after the ioceth attributes are populated.  Note the ucmac
 * and mcmac arrays are sized 2x: half for the free pool, half for the
 * deletion (del_q) pool.
 */
1955 bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
1957 struct bna_attr *attr = &bna->ioceth.attr;
1959 /* Virtual memory for Tx objects - stored by Tx module */
1960 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
1961 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
1963 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
1964 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
1965 attr->num_txq * sizeof(struct bna_tx);
1967 /* Virtual memory for TxQ - stored by Tx module */
1968 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
1969 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
1971 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
1972 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
1973 attr->num_txq * sizeof(struct bna_txq);
1975 /* Virtual memory for Rx objects - stored by Rx module */
1976 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
1977 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
1979 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
1980 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
1981 attr->num_rxp * sizeof(struct bna_rx);
1983 /* Virtual memory for RxPath - stored by Rx module */
1984 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
1985 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
1987 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
1988 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
1989 attr->num_rxp * sizeof(struct bna_rxp);
1991 /* Virtual memory for RxQ - stored by Rx module */
1992 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
1993 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
1995 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
1996 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
1997 (attr->num_rxp * 2) * sizeof(struct bna_rxq);
1999 /* Virtual memory for Unicast MAC address - stored by ucam module */
2000 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
2001 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
2003 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
2004 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
2005 (attr->num_ucmac * 2) * sizeof(struct bna_mac);
2007 /* Virtual memory for Multicast MAC address - stored by mcam module */
2008 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
2009 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
2011 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
2012 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
2013 (attr->num_mcmac * 2) * sizeof(struct bna_mac);
2015 /* Virtual memory for Multicast handle - stored by mcam module */
2016 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
2017 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
2019 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
2020 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
2021 attr->num_mcmac * sizeof(struct bna_mcam_handle);
/*
 * bna_init - first-stage init: record the PCI device, wire the HW
 * stats DMA buffer, set up the register map, then initialize the
 * ioceth (which also brings up diag/cee/sfp/phy_port/msgq), enet and
 * ethport sub-objects.  Module init happens later in bna_mod_init().
 */
2025 bna_init(struct bna *bna, struct bnad *bnad,
2026 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
2029 bna->pcidev = *pcidev;
2031 bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
2032 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
2033 bna->stats.hw_stats_dma.msb =
2034 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
2035 bna->stats.hw_stats_dma.lsb =
2036 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
2038 bna_reg_addr_init(bna, &bna->pcidev);
2040 /* Also initializes diag, cee, sfp, phy_port, msgq */
2041 bna_ioceth_init(&bna->ioceth, bna, res_info);
2043 bna_enet_init(&bna->enet, bna);
2044 bna_ethport_init(&bna->ethport, bna);
/*
 * bna_mod_init - second-stage init: bring up the Tx/Rx/ucam/mcam
 * modules (their pool sizes come from bna->ioceth.attr) and mark
 * module init done so bna_uninit() knows to tear them down.
 */
2048 bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
2050 bna_tx_mod_init(&bna->tx_mod, bna, res_info);
2052 bna_rx_mod_init(&bna->rx_mod, bna, res_info);
2054 bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
2056 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
2058 bna->default_mode_rid = BFI_INVALID_RID;
2059 bna->promisc_rid = BFI_INVALID_RID;
2061 bna->mod_flags |= BNA_MOD_F_INIT_DONE;
/*
 * bna_uninit - tear everything down in reverse order of
 * bna_init()/bna_mod_init(); modules are only uninitialized if
 * bna_mod_init() actually ran (BNA_MOD_F_INIT_DONE).
 */
2065 bna_uninit(struct bna *bna)
2067 if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
2068 bna_mcam_mod_uninit(&bna->mcam_mod);
2069 bna_ucam_mod_uninit(&bna->ucam_mod);
2070 bna_rx_mod_uninit(&bna->rx_mod);
2071 bna_tx_mod_uninit(&bna->tx_mod);
2072 bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
2075 bna_stats_mod_uninit(&bna->stats_mod);
2076 bna_ethport_uninit(&bna->ethport);
2077 bna_enet_uninit(&bna->enet);
2079 bna_ioceth_uninit(&bna->ioceth);
/*
 * bna_num_txq_set - shrink the advertised TxQ count.  Succeeds only
 * after the FW attribute query completed and when @num_txq does not
 * exceed what FW reported.  NOTE(review): the failure return path is
 * not visible in this view — presumably returns BNA_CB_FAIL; verify.
 */
2085 bna_num_txq_set(struct bna *bna, int num_txq)
2087 if (bna->ioceth.attr.fw_query_complete &&
2088 (num_txq <= bna->ioceth.attr.num_txq)) {
2089 bna->ioceth.attr.num_txq = num_txq;
2090 return BNA_CB_SUCCESS;
/*
 * bna_num_rxp_set - shrink the advertised RxPath count; same contract
 * as bna_num_txq_set().  NOTE(review): the failure return path is not
 * visible in this view — presumably returns BNA_CB_FAIL; verify.
 */
2097 bna_num_rxp_set(struct bna *bna, int num_rxp)
2099 if (bna->ioceth.attr.fw_query_complete &&
2100 (num_rxp <= bna->ioceth.attr.num_rxp)) {
2101 bna->ioceth.attr.num_rxp = num_rxp;
2102 return BNA_CB_SUCCESS;
2109 bna_cam_mod_mac_get(struct list_head *head)
2111 struct list_head *qe;
2113 if (list_empty(head))
2116 bfa_q_deq(head, &qe);
2117 return (struct bna_mac *)qe;
2121 bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
2123 list_add_tail(&mac->qe, tail);
2126 struct bna_mcam_handle *
2127 bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2129 struct list_head *qe;
2131 if (list_empty(&mcam_mod->free_handle_q))
2134 bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2136 return (struct bna_mcam_handle *)qe;
2140 bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
2141 struct bna_mcam_handle *handle)
2143 list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
2147 bna_hw_stats_get(struct bna *bna)
2149 if (!bna->stats_mod.ioc_ready) {
2150 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2153 if (bna->stats_mod.stats_get_busy) {
2154 bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2158 bna_bfi_stats_get(bna);