bna: remove unused cbfn parameter
[cascardo/linux.git] / drivers / net / ethernet / brocade / bna / bna_enet.c
1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include "bna.h"
20
21 static inline int
22 ethport_can_be_up(struct bna_ethport *ethport)
23 {
24         int ready = 0;
25         if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
26                 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
27                          (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
28                          (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
29         else
30                 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
31                          (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
32                          !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
33         return ready;
34 }
35
36 #define ethport_is_up ethport_can_be_up
37
/* Events driving the ethport state machine (see bna_ethport_sm_*) */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};
48
/* Events driving the enet state machine */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};
58
/* Events driving the ioceth state machine */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
69
/*
 * Copy one section of the big-endian HW stats DMA block into the host-order
 * SW mirror.  Expands in bna_bfi_stats_get_rsp() and relies on the locals
 * "bna", "count", "i", "stats_src" and "stats_dst" being in scope there.
 *
 * Fix: dropped the stray line-continuation backslash that followed
 * "} while (0)" — it silently spliced the next source line into the macro,
 * which would break the build if that line ever became non-blank.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
79 /*
80  * FW response handlers
81  */
82
/* FW PORT_ENABLE async event: latch the flag, bring the port up if ready. */
static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}
92
/* FW PORT_DISABLE async event: clear the flag, take the port down. */
static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	/* Sample the up state before clearing PORT_ENABLED: in loopback
	 * mode clearing the flag would otherwise make the port look up.
	 */
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}
104
/* FW response to an admin up/down request.  The request that was posted is
 * still cached in bfi_enet_cmd.admin_req; its "enable" field tells us which
 * direction this response answers.
 */
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Up request failed: forget ADMIN state via flag and
			 * let the FSM fall back to the down state.
			 */
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		/* Down responses always succeed; report link down to bnad. */
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}
131
/* FW response to a diagnostic-loopback up/down request; mirrors the admin
 * response handler but clears ADMIN_UP (not PORT_ENABLED) on failure and
 * does not report link status.
 */
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
156
/* FW acknowledged a pause configuration; just advance the enet FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
162
/* FW response carrying the HW resource attributes (queue counts etc.). */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* max_cfg bounds both TxQs and RxPs */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
185
/* FW response to a stats-get request: convert the big-endian HW stats DMA
 * block into the host-order SW mirror, then complete the pending request.
 * The locals below are consumed implicitly by the bna_stats_copy() macro.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	/* The HW area packs rxf/txf stats densely for the functions that
	 * were requested (per the masks); scatter them into their per-index
	 * SW slots, zeroing slots that were not requested.
	 */
	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)BIT(i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)BIT(i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
239
240 static void
241 bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
242                         struct bfi_msgq_mhdr *msghdr)
243 {
244         ethport->link_status = BNA_LINK_UP;
245
246         /* Dispatch events */
247         ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
248 }
249
/* FW LINK_DOWN async event: record the state and notify the driver layer. */
static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}
259
/* Hardware error interrupt: clear a halt condition if present, then hand
 * the error to the IOC layer.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
268
269 void
270 bna_mbox_handler(struct bna *bna, u32 intr_status)
271 {
272         if (BNA_IS_ERR_INTR(bna, intr_status)) {
273                 bna_err_handler(bna, intr_status);
274                 return;
275         }
276         if (BNA_IS_MBOX_INTR(bna, intr_status))
277                 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
278 }
279
/* Message-queue response dispatcher: routes every firmware response and
 * async event to the object (rx, tx, ethport, enet, ioceth, stats) that
 * owns it.  Rx/Tx objects are looked up by the enet_id carried in the
 * message header; a NULL lookup result means the object is already gone
 * and the response is dropped.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* All rxf configuration responses share one handler */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	/* Singleton objects: ethport, enet, ioceth, stats */
	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	/* Async events from firmware */
	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
389
390 /* ETHPORT */
391
/* Invoke and clear the one-shot stop-done callback.  The callback is
 * cleared before it runs so the callee may safely re-arm it.
 */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)
401
/* Invoke and clear the one-shot admin-up completion callback, reporting
 * the given bna_cb_status to the bnad layer.
 */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
411
/* Post a PORT_ADMIN request with enable=ENABLED to bring the port up. */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
428
/* Post a PORT_ADMIN request with enable=DISABLED to take the port down.
 * The same ADMIN_UP_REQ opcode serves both directions; the "enable" field
 * selects the direction (the response handler inspects the cached request).
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
445
/* Post a DIAG_LOOPBACK request to enable loopback on the port. */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	/* NOTE(review): LOOPBACK_INTERNAL maps to OPMODE_EXT and everything
	 * else to OPMODE_CBL — the naming looks inverted; confirm against
	 * the firmware interface definitions before changing.
	 */
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
466
/* Post a DIAG_LOOPBACK request to disable loopback on the port. */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
483
/* Bring the port up via the path matching the enet type (admin/loopback). */
static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}
492
/* Take the port down via the path matching the enet type (admin/loopback). */
static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}
501
/* Ethport FSM states: stopped -> down -> up_resp_wait -> up, with
 * down_resp_wait and last_resp_wait covering in-flight FW requests.
 */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
514
/* Entering stopped: complete any pending stop callback. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
520
/* stopped: waiting for a start; stop/fail/down are tolerated no-ops. */
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
547
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
	/* No entry action; wait here for ETHPORT_E_UP. */
}
552
553 static void
554 bna_ethport_sm_down(struct bna_ethport *ethport,
555                         enum bna_ethport_event event)
556 {
557         switch (event) {
558         case ETHPORT_E_STOP:
559                 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
560                 break;
561
562         case ETHPORT_E_FAIL:
563                 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
564                 break;
565
566         case ETHPORT_E_UP:
567                 bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
568                 bna_bfi_ethport_up(ethport);
569                 break;
570
571         default:
572                 bfa_sm_fault(event);
573         }
574 }
575
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
	/* No entry action; the up request was posted before entering. */
}
580
/* up_resp_wait: an up request is in flight to FW; resolve the pending
 * admin-up callback according to how the wait ends.
 */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
619
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}
629
/* down_resp_wait: a down is wanted while an earlier FW request is still
 * outstanding; the down command is issued once the pending response lands.
 */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
661
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
	/* No entry action; port is fully up. */
}
666
/* up: port is operational; stop/down both require a FW down command. */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
690
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
	/* No entry action; waiting for the final FW response before stop. */
}
695
/* last_resp_wait: a stop was requested with a FW request outstanding; the
 * FSM parks in stopped once the final response (or a failure) arrives.
 */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
727
/* One-time ethport setup: seed flags, link state, callbacks; start in the
 * stopped FSM state.
 */
static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}
744
745 static void
746 bna_ethport_uninit(struct bna_ethport *ethport)
747 {
748         ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
749         ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
750
751         ethport->bna = NULL;
752 }
753
/* Kick the ethport FSM out of the stopped state. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
759
/* Stop-done callback installed by bna_ethport_stop(): release one count
 * on the enet child-stop wait counter.
 */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
765
/* Ask the ethport FSM to stop; completion is reported via stop_cbfn. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
772
/* IOC failure path: reset port state, report link down if needed, and
 * drive the FSM to stopped.
 */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
785
/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	/* First Rx to start sets RX_STARTED and may bring the port up. */
	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}
799
/* Rx object stopped notification; the last Rx to stop clears RX_STARTED
 * and takes the port down if it was up.
 */
void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	/* Sample the up state before the flag changes below. */
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}
814
815 /* ENET */
816
/* Start all enet children (ethport, tx_mod, rx_mod), mapping the enet type
 * to the matching tx/rx regular-vs-loopback type.
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
829
/* Stop all enet children, tracking completion with chld_stop_wc: one
 * wc_up per child before its stop call, then wc_wait arms the counter so
 * bna_enet_cb_chld_stopped fires after every child reports stopped.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
847
848 #define bna_enet_chld_fail(enet)                                        \
849 do {                                                                    \
850         bna_ethport_fail(&(enet)->bna->ethport);                        \
851         bna_tx_mod_fail(&(enet)->bna->tx_mod);                          \
852         bna_rx_mod_fail(&(enet)->bna->rx_mod);                          \
853 } while (0)
854
855 #define bna_enet_rx_start(enet)                                         \
856 do {                                                                    \
857         enum bna_rx_type rx_type =                                      \
858                 ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
859                 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
860         bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);                \
861 } while (0)
862
863 #define bna_enet_rx_stop(enet)                                          \
864 do {                                                                    \
865         enum bna_rx_type rx_type =                                      \
866                 ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
867                 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
868         bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
869         bfa_wc_up(&(enet)->chld_stop_wc);                               \
870         bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);                 \
871         bfa_wc_wait(&(enet)->chld_stop_wc);                             \
872 } while (0)
873
874 #define call_enet_stop_cbfn(enet)                                       \
875 do {                                                                    \
876         if ((enet)->stop_cbfn) {                                        \
877                 void (*cbfn)(void *);                                   \
878                 void *cbarg;                                            \
879                 cbfn = (enet)->stop_cbfn;                               \
880                 cbarg = (enet)->stop_cbarg;                             \
881                 (enet)->stop_cbfn = NULL;                               \
882                 (enet)->stop_cbarg = NULL;                              \
883                 cbfn(cbarg);                                            \
884         }                                                               \
885 } while (0)
886
887 #define call_enet_pause_cbfn(enet)                                      \
888 do {                                                                    \
889         if ((enet)->pause_cbfn) {                                       \
890                 void (*cbfn)(struct bnad *);                            \
891                 cbfn = (enet)->pause_cbfn;                              \
892                 (enet)->pause_cbfn = NULL;                              \
893                 cbfn((enet)->bna->bnad);                                \
894         }                                                               \
895 } while (0)
896
897 #define call_enet_mtu_cbfn(enet)                                        \
898 do {                                                                    \
899         if ((enet)->mtu_cbfn) {                                         \
900                 void (*cbfn)(struct bnad *);                            \
901                 cbfn = (enet)->mtu_cbfn;                                \
902                 (enet)->mtu_cbfn = NULL;                                \
903                 cbfn((enet)->bna->bnad);                                \
904         }                                                               \
905 } while (0)
906
/* Forward declarations */
static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

/* enet state machine: per-state entry functions and event handlers */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
924
/* stopped: enet is down; flush any callbacks armed while going down. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		/* Already stopped; just complete the caller's callback. */
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		/* Nothing to send to FW while stopped; complete immediately. */
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		/* Nothing to reconfigure while stopped; complete immediately. */
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
969
/*
 * pause_init_wait: initial pause configuration has been sent to FW;
 * waiting for ENET_E_FWRESP_PAUSE before starting the children.
 */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		/* A request is outstanding; remember to resend afterwards. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			/* Config changed while in flight; send it again. */
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1013
/*
 * last_resp_wait: a stop arrived while the initial pause request was
 * outstanding; wait for the FW response (or an IOC failure) and then stop.
 */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1034
/* started: fully operational; ethport/Tx/Rx children are running. */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		/* Pause changes go straight to FW. */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU changes require the Rx path to be stopped first. */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1074
/*
 * cfg_wait: a pause and/or MTU reconfiguration is in flight.  Pause
 * changes complete on ENET_E_FWRESP_PAUSE; MTU changes complete on
 * ENET_E_CHLD_STOPPED (Rx stopped) followed by an Rx restart.  Changes
 * arriving meanwhile are latched in flags and replayed afterwards.
 */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		/* Latch; replayed when the current operation completes. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* Latch; replayed when the current operation completes. */
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1125
/*
 * cfg_stop_wait: a stop arrived while a config operation was in flight;
 * discard pending config changes and wait for the operation to finish
 * before stopping the children.
 */
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1152
/* chld_stop_wait: stopping; waiting for ethport/Tx/Rx to finish. */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1177
/*
 * Build a BFI "set pause" request from the current pause configuration
 * and post it to the FW message queue.  The enet FSM then waits for the
 * completion, delivered as ENET_E_FWRESP_PAUSE.
 */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1194
1195 static void
1196 bna_enet_cb_chld_stopped(void *arg)
1197 {
1198         struct bna_enet *enet = (struct bna_enet *)arg;
1199
1200         bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1201 }
1202
1203 static void
1204 bna_enet_init(struct bna_enet *enet, struct bna *bna)
1205 {
1206         enet->bna = bna;
1207         enet->flags = 0;
1208         enet->mtu = 0;
1209         enet->type = BNA_ENET_T_REGULAR;
1210
1211         enet->stop_cbfn = NULL;
1212         enet->stop_cbarg = NULL;
1213
1214         enet->pause_cbfn = NULL;
1215
1216         enet->mtu_cbfn = NULL;
1217
1218         bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1219 }
1220
1221 static void
1222 bna_enet_uninit(struct bna_enet *enet)
1223 {
1224         enet->flags = 0;
1225
1226         enet->bna = NULL;
1227 }
1228
1229 static void
1230 bna_enet_start(struct bna_enet *enet)
1231 {
1232         enet->flags |= BNA_ENET_F_IOCETH_READY;
1233         if (enet->flags & BNA_ENET_F_ENABLED)
1234                 bfa_fsm_send_event(enet, ENET_E_START);
1235 }
1236
1237 static void
1238 bna_ioceth_cb_enet_stopped(void *arg)
1239 {
1240         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1241
1242         bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1243 }
1244
1245 static void
1246 bna_enet_stop(struct bna_enet *enet)
1247 {
1248         enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
1249         enet->stop_cbarg = &enet->bna->ioceth;
1250
1251         enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1252         bfa_fsm_send_event(enet, ENET_E_STOP);
1253 }
1254
/* IOC failed: clear the ready flag and fail the enet state machine. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1261
/* Tx-side stop completed: drop one count off the child-stop wait counter. */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

/* Rx-side stop completed: drop one count off the child-stop wait counter. */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1273
/* Return the currently configured MTU. */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
1279
1280 void
1281 bna_enet_enable(struct bna_enet *enet)
1282 {
1283         if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
1284                 return;
1285
1286         enet->flags |= BNA_ENET_F_ENABLED;
1287
1288         if (enet->flags & BNA_ENET_F_IOCETH_READY)
1289                 bfa_fsm_send_event(enet, ENET_E_START);
1290 }
1291
1292 void
1293 bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
1294                  void (*cbfn)(void *))
1295 {
1296         if (type == BNA_SOFT_CLEANUP) {
1297                 (*cbfn)(enet->bna->bnad);
1298                 return;
1299         }
1300
1301         enet->stop_cbfn = cbfn;
1302         enet->stop_cbarg = enet->bna->bnad;
1303
1304         enet->flags &= ~BNA_ENET_F_ENABLED;
1305
1306         bfa_fsm_send_event(enet, ENET_E_STOP);
1307 }
1308
/* Record a new pause configuration and let the FSM push it to FW. */
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config)
{
	enet->pause_config = *pause_config;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

/*
 * Record a new MTU and let the FSM apply it (Rx stop/restart).  cbfn is
 * a one-shot completion callback, invoked via call_enet_mtu_cbfn() once
 * the reconfiguration finishes.
 */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
1328
/* Fetch the adapter's permanent MAC address from the IOC into @mac. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
	bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}
1334
1335 /* IOCETH */
1336
/*
 * Enable mailbox interrupts.  intr_status itself is deliberately unused;
 * bna_intr_status_get() is called for its side effect
 * (NOTE(review): presumably acking/clearing stale status - confirm).
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupts (reverse order of enable_mbox_intr()). */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/*
 * Invoke the one-shot ioceth stop callback, if armed.  Fields are
 * cleared before the call so the callback may safely re-arm them.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Stats module has no teardown work. */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

/* Mark the stats module as able to talk to the IOC. */
#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

/* Mark the stats module as unable to talk to the IOC. */
#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

/*
 * IOC failure: clear readiness and the busy flags so requests that were
 * in flight do not block future stats operations.
 */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
1384
/* Forward declaration */
static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

/* ioceth state machine: per-state entry functions and event handlers */
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
1403
/* stopped: IOC is down; complete any pending stop callback. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Re-enter stopped (runs the entry and its callback). */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1437
/* ioc_ready_wait: IOC enable issued; waiting for it to come up. */
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1474
/* enet_attr_wait: IOC is up; querying enet attributes from FW. */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		/* Must wait for the outstanding attr response first. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1503
/* ready: attributes received; start enet and stats, notify the driver. */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1531
/*
 * last_resp_wait: disable requested while the attr query was outstanding;
 * wait for the response (or an IOC failure) before disabling the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1557
/* enet_stop_wait: disabling; stop stats and enet before the IOC. */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1587
/* ioc_disable_wait: waiting for the IOC disable to complete. */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1612
/* failed: IOC has failed; await a reset (retry) or an admin disable. */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		/* IOC is coming back; retry from ioc_ready_wait. */
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Already failed; nothing more to do. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1641
/*
 * Post a BFI "get attributes" request to the FW message queue.  The
 * ioceth FSM then waits for the completion, delivered as
 * IOCETH_E_ENET_ATTR_RESP.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1655
1656 /* IOC callback functions */
1657
1658 static void
1659 bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1660 {
1661         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1662
1663         if (error)
1664                 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1665         else
1666                 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1667 }
1668
1669 static void
1670 bna_cb_ioceth_disable(void *arg)
1671 {
1672         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1673
1674         bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1675 }
1676
1677 static void
1678 bna_cb_ioceth_hbfail(void *arg)
1679 {
1680         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1681
1682         bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1683 }
1684
1685 static void
1686 bna_cb_ioceth_reset(void *arg)
1687 {
1688         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1689
1690         bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1691 }
1692
1693 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
1694         bna_cb_ioceth_enable,
1695         bna_cb_ioceth_disable,
1696         bna_cb_ioceth_hbfail,
1697         bna_cb_ioceth_reset
1698 };
1699
/* Seed the attribute block with defaults until the FW query completes. */
static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}
1709
/*
 * One-time setup of the ioceth: attach the IOC, then claim DMA/kernel
 * memory for the IOC, CEE, flash and message-queue sub-modules, and
 * finish in the stopped state.
 * NOTE: kva/dma are advanced past each sub-module's claim area in turn,
 * so the claim sequence must match the meminfo-based memory layout.
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	/* Route FW enet responses to bna_msgq_rsp_handler(). */
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1765
/* Detach the IOC and drop the back-pointer to the parent device.
 * Counterpart of bna_ioceth_init(); the ioceth FSM is assumed to be
 * stopped by the time this runs -- TODO confirm with callers.
 */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1773
1774 void
1775 bna_ioceth_enable(struct bna_ioceth *ioceth)
1776 {
1777         if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
1778                 bnad_cb_ioceth_ready(ioceth->bna->bnad);
1779                 return;
1780         }
1781
1782         if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
1783                 bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
1784 }
1785
1786 void
1787 bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
1788 {
1789         if (type == BNA_SOFT_CLEANUP) {
1790                 bnad_cb_ioceth_disabled(ioceth->bna->bnad);
1791                 return;
1792         }
1793
1794         ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
1795         ioceth->stop_cbarg = ioceth->bna->bnad;
1796
1797         bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
1798 }
1799
1800 static void
1801 bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1802                   struct bna_res_info *res_info)
1803 {
1804         int i;
1805
1806         ucam_mod->ucmac = (struct bna_mac *)
1807         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1808
1809         INIT_LIST_HEAD(&ucam_mod->free_q);
1810         for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1811                 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1812                 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1813         }
1814
1815         /* A separate queue to allow synchronous setting of a list of MACs */
1816         INIT_LIST_HEAD(&ucam_mod->del_q);
1817         for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
1818                 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1819                 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
1820         }
1821
1822         ucam_mod->bna = bna;
1823 }
1824
1825 static void
1826 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1827 {
1828         struct list_head *qe;
1829         int i;
1830
1831         i = 0;
1832         list_for_each(qe, &ucam_mod->free_q)
1833                 i++;
1834
1835         i = 0;
1836         list_for_each(qe, &ucam_mod->del_q)
1837                 i++;
1838
1839         ucam_mod->bna = NULL;
1840 }
1841
1842 static void
1843 bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1844                   struct bna_res_info *res_info)
1845 {
1846         int i;
1847
1848         mcam_mod->mcmac = (struct bna_mac *)
1849         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1850
1851         INIT_LIST_HEAD(&mcam_mod->free_q);
1852         for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1853                 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1854                 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1855         }
1856
1857         mcam_mod->mchandle = (struct bna_mcam_handle *)
1858         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1859
1860         INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1861         for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1862                 bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1863                 list_add_tail(&mcam_mod->mchandle[i].qe,
1864                                 &mcam_mod->free_handle_q);
1865         }
1866
1867         /* A separate queue to allow synchronous setting of a list of MACs */
1868         INIT_LIST_HEAD(&mcam_mod->del_q);
1869         for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
1870                 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1871                 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
1872         }
1873
1874         mcam_mod->bna = bna;
1875 }
1876
1877 static void
1878 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1879 {
1880         struct list_head *qe;
1881         int i;
1882
1883         i = 0;
1884         list_for_each(qe, &mcam_mod->free_q) i++;
1885
1886         i = 0;
1887         list_for_each(qe, &mcam_mod->del_q) i++;
1888
1889         i = 0;
1890         list_for_each(qe, &mcam_mod->free_handle_q) i++;
1891
1892         mcam_mod->bna = NULL;
1893 }
1894
/*
 * Post a stats-get request to the firmware.
 * @bna: device whose hardware stats are requested
 *
 * Marks the stats module busy, builds a BFI_ENET_H2I_STATS_GET_REQ
 * asking for all stats (BFI_ENET_STATS_ALL) on the currently active
 * Tx/Rx rid masks, points the firmware at the pre-allocated DMA stats
 * buffer, and posts the command on the message queue.  The busy flag
 * is presumably cleared by the response handler -- not visible in this
 * chunk, TODO confirm.
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	/* firmware DMAs the stats into the buffer claimed in bna_init() */
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1916
/*
 * Fill in the device-level resource requirements.
 * @res_info: resource table to populate
 *
 * Describes the memory that bna_init()/bna_ioceth_init() later claim:
 * the shared common-module DMA block (CEE + flash + msgq, summed in the
 * same order they are carved up in bna_ioceth_init()), the IOC
 * attribute DMA block, the firmware trace KVA buffer, and the hardware
 * stats DMA buffer.
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
1950
/*
 * Fill in the per-module resource requirements.
 * @bna:      device whose ioceth attributes size the requests
 * @res_info: resource table to populate
 *
 * All requests here are kernel virtual memory for the object arrays
 * owned by the Tx/Rx/ucam/mcam modules, sized from bna->ioceth.attr.
 * Note the doubled counts: two RxQs per RxPath, and twice num_ucmac /
 * num_mcmac MAC entries so the CAM modules can split their arrays into
 * a free list and a deletion list (see bna_ucam_mod_init() /
 * bna_mcam_mod_init()).
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
2020
/*
 * Initialize the bna device object.
 * @bna:      device to initialize
 * @bnad:     owning driver instance
 * @pcidev:   PCI device info, copied by value into @bna
 * @res_info: resource table populated per bna_res_req()
 *
 * Claims the hardware-stats DMA buffer, sets up register addresses,
 * then brings up ioceth (which also attaches cee/flash/msgq), enet and
 * ethport.  Per-module state is initialized separately by
 * bna_mod_init() once firmware attributes are known.
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	/* stats buffer claimed here; its DMA address is handed to the
	 * firmware in bna_bfi_stats_get()
	 */
	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
2043
/*
 * Initialize the Tx/Rx/ucam/mcam modules from @res_info and mark the
 * device fully initialized (BNA_MOD_F_INIT_DONE), which gates the
 * module teardown in bna_uninit().  Runs after bna_init(), once the
 * ioceth attributes that size the module arrays are valid.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	/* no default-mode / promiscuous function selected yet */
	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2060
/*
 * Tear down the bna device object; counterpart of bna_init() and
 * bna_mod_init().  The per-module uninit only runs if bna_mod_init()
 * completed (BNA_MOD_F_INIT_DONE).  Teardown proceeds in reverse of
 * the init order, ending with ioceth.
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2080
2081 int
2082 bna_num_txq_set(struct bna *bna, int num_txq)
2083 {
2084         if (bna->ioceth.attr.fw_query_complete &&
2085                 (num_txq <= bna->ioceth.attr.num_txq)) {
2086                 bna->ioceth.attr.num_txq = num_txq;
2087                 return BNA_CB_SUCCESS;
2088         }
2089
2090         return BNA_CB_FAIL;
2091 }
2092
2093 int
2094 bna_num_rxp_set(struct bna *bna, int num_rxp)
2095 {
2096         if (bna->ioceth.attr.fw_query_complete &&
2097                 (num_rxp <= bna->ioceth.attr.num_rxp)) {
2098                 bna->ioceth.attr.num_rxp = num_rxp;
2099                 return BNA_CB_SUCCESS;
2100         }
2101
2102         return BNA_CB_FAIL;
2103 }
2104
2105 struct bna_mac *
2106 bna_cam_mod_mac_get(struct list_head *head)
2107 {
2108         struct list_head *qe;
2109
2110         if (list_empty(head))
2111                 return NULL;
2112
2113         bfa_q_deq(head, &qe);
2114         return (struct bna_mac *)qe;
2115 }
2116
/* Return a MAC entry to the given CAM list (appended at the tail). */
void
bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, tail);
}
2122
2123 struct bna_mcam_handle *
2124 bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2125 {
2126         struct list_head *qe;
2127
2128         if (list_empty(&mcam_mod->free_handle_q))
2129                 return NULL;
2130
2131         bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2132
2133         return (struct bna_mcam_handle *)qe;
2134 }
2135
/* Return a multicast handle to the free-handle list (tail append). */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2142
/*
 * Request hardware statistics from the firmware.
 * @bna: device whose stats are requested
 *
 * Fails immediately (BNA_CB_FAIL) if the IOC is not ready, and reports
 * BNA_CB_BUSY if a previous request is still outstanding; in either
 * case the bnad callback fires synchronously with the stale stats
 * buffer.  Note the check order is significant: when the IOC is down
 * the caller gets FAIL even if the busy flag is also set.  Otherwise
 * the request is posted asynchronously via bna_bfi_stats_get().
 */
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}