bna: remove oper_state_cbfn from struct bna_rxf
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12   */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include "bna.h"
20 #include "bfi.h"
21
22 /* IB */
23 static void
24 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
25 {
26         ib->coalescing_timeo = coalescing_timeo;
27         ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
28                                 (u32)ib->coalescing_timeo, 0);
29 }
30
31 /* RXF */
32
33 #define bna_rxf_vlan_cfg_soft_reset(rxf)                                \
34 do {                                                                    \
35         (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;           \
36         (rxf)->vlan_strip_pending = true;                               \
37 } while (0)
38
39 #define bna_rxf_rss_cfg_soft_reset(rxf)                                 \
40 do {                                                                    \
41         if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)                  \
42                 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |           \
43                                 BNA_RSS_F_CFG_PENDING |                 \
44                                 BNA_RSS_F_STATUS_PENDING);              \
45 } while (0)
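/*
 * The soft-reset macros above only touch the driver's cached state: they mark
 * every VLAN block and every RSS sub-config as pending so that
 * bna_rxf_cfg_apply() replays the full VLAN filter and RSS setup to firmware
 * the next time the RxF is started, without posting any commands here.
 */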
46
47 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
48 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
49 static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
50 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
54 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
55                                         enum bna_cleanup_type cleanup);
56 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
57                                         enum bna_cleanup_type cleanup);
58 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
59                                         enum bna_cleanup_type cleanup);
60
61 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
62                         enum bna_rxf_event);
63 bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
64                         enum bna_rxf_event);
65 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
66                         enum bna_rxf_event);
67 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
68                         enum bna_rxf_event);
69 bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
70                         enum bna_rxf_event);
71 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
72                         enum bna_rxf_event);
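/*
 * A sketch of what each bfa_fsm_state_decl() above expands to (assuming the
 * usual definition in bfa_cs.h):
 *
 *     static void bna_rxf_sm_<state>_entry(struct bna_rxf *rxf);
 *     static void bna_rxf_sm_<state>(struct bna_rxf *rxf,
 *                                    enum bna_rxf_event event);
 *
 * i.e. a state-entry hook plus an event handler; bfa_fsm_set_state() installs
 * the handler as the current state and invokes the entry hook.
 */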
73
74 static void
75 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
76 {
77         call_rxf_stop_cbfn(rxf);
78 }
79
80 static void
81 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
82 {
83         switch (event) {
84         case RXF_E_START:
85                 if (rxf->flags & BNA_RXF_F_PAUSED) {
86                         bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
87                         call_rxf_start_cbfn(rxf);
88                 } else
89                         bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
90                 break;
91
92         case RXF_E_STOP:
93                 call_rxf_stop_cbfn(rxf);
94                 break;
95
96         case RXF_E_FAIL:
97                 /* No-op */
98                 break;
99
100         case RXF_E_CONFIG:
101                 call_rxf_cam_fltr_cbfn(rxf);
102                 break;
103
104         case RXF_E_PAUSE:
105                 rxf->flags |= BNA_RXF_F_PAUSED;
106                 break;
107
108         case RXF_E_RESUME:
109                 rxf->flags &= ~BNA_RXF_F_PAUSED;
110                 break;
111
112         default:
113                 bfa_sm_fault(event);
114         }
115 }
116
117 static void
118 bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
119 {
120 }
121
122 static void
123 bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
124 {
125         switch (event) {
126         case RXF_E_STOP:
127         case RXF_E_FAIL:
128                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
129                 break;
130
131         case RXF_E_CONFIG:
132                 call_rxf_cam_fltr_cbfn(rxf);
133                 break;
134
135         case RXF_E_RESUME:
136                 rxf->flags &= ~BNA_RXF_F_PAUSED;
137                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
138                 break;
139
140         default:
141                 bfa_sm_fault(event);
142         }
143 }
144
145 static void
146 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
147 {
148         if (!bna_rxf_cfg_apply(rxf)) {
149                 /* No more pending config updates */
150                 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
151         }
152 }
153
154 static void
155 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
156 {
157         switch (event) {
158         case RXF_E_STOP:
159                 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
160                 break;
161
162         case RXF_E_FAIL:
163                 bna_rxf_cfg_reset(rxf);
164                 call_rxf_start_cbfn(rxf);
165                 call_rxf_cam_fltr_cbfn(rxf);
166                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
167                 break;
168
169         case RXF_E_CONFIG:
170                 /* No-op */
171                 break;
172
173         case RXF_E_PAUSE:
174                 rxf->flags |= BNA_RXF_F_PAUSED;
175                 call_rxf_start_cbfn(rxf);
176                 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
177                 break;
178
179         case RXF_E_FW_RESP:
180                 if (!bna_rxf_cfg_apply(rxf)) {
181                         /* No more pending config updates */
182                         bfa_fsm_set_state(rxf, bna_rxf_sm_started);
183                 }
184                 break;
185
186         default:
187                 bfa_sm_fault(event);
188         }
189 }
190
191 static void
192 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
193 {
194         call_rxf_start_cbfn(rxf);
195         call_rxf_cam_fltr_cbfn(rxf);
196 }
197
198 static void
199 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
200 {
201         switch (event) {
202         case RXF_E_STOP:
203         case RXF_E_FAIL:
204                 bna_rxf_cfg_reset(rxf);
205                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
206                 break;
207
208         case RXF_E_CONFIG:
209                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
210                 break;
211
212         case RXF_E_PAUSE:
213                 rxf->flags |= BNA_RXF_F_PAUSED;
214                 if (!bna_rxf_fltr_clear(rxf))
215                         bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
216                 else
217                         bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
218                 break;
219
220         default:
221                 bfa_sm_fault(event);
222         }
223 }
224
225 static void
226 bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
227 {
228 }
229
230 static void
231 bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
232 {
233         switch (event) {
234         case RXF_E_FAIL:
235                 bna_rxf_cfg_reset(rxf);
236                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
237                 break;
238
239         case RXF_E_FW_RESP:
240                 if (!bna_rxf_fltr_clear(rxf)) {
241                         /* No more pending CAM entries to clear */
242                         bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
243                 }
244                 break;
245
246         default:
247                 bfa_sm_fault(event);
248         }
249 }
250
251 static void
252 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
253 {
254 }
255
256 static void
257 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
258 {
259         switch (event) {
260         case RXF_E_FAIL:
261         case RXF_E_FW_RESP:
262                 bna_rxf_cfg_reset(rxf);
263                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
264                 break;
265
266         default:
267                 bfa_sm_fault(event);
268         }
269 }
270
271 static void
272 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
273                 enum bfi_enet_h2i_msgs req_type)
274 {
275         struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
276
277         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
278         req->mh.num_entries = htons(
279                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
280         ether_addr_copy(req->mac_addr, mac->addr);
281         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
282                 sizeof(struct bfi_enet_ucast_req), &req->mh);
283         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
284 }
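/*
 * All of the bna_bfi_*_req() helpers in this block follow the same pattern:
 * build the request in the per-RxF scratch area (rxf->bfi_enet_cmd), set the
 * entry count in network byte order, and post the command on the device
 * message queue (bna->msgq).  The firmware reply is delivered through
 * bna_bfi_rxf_cfg_rsp() / bna_bfi_rxf_ucast_set_rsp() /
 * bna_bfi_rxf_mcast_add_rsp(), which feed RXF_E_FW_RESP back into the RxF
 * state machine.
 */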
285
286 static void
287 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
288 {
289         struct bfi_enet_mcast_add_req *req =
290                 &rxf->bfi_enet_cmd.mcast_add_req;
291
292         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
293                 0, rxf->rx->rid);
294         req->mh.num_entries = htons(
295                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
296         ether_addr_copy(req->mac_addr, mac->addr);
297         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
298                 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
299         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
300 }
301
302 static void
303 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
304 {
305         struct bfi_enet_mcast_del_req *req =
306                 &rxf->bfi_enet_cmd.mcast_del_req;
307
308         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
309                 0, rxf->rx->rid);
310         req->mh.num_entries = htons(
311                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
312         req->handle = htons(handle);
313         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
314                 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
315         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
316 }
317
318 static void
319 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
320 {
321         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
322
323         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
324                 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
325         req->mh.num_entries = htons(
326                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
327         req->enable = status;
328         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
329                 sizeof(struct bfi_enet_enable_req), &req->mh);
330         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
331 }
332
333 static void
334 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
335 {
336         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
337
338         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
339                 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
340         req->mh.num_entries = htons(
341                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
342         req->enable = status;
343         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
344                 sizeof(struct bfi_enet_enable_req), &req->mh);
345         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
346 }
347
348 static void
349 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
350 {
351         struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
352         int i;
353         int j;
354
355         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
356                 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
357         req->mh.num_entries = htons(
358                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
359         req->block_idx = block_idx;
360         for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
361                 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
362                 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
363                         req->bit_mask[i] =
364                                 htonl(rxf->vlan_filter_table[j]);
365                 else
366                         req->bit_mask[i] = 0xFFFFFFFF;
367         }
368         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
369                 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
370         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
371 }
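/*
 * One request carries one VLAN block: BFI_ENET_VLAN_BLOCK_SIZE / 32 words of
 * the filter bitmap, selected by block_idx.  With the usual sizes (a
 * 4096-entry VLAN space split across the 8 bits of vlan_pending_bitmask,
 * i.e. 512 IDs or 16 u32 words per block) the whole table is resynced in at
 * most eight of these requests.  When filtering is disabled the block is sent
 * as all-ones so every VLAN is accepted.
 */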
372
373 static void
374 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
375 {
376         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
377
378         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
379                 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
380         req->mh.num_entries = htons(
381                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
382         req->enable = rxf->vlan_strip_status;
383         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
384                 sizeof(struct bfi_enet_enable_req), &req->mh);
385         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
386 }
387
388 static void
389 bna_bfi_rit_cfg(struct bna_rxf *rxf)
390 {
391         struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
392
393         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
394                 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
395         req->mh.num_entries = htons(
396                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
397         req->size = htons(rxf->rit_size);
398         memcpy(&req->table[0], rxf->rit, rxf->rit_size);
399         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
400                 sizeof(struct bfi_enet_rit_req), &req->mh);
401         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
402 }
403
404 static void
405 bna_bfi_rss_cfg(struct bna_rxf *rxf)
406 {
407         struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
408         int i;
409
410         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
411                 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
412         req->mh.num_entries = htons(
413                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
414         req->cfg.type = rxf->rss_cfg.hash_type;
415         req->cfg.mask = rxf->rss_cfg.hash_mask;
416         for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
417                 req->cfg.key[i] =
418                         htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
419         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
420                 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
421         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
422 }
423
424 static void
425 bna_bfi_rss_enable(struct bna_rxf *rxf)
426 {
427         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
428
429         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
430                 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
431         req->mh.num_entries = htons(
432                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
433         req->enable = rxf->rss_status;
434         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
435                 sizeof(struct bfi_enet_enable_req), &req->mh);
436         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
437 }
438
439 /* Get the multicast MAC already in the CAM (active or pending-delete list) */
440 static struct bna_mac *
441 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
442 {
443         struct bna_mac *mac;
444         struct list_head *qe;
445
446         list_for_each(qe, &rxf->mcast_active_q) {
447                 mac = (struct bna_mac *)qe;
448                 if (ether_addr_equal(mac->addr, mac_addr))
449                         return mac;
450         }
451
452         list_for_each(qe, &rxf->mcast_pending_del_q) {
453                 mac = (struct bna_mac *)qe;
454                 if (ether_addr_equal(mac->addr, mac_addr))
455                         return mac;
456         }
457
458         return NULL;
459 }
460
461 static struct bna_mcam_handle *
462 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
463 {
464         struct bna_mcam_handle *mchandle;
465         struct list_head *qe;
466
467         list_for_each(qe, &rxf->mcast_handle_q) {
468                 mchandle = (struct bna_mcam_handle *)qe;
469                 if (mchandle->handle == handle)
470                         return mchandle;
471         }
472
473         return NULL;
474 }
475
476 static void
477 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
478 {
479         struct bna_mac *mcmac;
480         struct bna_mcam_handle *mchandle;
481
482         mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
483         mchandle = bna_rxf_mchandle_get(rxf, handle);
484         if (mchandle == NULL) {
485                 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
486                 mchandle->handle = handle;
487                 mchandle->refcnt = 0;
488                 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
489         }
490         mchandle->refcnt++;
491         mcmac->handle = mchandle;
492 }
493
494 static int
495 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
496                 enum bna_cleanup_type cleanup)
497 {
498         struct bna_mcam_handle *mchandle;
499         int ret = 0;
500
501         mchandle = mac->handle;
502         if (mchandle == NULL)
503                 return ret;
504
505         mchandle->refcnt--;
506         if (mchandle->refcnt == 0) {
507                 if (cleanup == BNA_HARD_CLEANUP) {
508                         bna_bfi_mcast_del_req(rxf, mchandle->handle);
509                         ret = 1;
510                 }
511                 list_del(&mchandle->qe);
512                 bfa_q_qe_init(&mchandle->qe);
513                 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
514         }
515         mac->handle = NULL;
516
517         return ret;
518 }
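/*
 * Multicast CAM handles are reference counted: several bna_mac entries that
 * resolve to the same firmware handle share one bna_mcam_handle.  Only when
 * the last reference goes away (and only for BNA_HARD_CLEANUP) is a delete
 * request actually posted to firmware; the return value of 1 tells the caller
 * that a command is now outstanding.
 */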
519
520 static int
521 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
522 {
523         struct bna_mac *mac = NULL;
524         struct list_head *qe;
525         int ret;
526
527         /* First delete multicast entries to maintain the count */
528         while (!list_empty(&rxf->mcast_pending_del_q)) {
529                 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
530                 bfa_q_qe_init(qe);
531                 mac = (struct bna_mac *)qe;
532                 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
533                 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
534                 if (ret)
535                         return ret;
536         }
537
538         /* Add multicast entries */
539         if (!list_empty(&rxf->mcast_pending_add_q)) {
540                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
541                 bfa_q_qe_init(qe);
542                 mac = (struct bna_mac *)qe;
543                 list_add_tail(&mac->qe, &rxf->mcast_active_q);
544                 bna_bfi_mcast_add_req(rxf, mac);
545                 return 1;
546         }
547
548         return 0;
549 }
550
551 static int
552 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
553 {
554         u8 vlan_pending_bitmask;
555         int block_idx = 0;
556
557         if (rxf->vlan_pending_bitmask) {
558                 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
559                 while (!(vlan_pending_bitmask & 0x1)) {
560                         block_idx++;
561                         vlan_pending_bitmask >>= 1;
562                 }
563                 rxf->vlan_pending_bitmask &= ~BIT(block_idx);
564                 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
565                 return 1;
566         }
567
568         return 0;
569 }
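/*
 * The shift loop above just finds the lowest set bit of the pending mask,
 * i.e. block_idx == ffs(rxf->vlan_pending_bitmask) - 1.  For example, with
 * vlan_pending_bitmask == 0x06 the first pass picks block 1 and leaves 0x04
 * pending for the next pass.
 */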
570
571 static int
572 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
573 {
574         struct list_head *qe;
575         struct bna_mac *mac;
576         int ret;
577
578         /* Throw away delete pending mcast entries */
579         while (!list_empty(&rxf->mcast_pending_del_q)) {
580                 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
581                 bfa_q_qe_init(qe);
582                 mac = (struct bna_mac *)qe;
583                 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
584                 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
585                 if (ret)
586                         return ret;
587         }
588
589         /* Move active mcast entries to pending_add_q */
590         while (!list_empty(&rxf->mcast_active_q)) {
591                 bfa_q_deq(&rxf->mcast_active_q, &qe);
592                 bfa_q_qe_init(qe);
593                 list_add_tail(qe, &rxf->mcast_pending_add_q);
594                 mac = (struct bna_mac *)qe;
595                 if (bna_rxf_mcast_del(rxf, mac, cleanup))
596                         return 1;
597         }
598
599         return 0;
600 }
601
602 static int
603 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
604 {
605         if (rxf->rss_pending) {
606                 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
607                         rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
608                         bna_bfi_rit_cfg(rxf);
609                         return 1;
610                 }
611
612                 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
613                         rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
614                         bna_bfi_rss_cfg(rxf);
615                         return 1;
616                 }
617
618                 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
619                         rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
620                         bna_bfi_rss_enable(rxf);
621                         return 1;
622                 }
623         }
624
625         return 0;
626 }
627
628 static int
629 bna_rxf_cfg_apply(struct bna_rxf *rxf)
630 {
631         if (bna_rxf_ucast_cfg_apply(rxf))
632                 return 1;
633
634         if (bna_rxf_mcast_cfg_apply(rxf))
635                 return 1;
636
637         if (bna_rxf_promisc_cfg_apply(rxf))
638                 return 1;
639
640         if (bna_rxf_allmulti_cfg_apply(rxf))
641                 return 1;
642
643         if (bna_rxf_vlan_cfg_apply(rxf))
644                 return 1;
645
646         if (bna_rxf_vlan_strip_cfg_apply(rxf))
647                 return 1;
648
649         if (bna_rxf_rss_cfg_apply(rxf))
650                 return 1;
651
652         return 0;
653 }
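/*
 * bna_rxf_cfg_apply() posts at most one firmware command per call, walking
 * the pending work in a fixed order (ucast, mcast, promisc, allmulti, VLAN
 * filter, VLAN strip, RSS).  It returns 1 while a command is outstanding;
 * the cfg_wait state calls it again on each RXF_E_FW_RESP until it finally
 * returns 0 and the RxF moves to the started state.
 */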
654
655 /* Clear active filters in firmware (hard cleanup), one request per call */
656 static int
657 bna_rxf_fltr_clear(struct bna_rxf *rxf)
658 {
659         if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
660                 return 1;
661
662         if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
663                 return 1;
664
665         if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
666                 return 1;
667
668         if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
669                 return 1;
670
671         return 0;
672 }
673
674 static void
675 bna_rxf_cfg_reset(struct bna_rxf *rxf)
676 {
677         bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
678         bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
679         bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
680         bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
681         bna_rxf_vlan_cfg_soft_reset(rxf);
682         bna_rxf_rss_cfg_soft_reset(rxf);
683 }
684
685 static void
686 bna_rit_init(struct bna_rxf *rxf, int rit_size)
687 {
688         struct bna_rx *rx = rxf->rx;
689         struct bna_rxp *rxp;
690         struct list_head *qe;
691         int offset = 0;
692
693         rxf->rit_size = rit_size;
694         list_for_each(qe, &rx->rxp_q) {
695                 rxp = (struct bna_rxp *)qe;
696                 rxf->rit[offset] = rxp->cq.ccb->id;
697                 offset++;
698         }
699
700 }
701
702 void
703 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
704 {
705         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
706 }
707
708 void
709 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
710                         struct bfi_msgq_mhdr *msghdr)
711 {
712         struct bfi_enet_rsp *rsp =
713                 container_of(msghdr, struct bfi_enet_rsp, mh);
714
715         if (rsp->error) {
716                 /* Clear ucast from cache */
717                 rxf->ucast_active_set = 0;
718         }
719
720         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
721 }
722
723 void
724 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
725                         struct bfi_msgq_mhdr *msghdr)
726 {
727         struct bfi_enet_mcast_add_req *req =
728                 &rxf->bfi_enet_cmd.mcast_add_req;
729         struct bfi_enet_mcast_add_rsp *rsp =
730                 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
731
732         bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
733                 ntohs(rsp->handle));
734         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
735 }
736
737 static void
738 bna_rxf_init(struct bna_rxf *rxf,
739                 struct bna_rx *rx,
740                 struct bna_rx_config *q_config,
741                 struct bna_res_info *res_info)
742 {
743         rxf->rx = rx;
744
745         INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
746         INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
747         rxf->ucast_pending_set = 0;
748         rxf->ucast_active_set = 0;
749         INIT_LIST_HEAD(&rxf->ucast_active_q);
750         rxf->ucast_pending_mac = NULL;
751
752         INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
753         INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
754         INIT_LIST_HEAD(&rxf->mcast_active_q);
755         INIT_LIST_HEAD(&rxf->mcast_handle_q);
756
757         if (q_config->paused)
758                 rxf->flags |= BNA_RXF_F_PAUSED;
759
760         rxf->rit = (u8 *)
761                 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
762         bna_rit_init(rxf, q_config->num_paths);
763
764         rxf->rss_status = q_config->rss_status;
765         if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
766                 rxf->rss_cfg = q_config->rss_config;
767                 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
768                 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
769                 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
770         }
771
772         rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
773         memset(rxf->vlan_filter_table, 0,
774                         (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
775         rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
776         rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
777
778         rxf->vlan_strip_status = q_config->vlan_strip_status;
779
780         bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
781 }
782
783 static void
784 bna_rxf_uninit(struct bna_rxf *rxf)
785 {
786         struct bna_mac *mac;
787
788         rxf->ucast_pending_set = 0;
789         rxf->ucast_active_set = 0;
790
791         while (!list_empty(&rxf->ucast_pending_add_q)) {
792                 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
793                 bfa_q_qe_init(&mac->qe);
794                 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
795         }
796
797         if (rxf->ucast_pending_mac) {
798                 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
799                 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
800                                     rxf->ucast_pending_mac);
801                 rxf->ucast_pending_mac = NULL;
802         }
803
804         while (!list_empty(&rxf->mcast_pending_add_q)) {
805                 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
806                 bfa_q_qe_init(&mac->qe);
807                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
808         }
809
810         rxf->rxmode_pending = 0;
811         rxf->rxmode_pending_bitmask = 0;
812         if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
813                 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
814         if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
815                 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
816
817         rxf->rss_pending = 0;
818         rxf->vlan_strip_pending = false;
819
820         rxf->flags = 0;
821
822         rxf->rx = NULL;
823 }
824
825 static void
826 bna_rx_cb_rxf_started(struct bna_rx *rx)
827 {
828         bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
829 }
830
831 static void
832 bna_rxf_start(struct bna_rxf *rxf)
833 {
834         rxf->start_cbfn = bna_rx_cb_rxf_started;
835         rxf->start_cbarg = rxf->rx;
836         bfa_fsm_send_event(rxf, RXF_E_START);
837 }
838
839 static void
840 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
841 {
842         bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
843 }
844
845 static void
846 bna_rxf_stop(struct bna_rxf *rxf)
847 {
848         rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
849         rxf->stop_cbarg = rxf->rx;
850         bfa_fsm_send_event(rxf, RXF_E_STOP);
851 }
852
853 static void
854 bna_rxf_fail(struct bna_rxf *rxf)
855 {
856         bfa_fsm_send_event(rxf, RXF_E_FAIL);
857 }
858
859 enum bna_cb_status
860 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
861 {
862         struct bna_rxf *rxf = &rx->rxf;
863
864         if (rxf->ucast_pending_mac == NULL) {
865                 rxf->ucast_pending_mac =
866                         bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
867                 if (rxf->ucast_pending_mac == NULL)
868                         return BNA_CB_UCAST_CAM_FULL;
869                 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
870         }
871
872         ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
873         rxf->ucast_pending_set = 1;
874         rxf->cam_fltr_cbfn = NULL;
875         rxf->cam_fltr_cbarg = rx->bna->bnad;
876
877         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
878
879         return BNA_CB_SUCCESS;
880 }
881
882 enum bna_cb_status
883 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
884                  void (*cbfn)(struct bnad *, struct bna_rx *))
885 {
886         struct bna_rxf *rxf = &rx->rxf;
887         struct bna_mac *mac;
888
889         /* Check if already added or pending addition */
890         if (bna_mac_find(&rxf->mcast_active_q, addr) ||
891                 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
892                 if (cbfn)
893                         cbfn(rx->bna->bnad, rx);
894                 return BNA_CB_SUCCESS;
895         }
896
897         mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
898         if (mac == NULL)
899                 return BNA_CB_MCAST_LIST_FULL;
900         bfa_q_qe_init(&mac->qe);
901         ether_addr_copy(mac->addr, addr);
902         list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
903
904         rxf->cam_fltr_cbfn = cbfn;
905         rxf->cam_fltr_cbarg = rx->bna->bnad;
906
907         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
908
909         return BNA_CB_SUCCESS;
910 }
911
912 enum bna_cb_status
913 bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
914 {
915         struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
916         struct bna_rxf *rxf = &rx->rxf;
917         struct list_head list_head;
918         struct list_head *qe;
919         u8 *mcaddr;
920         struct bna_mac *mac, *del_mac;
921         int i;
922
923         /* Purge the pending_add_q */
924         while (!list_empty(&rxf->ucast_pending_add_q)) {
925                 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
926                 bfa_q_qe_init(qe);
927                 mac = (struct bna_mac *)qe;
928                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
929         }
930
931         /* Schedule active_q entries for deletion */
932         while (!list_empty(&rxf->ucast_active_q)) {
933                 bfa_q_deq(&rxf->ucast_active_q, &qe);
934                 mac = (struct bna_mac *)qe;
935                 bfa_q_qe_init(&mac->qe);
936
937                 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
938                 memcpy(del_mac, mac, sizeof(*del_mac));
939                 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
940                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
941         }
942
943         /* Allocate nodes */
944         INIT_LIST_HEAD(&list_head);
945         for (i = 0, mcaddr = uclist; i < count; i++) {
946                 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
947                 if (mac == NULL)
948                         goto err_return;
949                 bfa_q_qe_init(&mac->qe);
950                 ether_addr_copy(mac->addr, mcaddr);
951                 list_add_tail(&mac->qe, &list_head);
952                 mcaddr += ETH_ALEN;
953         }
954
955         /* Add the new entries */
956         while (!list_empty(&list_head)) {
957                 bfa_q_deq(&list_head, &qe);
958                 mac = (struct bna_mac *)qe;
959                 bfa_q_qe_init(&mac->qe);
960                 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
961         }
962
963         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
964
965         return BNA_CB_SUCCESS;
966
967 err_return:
968         while (!list_empty(&list_head)) {
969                 bfa_q_deq(&list_head, &qe);
970                 mac = (struct bna_mac *)qe;
971                 bfa_q_qe_init(&mac->qe);
972                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
973         }
974
975         return BNA_CB_UCAST_CAM_FULL;
976 }
977
978 enum bna_cb_status
979 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
980 {
981         struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
982         struct bna_rxf *rxf = &rx->rxf;
983         struct list_head list_head;
984         struct list_head *qe;
985         u8 *mcaddr;
986         struct bna_mac *mac, *del_mac;
987         int i;
988
989         /* Purge the pending_add_q */
990         while (!list_empty(&rxf->mcast_pending_add_q)) {
991                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
992                 bfa_q_qe_init(qe);
993                 mac = (struct bna_mac *)qe;
994                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
995         }
996
997         /* Schedule active_q entries for deletion */
998         while (!list_empty(&rxf->mcast_active_q)) {
999                 bfa_q_deq(&rxf->mcast_active_q, &qe);
1000                 mac = (struct bna_mac *)qe;
1001                 bfa_q_qe_init(&mac->qe);
1002
1003                 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
1004
1005                 memcpy(del_mac, mac, sizeof(*del_mac));
1006                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1007                 mac->handle = NULL;
1008                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1009         }
1010
1011         /* Allocate nodes */
1012         INIT_LIST_HEAD(&list_head);
1013         for (i = 0, mcaddr = mclist; i < count; i++) {
1014                 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
1015                 if (mac == NULL)
1016                         goto err_return;
1017                 bfa_q_qe_init(&mac->qe);
1018                 ether_addr_copy(mac->addr, mcaddr);
1019                 list_add_tail(&mac->qe, &list_head);
1020
1021                 mcaddr += ETH_ALEN;
1022         }
1023
1024         /* Add the new entries */
1025         while (!list_empty(&list_head)) {
1026                 bfa_q_deq(&list_head, &qe);
1027                 mac = (struct bna_mac *)qe;
1028                 bfa_q_qe_init(&mac->qe);
1029                 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1030         }
1031
1032         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1033
1034         return BNA_CB_SUCCESS;
1035
1036 err_return:
1037         while (!list_empty(&list_head)) {
1038                 bfa_q_deq(&list_head, &qe);
1039                 mac = (struct bna_mac *)qe;
1040                 bfa_q_qe_init(&mac->qe);
1041                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1042         }
1043
1044         return BNA_CB_MCAST_LIST_FULL;
1045 }
1046
1047 void
1048 bna_rx_mcast_delall(struct bna_rx *rx)
1049 {
1050         struct bna_rxf *rxf = &rx->rxf;
1051         struct list_head *qe;
1052         struct bna_mac *mac, *del_mac;
1053         int need_hw_config = 0;
1054
1055         /* Purge all entries from pending_add_q */
1056         while (!list_empty(&rxf->mcast_pending_add_q)) {
1057                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1058                 mac = (struct bna_mac *)qe;
1059                 bfa_q_qe_init(&mac->qe);
1060                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1061         }
1062
1063         /* Schedule all entries in active_q for deletion */
1064         while (!list_empty(&rxf->mcast_active_q)) {
1065                 bfa_q_deq(&rxf->mcast_active_q, &qe);
1066                 mac = (struct bna_mac *)qe;
1067                 bfa_q_qe_init(&mac->qe);
1068
1069                 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
1070
1071                 memcpy(del_mac, mac, sizeof(*del_mac));
1072                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1073                 mac->handle = NULL;
1074                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1075                 need_hw_config = 1;
1076         }
1077
1078         if (need_hw_config)
1079                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1080 }
1081
1082 void
1083 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1084 {
1085         struct bna_rxf *rxf = &rx->rxf;
1086         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1087         int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
1088         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1089
1090         rxf->vlan_filter_table[index] |= bit;
1091         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1092                 rxf->vlan_pending_bitmask |= BIT(group_id);
1093                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1094         }
1095 }
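/*
 * Worked example for the index math above, assuming the usual 32-bit words
 * (BFI_VLAN_WORD_SHIFT == 5, BFI_VLAN_WORD_MASK == 31) and 512-ID blocks
 * (BFI_VLAN_BLOCK_SHIFT == 9): vlan_id 100 lands in word 3 (100 >> 5),
 * bit 4 (100 & 31), and belongs to block 0 (100 >> 9), so only block 0 is
 * marked pending for resync.
 */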
1096
1097 void
1098 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1099 {
1100         struct bna_rxf *rxf = &rx->rxf;
1101         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1102         int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
1103         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1104
1105         rxf->vlan_filter_table[index] &= ~bit;
1106         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1107                 rxf->vlan_pending_bitmask |= BIT(group_id);
1108                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1109         }
1110 }
1111
1112 static int
1113 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1114 {
1115         struct bna_mac *mac = NULL;
1116         struct list_head *qe;
1117
1118         /* Delete MAC addresses previously added */
1119         if (!list_empty(&rxf->ucast_pending_del_q)) {
1120                 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1121                 bfa_q_qe_init(qe);
1122                 mac = (struct bna_mac *)qe;
1123                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1124                 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
1125                 return 1;
1126         }
1127
1128         /* Set default unicast MAC */
1129         if (rxf->ucast_pending_set) {
1130                 rxf->ucast_pending_set = 0;
1131                 ether_addr_copy(rxf->ucast_active_mac.addr,
1132                                 rxf->ucast_pending_mac->addr);
1133                 rxf->ucast_active_set = 1;
1134                 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1135                         BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1136                 return 1;
1137         }
1138
1139         /* Add additional MAC entries */
1140         if (!list_empty(&rxf->ucast_pending_add_q)) {
1141                 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1142                 bfa_q_qe_init(qe);
1143                 mac = (struct bna_mac *)qe;
1144                 list_add_tail(&mac->qe, &rxf->ucast_active_q);
1145                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1146                 return 1;
1147         }
1148
1149         return 0;
1150 }
1151
1152 static int
1153 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1154 {
1155         struct list_head *qe;
1156         struct bna_mac *mac;
1157
1158         /* Throw away delete pending ucast entries */
1159         while (!list_empty(&rxf->ucast_pending_del_q)) {
1160                 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1161                 bfa_q_qe_init(qe);
1162                 mac = (struct bna_mac *)qe;
1163                 if (cleanup == BNA_SOFT_CLEANUP)
1164                         bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1165                                             mac);
1166                 else {
1167                         bna_bfi_ucast_req(rxf, mac,
1168                                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1169                         bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1170                                             mac);
1171                         return 1;
1172                 }
1173         }
1174
1175         /* Move active ucast entries to pending_add_q */
1176         while (!list_empty(&rxf->ucast_active_q)) {
1177                 bfa_q_deq(&rxf->ucast_active_q, &qe);
1178                 bfa_q_qe_init(qe);
1179                 list_add_tail(qe, &rxf->ucast_pending_add_q);
1180                 if (cleanup == BNA_HARD_CLEANUP) {
1181                         mac = (struct bna_mac *)qe;
1182                         bna_bfi_ucast_req(rxf, mac,
1183                                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1184                         return 1;
1185                 }
1186         }
1187
1188         if (rxf->ucast_active_set) {
1189                 rxf->ucast_pending_set = 1;
1190                 rxf->ucast_active_set = 0;
1191                 if (cleanup == BNA_HARD_CLEANUP) {
1192                         bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1193                                 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1194                         return 1;
1195                 }
1196         }
1197
1198         return 0;
1199 }
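/*
 * Cleanup types: BNA_SOFT_CLEANUP only fixes up the driver's cached lists
 * (used on failure/stop paths where no further firmware commands should be
 * issued), while BNA_HARD_CLEANUP additionally posts the corresponding
 * delete/clear request to firmware, one command per call, returning 1 until
 * nothing is left to undo.
 */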
1200
1201 static int
1202 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1203 {
1204         struct bna *bna = rxf->rx->bna;
1205
1206         /* Enable/disable promiscuous mode */
1207         if (is_promisc_enable(rxf->rxmode_pending,
1208                                 rxf->rxmode_pending_bitmask)) {
1209                 /* move promisc configuration from pending -> active */
1210                 promisc_inactive(rxf->rxmode_pending,
1211                                 rxf->rxmode_pending_bitmask);
1212                 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1213                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1214                 return 1;
1215         } else if (is_promisc_disable(rxf->rxmode_pending,
1216                                 rxf->rxmode_pending_bitmask)) {
1217                 /* move promisc configuration from pending -> active */
1218                 promisc_inactive(rxf->rxmode_pending,
1219                                 rxf->rxmode_pending_bitmask);
1220                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1221                 bna->promisc_rid = BFI_INVALID_RID;
1222                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1223                 return 1;
1224         }
1225
1226         return 0;
1227 }
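/*
 * Promiscuous (and, below, allmulti) mode changes are staged in
 * rxf->rxmode_pending / rxf->rxmode_pending_bitmask by the enable/disable
 * helpers further down, and are promoted to rxf->rxmode_active here only when
 * the matching firmware request is posted.  bna->promisc_rid records which Rx
 * currently owns promiscuous mode, since only one Rx may own it at a time.
 */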
1228
1229 static int
1230 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1231 {
1232         struct bna *bna = rxf->rx->bna;
1233
1234         /* Clear pending promisc mode disable */
1235         if (is_promisc_disable(rxf->rxmode_pending,
1236                                 rxf->rxmode_pending_bitmask)) {
1237                 promisc_inactive(rxf->rxmode_pending,
1238                                 rxf->rxmode_pending_bitmask);
1239                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1240                 bna->promisc_rid = BFI_INVALID_RID;
1241                 if (cleanup == BNA_HARD_CLEANUP) {
1242                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1243                         return 1;
1244                 }
1245         }
1246
1247         /* Move promisc mode config from active -> pending */
1248         if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1249                 promisc_enable(rxf->rxmode_pending,
1250                                 rxf->rxmode_pending_bitmask);
1251                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1252                 if (cleanup == BNA_HARD_CLEANUP) {
1253                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1254                         return 1;
1255                 }
1256         }
1257
1258         return 0;
1259 }
1260
1261 static int
1262 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1263 {
1264         /* Enable/disable allmulti mode */
1265         if (is_allmulti_enable(rxf->rxmode_pending,
1266                                 rxf->rxmode_pending_bitmask)) {
1267                 /* move allmulti configuration from pending -> active */
1268                 allmulti_inactive(rxf->rxmode_pending,
1269                                 rxf->rxmode_pending_bitmask);
1270                 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1271                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1272                 return 1;
1273         } else if (is_allmulti_disable(rxf->rxmode_pending,
1274                                         rxf->rxmode_pending_bitmask)) {
1275                 /* move allmulti configuration from pending -> active */
1276                 allmulti_inactive(rxf->rxmode_pending,
1277                                 rxf->rxmode_pending_bitmask);
1278                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1279                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1280                 return 1;
1281         }
1282
1283         return 0;
1284 }
1285
1286 static int
1287 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1288 {
1289         /* Clear pending allmulti mode disable */
1290         if (is_allmulti_disable(rxf->rxmode_pending,
1291                                 rxf->rxmode_pending_bitmask)) {
1292                 allmulti_inactive(rxf->rxmode_pending,
1293                                 rxf->rxmode_pending_bitmask);
1294                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1295                 if (cleanup == BNA_HARD_CLEANUP) {
1296                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1297                         return 1;
1298                 }
1299         }
1300
1301         /* Move allmulti mode config from active -> pending */
1302         if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1303                 allmulti_enable(rxf->rxmode_pending,
1304                                 rxf->rxmode_pending_bitmask);
1305                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1306                 if (cleanup == BNA_HARD_CLEANUP) {
1307                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1308                         return 1;
1309                 }
1310         }
1311
1312         return 0;
1313 }
1314
1315 static int
1316 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1317 {
1318         struct bna *bna = rxf->rx->bna;
1319         int ret = 0;
1320
1321         if (is_promisc_enable(rxf->rxmode_pending,
1322                                 rxf->rxmode_pending_bitmask) ||
1323                 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1324                 /* Do nothing if pending enable or already enabled */
1325         } else if (is_promisc_disable(rxf->rxmode_pending,
1326                                         rxf->rxmode_pending_bitmask)) {
1327                 /* Turn off pending disable command */
1328                 promisc_inactive(rxf->rxmode_pending,
1329                         rxf->rxmode_pending_bitmask);
1330         } else {
1331                 /* Schedule enable */
1332                 promisc_enable(rxf->rxmode_pending,
1333                                 rxf->rxmode_pending_bitmask);
1334                 bna->promisc_rid = rxf->rx->rid;
1335                 ret = 1;
1336         }
1337
1338         return ret;
1339 }
1340
1341 static int
1342 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1343 {
1344         struct bna *bna = rxf->rx->bna;
1345         int ret = 0;
1346
1347         if (is_promisc_disable(rxf->rxmode_pending,
1348                                 rxf->rxmode_pending_bitmask) ||
1349                 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1350                 /* Do nothing if pending disable or already disabled */
1351         } else if (is_promisc_enable(rxf->rxmode_pending,
1352                                         rxf->rxmode_pending_bitmask)) {
1353                 /* Turn off pending enable command */
1354                 promisc_inactive(rxf->rxmode_pending,
1355                                 rxf->rxmode_pending_bitmask);
1356                 bna->promisc_rid = BFI_INVALID_RID;
1357         } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1358                 /* Schedule disable */
1359                 promisc_disable(rxf->rxmode_pending,
1360                                 rxf->rxmode_pending_bitmask);
1361                 ret = 1;
1362         }
1363
1364         return ret;
1365 }
1366
1367 static int
1368 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1369 {
1370         int ret = 0;
1371
1372         if (is_allmulti_enable(rxf->rxmode_pending,
1373                         rxf->rxmode_pending_bitmask) ||
1374                         (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1375                 /* Do nothing if pending enable or already enabled */
1376         } else if (is_allmulti_disable(rxf->rxmode_pending,
1377                                         rxf->rxmode_pending_bitmask)) {
1378                 /* Turn off pending disable command */
1379                 allmulti_inactive(rxf->rxmode_pending,
1380                         rxf->rxmode_pending_bitmask);
1381         } else {
1382                 /* Schedule enable */
1383                 allmulti_enable(rxf->rxmode_pending,
1384                                 rxf->rxmode_pending_bitmask);
1385                 ret = 1;
1386         }
1387
1388         return ret;
1389 }
1390
1391 static int
1392 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1393 {
1394         int ret = 0;
1395
1396         if (is_allmulti_disable(rxf->rxmode_pending,
1397                                 rxf->rxmode_pending_bitmask) ||
1398                 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1399                 /* Do nothing if pending disable or already disabled */
1400         } else if (is_allmulti_enable(rxf->rxmode_pending,
1401                                         rxf->rxmode_pending_bitmask)) {
1402                 /* Turn off pending enable command */
1403                 allmulti_inactive(rxf->rxmode_pending,
1404                                 rxf->rxmode_pending_bitmask);
1405         } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1406                 /* Schedule disable */
1407                 allmulti_disable(rxf->rxmode_pending,
1408                                 rxf->rxmode_pending_bitmask);
1409                 ret = 1;
1410         }
1411
1412         return ret;
1413 }
1414
1415 static int
1416 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1417 {
1418         if (rxf->vlan_strip_pending) {
1419                 rxf->vlan_strip_pending = false;
1420                 bna_bfi_vlan_strip_enable(rxf);
1421                 return 1;
1422         }
1423
1424         return 0;
1425 }
1426
1427 /* RX */
1428
1429 #define BNA_GET_RXQS(qcfg)      (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1430         (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1431
1432 #define SIZE_TO_PAGES(size)     (((size) >> PAGE_SHIFT) + ((((size) &\
1433         (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
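/*
 * SIZE_TO_PAGES() is just a round-up division by the page size, equivalent to
 * DIV_ROUND_UP(size, PAGE_SIZE): e.g. SIZE_TO_PAGES(PAGE_SIZE) == 1 and
 * SIZE_TO_PAGES(PAGE_SIZE + 1) == 2.
 */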
1434
1435 #define call_rx_stop_cbfn(rx)                                           \
1436 do {                                                                    \
1437         if ((rx)->stop_cbfn) {                                          \
1438                 void (*cbfn)(void *, struct bna_rx *);                  \
1439                 void *cbarg;                                            \
1440                 cbfn = (rx)->stop_cbfn;                                 \
1441                 cbarg = (rx)->stop_cbarg;                               \
1442                 (rx)->stop_cbfn = NULL;                                 \
1443                 (rx)->stop_cbarg = NULL;                                \
1444                 cbfn(cbarg, rx);                                        \
1445         }                                                               \
1446 } while (0)
1447
1448 #define call_rx_stall_cbfn(rx)                                          \
1449 do {                                                                    \
1450         if ((rx)->rx_stall_cbfn)                                        \
1451                 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));             \
1452 } while (0)
1453
1454 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt)                        \
1455 do {                                                                    \
1456         struct bna_dma_addr cur_q_addr =                                \
1457                 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));      \
1458         (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;        \
1459         (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;        \
1460         (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;              \
1461         (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;              \
1462         (bfi_q)->pages = htons((u16)(bna_qpt)->page_count);             \
1463         (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);            \
1464 } while (0)
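/*
 * bfi_enet_datapath_q_init() copies a queue's page-table DMA address, the DMA
 * address of its first entry, and its page count and page size (converted to
 * big-endian) into the BFI queue descriptor used when building the enet Rx
 * configure request for firmware.
 */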
1465
1466 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1467 static void bna_rx_enet_stop(struct bna_rx *rx);
1468 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1469
1470 bfa_fsm_state_decl(bna_rx, stopped,
1471         struct bna_rx, enum bna_rx_event);
1472 bfa_fsm_state_decl(bna_rx, start_wait,
1473         struct bna_rx, enum bna_rx_event);
1474 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1475         struct bna_rx, enum bna_rx_event);
1476 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1477         struct bna_rx, enum bna_rx_event);
1478 bfa_fsm_state_decl(bna_rx, started,
1479         struct bna_rx, enum bna_rx_event);
1480 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1481         struct bna_rx, enum bna_rx_event);
1482 bfa_fsm_state_decl(bna_rx, stop_wait,
1483         struct bna_rx, enum bna_rx_event);
1484 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1485         struct bna_rx, enum bna_rx_event);
1486 bfa_fsm_state_decl(bna_rx, failed,
1487         struct bna_rx, enum bna_rx_event);
1488 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1489         struct bna_rx, enum bna_rx_event);
1490
1491 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1492 {
1493         call_rx_stop_cbfn(rx);
1494 }
1495
1496 static void bna_rx_sm_stopped(struct bna_rx *rx,
1497                                 enum bna_rx_event event)
1498 {
1499         switch (event) {
1500         case RX_E_START:
1501                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1502                 break;
1503
1504         case RX_E_STOP:
1505                 call_rx_stop_cbfn(rx);
1506                 break;
1507
1508         case RX_E_FAIL:
1509                 /* no-op */
1510                 break;
1511
1512         default:
1513                 bfa_sm_fault(event);
1514                 break;
1515         }
1516 }
1517
1518 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1519 {
1520         bna_bfi_rx_enet_start(rx);
1521 }
1522
1523 static void
1524 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1525 {
1526 }
1527
1528 static void
1529 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1530 {
1531         switch (event) {
1532         case RX_E_FAIL:
1533         case RX_E_STOPPED:
1534                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1535                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1536                 break;
1537
1538         case RX_E_STARTED:
1539                 bna_rx_enet_stop(rx);
1540                 break;
1541
1542         default:
1543                 bfa_sm_fault(event);
1544                 break;
1545         }
1546 }
1547
1548 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1549                                 enum bna_rx_event event)
1550 {
1551         switch (event) {
1552         case RX_E_STOP:
1553                 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1554                 break;
1555
1556         case RX_E_FAIL:
1557                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1558                 break;
1559
1560         case RX_E_STARTED:
1561                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1562                 break;
1563
1564         default:
1565                 bfa_sm_fault(event);
1566                 break;
1567         }
1568 }
1569
1570 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1571 {
1572         rx->rx_post_cbfn(rx->bna->bnad, rx);
1573         bna_rxf_start(&rx->rxf);
1574 }
1575
1576 static void
1577 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1578 {
1579 }
1580
1581 static void
1582 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1583 {
1584         switch (event) {
1585         case RX_E_FAIL:
1586                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1587                 bna_rxf_fail(&rx->rxf);
1588                 call_rx_stall_cbfn(rx);
1589                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1590                 break;
1591
1592         case RX_E_RXF_STARTED:
1593                 bna_rxf_stop(&rx->rxf);
1594                 break;
1595
1596         case RX_E_RXF_STOPPED:
1597                 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1598                 call_rx_stall_cbfn(rx);
1599                 bna_rx_enet_stop(rx);
1600                 break;
1601
1602         default:
1603                 bfa_sm_fault(event);
1604                 break;
1605         }
1606
1607 }
1608
1609 static void
1610 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1611 {
1612 }
1613
1614 static void
1615 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1616 {
1617         switch (event) {
1618         case RX_E_FAIL:
1619         case RX_E_STOPPED:
1620                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1621                 break;
1622
1623         case RX_E_STARTED:
1624                 bna_rx_enet_stop(rx);
1625                 break;
1626
1627         default:
1628                 bfa_sm_fault(event);
1629         }
1630 }
1631
1632 static void
1633 bna_rx_sm_started_entry(struct bna_rx *rx)
1634 {
1635         struct bna_rxp *rxp;
1636         struct list_head *qe_rxp;
1637         int is_regular = (rx->type == BNA_RX_T_REGULAR);
1638
1639         /* Start IB */
1640         list_for_each(qe_rxp, &rx->rxp_q) {
1641                 rxp = (struct bna_rxp *)qe_rxp;
1642                 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1643         }
1644
1645         bna_ethport_cb_rx_started(&rx->bna->ethport);
1646 }
1647
1648 static void
1649 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1650 {
1651         switch (event) {
1652         case RX_E_STOP:
1653                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1654                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1655                 bna_rxf_stop(&rx->rxf);
1656                 break;
1657
1658         case RX_E_FAIL:
1659                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1660                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1661                 bna_rxf_fail(&rx->rxf);
1662                 call_rx_stall_cbfn(rx);
1663                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1664                 break;
1665
1666         default:
1667                 bfa_sm_fault(event);
1668                 break;
1669         }
1670 }
1671
1672 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1673                                 enum bna_rx_event event)
1674 {
1675         switch (event) {
1676         case RX_E_STOP:
1677                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1678                 break;
1679
1680         case RX_E_FAIL:
1681                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1682                 bna_rxf_fail(&rx->rxf);
1683                 call_rx_stall_cbfn(rx);
1684                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1685                 break;
1686
1687         case RX_E_RXF_STARTED:
1688                 bfa_fsm_set_state(rx, bna_rx_sm_started);
1689                 break;
1690
1691         default:
1692                 bfa_sm_fault(event);
1693                 break;
1694         }
1695 }
1696
1697 static void
1698 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1699 {
1700 }
1701
1702 static void
1703 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1704 {
1705         switch (event) {
1706         case RX_E_FAIL:
1707         case RX_E_RXF_STOPPED:
1708                 /* No-op */
1709                 break;
1710
1711         case RX_E_CLEANUP_DONE:
1712                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1713                 break;
1714
1715         default:
1716                 bfa_sm_fault(event);
1717                 break;
1718         }
1719 }
1720
1721 static void
1722 bna_rx_sm_failed_entry(struct bna_rx *rx)
1723 {
1724 }
1725
1726 static void
1727 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1728 {
1729         switch (event) {
1730         case RX_E_START:
1731                 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1732                 break;
1733
1734         case RX_E_STOP:
1735                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1736                 break;
1737
1738         case RX_E_FAIL:
1739         case RX_E_RXF_STARTED:
1740         case RX_E_RXF_STOPPED:
1741                 /* No-op */
1742                 break;
1743
1744         case RX_E_CLEANUP_DONE:
1745                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1746                 break;
1747
1748         default:
1749                 bfa_sm_fault(event);
1750                 break;
1751 }       }
1752
1753 static void
1754 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1755 {
1756 }
1757
1758 static void
1759 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1760 {
1761         switch (event) {
1762         case RX_E_STOP:
1763                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1764                 break;
1765
1766         case RX_E_FAIL:
1767                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1768                 break;
1769
1770         case RX_E_CLEANUP_DONE:
1771                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1772                 break;
1773
1774         default:
1775                 bfa_sm_fault(event);
1776                 break;
1777         }
1778 }
1779
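/*
 * Build and post a BFI_ENET_H2I_RX_CFG_SET_REQ to firmware: for each path
 * the small/large (or header/data) RxQ and CQ page-table and first-entry
 * addresses are filled in via bfi_enet_datapath_q_init(), the IB index
 * segment address, MSI-X vector and coalescing parameters are set, and the
 * request is queued on the message queue.
 */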
1780 static void
1781 bna_bfi_rx_enet_start(struct bna_rx *rx)
1782 {
1783         struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1784         struct bna_rxp *rxp = NULL;
1785         struct bna_rxq *q0 = NULL, *q1 = NULL;
1786         struct list_head *rxp_qe;
1787         int i;
1788
1789         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1790                 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1791         cfg_req->mh.num_entries = htons(
1792                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1793
1794         cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1795         cfg_req->num_queue_sets = rx->num_paths;
1796         for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1797                 i < rx->num_paths;
1798                 i++, rxp_qe = bfa_q_next(rxp_qe)) {
1799                 rxp = (struct bna_rxp *)rxp_qe;
1800
1801                 GET_RXQS(rxp, q0, q1);
1802                 switch (rxp->type) {
1803                 case BNA_RXP_SLR:
1804                 case BNA_RXP_HDS:
1805                         /* Small RxQ */
1806                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1807                                                 &q1->qpt);
1808                         cfg_req->q_cfg[i].qs.rx_buffer_size =
1809                                 htons((u16)q1->buffer_size);
1810                         /* Fall through */
1811
1812                 case BNA_RXP_SINGLE:
1813                         /* Large/Single RxQ */
1814                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1815                                                 &q0->qpt);
1816                         if (q0->multi_buffer)
1817                                 /* multi-buffer is enabled by allocating
1818                                  * a new rx with a new set of resources.
1819                                  * q0->buffer_size should be initialized to
1820                                  * the fragment size.
1821                                  */
1822                                 cfg_req->rx_cfg.multi_buffer =
1823                                         BNA_STATUS_T_ENABLED;
1824                         else
1825                                 q0->buffer_size =
1826                                         bna_enet_mtu_get(&rx->bna->enet);
1827                         cfg_req->q_cfg[i].ql.rx_buffer_size =
1828                                 htons((u16)q0->buffer_size);
1829                         break;
1830
1831                 default:
1832                         BUG_ON(1);
1833                 }
1834
1835                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1836                                         &rxp->cq.qpt);
1837
1838                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1839                         rxp->cq.ib.ib_seg_host_addr.lsb;
1840                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1841                         rxp->cq.ib.ib_seg_host_addr.msb;
1842                 cfg_req->q_cfg[i].ib.intr.msix_index =
1843                         htons((u16)rxp->cq.ib.intr_vector);
1844         }
1845
1846         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1847         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1848         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1849         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1850         cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1851                                 ? BNA_STATUS_T_ENABLED :
1852                                 BNA_STATUS_T_DISABLED;
1853         cfg_req->ib_cfg.coalescing_timeout =
1854                         htonl((u32)rxp->cq.ib.coalescing_timeo);
1855         cfg_req->ib_cfg.inter_pkt_timeout =
1856                         htonl((u32)rxp->cq.ib.interpkt_timeo);
1857         cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1858
1859         switch (rxp->type) {
1860         case BNA_RXP_SLR:
1861                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1862                 break;
1863
1864         case BNA_RXP_HDS:
1865                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1866                 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1867                 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1868                 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1869                 break;
1870
1871         case BNA_RXP_SINGLE:
1872                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1873                 break;
1874
1875         default:
1876                 BUG_ON(1);
1877         }
1878         cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1879
1880         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1881                 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1882         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1883 }
1884
1885 static void
1886 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1887 {
1888         struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1889
1890         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1891                 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1892         req->mh.num_entries = htons(
1893                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1894         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1895                 &req->mh);
1896         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1897 }
1898
1899 static void
1900 bna_rx_enet_stop(struct bna_rx *rx)
1901 {
1902         struct bna_rxp *rxp;
1903         struct list_head                 *qe_rxp;
1904
1905         /* Stop IB */
1906         list_for_each(qe_rxp, &rx->rxp_q) {
1907                 rxp = (struct bna_rxp *)qe_rxp;
1908                 bna_ib_stop(rx->bna, &rxp->cq.ib);
1909         }
1910
1911         bna_bfi_rx_enet_stop(rx);
1912 }
1913
1914 static int
1915 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1916 {
1917         if ((rx_mod->rx_free_count == 0) ||
1918                 (rx_mod->rxp_free_count == 0) ||
1919                 (rx_mod->rxq_free_count == 0))
1920                 return 0;
1921
1922         if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1923                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1924                         (rx_mod->rxq_free_count < rx_cfg->num_paths))
1925                                 return 0;
1926         } else {
1927                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1928                         (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1929                         return 0;
1930         }
1931
1932         return 1;
1933 }
1934
1935 static struct bna_rxq *
1936 bna_rxq_get(struct bna_rx_mod *rx_mod)
1937 {
1938         struct bna_rxq *rxq = NULL;
1939         struct list_head        *qe = NULL;
1940
1941         bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1942         rx_mod->rxq_free_count--;
1943         rxq = (struct bna_rxq *)qe;
1944         bfa_q_qe_init(&rxq->qe);
1945
1946         return rxq;
1947 }
1948
1949 static void
1950 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1951 {
1952         bfa_q_qe_init(&rxq->qe);
1953         list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1954         rx_mod->rxq_free_count++;
1955 }
1956
1957 static struct bna_rxp *
1958 bna_rxp_get(struct bna_rx_mod *rx_mod)
1959 {
1960         struct list_head        *qe = NULL;
1961         struct bna_rxp *rxp = NULL;
1962
1963         bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1964         rx_mod->rxp_free_count--;
1965         rxp = (struct bna_rxp *)qe;
1966         bfa_q_qe_init(&rxp->qe);
1967
1968         return rxp;
1969 }
1970
1971 static void
1972 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1973 {
1974         bfa_q_qe_init(&rxp->qe);
1975         list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1976         rx_mod->rxp_free_count++;
1977 }
1978
1979 static struct bna_rx *
1980 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1981 {
1982         struct list_head        *qe = NULL;
1983         struct bna_rx *rx = NULL;
1984
1985         if (type == BNA_RX_T_REGULAR) {
1986                 bfa_q_deq(&rx_mod->rx_free_q, &qe);
1987         } else
1988                 bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1989
1990         rx_mod->rx_free_count--;
1991         rx = (struct bna_rx *)qe;
1992         bfa_q_qe_init(&rx->qe);
1993         list_add_tail(&rx->qe, &rx_mod->rx_active_q);
1994         rx->type = type;
1995
1996         return rx;
1997 }
1998
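/*
 * Return an rx to the free pool, keeping rx_free_q sorted by ascending rid.
 * bna_rx_get() above relies on this ordering: regular Rx objects are
 * dequeued from the head (lowest rid), loopback ones from the tail.
 */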
1999 static void
2000 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2001 {
2002         struct list_head *prev_qe = NULL;
2003         struct list_head *qe;
2004
2005         bfa_q_qe_init(&rx->qe);
2006
2007         list_for_each(qe, &rx_mod->rx_free_q) {
2008                 if (((struct bna_rx *)qe)->rid < rx->rid)
2009                         prev_qe = qe;
2010                 else
2011                         break;
2012         }
2013
2014         if (prev_qe == NULL) {
2015                 /* This is the first entry */
2016                 bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
2017         } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
2018                 /* This is the last entry */
2019                 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2020         } else {
2021                 /* Somewhere in the middle */
2022                 bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
2023                 bfa_q_prev(&rx->qe) = prev_qe;
2024                 bfa_q_next(prev_qe) = &rx->qe;
2025                 bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
2026         }
2027
2028         rx_mod->rx_free_count++;
2029 }
2030
2031 static void
2032 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
2033                 struct bna_rxq *q1)
2034 {
2035         switch (rxp->type) {
2036         case BNA_RXP_SINGLE:
2037                 rxp->rxq.single.only = q0;
2038                 rxp->rxq.single.reserved = NULL;
2039                 break;
2040         case BNA_RXP_SLR:
2041                 rxp->rxq.slr.large = q0;
2042                 rxp->rxq.slr.small = q1;
2043                 break;
2044         case BNA_RXP_HDS:
2045                 rxp->rxq.hds.data = q0;
2046                 rxp->rxq.hds.hdr = q1;
2047                 break;
2048         default:
2049                 break;
2050         }
2051 }
2052
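/*
 * Populate an RxQ's queue page table: each PAGE_SIZE chunk of the DMA-able
 * page block gets its bus address written into the h/w QPT and its kernel
 * virtual address recorded in the s/w QPT (rcb->sw_qpt), so the driver and
 * the device walk the same set of pages. bna_rxp_cqpt_setup() below does
 * the same for the completion queue.
 */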
2053 static void
2054 bna_rxq_qpt_setup(struct bna_rxq *rxq,
2055                 struct bna_rxp *rxp,
2056                 u32 page_count,
2057                 u32 page_size,
2058                 struct bna_mem_descr *qpt_mem,
2059                 struct bna_mem_descr *swqpt_mem,
2060                 struct bna_mem_descr *page_mem)
2061 {
2062         u8 *kva;
2063         u64 dma;
2064         struct bna_dma_addr bna_dma;
2065         int     i;
2066
2067         rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2068         rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2069         rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2070         rxq->qpt.page_count = page_count;
2071         rxq->qpt.page_size = page_size;
2072
2073         rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2074         rxq->rcb->sw_q = page_mem->kva;
2075
2076         kva = page_mem->kva;
2077         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2078
2079         for (i = 0; i < rxq->qpt.page_count; i++) {
2080                 rxq->rcb->sw_qpt[i] = kva;
2081                 kva += PAGE_SIZE;
2082
2083                 BNA_SET_DMA_ADDR(dma, &bna_dma);
2084                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2085                         bna_dma.lsb;
2086                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2087                         bna_dma.msb;
2088                 dma += PAGE_SIZE;
2089         }
2090 }
2091
2092 static void
2093 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
2094                 u32 page_count,
2095                 u32 page_size,
2096                 struct bna_mem_descr *qpt_mem,
2097                 struct bna_mem_descr *swqpt_mem,
2098                 struct bna_mem_descr *page_mem)
2099 {
2100         u8 *kva;
2101         u64 dma;
2102         struct bna_dma_addr bna_dma;
2103         int     i;
2104
2105         rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2106         rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2107         rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2108         rxp->cq.qpt.page_count = page_count;
2109         rxp->cq.qpt.page_size = page_size;
2110
2111         rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2112         rxp->cq.ccb->sw_q = page_mem->kva;
2113
2114         kva = page_mem->kva;
2115         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2116
2117         for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2118                 rxp->cq.ccb->sw_qpt[i] = kva;
2119                 kva += PAGE_SIZE;
2120
2121                 BNA_SET_DMA_ADDR(dma, &bna_dma);
2122                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2123                         bna_dma.lsb;
2124                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2125                         bna_dma.msb;
2126                 dma += PAGE_SIZE;
2127         }
2128 }
2129
2130 static void
2131 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2132 {
2133         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2134
2135         bfa_wc_down(&rx_mod->rx_stop_wc);
2136 }
2137
2138 static void
2139 bna_rx_mod_cb_rx_stopped_all(void *arg)
2140 {
2141         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2142
2143         if (rx_mod->stop_cbfn)
2144                 rx_mod->stop_cbfn(&rx_mod->bna->enet);
2145         rx_mod->stop_cbfn = NULL;
2146 }
2147
2148 static void
2149 bna_rx_start(struct bna_rx *rx)
2150 {
2151         rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2152         if (rx->rx_flags & BNA_RX_F_ENABLED)
2153                 bfa_fsm_send_event(rx, RX_E_START);
2154 }
2155
2156 static void
2157 bna_rx_stop(struct bna_rx *rx)
2158 {
2159         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2160         if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2161                 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2162         else {
2163                 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2164                 rx->stop_cbarg = &rx->bna->rx_mod;
2165                 bfa_fsm_send_event(rx, RX_E_STOP);
2166         }
2167 }
2168
2169 static void
2170 bna_rx_fail(struct bna_rx *rx)
2171 {
2172         /* Indicate Enet is no longer started and fail the Rx */
2173         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2174         bfa_fsm_send_event(rx, RX_E_FAIL);
2175 }
2176
2177 void
2178 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2179 {
2180         struct bna_rx *rx;
2181         struct list_head *qe;
2182
2183         rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2184         if (type == BNA_RX_T_LOOPBACK)
2185                 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2186
2187         list_for_each(qe, &rx_mod->rx_active_q) {
2188                 rx = (struct bna_rx *)qe;
2189                 if (rx->type == type)
2190                         bna_rx_start(rx);
2191         }
2192 }
2193
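/*
 * Stopping uses the bfa_wc wait counter: every active rx of the requested
 * type takes a reference via bfa_wc_up() before being stopped, the per-rx
 * completion bna_rx_mod_cb_rx_stopped() drops it, and bfa_wc_wait() releases
 * the initial reference, so bna_rx_mod_cb_rx_stopped_all() (and thus the
 * registered bna_enet_cb_rx_stopped) runs once the last rx has stopped.
 */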
2194 void
2195 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2196 {
2197         struct bna_rx *rx;
2198         struct list_head *qe;
2199
2200         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2201         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2202
2203         rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2204
2205         bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2206
2207         list_for_each(qe, &rx_mod->rx_active_q) {
2208                 rx = (struct bna_rx *)qe;
2209                 if (rx->type == type) {
2210                         bfa_wc_up(&rx_mod->rx_stop_wc);
2211                         bna_rx_stop(rx);
2212                 }
2213         }
2214
2215         bfa_wc_wait(&rx_mod->rx_stop_wc);
2216 }
2217
2218 void
2219 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2220 {
2221         struct bna_rx *rx;
2222         struct list_head *qe;
2223
2224         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2225         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2226
2227         list_for_each(qe, &rx_mod->rx_active_q) {
2228                 rx = (struct bna_rx *)qe;
2229                 bna_rx_fail(rx);
2230         }
2231 }
2232
2233 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2234                         struct bna_res_info *res_info)
2235 {
2236         int     index;
2237         struct bna_rx *rx_ptr;
2238         struct bna_rxp *rxp_ptr;
2239         struct bna_rxq *rxq_ptr;
2240
2241         rx_mod->bna = bna;
2242         rx_mod->flags = 0;
2243
2244         rx_mod->rx = (struct bna_rx *)
2245                 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2246         rx_mod->rxp = (struct bna_rxp *)
2247                 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2248         rx_mod->rxq = (struct bna_rxq *)
2249                 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2250
2251         /* Initialize the queues */
2252         INIT_LIST_HEAD(&rx_mod->rx_free_q);
2253         rx_mod->rx_free_count = 0;
2254         INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2255         rx_mod->rxq_free_count = 0;
2256         INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2257         rx_mod->rxp_free_count = 0;
2258         INIT_LIST_HEAD(&rx_mod->rx_active_q);
2259
2260         /* Build RX queues */
2261         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2262                 rx_ptr = &rx_mod->rx[index];
2263
2264                 bfa_q_qe_init(&rx_ptr->qe);
2265                 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2266                 rx_ptr->bna = NULL;
2267                 rx_ptr->rid = index;
2268                 rx_ptr->stop_cbfn = NULL;
2269                 rx_ptr->stop_cbarg = NULL;
2270
2271                 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2272                 rx_mod->rx_free_count++;
2273         }
2274
2275         /* build RX-path queue */
2276         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2277                 rxp_ptr = &rx_mod->rxp[index];
2278                 bfa_q_qe_init(&rxp_ptr->qe);
2279                 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2280                 rx_mod->rxp_free_count++;
2281         }
2282
2283         /* build RXQ queue */
2284         for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2285                 rxq_ptr = &rx_mod->rxq[index];
2286                 bfa_q_qe_init(&rxq_ptr->qe);
2287                 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2288                 rx_mod->rxq_free_count++;
2289         }
2290 }
2291
2292 void
2293 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2294 {
2295         struct list_head                *qe;
2296         int i;
2297
2298         i = 0;
2299         list_for_each(qe, &rx_mod->rx_free_q)
2300                 i++;
2301
2302         i = 0;
2303         list_for_each(qe, &rx_mod->rxp_free_q)
2304                 i++;
2305
2306         i = 0;
2307         list_for_each(qe, &rx_mod->rxq_free_q)
2308                 i++;
2309
2310         rx_mod->bna = NULL;
2311 }
2312
2313 void
2314 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2315 {
2316         struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2317         struct bna_rxp *rxp = NULL;
2318         struct bna_rxq *q0 = NULL, *q1 = NULL;
2319         struct list_head *rxp_qe;
2320         int i;
2321
2322         bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2323                 sizeof(struct bfi_enet_rx_cfg_rsp));
2324
2325         rx->hw_id = cfg_rsp->hw_id;
2326
2327         for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2328                 i < rx->num_paths;
2329                 i++, rxp_qe = bfa_q_next(rxp_qe)) {
2330                 rxp = (struct bna_rxp *)rxp_qe;
2331                 GET_RXQS(rxp, q0, q1);
2332
2333                 /* Setup doorbells */
2334                 rxp->cq.ccb->i_dbell->doorbell_addr =
2335                         rx->bna->pcidev.pci_bar_kva
2336                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
2337                 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2338                 q0->rcb->q_dbell =
2339                         rx->bna->pcidev.pci_bar_kva
2340                         + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2341                 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2342                 if (q1) {
2343                         q1->rcb->q_dbell =
2344                         rx->bna->pcidev.pci_bar_kva
2345                         + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2346                         q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2347                 }
2348
2349                 /* Initialize producer/consumer indexes */
2350                 (*rxp->cq.ccb->hw_producer_index) = 0;
2351                 rxp->cq.ccb->producer_index = 0;
2352                 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2353                 if (q1)
2354                         q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2355         }
2356
2357         bfa_fsm_send_event(rx, RX_E_STARTED);
2358 }
2359
2360 void
2361 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2362 {
2363         bfa_fsm_send_event(rx, RX_E_STOPPED);
2364 }
2365
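/*
 * Fill in res_info[] with everything a new Rx needs: kva memory for the
 * CCBs/RCBs, DMA memory for the CQ/data-Q/header-Q page tables and their
 * pages, IB index segments, the RIT, and one MSI-X vector per path.
 * Sizing example (illustrative values only): with 4 KB pages and an 8-byte
 * RxQ work item, q0_depth = 2048 gives dq_size = 16 KB, so dpage_count = 4.
 */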
2366 void
2367 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2368 {
2369         u32 cq_size, hq_size, dq_size;
2370         u32 cpage_count, hpage_count, dpage_count;
2371         struct bna_mem_info *mem_info;
2372         u32 cq_depth;
2373         u32 hq_depth;
2374         u32 dq_depth;
2375
2376         dq_depth = q_cfg->q0_depth;
2377         hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2378         cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
2379
2380         cq_size = cq_depth * BFI_CQ_WI_SIZE;
2381         cq_size = ALIGN(cq_size, PAGE_SIZE);
2382         cpage_count = SIZE_TO_PAGES(cq_size);
2383
2384         dq_depth = roundup_pow_of_two(dq_depth);
2385         dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2386         dq_size = ALIGN(dq_size, PAGE_SIZE);
2387         dpage_count = SIZE_TO_PAGES(dq_size);
2388
2389         if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2390                 hq_depth = roundup_pow_of_two(hq_depth);
2391                 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2392                 hq_size = ALIGN(hq_size, PAGE_SIZE);
2393                 hpage_count = SIZE_TO_PAGES(hq_size);
2394         } else
2395                 hpage_count = 0;
2396
2397         res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2398         mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2399         mem_info->mem_type = BNA_MEM_T_KVA;
2400         mem_info->len = sizeof(struct bna_ccb);
2401         mem_info->num = q_cfg->num_paths;
2402
2403         res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2404         mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2405         mem_info->mem_type = BNA_MEM_T_KVA;
2406         mem_info->len = sizeof(struct bna_rcb);
2407         mem_info->num = BNA_GET_RXQS(q_cfg);
2408
2409         res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2410         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2411         mem_info->mem_type = BNA_MEM_T_DMA;
2412         mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2413         mem_info->num = q_cfg->num_paths;
2414
2415         res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2416         mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2417         mem_info->mem_type = BNA_MEM_T_KVA;
2418         mem_info->len = cpage_count * sizeof(void *);
2419         mem_info->num = q_cfg->num_paths;
2420
2421         res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2422         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2423         mem_info->mem_type = BNA_MEM_T_DMA;
2424         mem_info->len = PAGE_SIZE * cpage_count;
2425         mem_info->num = q_cfg->num_paths;
2426
2427         res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2428         mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2429         mem_info->mem_type = BNA_MEM_T_DMA;
2430         mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2431         mem_info->num = q_cfg->num_paths;
2432
2433         res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2434         mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2435         mem_info->mem_type = BNA_MEM_T_KVA;
2436         mem_info->len = dpage_count * sizeof(void *);
2437         mem_info->num = q_cfg->num_paths;
2438
2439         res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2440         mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2441         mem_info->mem_type = BNA_MEM_T_DMA;
2442         mem_info->len = PAGE_SIZE * dpage_count;
2443         mem_info->num = q_cfg->num_paths;
2444
2445         res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2446         mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2447         mem_info->mem_type = BNA_MEM_T_DMA;
2448         mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2449         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2450
2451         res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2452         mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2453         mem_info->mem_type = BNA_MEM_T_KVA;
2454         mem_info->len = hpage_count * sizeof(void *);
2455         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2456
2457         res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2458         mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2459         mem_info->mem_type = BNA_MEM_T_DMA;
2460         mem_info->len = PAGE_SIZE * hpage_count;
2461         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2462
2463         res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2464         mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2465         mem_info->mem_type = BNA_MEM_T_DMA;
2466         mem_info->len = BFI_IBIDX_SIZE;
2467         mem_info->num = q_cfg->num_paths;
2468
2469         res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2470         mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2471         mem_info->mem_type = BNA_MEM_T_KVA;
2472         mem_info->len = BFI_ENET_RSS_RIT_MAX;
2473         mem_info->num = 1;
2474
2475         res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2476         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2477         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2478 }
2479
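/*
 * Allocate an rx plus one rxp (and one or two rxqs) per path from the free
 * pools, wire the RCBs, CCBs and IBs to the memory handed in via res_info,
 * set up the queue page tables, initialize the rxf, and leave the new Rx
 * in the stopped state with its rid marked in rid_mask.
 */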
2480 struct bna_rx *
2481 bna_rx_create(struct bna *bna, struct bnad *bnad,
2482                 struct bna_rx_config *rx_cfg,
2483                 const struct bna_rx_event_cbfn *rx_cbfn,
2484                 struct bna_res_info *res_info,
2485                 void *priv)
2486 {
2487         struct bna_rx_mod *rx_mod = &bna->rx_mod;
2488         struct bna_rx *rx;
2489         struct bna_rxp *rxp;
2490         struct bna_rxq *q0;
2491         struct bna_rxq *q1;
2492         struct bna_intr_info *intr_info;
2493         struct bna_mem_descr *hqunmap_mem;
2494         struct bna_mem_descr *dqunmap_mem;
2495         struct bna_mem_descr *ccb_mem;
2496         struct bna_mem_descr *rcb_mem;
2497         struct bna_mem_descr *cqpt_mem;
2498         struct bna_mem_descr *cswqpt_mem;
2499         struct bna_mem_descr *cpage_mem;
2500         struct bna_mem_descr *hqpt_mem;
2501         struct bna_mem_descr *dqpt_mem;
2502         struct bna_mem_descr *hsqpt_mem;
2503         struct bna_mem_descr *dsqpt_mem;
2504         struct bna_mem_descr *hpage_mem;
2505         struct bna_mem_descr *dpage_mem;
2506         u32 dpage_count, hpage_count;
2507         u32 hq_idx, dq_idx, rcb_idx;
2508         u32 cq_depth, i;
2509         u32 page_count;
2510
2511         if (!bna_rx_res_check(rx_mod, rx_cfg))
2512                 return NULL;
2513
2514         intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2515         ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2516         rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2517         dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2518         hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2519         cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2520         cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2521         cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2522         hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2523         dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2524         hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2525         dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2526         hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2527         dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2528
2529         page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2530                         PAGE_SIZE;
2531
2532         dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2533                         PAGE_SIZE;
2534
2535         hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2536                         PAGE_SIZE;
2537
2538         rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2539         rx->bna = bna;
2540         rx->rx_flags = 0;
2541         INIT_LIST_HEAD(&rx->rxp_q);
2542         rx->stop_cbfn = NULL;
2543         rx->stop_cbarg = NULL;
2544         rx->priv = priv;
2545
2546         rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2547         rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2548         rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2549         rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2550         rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2551         /* Following callbacks are mandatory */
2552         rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2553         rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2554
2555         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2556                 switch (rx->type) {
2557                 case BNA_RX_T_REGULAR:
2558                         if (!(rx->bna->rx_mod.flags &
2559                                 BNA_RX_MOD_F_ENET_LOOPBACK))
2560                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2561                         break;
2562                 case BNA_RX_T_LOOPBACK:
2563                         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2564                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2565                         break;
2566                 }
2567         }
2568
2569         rx->num_paths = rx_cfg->num_paths;
2570         for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2571                         i < rx->num_paths; i++) {
2572                 rxp = bna_rxp_get(rx_mod);
2573                 list_add_tail(&rxp->qe, &rx->rxp_q);
2574                 rxp->type = rx_cfg->rxp_type;
2575                 rxp->rx = rx;
2576                 rxp->cq.rx = rx;
2577
2578                 q0 = bna_rxq_get(rx_mod);
2579                 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2580                         q1 = NULL;
2581                 else
2582                         q1 = bna_rxq_get(rx_mod);
2583
2584                 if (1 == intr_info->num)
2585                         rxp->vector = intr_info->idl[0].vector;
2586                 else
2587                         rxp->vector = intr_info->idl[i].vector;
2588
2589                 /* Setup IB */
2590
2591                 rxp->cq.ib.ib_seg_host_addr.lsb =
2592                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2593                 rxp->cq.ib.ib_seg_host_addr.msb =
2594                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2595                 rxp->cq.ib.ib_seg_host_addr_kva =
2596                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2597                 rxp->cq.ib.intr_type = intr_info->intr_type;
2598                 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2599                         rxp->cq.ib.intr_vector = rxp->vector;
2600                 else
2601                         rxp->cq.ib.intr_vector = BIT(rxp->vector);
2602                 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2603                 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2604                 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2605
2606                 bna_rxp_add_rxqs(rxp, q0, q1);
2607
2608                 /* Setup large Q */
2609
2610                 q0->rx = rx;
2611                 q0->rxp = rxp;
2612
2613                 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2614                 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2615                 rcb_idx++; dq_idx++;
2616                 q0->rcb->q_depth = rx_cfg->q0_depth;
2617                 q0->q_depth = rx_cfg->q0_depth;
2618                 q0->multi_buffer = rx_cfg->q0_multi_buf;
2619                 q0->buffer_size = rx_cfg->q0_buf_size;
2620                 q0->num_vecs = rx_cfg->q0_num_vecs;
2621                 q0->rcb->rxq = q0;
2622                 q0->rcb->bnad = bna->bnad;
2623                 q0->rcb->id = 0;
2624                 q0->rx_packets = q0->rx_bytes = 0;
2625                 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2626
2627                 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2628                         &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2629
2630                 if (rx->rcb_setup_cbfn)
2631                         rx->rcb_setup_cbfn(bnad, q0->rcb);
2632
2633                 /* Setup small Q */
2634
2635                 if (q1) {
2636                         q1->rx = rx;
2637                         q1->rxp = rxp;
2638
2639                         q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2640                         q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2641                         rcb_idx++; hq_idx++;
2642                         q1->rcb->q_depth = rx_cfg->q1_depth;
2643                         q1->q_depth = rx_cfg->q1_depth;
2644                         q1->multi_buffer = BNA_STATUS_T_DISABLED;
2645                         q1->num_vecs = 1;
2646                         q1->rcb->rxq = q1;
2647                         q1->rcb->bnad = bna->bnad;
2648                         q1->rcb->id = 1;
2649                         q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2650                                         rx_cfg->hds_config.forced_offset
2651                                         : rx_cfg->q1_buf_size;
2652                         q1->rx_packets = q1->rx_bytes = 0;
2653                         q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2654
2655                         bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2656                                 &hqpt_mem[i], &hsqpt_mem[i],
2657                                 &hpage_mem[i]);
2658
2659                         if (rx->rcb_setup_cbfn)
2660                                 rx->rcb_setup_cbfn(bnad, q1->rcb);
2661                 }
2662
2663                 /* Setup CQ */
2664
2665                 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2666                 cq_depth = rx_cfg->q0_depth +
2667                         ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2668                          0 : rx_cfg->q1_depth);
2669                 /* if multi-buffer is enabled, the sum of q0_depth
2670                  * and q1_depth need not be a power of 2
2671                  */
2672                 cq_depth = roundup_pow_of_two(cq_depth);
2673                 rxp->cq.ccb->q_depth = cq_depth;
2674                 rxp->cq.ccb->cq = &rxp->cq;
2675                 rxp->cq.ccb->rcb[0] = q0->rcb;
2676                 q0->rcb->ccb = rxp->cq.ccb;
2677                 if (q1) {
2678                         rxp->cq.ccb->rcb[1] = q1->rcb;
2679                         q1->rcb->ccb = rxp->cq.ccb;
2680                 }
2681                 rxp->cq.ccb->hw_producer_index =
2682                         (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2683                 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2684                 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2685                 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2686                 rxp->cq.ccb->rx_coalescing_timeo =
2687                         rxp->cq.ib.coalescing_timeo;
2688                 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2689                 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2690                 rxp->cq.ccb->bnad = bna->bnad;
2691                 rxp->cq.ccb->id = i;
2692
2693                 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2694                         &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2695
2696                 if (rx->ccb_setup_cbfn)
2697                         rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2698         }
2699
2700         rx->hds_cfg = rx_cfg->hds_config;
2701
2702         bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2703
2704         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2705
2706         rx_mod->rid_mask |= BIT(rx->rid);
2707
2708         return rx;
2709 }
2710
2711 void
2712 bna_rx_destroy(struct bna_rx *rx)
2713 {
2714         struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2715         struct bna_rxq *q0 = NULL;
2716         struct bna_rxq *q1 = NULL;
2717         struct bna_rxp *rxp;
2718         struct list_head *qe;
2719
2720         bna_rxf_uninit(&rx->rxf);
2721
2722         while (!list_empty(&rx->rxp_q)) {
2723                 bfa_q_deq(&rx->rxp_q, &rxp);
2724                 GET_RXQS(rxp, q0, q1);
2725                 if (rx->rcb_destroy_cbfn)
2726                         rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2727                 q0->rcb = NULL;
2728                 q0->rxp = NULL;
2729                 q0->rx = NULL;
2730                 bna_rxq_put(rx_mod, q0);
2731
2732                 if (q1) {
2733                         if (rx->rcb_destroy_cbfn)
2734                                 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2735                         q1->rcb = NULL;
2736                         q1->rxp = NULL;
2737                         q1->rx = NULL;
2738                         bna_rxq_put(rx_mod, q1);
2739                 }
2740                 rxp->rxq.slr.large = NULL;
2741                 rxp->rxq.slr.small = NULL;
2742
2743                 if (rx->ccb_destroy_cbfn)
2744                         rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2745                 rxp->cq.ccb = NULL;
2746                 rxp->rx = NULL;
2747                 bna_rxp_put(rx_mod, rxp);
2748         }
2749
2750         list_for_each(qe, &rx_mod->rx_active_q) {
2751                 if (qe == &rx->qe) {
2752                         list_del(&rx->qe);
2753                         bfa_q_qe_init(&rx->qe);
2754                         break;
2755                 }
2756         }
2757
2758         rx_mod->rid_mask &= ~BIT(rx->rid);
2759
2760         rx->bna = NULL;
2761         rx->priv = NULL;
2762         bna_rx_put(rx_mod, rx);
2763 }
2764
2765 void
2766 bna_rx_enable(struct bna_rx *rx)
2767 {
2768         if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2769                 return;
2770
2771         rx->rx_flags |= BNA_RX_F_ENABLED;
2772         if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2773                 bfa_fsm_send_event(rx, RX_E_START);
2774 }
2775
2776 void
2777 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2778                 void (*cbfn)(void *, struct bna_rx *))
2779 {
2780         if (type == BNA_SOFT_CLEANUP) {
2781                 /* h/w should not be accessed. Treat it as if we're stopped */
2782                 (*cbfn)(rx->bna->bnad, rx);
2783         } else {
2784                 rx->stop_cbfn = cbfn;
2785                 rx->stop_cbarg = rx->bna->bnad;
2786
2787                 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2788
2789                 bfa_fsm_send_event(rx, RX_E_STOP);
2790         }
2791 }
2792
2793 void
2794 bna_rx_cleanup_complete(struct bna_rx *rx)
2795 {
2796         bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2797 }
2798
2799 void
2800 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2801 {
2802         struct bna_rxf *rxf = &rx->rxf;
2803
2804         if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2805                 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2806                 rxf->vlan_strip_pending = true;
2807                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2808         }
2809 }
2810
2811 void
2812 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2813 {
2814         struct bna_rxf *rxf = &rx->rxf;
2815
2816         if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2817                 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2818                 rxf->vlan_strip_pending = true;
2819                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2820         }
2821 }
2822
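/*
 * Apply a promiscuous/default/allmulti mode change. The error checks below
 * refuse requests when another Rx in the system already owns promiscuous or
 * default mode, or when both modes would end up enabled at once; RXF_E_CONFIG
 * is raised only when one of the rxf helpers reports a change that actually
 * has to be programmed into h/w.
 */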
2823 enum bna_cb_status
2824 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2825                 enum bna_rxmode bitmask)
2826 {
2827         struct bna_rxf *rxf = &rx->rxf;
2828         int need_hw_config = 0;
2829
2830         /* Error checks */
2831
2832         if (is_promisc_enable(new_mode, bitmask)) {
2833                 /* If promisc mode is already enabled elsewhere in the system */
2834                 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2835                         (rx->bna->promisc_rid != rxf->rx->rid))
2836                         goto err_return;
2837
2838                 /* If default mode is already enabled in the system */
2839                 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2840                         goto err_return;
2841
2842                 /* Trying to enable promiscuous and default mode together */
2843                 if (is_default_enable(new_mode, bitmask))
2844                         goto err_return;
2845         }
2846
2847         if (is_default_enable(new_mode, bitmask)) {
2848                 /* If default mode is already enabled elsewhere in the system */
2849                 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2850                         (rx->bna->default_mode_rid != rxf->rx->rid)) {
2851                                 goto err_return;
2852                 }
2853
2854                 /* If promiscuous mode is already enabled in the system */
2855                 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2856                         goto err_return;
2857         }
2858
2859         /* Process the commands */
2860
2861         if (is_promisc_enable(new_mode, bitmask)) {
2862                 if (bna_rxf_promisc_enable(rxf))
2863                         need_hw_config = 1;
2864         } else if (is_promisc_disable(new_mode, bitmask)) {
2865                 if (bna_rxf_promisc_disable(rxf))
2866                         need_hw_config = 1;
2867         }
2868
2869         if (is_allmulti_enable(new_mode, bitmask)) {
2870                 if (bna_rxf_allmulti_enable(rxf))
2871                         need_hw_config = 1;
2872         } else if (is_allmulti_disable(new_mode, bitmask)) {
2873                 if (bna_rxf_allmulti_disable(rxf))
2874                         need_hw_config = 1;
2875         }
2876
2877         /* Trigger h/w if needed */
2878
2879         if (need_hw_config) {
2880                 rxf->cam_fltr_cbfn = NULL;
2881                 rxf->cam_fltr_cbarg = rx->bna->bnad;
2882                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2883         }
2884
2885         return BNA_CB_SUCCESS;
2886
2887 err_return:
2888         return BNA_CB_FAIL;
2889 }
2890
2891 void
2892 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2893 {
2894         struct bna_rxf *rxf = &rx->rxf;
2895
2896         if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2897                 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2898                 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2899                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2900         }
2901 }
2902
2903 void
2904 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2905 {
2906         struct bna_rxp *rxp;
2907         struct list_head *qe;
2908
2909         list_for_each(qe, &rx->rxp_q) {
2910                 rxp = (struct bna_rxp *)qe;
2911                 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2912                 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2913         }
2914 }
2915
2916 void
2917 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2918 {
2919         int i, j;
2920
2921         for (i = 0; i < BNA_LOAD_T_MAX; i++)
2922                 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2923                         bna->rx_mod.dim_vector[i][j] = vector[i][j];
2924 }
2925
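/*
 * Dynamic interrupt moderation: classify the CCB's packet rate since the
 * last call into one of the BNA_LOAD_T_* buckets, pick bias 0 when small
 * packets dominate (small_rt > 2 * large_rt) and 1 otherwise, then program
 * the coalescing timeout found in rx_mod.dim_vector[load][bias] into the IB.
 */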
2926 void
2927 bna_rx_dim_update(struct bna_ccb *ccb)
2928 {
2929         struct bna *bna = ccb->cq->rx->bna;
2930         u32 load, bias;
2931         u32 pkt_rt, small_rt, large_rt;
2932         u8 coalescing_timeo;
2933
2934         if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2935                 (ccb->pkt_rate.large_pkt_cnt == 0))
2936                 return;
2937
2938         /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2939
2940         small_rt = ccb->pkt_rate.small_pkt_cnt;
2941         large_rt = ccb->pkt_rate.large_pkt_cnt;
2942
2943         pkt_rt = small_rt + large_rt;
2944
2945         if (pkt_rt < BNA_PKT_RATE_10K)
2946                 load = BNA_LOAD_T_LOW_4;
2947         else if (pkt_rt < BNA_PKT_RATE_20K)
2948                 load = BNA_LOAD_T_LOW_3;
2949         else if (pkt_rt < BNA_PKT_RATE_30K)
2950                 load = BNA_LOAD_T_LOW_2;
2951         else if (pkt_rt < BNA_PKT_RATE_40K)
2952                 load = BNA_LOAD_T_LOW_1;
2953         else if (pkt_rt < BNA_PKT_RATE_50K)
2954                 load = BNA_LOAD_T_HIGH_1;
2955         else if (pkt_rt < BNA_PKT_RATE_60K)
2956                 load = BNA_LOAD_T_HIGH_2;
2957         else if (pkt_rt < BNA_PKT_RATE_80K)
2958                 load = BNA_LOAD_T_HIGH_3;
2959         else
2960                 load = BNA_LOAD_T_HIGH_4;
2961
2962         if (small_rt > (large_rt << 1))
2963                 bias = 0;
2964         else
2965                 bias = 1;
2966
2967         ccb->pkt_rate.small_pkt_cnt = 0;
2968         ccb->pkt_rate.large_pkt_cnt = 0;
2969
2970         coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2971         ccb->rx_coalescing_timeo = coalescing_timeo;
2972
2973         /* Set it to IB */
2974         bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2975 }
2976
2977 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2978         {12, 12},
2979         {6, 10},
2980         {5, 10},
2981         {4, 8},
2982         {3, 6},
2983         {3, 6},
2984         {2, 4},
2985         {1, 2},
2986 };
2987
2988 /* TX */
2989
2990 #define call_tx_stop_cbfn(tx)                                           \
2991 do {                                                                    \
2992         if ((tx)->stop_cbfn) {                                          \
2993                 void (*cbfn)(void *, struct bna_tx *);          \
2994                 void *cbarg;                                            \
2995                 cbfn = (tx)->stop_cbfn;                                 \
2996                 cbarg = (tx)->stop_cbarg;                               \
2997                 (tx)->stop_cbfn = NULL;                                 \
2998                 (tx)->stop_cbarg = NULL;                                \
2999                 cbfn(cbarg, (tx));                                      \
3000         }                                                               \
3001 } while (0)
3002
3003 #define call_tx_prio_change_cbfn(tx)                                    \
3004 do {                                                                    \
3005         if ((tx)->prio_change_cbfn) {                                   \
3006                 void (*cbfn)(struct bnad *, struct bna_tx *);   \
3007                 cbfn = (tx)->prio_change_cbfn;                          \
3008                 (tx)->prio_change_cbfn = NULL;                          \
3009                 cbfn((tx)->bna->bnad, (tx));                            \
3010         }                                                               \
3011 } while (0)
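
/*
 * Both macros above implement a one-shot callback: the pending function
 * pointer is copied to a local and cleared on the tx before it is invoked,
 * so a callback that re-enters this code (for example by rearming the tx)
 * cannot fire the same completion twice.
 */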
3012
3013 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
3014 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
3015 static void bna_tx_enet_stop(struct bna_tx *tx);
3016
3017 enum bna_tx_event {
3018         TX_E_START                      = 1,
3019         TX_E_STOP                       = 2,
3020         TX_E_FAIL                       = 3,
3021         TX_E_STARTED                    = 4,
3022         TX_E_STOPPED                    = 5,
3023         TX_E_PRIO_CHANGE                = 6,
3024         TX_E_CLEANUP_DONE               = 7,
3025         TX_E_BW_UPDATE                  = 8,
3026 };
3027
3028 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
3029 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
3030 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
3031 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
3032 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
3033                         enum bna_tx_event);
3034 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3035                         enum bna_tx_event);
3036 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
3037                         enum bna_tx_event);
3038 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
3039 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
3040                         enum bna_tx_event);
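
/*
 * Rough map of the Tx FSM below, derived from the handlers themselves:
 *
 *   stopped --START--> start_wait --STARTED--> started
 *   started --STOP--> stop_wait --STOPPED/FAIL--> cleanup_wait
 *           --CLEANUP_DONE--> stopped
 *   started --PRIO_CHANGE/BW_UPDATE--> prio_stop_wait --STOPPED-->
 *           prio_cleanup_wait --CLEANUP_DONE--> start_wait
 *   A FAIL from the running states lands in failed; a later START goes
 *   through quiesce_wait until the pending cleanup completes.
 */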
3041
3042 static void
3043 bna_tx_sm_stopped_entry(struct bna_tx *tx)
3044 {
3045         call_tx_stop_cbfn(tx);
3046 }
3047
3048 static void
3049 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3050 {
3051         switch (event) {
3052         case TX_E_START:
3053                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3054                 break;
3055
3056         case TX_E_STOP:
3057                 call_tx_stop_cbfn(tx);
3058                 break;
3059
3060         case TX_E_FAIL:
3061                 /* No-op */
3062                 break;
3063
3064         case TX_E_PRIO_CHANGE:
3065                 call_tx_prio_change_cbfn(tx);
3066                 break;
3067
3068         case TX_E_BW_UPDATE:
3069                 /* No-op */
3070                 break;
3071
3072         default:
3073                 bfa_sm_fault(event);
3074         }
3075 }
3076
3077 static void
3078 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
3079 {
3080         bna_bfi_tx_enet_start(tx);
3081 }
3082
3083 static void
3084 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
3085 {
3086         switch (event) {
3087         case TX_E_STOP:
3088                 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3089                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3090                 break;
3091
3092         case TX_E_FAIL:
3093                 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3094                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3095                 break;
3096
3097         case TX_E_STARTED:
3098                 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
3099                         tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
3100                                 BNA_TX_F_BW_UPDATED);
3101                         bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3102                 } else
3103                         bfa_fsm_set_state(tx, bna_tx_sm_started);
3104                 break;
3105
3106         case TX_E_PRIO_CHANGE:
3107                 tx->flags |= BNA_TX_F_PRIO_CHANGED;
3108                 break;
3109
3110         case TX_E_BW_UPDATE:
3111                 tx->flags |= BNA_TX_F_BW_UPDATED;
3112                 break;
3113
3114         default:
3115                 bfa_sm_fault(event);
3116         }
3117 }
3118
3119 static void
3120 bna_tx_sm_started_entry(struct bna_tx *tx)
3121 {
3122         struct bna_txq *txq;
3123         struct list_head *qe;
3124         int is_regular = (tx->type == BNA_TX_T_REGULAR);
3125
3126         list_for_each(qe, &tx->txq_q) {
3127                 txq = (struct bna_txq *)qe;
3128                 txq->tcb->priority = txq->priority;
3129                 /* Start IB */
3130                 bna_ib_start(tx->bna, &txq->ib, is_regular);
3131         }
3132         tx->tx_resume_cbfn(tx->bna->bnad, tx);
3133 }
3134
3135 static void
3136 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3137 {
3138         switch (event) {
3139         case TX_E_STOP:
3140                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3141                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3142                 bna_tx_enet_stop(tx);
3143                 break;
3144
3145         case TX_E_FAIL:
3146                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3147                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3148                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3149                 break;
3150
3151         case TX_E_PRIO_CHANGE:
3152         case TX_E_BW_UPDATE:
3153                 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3154                 break;
3155
3156         default:
3157                 bfa_sm_fault(event);
3158         }
3159 }
3160
3161 static void
3162 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3163 {
3164 }
3165
3166 static void
3167 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3168 {
3169         switch (event) {
3170         case TX_E_FAIL:
3171         case TX_E_STOPPED:
3172                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3173                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3174                 break;
3175
3176         case TX_E_STARTED:
3177                 /*
3178                  * We are here due to start_wait -> stop_wait transition on
3179                  * TX_E_STOP event
3180                  */
3181                 bna_tx_enet_stop(tx);
3182                 break;
3183
3184         case TX_E_PRIO_CHANGE:
3185         case TX_E_BW_UPDATE:
3186                 /* No-op */
3187                 break;
3188
3189         default:
3190                 bfa_sm_fault(event);
3191         }
3192 }
3193
3194 static void
3195 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3196 {
3197 }
3198
3199 static void
3200 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3201 {
3202         switch (event) {
3203         case TX_E_FAIL:
3204         case TX_E_PRIO_CHANGE:
3205         case TX_E_BW_UPDATE:
3206                 /* No-op */
3207                 break;
3208
3209         case TX_E_CLEANUP_DONE:
3210                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3211                 break;
3212
3213         default:
3214                 bfa_sm_fault(event);
3215         }
3216 }
3217
3218 static void
3219 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3220 {
3221         tx->tx_stall_cbfn(tx->bna->bnad, tx);
3222         bna_tx_enet_stop(tx);
3223 }
3224
3225 static void
3226 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3227 {
3228         switch (event) {
3229         case TX_E_STOP:
3230                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3231                 break;
3232
3233         case TX_E_FAIL:
3234                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3235                 call_tx_prio_change_cbfn(tx);
3236                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3237                 break;
3238
3239         case TX_E_STOPPED:
3240                 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3241                 break;
3242
3243         case TX_E_PRIO_CHANGE:
3244         case TX_E_BW_UPDATE:
3245                 /* No-op */
3246                 break;
3247
3248         default:
3249                 bfa_sm_fault(event);
3250         }
3251 }
3252
3253 static void
3254 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3255 {
3256         call_tx_prio_change_cbfn(tx);
3257         tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3258 }
3259
3260 static void
3261 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3262 {
3263         switch (event) {
3264         case TX_E_STOP:
3265                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3266                 break;
3267
3268         case TX_E_FAIL:
3269                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3270                 break;
3271
3272         case TX_E_PRIO_CHANGE:
3273         case TX_E_BW_UPDATE:
3274                 /* No-op */
3275                 break;
3276
3277         case TX_E_CLEANUP_DONE:
3278                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3279                 break;
3280
3281         default:
3282                 bfa_sm_fault(event);
3283         }
3284 }
3285
3286 static void
3287 bna_tx_sm_failed_entry(struct bna_tx *tx)
3288 {
3289 }
3290
3291 static void
3292 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3293 {
3294         switch (event) {
3295         case TX_E_START:
3296                 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3297                 break;
3298
3299         case TX_E_STOP:
3300                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3301                 break;
3302
3303         case TX_E_FAIL:
3304                 /* No-op */
3305                 break;
3306
3307         case TX_E_CLEANUP_DONE:
3308                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3309                 break;
3310
3311         default:
3312                 bfa_sm_fault(event);
3313         }
3314 }
3315
3316 static void
3317 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3318 {
3319 }
3320
3321 static void
3322 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3323 {
3324         switch (event) {
3325         case TX_E_STOP:
3326                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3327                 break;
3328
3329         case TX_E_FAIL:
3330                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3331                 break;
3332
3333         case TX_E_CLEANUP_DONE:
3334                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3335                 break;
3336
3337         case TX_E_BW_UPDATE:
3338                 /* No-op */
3339                 break;
3340
3341         default:
3342                 bfa_sm_fault(event);
3343         }
3344 }
3345
3346 static void
3347 bna_bfi_tx_enet_start(struct bna_tx *tx)
3348 {
3349         struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3350         struct bna_txq *txq = NULL;
3351         struct list_head *qe;
3352         int i;
3353
3354         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3355                 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3356         cfg_req->mh.num_entries = htons(
3357                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3358
3359         cfg_req->num_queues = tx->num_txq;
3360         for (i = 0, qe = bfa_q_first(&tx->txq_q);
3361                 i < tx->num_txq;
3362                 i++, qe = bfa_q_next(qe)) {
3363                 txq = (struct bna_txq *)qe;
3364
3365                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3366                 cfg_req->q_cfg[i].q.priority = txq->priority;
3367
3368                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3369                         txq->ib.ib_seg_host_addr.lsb;
3370                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3371                         txq->ib.ib_seg_host_addr.msb;
3372                 cfg_req->q_cfg[i].ib.intr.msix_index =
3373                         htons((u16)txq->ib.intr_vector);
3374         }
3375
3376         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3377         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3378         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3379         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3380         cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3381                                 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3382         cfg_req->ib_cfg.coalescing_timeout =
3383                         htonl((u32)txq->ib.coalescing_timeo);
3384         cfg_req->ib_cfg.inter_pkt_timeout =
3385                         htonl((u32)txq->ib.interpkt_timeo);
3386         cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3387
3388         cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3389         cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3390         cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3391         cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3392
3393         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3394                 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3395         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3396 }
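
/*
 * Note on the request built above: the shared ib_cfg fields (MSI-X mode,
 * coalescing and inter-packet settings) are read from whichever txq the
 * loop visited last, on the assumption that these attributes are programmed
 * identically for every TxQ of this Tx.
 */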
3397
3398 static void
3399 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3400 {
3401         struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3402
3403         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3404                 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3405         req->mh.num_entries = htons(
3406                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3407         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3408                 &req->mh);
3409         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3410 }
3411
3412 static void
3413 bna_tx_enet_stop(struct bna_tx *tx)
3414 {
3415         struct bna_txq *txq;
3416         struct list_head *qe;
3417
3418         /* Stop IB */
3419         list_for_each(qe, &tx->txq_q) {
3420                 txq = (struct bna_txq *)qe;
3421                 bna_ib_stop(tx->bna, &txq->ib);
3422         }
3423
3424         bna_bfi_tx_enet_stop(tx);
3425 }
3426
3427 static void
3428 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3429                 struct bna_mem_descr *qpt_mem,
3430                 struct bna_mem_descr *swqpt_mem,
3431                 struct bna_mem_descr *page_mem)
3432 {
3433         u8 *kva;
3434         u64 dma;
3435         struct bna_dma_addr bna_dma;
3436         int i;
3437
3438         txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3439         txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3440         txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3441         txq->qpt.page_count = page_count;
3442         txq->qpt.page_size = page_size;
3443
3444         txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3445         txq->tcb->sw_q = page_mem->kva;
3446
3447         kva = page_mem->kva;
3448         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3449
3450         for (i = 0; i < page_count; i++) {
3451                 txq->tcb->sw_qpt[i] = kva;
3452                 kva += PAGE_SIZE;
3453
3454                 BNA_SET_DMA_ADDR(dma, &bna_dma);
3455                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3456                         bna_dma.lsb;
3457                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3458                         bna_dma.msb;
3459                 dma += PAGE_SIZE;
3460         }
3461 }
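
/*
 * Sketch of the queue page table built above (an illustration from the code,
 * not from hardware documentation): page_mem is treated as one contiguous
 * DMA allocation of page_count pages.  Entry i of the hardware QPT at
 * kv_qpt_ptr receives the DMA address of page i, while sw_qpt[i] keeps the
 * matching kernel virtual address so the driver can walk the queue without
 * translating addresses back.
 */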
3462
3463 static struct bna_tx *
3464 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3465 {
3466         struct list_head *qe = NULL;
3467         struct bna_tx *tx = NULL;
3468
3469         if (list_empty(&tx_mod->tx_free_q))
3470                 return NULL;
3471         if (type == BNA_TX_T_REGULAR) {
3472                 bfa_q_deq(&tx_mod->tx_free_q, &qe);
3473         } else {
3474                 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3475         }
3476         tx = (struct bna_tx *)qe;
3477         bfa_q_qe_init(&tx->qe);
3478         tx->type = type;
3479
3480         return tx;
3481 }
3482
3483 static void
3484 bna_tx_free(struct bna_tx *tx)
3485 {
3486         struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3487         struct bna_txq *txq;
3488         struct list_head *prev_qe;
3489         struct list_head *qe;
3490
3491         while (!list_empty(&tx->txq_q)) {
3492                 bfa_q_deq(&tx->txq_q, &txq);
3493                 bfa_q_qe_init(&txq->qe);
3494                 txq->tcb = NULL;
3495                 txq->tx = NULL;
3496                 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3497         }
3498
3499         list_for_each(qe, &tx_mod->tx_active_q) {
3500                 if (qe == &tx->qe) {
3501                         list_del(&tx->qe);
3502                         bfa_q_qe_init(&tx->qe);
3503                         break;
3504                 }
3505         }
3506
3507         tx->bna = NULL;
3508         tx->priv = NULL;
3509
3510         prev_qe = NULL;
3511         list_for_each(qe, &tx_mod->tx_free_q) {
3512                 if (((struct bna_tx *)qe)->rid < tx->rid)
3513                         prev_qe = qe;
3514                 else {
3515                         break;
3516                 }
3517         }
3518
3519         if (prev_qe == NULL) {
3520                 /* This is the first entry */
3521                 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3522         } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3523                 /* This is the last entry */
3524                 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3525         } else {
3526                 /* Somewhere in the middle */
3527                 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3528                 bfa_q_prev(&tx->qe) = prev_qe;
3529                 bfa_q_next(prev_qe) = &tx->qe;
3530                 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3531         }
3532 }
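
/*
 * The insertion above keeps tx_free_q sorted by rid.  That matters because
 * bna_tx_get() dequeues regular Tx objects from the head and loopback Tx
 * objects from the tail, so the two types are handed out from opposite ends
 * of the rid space.
 */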
3533
3534 static void
3535 bna_tx_start(struct bna_tx *tx)
3536 {
3537         tx->flags |= BNA_TX_F_ENET_STARTED;
3538         if (tx->flags & BNA_TX_F_ENABLED)
3539                 bfa_fsm_send_event(tx, TX_E_START);
3540 }
3541
3542 static void
3543 bna_tx_stop(struct bna_tx *tx)
3544 {
3545         tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3546         tx->stop_cbarg = &tx->bna->tx_mod;
3547
3548         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3549         bfa_fsm_send_event(tx, TX_E_STOP);
3550 }
3551
3552 static void
3553 bna_tx_fail(struct bna_tx *tx)
3554 {
3555         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3556         bfa_fsm_send_event(tx, TX_E_FAIL);
3557 }
3558
3559 void
3560 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3561 {
3562         struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3563         struct bna_txq *txq = NULL;
3564         struct list_head *qe;
3565         int i;
3566
3567         bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3568                 sizeof(struct bfi_enet_tx_cfg_rsp));
3569
3570         tx->hw_id = cfg_rsp->hw_id;
3571
3572         for (i = 0, qe = bfa_q_first(&tx->txq_q);
3573                 i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3574                 txq = (struct bna_txq *)qe;
3575
3576                 /* Setup doorbells */
3577                 txq->tcb->i_dbell->doorbell_addr =
3578                         tx->bna->pcidev.pci_bar_kva
3579                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
3580                 txq->tcb->q_dbell =
3581                         tx->bna->pcidev.pci_bar_kva
3582                         + ntohl(cfg_rsp->q_handles[i].q_dbell);
3583                 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3584
3585                 /* Initialize producer/consumer indexes */
3586                 (*txq->tcb->hw_consumer_index) = 0;
3587                 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3588         }
3589
3590         bfa_fsm_send_event(tx, TX_E_STARTED);
3591 }
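
/*
 * On the response path the firmware returns per-queue doorbell offsets;
 * adding them to pci_bar_kva yields the addresses the data path rings, and
 * the producer/consumer indexes are zeroed before TX_E_STARTED is posted to
 * the FSM.
 */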
3592
3593 void
3594 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3595 {
3596         bfa_fsm_send_event(tx, TX_E_STOPPED);
3597 }
3598
3599 void
3600 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3601 {
3602         struct bna_tx *tx;
3603         struct list_head *qe;
3604
3605         list_for_each(qe, &tx_mod->tx_active_q) {
3606                 tx = (struct bna_tx *)qe;
3607                 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3608         }
3609 }
3610
3611 void
3612 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3613 {
3614         u32 q_size;
3615         u32 page_count;
3616         struct bna_mem_info *mem_info;
3617
3618         res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3619         mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3620         mem_info->mem_type = BNA_MEM_T_KVA;
3621         mem_info->len = sizeof(struct bna_tcb);
3622         mem_info->num = num_txq;
3623
3624         q_size = txq_depth * BFI_TXQ_WI_SIZE;
3625         q_size = ALIGN(q_size, PAGE_SIZE);
3626         page_count = q_size >> PAGE_SHIFT;
3627
3628         res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3629         mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3630         mem_info->mem_type = BNA_MEM_T_DMA;
3631         mem_info->len = page_count * sizeof(struct bna_dma_addr);
3632         mem_info->num = num_txq;
3633
3634         res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3635         mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3636         mem_info->mem_type = BNA_MEM_T_KVA;
3637         mem_info->len = page_count * sizeof(void *);
3638         mem_info->num = num_txq;
3639
3640         res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3641         mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3642         mem_info->mem_type = BNA_MEM_T_DMA;
3643         mem_info->len = PAGE_SIZE * page_count;
3644         mem_info->num = num_txq;
3645
3646         res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3647         mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3648         mem_info->mem_type = BNA_MEM_T_DMA;
3649         mem_info->len = BFI_IBIDX_SIZE;
3650         mem_info->num = num_txq;
3651
3652         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3653         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3654                         BNA_INTR_T_MSIX;
3655         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3656 }
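
/*
 * Sizing example for the resources requested above (purely illustrative;
 * the real work-item size comes from BFI_TXQ_WI_SIZE): with txq_depth = 2048
 * and a 64-byte work item, q_size is 128 KiB, i.e. 32 pages of 4 KiB.  Each
 * TxQ then needs 32 QPT entries of struct bna_dma_addr, 32 software QPT
 * pointers and 32 pages of queue memory, plus one BFI_IBIDX_SIZE IB segment.
 */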
3657
3658 struct bna_tx *
3659 bna_tx_create(struct bna *bna, struct bnad *bnad,
3660                 struct bna_tx_config *tx_cfg,
3661                 const struct bna_tx_event_cbfn *tx_cbfn,
3662                 struct bna_res_info *res_info, void *priv)
3663 {
3664         struct bna_intr_info *intr_info;
3665         struct bna_tx_mod *tx_mod = &bna->tx_mod;
3666         struct bna_tx *tx;
3667         struct bna_txq *txq;
3668         struct list_head *qe;
3669         int page_count;
3670         int i;
3671
3672         intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3673         page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3674                                         PAGE_SIZE;
3675
3676         /*
3677          * Get resources
3678          */
3679
3680         if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3681                 return NULL;
3682
3683         /* Tx */
3684
3685         tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3686         if (!tx)
3687                 return NULL;
3688         tx->bna = bna;
3689         tx->priv = priv;
3690
3691         /* TxQs */
3692
3693         INIT_LIST_HEAD(&tx->txq_q);
3694         for (i = 0; i < tx_cfg->num_txq; i++) {
3695                 if (list_empty(&tx_mod->txq_free_q))
3696                         goto err_return;
3697
3698                 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3699                 bfa_q_qe_init(&txq->qe);
3700                 list_add_tail(&txq->qe, &tx->txq_q);
3701                 txq->tx = tx;
3702         }
3703
3704         /*
3705          * Initialize
3706          */
3707
3708         /* Tx */
3709
3710         tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3711         tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3712         /* Following callbacks are mandatory */
3713         tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3714         tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3715         tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3716
3717         list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3718
3719         tx->num_txq = tx_cfg->num_txq;
3720
3721         tx->flags = 0;
3722         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3723                 switch (tx->type) {
3724                 case BNA_TX_T_REGULAR:
3725                         if (!(tx->bna->tx_mod.flags &
3726                                 BNA_TX_MOD_F_ENET_LOOPBACK))
3727                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3728                         break;
3729                 case BNA_TX_T_LOOPBACK:
3730                         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3731                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3732                         break;
3733                 }
3734         }
3735
3736         /* TxQ */
3737
3738         i = 0;
3739         list_for_each(qe, &tx->txq_q) {
3740                 txq = (struct bna_txq *)qe;
3741                 txq->tcb = (struct bna_tcb *)
3742                 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3743                 txq->tx_packets = 0;
3744                 txq->tx_bytes = 0;
3745
3746                 /* IB */
3747                 txq->ib.ib_seg_host_addr.lsb =
3748                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3749                 txq->ib.ib_seg_host_addr.msb =
3750                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3751                 txq->ib.ib_seg_host_addr_kva =
3752                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3753                 txq->ib.intr_type = intr_info->intr_type;
3754                 txq->ib.intr_vector = (intr_info->num == 1) ?
3755                                         intr_info->idl[0].vector :
3756                                         intr_info->idl[i].vector;
3757                 if (intr_info->intr_type == BNA_INTR_T_INTX)
3758                         txq->ib.intr_vector = BIT(txq->ib.intr_vector);
3759                 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3760                 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3761                 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3762
3763                 /* TCB */
3764
3765                 txq->tcb->q_depth = tx_cfg->txq_depth;
3766                 txq->tcb->unmap_q = (void *)
3767                 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3768                 txq->tcb->hw_consumer_index =
3769                         (u32 *)txq->ib.ib_seg_host_addr_kva;
3770                 txq->tcb->i_dbell = &txq->ib.door_bell;
3771                 txq->tcb->intr_type = txq->ib.intr_type;
3772                 txq->tcb->intr_vector = txq->ib.intr_vector;
3773                 txq->tcb->txq = txq;
3774                 txq->tcb->bnad = bnad;
3775                 txq->tcb->id = i;
3776
3777                 /* QPT, SWQPT, Pages */
3778                 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3779                         &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3780                         &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3781                         &res_info[BNA_TX_RES_MEM_T_PAGE].
3782                                   res_u.mem_info.mdl[i]);
3783
3784                 /* Callback to bnad for setting up TCB */
3785                 if (tx->tcb_setup_cbfn)
3786                         (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3787
3788                 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3789                         txq->priority = txq->tcb->id;
3790                 else
3791                         txq->priority = tx_mod->default_prio;
3792
3793                 i++;
3794         }
3795
3796         tx->txf_vlan_id = 0;
3797
3798         bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3799
3800         tx_mod->rid_mask |= BIT(tx->rid);
3801
3802         return tx;
3803
3804 err_return:
3805         bna_tx_free(tx);
3806         return NULL;
3807 }
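
/*
 * Rough lifecycle as driven from the bnad layer (a sketch of the expected
 * call order, not a verbatim copy of bnad.c):
 *
 *   bna_tx_res_req(num_txq, txq_depth, res_info);
 *   ... allocate every resource described in res_info ...
 *   tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, priv);
 *   bna_tx_enable(tx);
 *   ...
 *   bna_tx_disable(tx, BNA_HARD_CLEANUP, stopped_cb);
 *   bna_tx_cleanup_complete(tx);    (once the cleanup work has run)
 *   bna_tx_destroy(tx);
 */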
3808
3809 void
3810 bna_tx_destroy(struct bna_tx *tx)
3811 {
3812         struct bna_txq *txq;
3813         struct list_head *qe;
3814
3815         list_for_each(qe, &tx->txq_q) {
3816                 txq = (struct bna_txq *)qe;
3817                 if (tx->tcb_destroy_cbfn)
3818                         (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3819         }
3820
3821         tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
3822         bna_tx_free(tx);
3823 }
3824
3825 void
3826 bna_tx_enable(struct bna_tx *tx)
3827 {
3828         if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3829                 return;
3830
3831         tx->flags |= BNA_TX_F_ENABLED;
3832
3833         if (tx->flags & BNA_TX_F_ENET_STARTED)
3834                 bfa_fsm_send_event(tx, TX_E_START);
3835 }
3836
3837 void
3838 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3839                 void (*cbfn)(void *, struct bna_tx *))
3840 {
3841         if (type == BNA_SOFT_CLEANUP) {
3842                 (*cbfn)(tx->bna->bnad, tx);
3843                 return;
3844         }
3845
3846         tx->stop_cbfn = cbfn;
3847         tx->stop_cbarg = tx->bna->bnad;
3848
3849         tx->flags &= ~BNA_TX_F_ENABLED;
3850
3851         bfa_fsm_send_event(tx, TX_E_STOP);
3852 }
3853
3854 void
3855 bna_tx_cleanup_complete(struct bna_tx *tx)
3856 {
3857         bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3858 }
3859
3860 static void
3861 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3862 {
3863         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3864
3865         bfa_wc_down(&tx_mod->tx_stop_wc);
3866 }
3867
3868 static void
3869 bna_tx_mod_cb_tx_stopped_all(void *arg)
3870 {
3871         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3872
3873         if (tx_mod->stop_cbfn)
3874                 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3875         tx_mod->stop_cbfn = NULL;
3876 }
3877
3878 void
3879 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3880                 struct bna_res_info *res_info)
3881 {
3882         int i;
3883
3884         tx_mod->bna = bna;
3885         tx_mod->flags = 0;
3886
3887         tx_mod->tx = (struct bna_tx *)
3888                 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3889         tx_mod->txq = (struct bna_txq *)
3890                 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3891
3892         INIT_LIST_HEAD(&tx_mod->tx_free_q);
3893         INIT_LIST_HEAD(&tx_mod->tx_active_q);
3894
3895         INIT_LIST_HEAD(&tx_mod->txq_free_q);
3896
3897         for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3898                 tx_mod->tx[i].rid = i;
3899                 bfa_q_qe_init(&tx_mod->tx[i].qe);
3900                 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3901                 bfa_q_qe_init(&tx_mod->txq[i].qe);
3902                 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3903         }
3904
3905         tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3906         tx_mod->default_prio = 0;
3907         tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3908         tx_mod->iscsi_prio = -1;
3909 }
3910
3911 void
3912 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3913 {
3914         struct list_head *qe;
3915         int i;
3916
3917         i = 0;
3918         list_for_each(qe, &tx_mod->tx_free_q)
3919                 i++;
3920
3921         i = 0;
3922         list_for_each(qe, &tx_mod->txq_free_q)
3923                 i++;
3924
3925         tx_mod->bna = NULL;
3926 }
3927
3928 void
3929 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3930 {
3931         struct bna_tx *tx;
3932         struct list_head *qe;
3933
3934         tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3935         if (type == BNA_TX_T_LOOPBACK)
3936                 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3937
3938         list_for_each(qe, &tx_mod->tx_active_q) {
3939                 tx = (struct bna_tx *)qe;
3940                 if (tx->type == type)
3941                         bna_tx_start(tx);
3942         }
3943 }
3944
3945 void
3946 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3947 {
3948         struct bna_tx *tx;
3949         struct list_head *qe;
3950
3951         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3952         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3953
3954         tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3955
3956         bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3957
3958         list_for_each(qe, &tx_mod->tx_active_q) {
3959                 tx = (struct bna_tx *)qe;
3960                 if (tx->type == type) {
3961                         bfa_wc_up(&tx_mod->tx_stop_wc);
3962                         bna_tx_stop(tx);
3963                 }
3964         }
3965
3966         bfa_wc_wait(&tx_mod->tx_stop_wc);
3967 }
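
/*
 * The stop path above relies on the bfa waiting-counter helpers (assuming
 * the usual bfa_wc semantics from bfa_cs.h): bfa_wc_init() registers the
 * "all stopped" callback and takes an initial reference, bfa_wc_up() adds
 * one per Tx being stopped, each bna_tx_mod_cb_tx_stopped() drops one, and
 * bfa_wc_wait() drops the initial reference so the callback also fires
 * right away when no Tx of the requested type was active.
 */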
3968
3969 void
3970 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3971 {
3972         struct bna_tx *tx;
3973         struct list_head *qe;
3974
3975         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3976         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3977
3978         list_for_each(qe, &tx_mod->tx_active_q) {
3979                 tx = (struct bna_tx *)qe;
3980                 bna_tx_fail(tx);
3981         }
3982 }
3983
3984 void
3985 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3986 {
3987         struct bna_txq *txq;
3988         struct list_head *qe;
3989
3990         list_for_each(qe, &tx->txq_q) {
3991                 txq = (struct bna_txq *)qe;
3992                 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3993         }
3994 }