drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include "bna.h"
20 #include "bfi.h"
21
22 /* IB */
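/*
 * Set the interrupt-coalescing timeout for an IB (interrupt block) and
 * pre-compute the doorbell-ack value (BNA_DOORBELL_IB_INT_ACK) that is
 * written when acknowledging interrupts with that timeout.
 */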
23 static void
24 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
25 {
26         ib->coalescing_timeo = coalescing_timeo;
27         ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
28                                 (u32)ib->coalescing_timeo, 0);
29 }
30
31 /* RXF */
32
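/*
 * Soft-reset helpers: they only flag state as dirty, without touching the
 * hardware.  They mark every VLAN filter block, the VLAN-strip setting and,
 * when RSS is enabled, the RIT/RSS configuration as pending so that they
 * are replayed to the firmware on the next configuration pass.
 */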
33 #define bna_rxf_vlan_cfg_soft_reset(rxf)                                \
34 do {                                                                    \
35         (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;           \
36         (rxf)->vlan_strip_pending = true;                               \
37 } while (0)
38
39 #define bna_rxf_rss_cfg_soft_reset(rxf)                                 \
40 do {                                                                    \
41         if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)                  \
42                 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |           \
43                                 BNA_RSS_F_CFG_PENDING |                 \
44                                 BNA_RSS_F_STATUS_PENDING);              \
45 } while (0)
46
47 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
48 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
49 static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
50 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
54 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
55                                         enum bna_cleanup_type cleanup);
56 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
57                                         enum bna_cleanup_type cleanup);
58 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
59                                         enum bna_cleanup_type cleanup);
60
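/*
 * RXF state machine: stopped -> cfg_wait -> started on the normal path.
 * paused handles pause/resume, fltr_clr_wait waits for CAM filters to be
 * cleared before pausing, and last_resp_wait absorbs a stop issued while a
 * firmware response is still outstanding.
 */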
61 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
62                         enum bna_rxf_event);
63 bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
64                         enum bna_rxf_event);
65 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
66                         enum bna_rxf_event);
67 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
68                         enum bna_rxf_event);
69 bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
70                         enum bna_rxf_event);
71 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
72                         enum bna_rxf_event);
73
74 static void
75 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
76 {
77         call_rxf_stop_cbfn(rxf);
78 }
79
80 static void
81 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
82 {
83         switch (event) {
84         case RXF_E_START:
85                 if (rxf->flags & BNA_RXF_F_PAUSED) {
86                         bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
87                         call_rxf_start_cbfn(rxf);
88                 } else
89                         bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
90                 break;
91
92         case RXF_E_STOP:
93                 call_rxf_stop_cbfn(rxf);
94                 break;
95
96         case RXF_E_FAIL:
97                 /* No-op */
98                 break;
99
100         case RXF_E_CONFIG:
101                 call_rxf_cam_fltr_cbfn(rxf);
102                 break;
103
104         case RXF_E_PAUSE:
105                 rxf->flags |= BNA_RXF_F_PAUSED;
106                 call_rxf_pause_cbfn(rxf);
107                 break;
108
109         case RXF_E_RESUME:
110                 rxf->flags &= ~BNA_RXF_F_PAUSED;
111                 call_rxf_resume_cbfn(rxf);
112                 break;
113
114         default:
115                 bfa_sm_fault(event);
116         }
117 }
118
119 static void
120 bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
121 {
122         call_rxf_pause_cbfn(rxf);
123 }
124
125 static void
126 bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
127 {
128         switch (event) {
129         case RXF_E_STOP:
130         case RXF_E_FAIL:
131                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
132                 break;
133
134         case RXF_E_CONFIG:
135                 call_rxf_cam_fltr_cbfn(rxf);
136                 break;
137
138         case RXF_E_RESUME:
139                 rxf->flags &= ~BNA_RXF_F_PAUSED;
140                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
141                 break;
142
143         default:
144                 bfa_sm_fault(event);
145         }
146 }
147
148 static void
149 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
150 {
151         if (!bna_rxf_cfg_apply(rxf)) {
152                 /* No more pending config updates */
153                 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
154         }
155 }
156
157 static void
158 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
159 {
160         switch (event) {
161         case RXF_E_STOP:
162                 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
163                 break;
164
165         case RXF_E_FAIL:
166                 bna_rxf_cfg_reset(rxf);
167                 call_rxf_start_cbfn(rxf);
168                 call_rxf_cam_fltr_cbfn(rxf);
169                 call_rxf_resume_cbfn(rxf);
170                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
171                 break;
172
173         case RXF_E_CONFIG:
174                 /* No-op */
175                 break;
176
177         case RXF_E_PAUSE:
178                 rxf->flags |= BNA_RXF_F_PAUSED;
179                 call_rxf_start_cbfn(rxf);
180                 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
181                 break;
182
183         case RXF_E_FW_RESP:
184                 if (!bna_rxf_cfg_apply(rxf)) {
185                         /* No more pending config updates */
186                         bfa_fsm_set_state(rxf, bna_rxf_sm_started);
187                 }
188                 break;
189
190         default:
191                 bfa_sm_fault(event);
192         }
193 }
194
195 static void
196 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
197 {
198         call_rxf_start_cbfn(rxf);
199         call_rxf_cam_fltr_cbfn(rxf);
200         call_rxf_resume_cbfn(rxf);
201 }
202
203 static void
204 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
205 {
206         switch (event) {
207         case RXF_E_STOP:
208         case RXF_E_FAIL:
209                 bna_rxf_cfg_reset(rxf);
210                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
211                 break;
212
213         case RXF_E_CONFIG:
214                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
215                 break;
216
217         case RXF_E_PAUSE:
218                 rxf->flags |= BNA_RXF_F_PAUSED;
219                 if (!bna_rxf_fltr_clear(rxf))
220                         bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
221                 else
222                         bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
223                 break;
224
225         default:
226                 bfa_sm_fault(event);
227         }
228 }
229
230 static void
231 bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
232 {
233 }
234
235 static void
236 bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
237 {
238         switch (event) {
239         case RXF_E_FAIL:
240                 bna_rxf_cfg_reset(rxf);
241                 call_rxf_pause_cbfn(rxf);
242                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
243                 break;
244
245         case RXF_E_FW_RESP:
246                 if (!bna_rxf_fltr_clear(rxf)) {
247                         /* No more pending CAM entries to clear */
248                         bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
249                 }
250                 break;
251
252         default:
253                 bfa_sm_fault(event);
254         }
255 }
256
257 static void
258 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
259 {
260 }
261
262 static void
263 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
264 {
265         switch (event) {
266         case RXF_E_FAIL:
267         case RXF_E_FW_RESP:
268                 bna_rxf_cfg_reset(rxf);
269                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
270                 break;
271
272         default:
273                 bfa_sm_fault(event);
274         }
275 }
276
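/*
 * The bna_bfi_*() helpers below share one pattern: build a request in
 * rxf->bfi_enet_cmd, stamp the message header with this Rx function's id
 * (rxf->rx->rid), and post it on the message queue.  The firmware
 * completion comes back later as an RXF_E_FW_RESP event.
 */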
277 static void
278 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
279                 enum bfi_enet_h2i_msgs req_type)
280 {
281         struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
282
283         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
284         req->mh.num_entries = htons(
285                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
286         ether_addr_copy(req->mac_addr, mac->addr);
287         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
288                 sizeof(struct bfi_enet_ucast_req), &req->mh);
289         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
290 }
291
292 static void
293 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
294 {
295         struct bfi_enet_mcast_add_req *req =
296                 &rxf->bfi_enet_cmd.mcast_add_req;
297
298         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
299                 0, rxf->rx->rid);
300         req->mh.num_entries = htons(
301                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
302         ether_addr_copy(req->mac_addr, mac->addr);
303         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
304                 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
305         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
306 }
307
308 static void
309 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
310 {
311         struct bfi_enet_mcast_del_req *req =
312                 &rxf->bfi_enet_cmd.mcast_del_req;
313
314         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
315                 0, rxf->rx->rid);
316         req->mh.num_entries = htons(
317                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
318         req->handle = htons(handle);
319         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
320                 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
321         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
322 }
323
324 static void
325 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
326 {
327         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
328
329         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
330                 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
331         req->mh.num_entries = htons(
332                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
333         req->enable = status;
334         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
335                 sizeof(struct bfi_enet_enable_req), &req->mh);
336         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
337 }
338
339 static void
340 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
341 {
342         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
343
344         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
345                 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
346         req->mh.num_entries = htons(
347                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
348         req->enable = status;
349         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
350                 sizeof(struct bfi_enet_enable_req), &req->mh);
351         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
352 }
353
354 static void
355 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
356 {
357         struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
358         int i;
359         int j;
360
361         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
362                 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
363         req->mh.num_entries = htons(
364                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
365         req->block_idx = block_idx;
366         for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
367                 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
368                 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
369                         req->bit_mask[i] =
370                                 htonl(rxf->vlan_filter_table[j]);
371                 else
372                         req->bit_mask[i] = 0xFFFFFFFF;
373         }
374         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
375                 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
376         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
377 }
378
379 static void
380 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
381 {
382         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
383
384         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
385                 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
386         req->mh.num_entries = htons(
387                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
388         req->enable = rxf->vlan_strip_status;
389         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
390                 sizeof(struct bfi_enet_enable_req), &req->mh);
391         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
392 }
393
394 static void
395 bna_bfi_rit_cfg(struct bna_rxf *rxf)
396 {
397         struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
398
399         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
400                 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
401         req->mh.num_entries = htons(
402                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
403         req->size = htons(rxf->rit_size);
404         memcpy(&req->table[0], rxf->rit, rxf->rit_size);
405         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
406                 sizeof(struct bfi_enet_rit_req), &req->mh);
407         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
408 }
409
410 static void
411 bna_bfi_rss_cfg(struct bna_rxf *rxf)
412 {
413         struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
414         int i;
415
416         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
417                 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
418         req->mh.num_entries = htons(
419                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
420         req->cfg.type = rxf->rss_cfg.hash_type;
421         req->cfg.mask = rxf->rss_cfg.hash_mask;
422         for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
423                 req->cfg.key[i] =
424                         htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
425         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
426                 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
427         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
428 }
429
430 static void
431 bna_bfi_rss_enable(struct bna_rxf *rxf)
432 {
433         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
434
435         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
436                 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
437         req->mh.num_entries = htons(
438                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
439         req->enable = rxf->rss_status;
440         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
441                 sizeof(struct bfi_enet_enable_req), &req->mh);
442         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
443 }
444
445 /* This function gets the multicast MAC that has already been added to CAM */
446 static struct bna_mac *
447 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
448 {
449         struct bna_mac *mac;
450         struct list_head *qe;
451
452         list_for_each(qe, &rxf->mcast_active_q) {
453                 mac = (struct bna_mac *)qe;
454                 if (ether_addr_equal(mac->addr, mac_addr))
455                         return mac;
456         }
457
458         list_for_each(qe, &rxf->mcast_pending_del_q) {
459                 mac = (struct bna_mac *)qe;
460                 if (ether_addr_equal(mac->addr, mac_addr))
461                         return mac;
462         }
463
464         return NULL;
465 }
466
467 static struct bna_mcam_handle *
468 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
469 {
470         struct bna_mcam_handle *mchandle;
471         struct list_head *qe;
472
473         list_for_each(qe, &rxf->mcast_handle_q) {
474                 mchandle = (struct bna_mcam_handle *)qe;
475                 if (mchandle->handle == handle)
476                         return mchandle;
477         }
478
479         return NULL;
480 }
481
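/*
 * Attach the firmware-assigned multicast CAM handle to the matching MAC
 * entry.  Handles are reference counted so that several MAC entries can
 * share one CAM handle; the handle is freed back to the mcam module only
 * when the last reference is dropped in bna_rxf_mcast_del().
 */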
482 static void
483 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
484 {
485         struct bna_mac *mcmac;
486         struct bna_mcam_handle *mchandle;
487
488         mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
489         mchandle = bna_rxf_mchandle_get(rxf, handle);
490         if (mchandle == NULL) {
491                 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
492                 mchandle->handle = handle;
493                 mchandle->refcnt = 0;
494                 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
495         }
496         mchandle->refcnt++;
497         mcmac->handle = mchandle;
498 }
499
500 static int
501 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
502                 enum bna_cleanup_type cleanup)
503 {
504         struct bna_mcam_handle *mchandle;
505         int ret = 0;
506
507         mchandle = mac->handle;
508         if (mchandle == NULL)
509                 return ret;
510
511         mchandle->refcnt--;
512         if (mchandle->refcnt == 0) {
513                 if (cleanup == BNA_HARD_CLEANUP) {
514                         bna_bfi_mcast_del_req(rxf, mchandle->handle);
515                         ret = 1;
516                 }
517                 list_del(&mchandle->qe);
518                 bfa_q_qe_init(&mchandle->qe);
519                 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
520         }
521         mac->handle = NULL;
522
523         return ret;
524 }
525
526 static int
527 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
528 {
529         struct bna_mac *mac = NULL;
530         struct list_head *qe;
531         int ret;
532
533         /* First delete multicast entries to maintain the count */
534         while (!list_empty(&rxf->mcast_pending_del_q)) {
535                 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
536                 bfa_q_qe_init(qe);
537                 mac = (struct bna_mac *)qe;
538                 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
539                 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
540                 if (ret)
541                         return ret;
542         }
543
544         /* Add multicast entries */
545         if (!list_empty(&rxf->mcast_pending_add_q)) {
546                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
547                 bfa_q_qe_init(qe);
548                 mac = (struct bna_mac *)qe;
549                 list_add_tail(&mac->qe, &rxf->mcast_active_q);
550                 bna_bfi_mcast_add_req(rxf, mac);
551                 return 1;
552         }
553
554         return 0;
555 }
556
557 static int
558 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
559 {
560         u8 vlan_pending_bitmask;
561         int block_idx = 0;
562
563         if (rxf->vlan_pending_bitmask) {
564                 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
565                 while (!(vlan_pending_bitmask & 0x1)) {
566                         block_idx++;
567                         vlan_pending_bitmask >>= 1;
568                 }
569                 rxf->vlan_pending_bitmask &= ~BIT(block_idx);
570                 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
571                 return 1;
572         }
573
574         return 0;
575 }
576
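/*
 * Each bit in vlan_pending_bitmask stands for one block of
 * BFI_ENET_VLAN_BLOCK_SIZE VLAN ids whose filter words still have to be
 * written to the firmware.  Program the lowest pending block and return 1
 * so the state machine waits for the firmware response before applying
 * the next one.
 */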
577 static int
578 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
579 {
580         struct list_head *qe;
581         struct bna_mac *mac;
582         int ret;
583
584         /* Throw away delete pending mcast entries */
585         while (!list_empty(&rxf->mcast_pending_del_q)) {
586                 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
587                 bfa_q_qe_init(qe);
588                 mac = (struct bna_mac *)qe;
589                 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
590                 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
591                 if (ret)
592                         return ret;
593         }
594
595         /* Move active mcast entries to pending_add_q */
596         while (!list_empty(&rxf->mcast_active_q)) {
597                 bfa_q_deq(&rxf->mcast_active_q, &qe);
598                 bfa_q_qe_init(qe);
599                 list_add_tail(qe, &rxf->mcast_pending_add_q);
600                 mac = (struct bna_mac *)qe;
601                 if (bna_rxf_mcast_del(rxf, mac, cleanup))
602                         return 1;
603         }
604
605         return 0;
606 }
607
608 static int
609 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
610 {
611         if (rxf->rss_pending) {
612                 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
613                         rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
614                         bna_bfi_rit_cfg(rxf);
615                         return 1;
616                 }
617
618                 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
619                         rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
620                         bna_bfi_rss_cfg(rxf);
621                         return 1;
622                 }
623
624                 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
625                         rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
626                         bna_bfi_rss_enable(rxf);
627                         return 1;
628                 }
629         }
630
631         return 0;
632 }
633
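/*
 * Apply at most one pending configuration item per call, in a fixed order:
 * ucast, mcast, promiscuous, allmulti, VLAN filter, VLAN strip, RSS.
 * Returns 1 while a firmware request is outstanding and 0 when nothing is
 * pending; the cfg_wait state calls this again on every RXF_E_FW_RESP
 * until it returns 0.
 */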
634 static int
635 bna_rxf_cfg_apply(struct bna_rxf *rxf)
636 {
637         if (bna_rxf_ucast_cfg_apply(rxf))
638                 return 1;
639
640         if (bna_rxf_mcast_cfg_apply(rxf))
641                 return 1;
642
643         if (bna_rxf_promisc_cfg_apply(rxf))
644                 return 1;
645
646         if (bna_rxf_allmulti_cfg_apply(rxf))
647                 return 1;
648
649         if (bna_rxf_vlan_cfg_apply(rxf))
650                 return 1;
651
652         if (bna_rxf_vlan_strip_cfg_apply(rxf))
653                 return 1;
654
655         if (bna_rxf_rss_cfg_apply(rxf))
656                 return 1;
657
658         return 0;
659 }
660
661 /* Only software reset */
662 static int
663 bna_rxf_fltr_clear(struct bna_rxf *rxf)
664 {
665         if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
666                 return 1;
667
668         if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
669                 return 1;
670
671         if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
672                 return 1;
673
674         if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
675                 return 1;
676
677         return 0;
678 }
679
680 static void
681 bna_rxf_cfg_reset(struct bna_rxf *rxf)
682 {
683         bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
684         bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
685         bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
686         bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
687         bna_rxf_vlan_cfg_soft_reset(rxf);
688         bna_rxf_rss_cfg_soft_reset(rxf);
689 }
690
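/*
 * Build the RIT used for RSS from the CQ ids of this Rx's paths, one entry
 * per rx-path in rxp_q order.
 */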
691 static void
692 bna_rit_init(struct bna_rxf *rxf, int rit_size)
693 {
694         struct bna_rx *rx = rxf->rx;
695         struct bna_rxp *rxp;
696         struct list_head *qe;
697         int offset = 0;
698
699         rxf->rit_size = rit_size;
700         list_for_each(qe, &rx->rxp_q) {
701                 rxp = (struct bna_rxp *)qe;
702                 rxf->rit[offset] = rxp->cq.ccb->id;
703                 offset++;
704         }
705
706 }
707
708 void
709 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
710 {
711         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
712 }
713
714 void
715 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
716                         struct bfi_msgq_mhdr *msghdr)
717 {
718         struct bfi_enet_rsp *rsp =
719                 container_of(msghdr, struct bfi_enet_rsp, mh);
720
721         if (rsp->error) {
722                 /* Clear ucast from cache */
723                 rxf->ucast_active_set = 0;
724         }
725
726         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
727 }
728
729 void
730 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
731                         struct bfi_msgq_mhdr *msghdr)
732 {
733         struct bfi_enet_mcast_add_req *req =
734                 &rxf->bfi_enet_cmd.mcast_add_req;
735         struct bfi_enet_mcast_add_rsp *rsp =
736                 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
737
738         bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
739                 ntohs(rsp->handle));
740         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
741 }
742
743 static void
744 bna_rxf_init(struct bna_rxf *rxf,
745                 struct bna_rx *rx,
746                 struct bna_rx_config *q_config,
747                 struct bna_res_info *res_info)
748 {
749         rxf->rx = rx;
750
751         INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
752         INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
753         rxf->ucast_pending_set = 0;
754         rxf->ucast_active_set = 0;
755         INIT_LIST_HEAD(&rxf->ucast_active_q);
756         rxf->ucast_pending_mac = NULL;
757
758         INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
759         INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
760         INIT_LIST_HEAD(&rxf->mcast_active_q);
761         INIT_LIST_HEAD(&rxf->mcast_handle_q);
762
763         if (q_config->paused)
764                 rxf->flags |= BNA_RXF_F_PAUSED;
765
766         rxf->rit = (u8 *)
767                 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
768         bna_rit_init(rxf, q_config->num_paths);
769
770         rxf->rss_status = q_config->rss_status;
771         if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
772                 rxf->rss_cfg = q_config->rss_config;
773                 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
774                 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
775                 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
776         }
777
778         rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
779         memset(rxf->vlan_filter_table, 0,
780                         (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
781         rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
782         rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
783
784         rxf->vlan_strip_status = q_config->vlan_strip_status;
785
786         bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
787 }
788
789 static void
790 bna_rxf_uninit(struct bna_rxf *rxf)
791 {
792         struct bna_mac *mac;
793
794         rxf->ucast_pending_set = 0;
795         rxf->ucast_active_set = 0;
796
797         while (!list_empty(&rxf->ucast_pending_add_q)) {
798                 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
799                 bfa_q_qe_init(&mac->qe);
800                 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
801         }
802
803         if (rxf->ucast_pending_mac) {
804                 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
805                 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
806                                     rxf->ucast_pending_mac);
807                 rxf->ucast_pending_mac = NULL;
808         }
809
810         while (!list_empty(&rxf->mcast_pending_add_q)) {
811                 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
812                 bfa_q_qe_init(&mac->qe);
813                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
814         }
815
816         rxf->rxmode_pending = 0;
817         rxf->rxmode_pending_bitmask = 0;
818         if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
819                 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
820         if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
821                 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
822
823         rxf->rss_pending = 0;
824         rxf->vlan_strip_pending = false;
825
826         rxf->flags = 0;
827
828         rxf->rx = NULL;
829 }
830
831 static void
832 bna_rx_cb_rxf_started(struct bna_rx *rx)
833 {
834         bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
835 }
836
837 static void
838 bna_rxf_start(struct bna_rxf *rxf)
839 {
840         rxf->start_cbfn = bna_rx_cb_rxf_started;
841         rxf->start_cbarg = rxf->rx;
842         bfa_fsm_send_event(rxf, RXF_E_START);
843 }
844
845 static void
846 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
847 {
848         bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
849 }
850
851 static void
852 bna_rxf_stop(struct bna_rxf *rxf)
853 {
854         rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
855         rxf->stop_cbarg = rxf->rx;
856         bfa_fsm_send_event(rxf, RXF_E_STOP);
857 }
858
859 static void
860 bna_rxf_fail(struct bna_rxf *rxf)
861 {
862         bfa_fsm_send_event(rxf, RXF_E_FAIL);
863 }
864
865 enum bna_cb_status
866 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
867 {
868         struct bna_rxf *rxf = &rx->rxf;
869
870         if (rxf->ucast_pending_mac == NULL) {
871                 rxf->ucast_pending_mac =
872                         bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
873                 if (rxf->ucast_pending_mac == NULL)
874                         return BNA_CB_UCAST_CAM_FULL;
875                 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
876         }
877
878         ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
879         rxf->ucast_pending_set = 1;
880         rxf->cam_fltr_cbfn = NULL;
881         rxf->cam_fltr_cbarg = rx->bna->bnad;
882
883         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
884
885         return BNA_CB_SUCCESS;
886 }
887
888 enum bna_cb_status
889 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
890                  void (*cbfn)(struct bnad *, struct bna_rx *))
891 {
892         struct bna_rxf *rxf = &rx->rxf;
893         struct bna_mac *mac;
894
895         /* Check if already added or pending addition */
896         if (bna_mac_find(&rxf->mcast_active_q, addr) ||
897                 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
898                 if (cbfn)
899                         cbfn(rx->bna->bnad, rx);
900                 return BNA_CB_SUCCESS;
901         }
902
903         mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
904         if (mac == NULL)
905                 return BNA_CB_MCAST_LIST_FULL;
906         bfa_q_qe_init(&mac->qe);
907         ether_addr_copy(mac->addr, addr);
908         list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
909
910         rxf->cam_fltr_cbfn = cbfn;
911         rxf->cam_fltr_cbarg = rx->bna->bnad;
912
913         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
914
915         return BNA_CB_SUCCESS;
916 }
917
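/*
 * Replace the unicast (and, below, the multicast) address list: purge
 * entries still waiting to be added, schedule every active entry for
 * deletion, then queue the new list for addition.  If allocation fails
 * part way through, the partially built list is freed and an error is
 * returned without raising RXF_E_CONFIG.
 */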
918 enum bna_cb_status
919 bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
920 {
921         struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
922         struct bna_rxf *rxf = &rx->rxf;
923         struct list_head list_head;
924         struct list_head *qe;
925         u8 *mcaddr;
926         struct bna_mac *mac, *del_mac;
927         int i;
928
929         /* Purge the pending_add_q */
930         while (!list_empty(&rxf->ucast_pending_add_q)) {
931                 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
932                 bfa_q_qe_init(qe);
933                 mac = (struct bna_mac *)qe;
934                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
935         }
936
937         /* Schedule active_q entries for deletion */
938         while (!list_empty(&rxf->ucast_active_q)) {
939                 bfa_q_deq(&rxf->ucast_active_q, &qe);
940                 mac = (struct bna_mac *)qe;
941                 bfa_q_qe_init(&mac->qe);
942
943                 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
944                 memcpy(del_mac, mac, sizeof(*del_mac));
945                 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
946                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
947         }
948
949         /* Allocate nodes */
950         INIT_LIST_HEAD(&list_head);
951         for (i = 0, mcaddr = uclist; i < count; i++) {
952                 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
953                 if (mac == NULL)
954                         goto err_return;
955                 bfa_q_qe_init(&mac->qe);
956                 ether_addr_copy(mac->addr, mcaddr);
957                 list_add_tail(&mac->qe, &list_head);
958                 mcaddr += ETH_ALEN;
959         }
960
961         /* Add the new entries */
962         while (!list_empty(&list_head)) {
963                 bfa_q_deq(&list_head, &qe);
964                 mac = (struct bna_mac *)qe;
965                 bfa_q_qe_init(&mac->qe);
966                 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
967         }
968
969         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
970
971         return BNA_CB_SUCCESS;
972
973 err_return:
974         while (!list_empty(&list_head)) {
975                 bfa_q_deq(&list_head, &qe);
976                 mac = (struct bna_mac *)qe;
977                 bfa_q_qe_init(&mac->qe);
978                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
979         }
980
981         return BNA_CB_UCAST_CAM_FULL;
982 }
983
984 enum bna_cb_status
985 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
986 {
987         struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
988         struct bna_rxf *rxf = &rx->rxf;
989         struct list_head list_head;
990         struct list_head *qe;
991         u8 *mcaddr;
992         struct bna_mac *mac, *del_mac;
993         int i;
994
995         /* Purge the pending_add_q */
996         while (!list_empty(&rxf->mcast_pending_add_q)) {
997                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
998                 bfa_q_qe_init(qe);
999                 mac = (struct bna_mac *)qe;
1000                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1001         }
1002
1003         /* Schedule active_q entries for deletion */
1004         while (!list_empty(&rxf->mcast_active_q)) {
1005                 bfa_q_deq(&rxf->mcast_active_q, &qe);
1006                 mac = (struct bna_mac *)qe;
1007                 bfa_q_qe_init(&mac->qe);
1008
1009                 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
1010
1011                 memcpy(del_mac, mac, sizeof(*del_mac));
1012                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1013                 mac->handle = NULL;
1014                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1015         }
1016
1017         /* Allocate nodes */
1018         INIT_LIST_HEAD(&list_head);
1019         for (i = 0, mcaddr = mclist; i < count; i++) {
1020                 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
1021                 if (mac == NULL)
1022                         goto err_return;
1023                 bfa_q_qe_init(&mac->qe);
1024                 ether_addr_copy(mac->addr, mcaddr);
1025                 list_add_tail(&mac->qe, &list_head);
1026
1027                 mcaddr += ETH_ALEN;
1028         }
1029
1030         /* Add the new entries */
1031         while (!list_empty(&list_head)) {
1032                 bfa_q_deq(&list_head, &qe);
1033                 mac = (struct bna_mac *)qe;
1034                 bfa_q_qe_init(&mac->qe);
1035                 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1036         }
1037
1038         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1039
1040         return BNA_CB_SUCCESS;
1041
1042 err_return:
1043         while (!list_empty(&list_head)) {
1044                 bfa_q_deq(&list_head, &qe);
1045                 mac = (struct bna_mac *)qe;
1046                 bfa_q_qe_init(&mac->qe);
1047                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1048         }
1049
1050         return BNA_CB_MCAST_LIST_FULL;
1051 }
1052
1053 void
1054 bna_rx_mcast_delall(struct bna_rx *rx)
1055 {
1056         struct bna_rxf *rxf = &rx->rxf;
1057         struct list_head *qe;
1058         struct bna_mac *mac, *del_mac;
1059         int need_hw_config = 0;
1060
1061         /* Purge all entries from pending_add_q */
1062         while (!list_empty(&rxf->mcast_pending_add_q)) {
1063                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1064                 mac = (struct bna_mac *)qe;
1065                 bfa_q_qe_init(&mac->qe);
1066                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1067         }
1068
1069         /* Schedule all entries in active_q for deletion */
1070         while (!list_empty(&rxf->mcast_active_q)) {
1071                 bfa_q_deq(&rxf->mcast_active_q, &qe);
1072                 mac = (struct bna_mac *)qe;
1073                 bfa_q_qe_init(&mac->qe);
1074
1075                 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
1076
1077                 memcpy(del_mac, mac, sizeof(*del_mac));
1078                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1079                 mac->handle = NULL;
1080                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1081                 need_hw_config = 1;
1082         }
1083
1084         if (need_hw_config)
1085                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1086 }
1087
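/*
 * VLAN add/delete: vlan_filter_table is a bitmap of VLAN ids kept in
 * 32-bit words.  'index' and 'bit' locate the word and bit for vlan_id,
 * while 'group_id' selects the filter block whose pending bit tells the
 * state machine which part of the table to re-write to the firmware.
 */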
1088 void
1089 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1090 {
1091         struct bna_rxf *rxf = &rx->rxf;
1092         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1093         int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
1094         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1095
1096         rxf->vlan_filter_table[index] |= bit;
1097         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1098                 rxf->vlan_pending_bitmask |= BIT(group_id);
1099                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1100         }
1101 }
1102
1103 void
1104 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1105 {
1106         struct bna_rxf *rxf = &rx->rxf;
1107         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1108         int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
1109         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1110
1111         rxf->vlan_filter_table[index] &= ~bit;
1112         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1113                 rxf->vlan_pending_bitmask |= BIT(group_id);
1114                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1115         }
1116 }
1117
1118 static int
1119 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1120 {
1121         struct bna_mac *mac = NULL;
1122         struct list_head *qe;
1123
1124         /* Delete MAC addresses previously added */
1125         if (!list_empty(&rxf->ucast_pending_del_q)) {
1126                 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1127                 bfa_q_qe_init(qe);
1128                 mac = (struct bna_mac *)qe;
1129                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1130                 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
1131                 return 1;
1132         }
1133
1134         /* Set default unicast MAC */
1135         if (rxf->ucast_pending_set) {
1136                 rxf->ucast_pending_set = 0;
1137                 ether_addr_copy(rxf->ucast_active_mac.addr,
1138                                 rxf->ucast_pending_mac->addr);
1139                 rxf->ucast_active_set = 1;
1140                 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1141                         BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1142                 return 1;
1143         }
1144
1145         /* Add additional MAC entries */
1146         if (!list_empty(&rxf->ucast_pending_add_q)) {
1147                 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1148                 bfa_q_qe_init(qe);
1149                 mac = (struct bna_mac *)qe;
1150                 list_add_tail(&mac->qe, &rxf->ucast_active_q);
1151                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1152                 return 1;
1153         }
1154
1155         return 0;
1156 }
1157
1158 static int
1159 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1160 {
1161         struct list_head *qe;
1162         struct bna_mac *mac;
1163
1164         /* Throw away delete pending ucast entries */
1165         while (!list_empty(&rxf->ucast_pending_del_q)) {
1166                 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1167                 bfa_q_qe_init(qe);
1168                 mac = (struct bna_mac *)qe;
1169                 if (cleanup == BNA_SOFT_CLEANUP)
1170                         bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1171                                             mac);
1172                 else {
1173                         bna_bfi_ucast_req(rxf, mac,
1174                                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1175                         bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1176                                             mac);
1177                         return 1;
1178                 }
1179         }
1180
1181         /* Move active ucast entries to pending_add_q */
1182         while (!list_empty(&rxf->ucast_active_q)) {
1183                 bfa_q_deq(&rxf->ucast_active_q, &qe);
1184                 bfa_q_qe_init(qe);
1185                 list_add_tail(qe, &rxf->ucast_pending_add_q);
1186                 if (cleanup == BNA_HARD_CLEANUP) {
1187                         mac = (struct bna_mac *)qe;
1188                         bna_bfi_ucast_req(rxf, mac,
1189                                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1190                         return 1;
1191                 }
1192         }
1193
1194         if (rxf->ucast_active_set) {
1195                 rxf->ucast_pending_set = 1;
1196                 rxf->ucast_active_set = 0;
1197                 if (cleanup == BNA_HARD_CLEANUP) {
1198                         bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1199                                 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1200                         return 1;
1201                 }
1202         }
1203
1204         return 0;
1205 }
1206
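/*
 * Promiscuous and allmulti changes are tracked as pending enable/disable
 * requests in rxmode_pending/rxmode_pending_bitmask (via the
 * is_promisc_enable()/promisc_inactive() family of helpers) and moved into
 * rxmode_active once the corresponding firmware request has been posted.
 */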
1207 static int
1208 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1209 {
1210         struct bna *bna = rxf->rx->bna;
1211
1212         /* Enable/disable promiscuous mode */
1213         if (is_promisc_enable(rxf->rxmode_pending,
1214                                 rxf->rxmode_pending_bitmask)) {
1215                 /* move promisc configuration from pending -> active */
1216                 promisc_inactive(rxf->rxmode_pending,
1217                                 rxf->rxmode_pending_bitmask);
1218                 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1219                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1220                 return 1;
1221         } else if (is_promisc_disable(rxf->rxmode_pending,
1222                                 rxf->rxmode_pending_bitmask)) {
1223                 /* move promisc configuration from pending -> active */
1224                 promisc_inactive(rxf->rxmode_pending,
1225                                 rxf->rxmode_pending_bitmask);
1226                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1227                 bna->promisc_rid = BFI_INVALID_RID;
1228                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1229                 return 1;
1230         }
1231
1232         return 0;
1233 }
1234
1235 static int
1236 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1237 {
1238         struct bna *bna = rxf->rx->bna;
1239
1240         /* Clear pending promisc mode disable */
1241         if (is_promisc_disable(rxf->rxmode_pending,
1242                                 rxf->rxmode_pending_bitmask)) {
1243                 promisc_inactive(rxf->rxmode_pending,
1244                                 rxf->rxmode_pending_bitmask);
1245                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1246                 bna->promisc_rid = BFI_INVALID_RID;
1247                 if (cleanup == BNA_HARD_CLEANUP) {
1248                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1249                         return 1;
1250                 }
1251         }
1252
1253         /* Move promisc mode config from active -> pending */
1254         if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1255                 promisc_enable(rxf->rxmode_pending,
1256                                 rxf->rxmode_pending_bitmask);
1257                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1258                 if (cleanup == BNA_HARD_CLEANUP) {
1259                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1260                         return 1;
1261                 }
1262         }
1263
1264         return 0;
1265 }
1266
1267 static int
1268 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1269 {
1270         /* Enable/disable allmulti mode */
1271         if (is_allmulti_enable(rxf->rxmode_pending,
1272                                 rxf->rxmode_pending_bitmask)) {
1273                 /* move allmulti configuration from pending -> active */
1274                 allmulti_inactive(rxf->rxmode_pending,
1275                                 rxf->rxmode_pending_bitmask);
1276                 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1277                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1278                 return 1;
1279         } else if (is_allmulti_disable(rxf->rxmode_pending,
1280                                         rxf->rxmode_pending_bitmask)) {
1281                 /* move allmulti configuration from pending -> active */
1282                 allmulti_inactive(rxf->rxmode_pending,
1283                                 rxf->rxmode_pending_bitmask);
1284                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1285                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1286                 return 1;
1287         }
1288
1289         return 0;
1290 }
1291
1292 static int
1293 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1294 {
1295         /* Clear pending allmulti mode disable */
1296         if (is_allmulti_disable(rxf->rxmode_pending,
1297                                 rxf->rxmode_pending_bitmask)) {
1298                 allmulti_inactive(rxf->rxmode_pending,
1299                                 rxf->rxmode_pending_bitmask);
1300                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1301                 if (cleanup == BNA_HARD_CLEANUP) {
1302                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1303                         return 1;
1304                 }
1305         }
1306
1307         /* Move allmulti mode config from active -> pending */
1308         if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1309                 allmulti_enable(rxf->rxmode_pending,
1310                                 rxf->rxmode_pending_bitmask);
1311                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1312                 if (cleanup == BNA_HARD_CLEANUP) {
1313                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1314                         return 1;
1315                 }
1316         }
1317
1318         return 0;
1319 }
1320
1321 static int
1322 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1323 {
1324         struct bna *bna = rxf->rx->bna;
1325         int ret = 0;
1326
1327         if (is_promisc_enable(rxf->rxmode_pending,
1328                                 rxf->rxmode_pending_bitmask) ||
1329                 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1330                 /* Do nothing if pending enable or already enabled */
1331         } else if (is_promisc_disable(rxf->rxmode_pending,
1332                                         rxf->rxmode_pending_bitmask)) {
1333                 /* Turn off pending disable command */
1334                 promisc_inactive(rxf->rxmode_pending,
1335                         rxf->rxmode_pending_bitmask);
1336         } else {
1337                 /* Schedule enable */
1338                 promisc_enable(rxf->rxmode_pending,
1339                                 rxf->rxmode_pending_bitmask);
1340                 bna->promisc_rid = rxf->rx->rid;
1341                 ret = 1;
1342         }
1343
1344         return ret;
1345 }
1346
1347 static int
1348 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1349 {
1350         struct bna *bna = rxf->rx->bna;
1351         int ret = 0;
1352
1353         if (is_promisc_disable(rxf->rxmode_pending,
1354                                 rxf->rxmode_pending_bitmask) ||
1355                 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1356                 /* Do nothing if pending disable or already disabled */
1357         } else if (is_promisc_enable(rxf->rxmode_pending,
1358                                         rxf->rxmode_pending_bitmask)) {
1359                 /* Turn off pending enable command */
1360                 promisc_inactive(rxf->rxmode_pending,
1361                                 rxf->rxmode_pending_bitmask);
1362                 bna->promisc_rid = BFI_INVALID_RID;
1363         } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1364                 /* Schedule disable */
1365                 promisc_disable(rxf->rxmode_pending,
1366                                 rxf->rxmode_pending_bitmask);
1367                 ret = 1;
1368         }
1369
1370         return ret;
1371 }
1372
1373 static int
1374 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1375 {
1376         int ret = 0;
1377
1378         if (is_allmulti_enable(rxf->rxmode_pending,
1379                         rxf->rxmode_pending_bitmask) ||
1380                         (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1381                 /* Do nothing if pending enable or already enabled */
1382         } else if (is_allmulti_disable(rxf->rxmode_pending,
1383                                         rxf->rxmode_pending_bitmask)) {
1384                 /* Turn off pending disable command */
1385                 allmulti_inactive(rxf->rxmode_pending,
1386                         rxf->rxmode_pending_bitmask);
1387         } else {
1388                 /* Schedule enable */
1389                 allmulti_enable(rxf->rxmode_pending,
1390                                 rxf->rxmode_pending_bitmask);
1391                 ret = 1;
1392         }
1393
1394         return ret;
1395 }
1396
1397 static int
1398 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1399 {
1400         int ret = 0;
1401
1402         if (is_allmulti_disable(rxf->rxmode_pending,
1403                                 rxf->rxmode_pending_bitmask) ||
1404                 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1405                 /* Do nothing if pending disable or already disabled */
1406         } else if (is_allmulti_enable(rxf->rxmode_pending,
1407                                         rxf->rxmode_pending_bitmask)) {
1408                 /* Turn off pending enable command */
1409                 allmulti_inactive(rxf->rxmode_pending,
1410                                 rxf->rxmode_pending_bitmask);
1411         } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1412                 /* Schedule disable */
1413                 allmulti_disable(rxf->rxmode_pending,
1414                                 rxf->rxmode_pending_bitmask);
1415                 ret = 1;
1416         }
1417
1418         return ret;
1419 }
1420
1421 static int
1422 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1423 {
1424         if (rxf->vlan_strip_pending) {
1425                 rxf->vlan_strip_pending = false;
1426                 bna_bfi_vlan_strip_enable(rxf);
1427                 return 1;
1428         }
1429
1430         return 0;
1431 }
1432
1433 /* RX */
1434
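/*
 * BNA_GET_RXQS() returns the number of RxQs for a configuration (two per
 * path unless the path type is BNA_RXP_SINGLE), SIZE_TO_PAGES() rounds a
 * byte count up to whole pages, and bfi_enet_datapath_q_init() copies a
 * queue's page-table and first-page addresses, page count and page size
 * into a firmware queue descriptor.
 */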
1435 #define BNA_GET_RXQS(qcfg)      (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1436         (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1437
1438 #define SIZE_TO_PAGES(size)     (((size) >> PAGE_SHIFT) + ((((size) &\
1439         (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1440
1441 #define call_rx_stop_cbfn(rx)                                           \
1442 do {                                                                \
1443         if ((rx)->stop_cbfn) {                                          \
1444                 void (*cbfn)(void *, struct bna_rx *);    \
1445                 void *cbarg;                                        \
1446                 cbfn = (rx)->stop_cbfn;                          \
1447                 cbarg = (rx)->stop_cbarg;                              \
1448                 (rx)->stop_cbfn = NULL;                                 \
1449                 (rx)->stop_cbarg = NULL;                                \
1450                 cbfn(cbarg, rx);                                        \
1451         }                                                              \
1452 } while (0)
1453
1454 #define call_rx_stall_cbfn(rx)                                          \
1455 do {                                                                    \
1456         if ((rx)->rx_stall_cbfn)                                        \
1457                 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));             \
1458 } while (0)
1459
1460 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt)                        \
1461 do {                                                                    \
1462         struct bna_dma_addr cur_q_addr =                                \
1463                 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));      \
1464         (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;        \
1465         (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;        \
1466         (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;              \
1467         (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;              \
1468         (bfi_q)->pages = htons((u16)(bna_qpt)->page_count);     \
1469         (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1470 } while (0)
1471
1472 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1473 static void bna_rx_enet_stop(struct bna_rx *rx);
1474 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1475
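/*
 * Rx state machine: stopped -> start_wait -> rxf_start_wait and on to
 * started once the RxF reports started; the various *_stop_wait,
 * cleanup_wait, failed and quiesce_wait states tear the datapath back down
 * on a stop request or failure.
 */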
1476 bfa_fsm_state_decl(bna_rx, stopped,
1477         struct bna_rx, enum bna_rx_event);
1478 bfa_fsm_state_decl(bna_rx, start_wait,
1479         struct bna_rx, enum bna_rx_event);
1480 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1481         struct bna_rx, enum bna_rx_event);
1482 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1483         struct bna_rx, enum bna_rx_event);
1484 bfa_fsm_state_decl(bna_rx, started,
1485         struct bna_rx, enum bna_rx_event);
1486 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1487         struct bna_rx, enum bna_rx_event);
1488 bfa_fsm_state_decl(bna_rx, stop_wait,
1489         struct bna_rx, enum bna_rx_event);
1490 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1491         struct bna_rx, enum bna_rx_event);
1492 bfa_fsm_state_decl(bna_rx, failed,
1493         struct bna_rx, enum bna_rx_event);
1494 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1495         struct bna_rx, enum bna_rx_event);
1496
1497 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1498 {
1499         call_rx_stop_cbfn(rx);
1500 }
1501
1502 static void bna_rx_sm_stopped(struct bna_rx *rx,
1503                                 enum bna_rx_event event)
1504 {
1505         switch (event) {
1506         case RX_E_START:
1507                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1508                 break;
1509
1510         case RX_E_STOP:
1511                 call_rx_stop_cbfn(rx);
1512                 break;
1513
1514         case RX_E_FAIL:
1515                 /* no-op */
1516                 break;
1517
1518         default:
1519                 bfa_sm_fault(event);
1520                 break;
1521         }
1522 }
1523
1524 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1525 {
1526         bna_bfi_rx_enet_start(rx);
1527 }
1528
1529 static void
1530 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1531 {
1532 }
1533
1534 static void
1535 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1536 {
1537         switch (event) {
1538         case RX_E_FAIL:
1539         case RX_E_STOPPED:
1540                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1541                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1542                 break;
1543
1544         case RX_E_STARTED:
1545                 bna_rx_enet_stop(rx);
1546                 break;
1547
1548         default:
1549                 bfa_sm_fault(event);
1550                 break;
1551         }
1552 }
1553
1554 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1555                                 enum bna_rx_event event)
1556 {
1557         switch (event) {
1558         case RX_E_STOP:
1559                 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1560                 break;
1561
1562         case RX_E_FAIL:
1563                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1564                 break;
1565
1566         case RX_E_STARTED:
1567                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1568                 break;
1569
1570         default:
1571                 bfa_sm_fault(event);
1572                 break;
1573         }
1574 }
1575
1576 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1577 {
1578         rx->rx_post_cbfn(rx->bna->bnad, rx);
1579         bna_rxf_start(&rx->rxf);
1580 }
1581
1582 static void
1583 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1584 {
1585 }
1586
1587 static void
1588 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1589 {
1590         switch (event) {
1591         case RX_E_FAIL:
1592                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1593                 bna_rxf_fail(&rx->rxf);
1594                 call_rx_stall_cbfn(rx);
1595                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1596                 break;
1597
1598         case RX_E_RXF_STARTED:
1599                 bna_rxf_stop(&rx->rxf);
1600                 break;
1601
1602         case RX_E_RXF_STOPPED:
1603                 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1604                 call_rx_stall_cbfn(rx);
1605                 bna_rx_enet_stop(rx);
1606                 break;
1607
1608         default:
1609                 bfa_sm_fault(event);
1610                 break;
1611         }
1613 }
1614
1615 static void
1616 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1617 {
1618 }
1619
1620 static void
1621 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1622 {
1623         switch (event) {
1624         case RX_E_FAIL:
1625         case RX_E_STOPPED:
1626                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1627                 break;
1628
1629         case RX_E_STARTED:
1630                 bna_rx_enet_stop(rx);
1631                 break;
1632
1633         default:
1634                 bfa_sm_fault(event);
1635         }
1636 }
1637
1638 static void
1639 bna_rx_sm_started_entry(struct bna_rx *rx)
1640 {
1641         struct bna_rxp *rxp;
1642         struct list_head *qe_rxp;
1643         int is_regular = (rx->type == BNA_RX_T_REGULAR);
1644
1645         /* Start IB */
1646         list_for_each(qe_rxp, &rx->rxp_q) {
1647                 rxp = (struct bna_rxp *)qe_rxp;
1648                 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1649         }
1650
1651         bna_ethport_cb_rx_started(&rx->bna->ethport);
1652 }
1653
1654 static void
1655 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1656 {
1657         switch (event) {
1658         case RX_E_STOP:
1659                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1660                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1661                 bna_rxf_stop(&rx->rxf);
1662                 break;
1663
1664         case RX_E_FAIL:
1665                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1666                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1667                 bna_rxf_fail(&rx->rxf);
1668                 call_rx_stall_cbfn(rx);
1669                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1670                 break;
1671
1672         default:
1673                 bfa_sm_fault(event);
1674                 break;
1675         }
1676 }
1677
1678 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1679                                 enum bna_rx_event event)
1680 {
1681         switch (event) {
1682         case RX_E_STOP:
1683                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1684                 break;
1685
1686         case RX_E_FAIL:
1687                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1688                 bna_rxf_fail(&rx->rxf);
1689                 call_rx_stall_cbfn(rx);
1690                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1691                 break;
1692
1693         case RX_E_RXF_STARTED:
1694                 bfa_fsm_set_state(rx, bna_rx_sm_started);
1695                 break;
1696
1697         default:
1698                 bfa_sm_fault(event);
1699                 break;
1700         }
1701 }
1702
1703 static void
1704 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1705 {
1706 }
1707
1708 static void
1709 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1710 {
1711         switch (event) {
1712         case RX_E_FAIL:
1713         case RX_E_RXF_STOPPED:
1714                 /* No-op */
1715                 break;
1716
1717         case RX_E_CLEANUP_DONE:
1718                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1719                 break;
1720
1721         default:
1722                 bfa_sm_fault(event);
1723                 break;
1724         }
1725 }
1726
1727 static void
1728 bna_rx_sm_failed_entry(struct bna_rx *rx)
1729 {
1730 }
1731
1732 static void
1733 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1734 {
1735         switch (event) {
1736         case RX_E_START:
1737                 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1738                 break;
1739
1740         case RX_E_STOP:
1741                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1742                 break;
1743
1744         case RX_E_FAIL:
1745         case RX_E_RXF_STARTED:
1746         case RX_E_RXF_STOPPED:
1747                 /* No-op */
1748                 break;
1749
1750         case RX_E_CLEANUP_DONE:
1751                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1752                 break;
1753
1754         default:
1755                 bfa_sm_fault(event);
1756                 break;
1757         }
}
1758
1759 static void
1760 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1761 {
1762 }
1763
1764 static void
1765 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1766 {
1767         switch (event) {
1768         case RX_E_STOP:
1769                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1770                 break;
1771
1772         case RX_E_FAIL:
1773                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1774                 break;
1775
1776         case RX_E_CLEANUP_DONE:
1777                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1778                 break;
1779
1780         default:
1781                 bfa_sm_fault(event);
1782                 break;
1783         }
1784 }
1785
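/*
 * Build a BFI_ENET_H2I_RX_CFG_SET_REQ describing every Rx path (its CQ,
 * large/single RxQ and, for SLR/HDS, the small RxQ) plus the IB and HDS
 * settings, and post it to the firmware message queue.  The response is
 * handled by bna_bfi_rx_enet_start_rsp().
 */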
1786 static void
1787 bna_bfi_rx_enet_start(struct bna_rx *rx)
1788 {
1789         struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1790         struct bna_rxp *rxp = NULL;
1791         struct bna_rxq *q0 = NULL, *q1 = NULL;
1792         struct list_head *rxp_qe;
1793         int i;
1794
1795         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1796                 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1797         cfg_req->mh.num_entries = htons(
1798                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1799
1800         cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1801         cfg_req->num_queue_sets = rx->num_paths;
1802         for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1803                 i < rx->num_paths;
1804                 i++, rxp_qe = bfa_q_next(rxp_qe)) {
1805                 rxp = (struct bna_rxp *)rxp_qe;
1806
1807                 GET_RXQS(rxp, q0, q1);
1808                 switch (rxp->type) {
1809                 case BNA_RXP_SLR:
1810                 case BNA_RXP_HDS:
1811                         /* Small RxQ */
1812                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1813                                                 &q1->qpt);
1814                         cfg_req->q_cfg[i].qs.rx_buffer_size =
1815                                 htons((u16)q1->buffer_size);
1816                         /* Fall through */
1817
1818                 case BNA_RXP_SINGLE:
1819                         /* Large/Single RxQ */
1820                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1821                                                 &q0->qpt);
1822                         if (q0->multi_buffer)
1823                                 /* multi-buffer is enabled by allocating
1824                                  * a new rx with a new set of resources.
1825                                  * q0->buffer_size is expected to be
1826                                  * initialized to the fragment size.
1827                                  */
1828                                 cfg_req->rx_cfg.multi_buffer =
1829                                         BNA_STATUS_T_ENABLED;
1830                         else
1831                                 q0->buffer_size =
1832                                         bna_enet_mtu_get(&rx->bna->enet);
1833                         cfg_req->q_cfg[i].ql.rx_buffer_size =
1834                                 htons((u16)q0->buffer_size);
1835                         break;
1836
1837                 default:
1838                         BUG_ON(1);
1839                 }
1840
1841                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1842                                         &rxp->cq.qpt);
1843
1844                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1845                         rxp->cq.ib.ib_seg_host_addr.lsb;
1846                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1847                         rxp->cq.ib.ib_seg_host_addr.msb;
1848                 cfg_req->q_cfg[i].ib.intr.msix_index =
1849                         htons((u16)rxp->cq.ib.intr_vector);
1850         }
1851
1852         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1853         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1854         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1855         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1856         cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1857                                 ? BNA_STATUS_T_ENABLED :
1858                                 BNA_STATUS_T_DISABLED;
1859         cfg_req->ib_cfg.coalescing_timeout =
1860                         htonl((u32)rxp->cq.ib.coalescing_timeo);
1861         cfg_req->ib_cfg.inter_pkt_timeout =
1862                         htonl((u32)rxp->cq.ib.interpkt_timeo);
1863         cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1864
1865         switch (rxp->type) {
1866         case BNA_RXP_SLR:
1867                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1868                 break;
1869
1870         case BNA_RXP_HDS:
1871                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1872                 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1873                 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1874                 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1875                 break;
1876
1877         case BNA_RXP_SINGLE:
1878                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1879                 break;
1880
1881         default:
1882                 BUG_ON(1);
1883         }
1884         cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1885
1886         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1887                 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1888         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1889 }
1890
1891 static void
1892 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1893 {
1894         struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1895
1896         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1897                 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1898         req->mh.num_entries = htons(
1899                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1900         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1901                 &req->mh);
1902         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1903 }
1904
1905 static void
1906 bna_rx_enet_stop(struct bna_rx *rx)
1907 {
1908         struct bna_rxp *rxp;
1909         struct list_head                 *qe_rxp;
1910
1911         /* Stop IB */
1912         list_for_each(qe_rxp, &rx->rxp_q) {
1913                 rxp = (struct bna_rxp *)qe_rxp;
1914                 bna_ib_stop(rx->bna, &rxp->cq.ib);
1915         }
1916
1917         bna_bfi_rx_enet_stop(rx);
1918 }
1919
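/* Return 1 if enough free rx, rxp and rxq objects exist for this config */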
1920 static int
1921 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1922 {
1923         if ((rx_mod->rx_free_count == 0) ||
1924                 (rx_mod->rxp_free_count == 0) ||
1925                 (rx_mod->rxq_free_count == 0))
1926                 return 0;
1927
1928         if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1929                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1930                         (rx_mod->rxq_free_count < rx_cfg->num_paths))
1931                                 return 0;
1932         } else {
1933                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1934                         (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1935                         return 0;
1936         }
1937
1938         return 1;
1939 }
1940
1941 static struct bna_rxq *
1942 bna_rxq_get(struct bna_rx_mod *rx_mod)
1943 {
1944         struct bna_rxq *rxq = NULL;
1945         struct list_head        *qe = NULL;
1946
1947         bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1948         rx_mod->rxq_free_count--;
1949         rxq = (struct bna_rxq *)qe;
1950         bfa_q_qe_init(&rxq->qe);
1951
1952         return rxq;
1953 }
1954
1955 static void
1956 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1957 {
1958         bfa_q_qe_init(&rxq->qe);
1959         list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1960         rx_mod->rxq_free_count++;
1961 }
1962
1963 static struct bna_rxp *
1964 bna_rxp_get(struct bna_rx_mod *rx_mod)
1965 {
1966         struct list_head        *qe = NULL;
1967         struct bna_rxp *rxp = NULL;
1968
1969         bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1970         rx_mod->rxp_free_count--;
1971         rxp = (struct bna_rxp *)qe;
1972         bfa_q_qe_init(&rxp->qe);
1973
1974         return rxp;
1975 }
1976
1977 static void
1978 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1979 {
1980         bfa_q_qe_init(&rxp->qe);
1981         list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1982         rx_mod->rxp_free_count++;
1983 }
1984
1985 static struct bna_rx *
1986 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1987 {
1988         struct list_head        *qe = NULL;
1989         struct bna_rx *rx = NULL;
1990
1991         if (type == BNA_RX_T_REGULAR)
1992                 bfa_q_deq(&rx_mod->rx_free_q, &qe);
1993         else
1994                 bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1995
1996         rx_mod->rx_free_count--;
1997         rx = (struct bna_rx *)qe;
1998         bfa_q_qe_init(&rx->qe);
1999         list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2000         rx->type = type;
2001
2002         return rx;
2003 }
2004
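/* Return an rx to the free list, keeping the list sorted by rid */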
2005 static void
2006 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2007 {
2008         struct list_head *prev_qe = NULL;
2009         struct list_head *qe;
2010
2011         bfa_q_qe_init(&rx->qe);
2012
2013         list_for_each(qe, &rx_mod->rx_free_q) {
2014                 if (((struct bna_rx *)qe)->rid < rx->rid)
2015                         prev_qe = qe;
2016                 else
2017                         break;
2018         }
2019
2020         if (prev_qe == NULL) {
2021                 /* This is the first entry */
2022                 bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
2023         } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
2024                 /* This is the last entry */
2025                 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2026         } else {
2027                 /* Somewhere in the middle */
2028                 bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
2029                 bfa_q_prev(&rx->qe) = prev_qe;
2030                 bfa_q_next(prev_qe) = &rx->qe;
2031                 bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
2032         }
2033
2034         rx_mod->rx_free_count++;
2035 }
2036
2037 static void
2038 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
2039                 struct bna_rxq *q1)
2040 {
2041         switch (rxp->type) {
2042         case BNA_RXP_SINGLE:
2043                 rxp->rxq.single.only = q0;
2044                 rxp->rxq.single.reserved = NULL;
2045                 break;
2046         case BNA_RXP_SLR:
2047                 rxp->rxq.slr.large = q0;
2048                 rxp->rxq.slr.small = q1;
2049                 break;
2050         case BNA_RXP_HDS:
2051                 rxp->rxq.hds.data = q0;
2052                 rxp->rxq.hds.hdr = q1;
2053                 break;
2054         default:
2055                 break;
2056         }
2057 }
2058
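/*
 * Set up the RxQ queue page table (QPT): store the DMA address of each
 * PAGE_SIZE chunk of queue memory in the QPT handed to hardware and the
 * corresponding kernel virtual address in the driver's shadow QPT.
 */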
2059 static void
2060 bna_rxq_qpt_setup(struct bna_rxq *rxq,
2061                 struct bna_rxp *rxp,
2062                 u32 page_count,
2063                 u32 page_size,
2064                 struct bna_mem_descr *qpt_mem,
2065                 struct bna_mem_descr *swqpt_mem,
2066                 struct bna_mem_descr *page_mem)
2067 {
2068         u8 *kva;
2069         u64 dma;
2070         struct bna_dma_addr bna_dma;
2071         int     i;
2072
2073         rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2074         rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2075         rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2076         rxq->qpt.page_count = page_count;
2077         rxq->qpt.page_size = page_size;
2078
2079         rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2080         rxq->rcb->sw_q = page_mem->kva;
2081
2082         kva = page_mem->kva;
2083         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2084
2085         for (i = 0; i < rxq->qpt.page_count; i++) {
2086                 rxq->rcb->sw_qpt[i] = kva;
2087                 kva += PAGE_SIZE;
2088
2089                 BNA_SET_DMA_ADDR(dma, &bna_dma);
2090                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2091                         bna_dma.lsb;
2092                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2093                         bna_dma.msb;
2094                 dma += PAGE_SIZE;
2095         }
2096 }
2097
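/* Same as bna_rxq_qpt_setup(), but for the completion queue of an Rx path */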
2098 static void
2099 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
2100                 u32 page_count,
2101                 u32 page_size,
2102                 struct bna_mem_descr *qpt_mem,
2103                 struct bna_mem_descr *swqpt_mem,
2104                 struct bna_mem_descr *page_mem)
2105 {
2106         u8 *kva;
2107         u64 dma;
2108         struct bna_dma_addr bna_dma;
2109         int     i;
2110
2111         rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2112         rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2113         rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2114         rxp->cq.qpt.page_count = page_count;
2115         rxp->cq.qpt.page_size = page_size;
2116
2117         rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2118         rxp->cq.ccb->sw_q = page_mem->kva;
2119
2120         kva = page_mem->kva;
2121         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2122
2123         for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2124                 rxp->cq.ccb->sw_qpt[i] = kva;
2125                 kva += PAGE_SIZE;
2126
2127                 BNA_SET_DMA_ADDR(dma, &bna_dma);
2128                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2129                         bna_dma.lsb;
2130                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2131                         bna_dma.msb;
2132                 dma += PAGE_SIZE;
2133         }
2134 }
2135
2136 static void
2137 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2138 {
2139         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2140
2141         bfa_wc_down(&rx_mod->rx_stop_wc);
2142 }
2143
2144 static void
2145 bna_rx_mod_cb_rx_stopped_all(void *arg)
2146 {
2147         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2148
2149         if (rx_mod->stop_cbfn)
2150                 rx_mod->stop_cbfn(&rx_mod->bna->enet);
2151         rx_mod->stop_cbfn = NULL;
2152 }
2153
2154 static void
2155 bna_rx_start(struct bna_rx *rx)
2156 {
2157         rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2158         if (rx->rx_flags & BNA_RX_F_ENABLED)
2159                 bfa_fsm_send_event(rx, RX_E_START);
2160 }
2161
2162 static void
2163 bna_rx_stop(struct bna_rx *rx)
2164 {
2165         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2166         if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2167                 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2168         else {
2169                 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2170                 rx->stop_cbarg = &rx->bna->rx_mod;
2171                 bfa_fsm_send_event(rx, RX_E_STOP);
2172         }
2173 }
2174
2175 static void
2176 bna_rx_fail(struct bna_rx *rx)
2177 {
2178         /* Indicate Enet is no longer started, and has failed */
2179         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2180         bfa_fsm_send_event(rx, RX_E_FAIL);
2181 }
2182
2183 void
2184 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2185 {
2186         struct bna_rx *rx;
2187         struct list_head *qe;
2188
2189         rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2190         if (type == BNA_RX_T_LOOPBACK)
2191                 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2192
2193         list_for_each(qe, &rx_mod->rx_active_q) {
2194                 rx = (struct bna_rx *)qe;
2195                 if (rx->type == type)
2196                         bna_rx_start(rx);
2197         }
2198 }
2199
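/*
 * Stop every active Rx of the given type.  The rx_stop_wc wait counter
 * ensures bna_enet_cb_rx_stopped() is invoked only after all of them
 * have reported stopped.
 */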
2200 void
2201 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2202 {
2203         struct bna_rx *rx;
2204         struct list_head *qe;
2205
2206         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2207         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2208
2209         rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2210
2211         bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2212
2213         list_for_each(qe, &rx_mod->rx_active_q) {
2214                 rx = (struct bna_rx *)qe;
2215                 if (rx->type == type) {
2216                         bfa_wc_up(&rx_mod->rx_stop_wc);
2217                         bna_rx_stop(rx);
2218                 }
2219         }
2220
2221         bfa_wc_wait(&rx_mod->rx_stop_wc);
2222 }
2223
2224 void
2225 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2226 {
2227         struct bna_rx *rx;
2228         struct list_head *qe;
2229
2230         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2231         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2232
2233         list_for_each(qe, &rx_mod->rx_active_q) {
2234                 rx = (struct bna_rx *)qe;
2235                 bna_rx_fail(rx);
2236         }
2237 }
2238
2239 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2240                         struct bna_res_info *res_info)
2241 {
2242         int     index;
2243         struct bna_rx *rx_ptr;
2244         struct bna_rxp *rxp_ptr;
2245         struct bna_rxq *rxq_ptr;
2246
2247         rx_mod->bna = bna;
2248         rx_mod->flags = 0;
2249
2250         rx_mod->rx = (struct bna_rx *)
2251                 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2252         rx_mod->rxp = (struct bna_rxp *)
2253                 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2254         rx_mod->rxq = (struct bna_rxq *)
2255                 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2256
2257         /* Initialize the queues */
2258         INIT_LIST_HEAD(&rx_mod->rx_free_q);
2259         rx_mod->rx_free_count = 0;
2260         INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2261         rx_mod->rxq_free_count = 0;
2262         INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2263         rx_mod->rxp_free_count = 0;
2264         INIT_LIST_HEAD(&rx_mod->rx_active_q);
2265
2266         /* Build RX queues */
2267         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2268                 rx_ptr = &rx_mod->rx[index];
2269
2270                 bfa_q_qe_init(&rx_ptr->qe);
2271                 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2272                 rx_ptr->bna = NULL;
2273                 rx_ptr->rid = index;
2274                 rx_ptr->stop_cbfn = NULL;
2275                 rx_ptr->stop_cbarg = NULL;
2276
2277                 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2278                 rx_mod->rx_free_count++;
2279         }
2280
2281         /* build RX-path queue */
2282         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2283                 rxp_ptr = &rx_mod->rxp[index];
2284                 bfa_q_qe_init(&rxp_ptr->qe);
2285                 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2286                 rx_mod->rxp_free_count++;
2287         }
2288
2289         /* build RXQ queue */
2290         for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2291                 rxq_ptr = &rx_mod->rxq[index];
2292                 bfa_q_qe_init(&rxq_ptr->qe);
2293                 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2294                 rx_mod->rxq_free_count++;
2295         }
2296 }
2297
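/*
 * Tear down the rx module.  The free-list walks below only count the
 * entries; the counts are not used.
 */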
2298 void
2299 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2300 {
2301         struct list_head                *qe;
2302         int i;
2303
2304         i = 0;
2305         list_for_each(qe, &rx_mod->rx_free_q)
2306                 i++;
2307
2308         i = 0;
2309         list_for_each(qe, &rx_mod->rxp_free_q)
2310                 i++;
2311
2312         i = 0;
2313         list_for_each(qe, &rx_mod->rxq_free_q)
2314                 i++;
2315
2316         rx_mod->bna = NULL;
2317 }
2318
2319 void
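/*
 * Firmware response to the Rx config request: record the hardware queue
 * ids, map the doorbell offsets returned for each CQ/RxQ into the PCI
 * BAR, reset the producer/consumer indexes and advance the Rx FSM.
 */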
2320 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2321 {
2322         struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2323         struct bna_rxp *rxp = NULL;
2324         struct bna_rxq *q0 = NULL, *q1 = NULL;
2325         struct list_head *rxp_qe;
2326         int i;
2327
2328         bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2329                 sizeof(struct bfi_enet_rx_cfg_rsp));
2330
2331         rx->hw_id = cfg_rsp->hw_id;
2332
2333         for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2334                 i < rx->num_paths;
2335                 i++, rxp_qe = bfa_q_next(rxp_qe)) {
2336                 rxp = (struct bna_rxp *)rxp_qe;
2337                 GET_RXQS(rxp, q0, q1);
2338
2339                 /* Setup doorbells */
2340                 rxp->cq.ccb->i_dbell->doorbell_addr =
2341                         rx->bna->pcidev.pci_bar_kva
2342                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
2343                 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2344                 q0->rcb->q_dbell =
2345                         rx->bna->pcidev.pci_bar_kva
2346                         + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2347                 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2348                 if (q1) {
2349                         q1->rcb->q_dbell =
2350                         rx->bna->pcidev.pci_bar_kva
2351                         + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2352                         q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2353                 }
2354
2355                 /* Initialize producer/consumer indexes */
2356                 (*rxp->cq.ccb->hw_producer_index) = 0;
2357                 rxp->cq.ccb->producer_index = 0;
2358                 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2359                 if (q1)
2360                         q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2361         }
2362
2363         bfa_fsm_send_event(rx, RX_E_STARTED);
2364 }
2365
2366 void
2367 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2368 {
2369         bfa_fsm_send_event(rx, RX_E_STOPPED);
2370 }
2371
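/*
 * Fill res_info[] with the memory and interrupt resources required for
 * the given Rx configuration: CCBs/RCBs, CQ and RxQ page tables with
 * their pages, IB index segments, the RSS indirection table and one
 * MSI-X vector per path.
 */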
2372 void
2373 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2374 {
2375         u32 cq_size, hq_size, dq_size;
2376         u32 cpage_count, hpage_count, dpage_count;
2377         struct bna_mem_info *mem_info;
2378         u32 cq_depth;
2379         u32 hq_depth;
2380         u32 dq_depth;
2381
2382         dq_depth = q_cfg->q0_depth;
2383         hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2384         cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
2385
2386         cq_size = cq_depth * BFI_CQ_WI_SIZE;
2387         cq_size = ALIGN(cq_size, PAGE_SIZE);
2388         cpage_count = SIZE_TO_PAGES(cq_size);
2389
2390         dq_depth = roundup_pow_of_two(dq_depth);
2391         dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2392         dq_size = ALIGN(dq_size, PAGE_SIZE);
2393         dpage_count = SIZE_TO_PAGES(dq_size);
2394
2395         if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2396                 hq_depth = roundup_pow_of_two(hq_depth);
2397                 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2398                 hq_size = ALIGN(hq_size, PAGE_SIZE);
2399                 hpage_count = SIZE_TO_PAGES(hq_size);
2400         } else {
2401                 hpage_count = 0;
        }
2402
2403         res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2404         mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2405         mem_info->mem_type = BNA_MEM_T_KVA;
2406         mem_info->len = sizeof(struct bna_ccb);
2407         mem_info->num = q_cfg->num_paths;
2408
2409         res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2410         mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2411         mem_info->mem_type = BNA_MEM_T_KVA;
2412         mem_info->len = sizeof(struct bna_rcb);
2413         mem_info->num = BNA_GET_RXQS(q_cfg);
2414
2415         res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2416         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2417         mem_info->mem_type = BNA_MEM_T_DMA;
2418         mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2419         mem_info->num = q_cfg->num_paths;
2420
2421         res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2422         mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2423         mem_info->mem_type = BNA_MEM_T_KVA;
2424         mem_info->len = cpage_count * sizeof(void *);
2425         mem_info->num = q_cfg->num_paths;
2426
2427         res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2428         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2429         mem_info->mem_type = BNA_MEM_T_DMA;
2430         mem_info->len = PAGE_SIZE * cpage_count;
2431         mem_info->num = q_cfg->num_paths;
2432
2433         res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2434         mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2435         mem_info->mem_type = BNA_MEM_T_DMA;
2436         mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2437         mem_info->num = q_cfg->num_paths;
2438
2439         res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2440         mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2441         mem_info->mem_type = BNA_MEM_T_KVA;
2442         mem_info->len = dpage_count * sizeof(void *);
2443         mem_info->num = q_cfg->num_paths;
2444
2445         res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2446         mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2447         mem_info->mem_type = BNA_MEM_T_DMA;
2448         mem_info->len = PAGE_SIZE * dpage_count;
2449         mem_info->num = q_cfg->num_paths;
2450
2451         res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2452         mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2453         mem_info->mem_type = BNA_MEM_T_DMA;
2454         mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2455         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2456
2457         res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2458         mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2459         mem_info->mem_type = BNA_MEM_T_KVA;
2460         mem_info->len = hpage_count * sizeof(void *);
2461         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2462
2463         res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2464         mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2465         mem_info->mem_type = BNA_MEM_T_DMA;
2466         mem_info->len = PAGE_SIZE * hpage_count;
2467         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2468
2469         res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2470         mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2471         mem_info->mem_type = BNA_MEM_T_DMA;
2472         mem_info->len = BFI_IBIDX_SIZE;
2473         mem_info->num = q_cfg->num_paths;
2474
2475         res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2476         mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2477         mem_info->mem_type = BNA_MEM_T_KVA;
2478         mem_info->len = BFI_ENET_RSS_RIT_MAX;
2479         mem_info->num = 1;
2480
2481         res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2482         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2483         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2484 }
2485
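/*
 * Carve an Rx out of the rx module free lists and wire up each path:
 * one rxp plus one or two rxqs, the IB, the RCB(s) and CCB, and the
 * queue page tables, all backed by the memory described in res_info.
 * The RxF is then initialized and the Rx left in the stopped state.
 */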
2486 struct bna_rx *
2487 bna_rx_create(struct bna *bna, struct bnad *bnad,
2488                 struct bna_rx_config *rx_cfg,
2489                 const struct bna_rx_event_cbfn *rx_cbfn,
2490                 struct bna_res_info *res_info,
2491                 void *priv)
2492 {
2493         struct bna_rx_mod *rx_mod = &bna->rx_mod;
2494         struct bna_rx *rx;
2495         struct bna_rxp *rxp;
2496         struct bna_rxq *q0;
2497         struct bna_rxq *q1;
2498         struct bna_intr_info *intr_info;
2499         struct bna_mem_descr *hqunmap_mem;
2500         struct bna_mem_descr *dqunmap_mem;
2501         struct bna_mem_descr *ccb_mem;
2502         struct bna_mem_descr *rcb_mem;
2503         struct bna_mem_descr *cqpt_mem;
2504         struct bna_mem_descr *cswqpt_mem;
2505         struct bna_mem_descr *cpage_mem;
2506         struct bna_mem_descr *hqpt_mem;
2507         struct bna_mem_descr *dqpt_mem;
2508         struct bna_mem_descr *hsqpt_mem;
2509         struct bna_mem_descr *dsqpt_mem;
2510         struct bna_mem_descr *hpage_mem;
2511         struct bna_mem_descr *dpage_mem;
2512         u32 dpage_count, hpage_count;
2513         u32 hq_idx, dq_idx, rcb_idx;
2514         u32 cq_depth, i;
2515         u32 page_count;
2516
2517         if (!bna_rx_res_check(rx_mod, rx_cfg))
2518                 return NULL;
2519
2520         intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2521         ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2522         rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2523         dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2524         hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2525         cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2526         cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2527         cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2528         hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2529         dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2530         hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2531         dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2532         hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2533         dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2534
2535         page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2536                         PAGE_SIZE;
2537
2538         dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2539                         PAGE_SIZE;
2540
2541         hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2542                         PAGE_SIZE;
2543
2544         rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2545         rx->bna = bna;
2546         rx->rx_flags = 0;
2547         INIT_LIST_HEAD(&rx->rxp_q);
2548         rx->stop_cbfn = NULL;
2549         rx->stop_cbarg = NULL;
2550         rx->priv = priv;
2551
2552         rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2553         rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2554         rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2555         rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2556         rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2557         /* Following callbacks are mandatory */
2558         rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2559         rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2560
2561         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2562                 switch (rx->type) {
2563                 case BNA_RX_T_REGULAR:
2564                         if (!(rx->bna->rx_mod.flags &
2565                                 BNA_RX_MOD_F_ENET_LOOPBACK))
2566                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2567                         break;
2568                 case BNA_RX_T_LOOPBACK:
2569                         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2570                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2571                         break;
2572                 }
2573         }
2574
2575         rx->num_paths = rx_cfg->num_paths;
2576         for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2577                         i < rx->num_paths; i++) {
2578                 rxp = bna_rxp_get(rx_mod);
2579                 list_add_tail(&rxp->qe, &rx->rxp_q);
2580                 rxp->type = rx_cfg->rxp_type;
2581                 rxp->rx = rx;
2582                 rxp->cq.rx = rx;
2583
2584                 q0 = bna_rxq_get(rx_mod);
2585                 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2586                         q1 = NULL;
2587                 else
2588                         q1 = bna_rxq_get(rx_mod);
2589
2590                 if (1 == intr_info->num)
2591                         rxp->vector = intr_info->idl[0].vector;
2592                 else
2593                         rxp->vector = intr_info->idl[i].vector;
2594
2595                 /* Setup IB */
2596
2597                 rxp->cq.ib.ib_seg_host_addr.lsb =
2598                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2599                 rxp->cq.ib.ib_seg_host_addr.msb =
2600                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2601                 rxp->cq.ib.ib_seg_host_addr_kva =
2602                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2603                 rxp->cq.ib.intr_type = intr_info->intr_type;
2604                 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2605                         rxp->cq.ib.intr_vector = rxp->vector;
2606                 else
2607                         rxp->cq.ib.intr_vector = BIT(rxp->vector);
2608                 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2609                 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2610                 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2611
2612                 bna_rxp_add_rxqs(rxp, q0, q1);
2613
2614                 /* Setup large Q */
2615
2616                 q0->rx = rx;
2617                 q0->rxp = rxp;
2618
2619                 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2620                 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2621                 rcb_idx++; dq_idx++;
2622                 q0->rcb->q_depth = rx_cfg->q0_depth;
2623                 q0->q_depth = rx_cfg->q0_depth;
2624                 q0->multi_buffer = rx_cfg->q0_multi_buf;
2625                 q0->buffer_size = rx_cfg->q0_buf_size;
2626                 q0->num_vecs = rx_cfg->q0_num_vecs;
2627                 q0->rcb->rxq = q0;
2628                 q0->rcb->bnad = bna->bnad;
2629                 q0->rcb->id = 0;
2630                 q0->rx_packets = q0->rx_bytes = 0;
2631                 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2632
2633                 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2634                         &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2635
2636                 if (rx->rcb_setup_cbfn)
2637                         rx->rcb_setup_cbfn(bnad, q0->rcb);
2638
2639                 /* Setup small Q */
2640
2641                 if (q1) {
2642                         q1->rx = rx;
2643                         q1->rxp = rxp;
2644
2645                         q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2646                         q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2647                         rcb_idx++; hq_idx++;
2648                         q1->rcb->q_depth = rx_cfg->q1_depth;
2649                         q1->q_depth = rx_cfg->q1_depth;
2650                         q1->multi_buffer = BNA_STATUS_T_DISABLED;
2651                         q1->num_vecs = 1;
2652                         q1->rcb->rxq = q1;
2653                         q1->rcb->bnad = bna->bnad;
2654                         q1->rcb->id = 1;
2655                         q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2656                                         rx_cfg->hds_config.forced_offset
2657                                         : rx_cfg->q1_buf_size;
2658                         q1->rx_packets = q1->rx_bytes = 0;
2659                         q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2660
2661                         bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2662                                 &hqpt_mem[i], &hsqpt_mem[i],
2663                                 &hpage_mem[i]);
2664
2665                         if (rx->rcb_setup_cbfn)
2666                                 rx->rcb_setup_cbfn(bnad, q1->rcb);
2667                 }
2668
2669                 /* Setup CQ */
2670
2671                 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2672                 cq_depth = rx_cfg->q0_depth +
2673                         ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2674                          0 : rx_cfg->q1_depth);
2675                 /* if multi-buffer is enabled, the sum of q0_depth
2676                  * and q1_depth need not be a power of 2
2677                  */
2678                 cq_depth = roundup_pow_of_two(cq_depth);
2679                 rxp->cq.ccb->q_depth = cq_depth;
2680                 rxp->cq.ccb->cq = &rxp->cq;
2681                 rxp->cq.ccb->rcb[0] = q0->rcb;
2682                 q0->rcb->ccb = rxp->cq.ccb;
2683                 if (q1) {
2684                         rxp->cq.ccb->rcb[1] = q1->rcb;
2685                         q1->rcb->ccb = rxp->cq.ccb;
2686                 }
2687                 rxp->cq.ccb->hw_producer_index =
2688                         (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2689                 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2690                 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2691                 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2692                 rxp->cq.ccb->rx_coalescing_timeo =
2693                         rxp->cq.ib.coalescing_timeo;
2694                 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2695                 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2696                 rxp->cq.ccb->bnad = bna->bnad;
2697                 rxp->cq.ccb->id = i;
2698
2699                 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2700                         &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2701
2702                 if (rx->ccb_setup_cbfn)
2703                         rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2704         }
2705
2706         rx->hds_cfg = rx_cfg->hds_config;
2707
2708         bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2709
2710         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2711
2712         rx_mod->rid_mask |= BIT(rx->rid);
2713
2714         return rx;
2715 }
2716
2717 void
2718 bna_rx_destroy(struct bna_rx *rx)
2719 {
2720         struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2721         struct bna_rxq *q0 = NULL;
2722         struct bna_rxq *q1 = NULL;
2723         struct bna_rxp *rxp;
2724         struct list_head *qe;
2725
2726         bna_rxf_uninit(&rx->rxf);
2727
2728         while (!list_empty(&rx->rxp_q)) {
2729                 bfa_q_deq(&rx->rxp_q, &rxp);
2730                 GET_RXQS(rxp, q0, q1);
2731                 if (rx->rcb_destroy_cbfn)
2732                         rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2733                 q0->rcb = NULL;
2734                 q0->rxp = NULL;
2735                 q0->rx = NULL;
2736                 bna_rxq_put(rx_mod, q0);
2737
2738                 if (q1) {
2739                         if (rx->rcb_destroy_cbfn)
2740                                 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2741                         q1->rcb = NULL;
2742                         q1->rxp = NULL;
2743                         q1->rx = NULL;
2744                         bna_rxq_put(rx_mod, q1);
2745                 }
2746                 rxp->rxq.slr.large = NULL;
2747                 rxp->rxq.slr.small = NULL;
2748
2749                 if (rx->ccb_destroy_cbfn)
2750                         rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2751                 rxp->cq.ccb = NULL;
2752                 rxp->rx = NULL;
2753                 bna_rxp_put(rx_mod, rxp);
2754         }
2755
2756         list_for_each(qe, &rx_mod->rx_active_q) {
2757                 if (qe == &rx->qe) {
2758                         list_del(&rx->qe);
2759                         bfa_q_qe_init(&rx->qe);
2760                         break;
2761                 }
2762         }
2763
2764         rx_mod->rid_mask &= ~BIT(rx->rid);
2765
2766         rx->bna = NULL;
2767         rx->priv = NULL;
2768         bna_rx_put(rx_mod, rx);
2769 }
2770
2771 void
2772 bna_rx_enable(struct bna_rx *rx)
2773 {
2774         if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2775                 return;
2776
2777         rx->rx_flags |= BNA_RX_F_ENABLED;
2778         if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2779                 bfa_fsm_send_event(rx, RX_E_START);
2780 }
2781
2782 void
2783 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2784                 void (*cbfn)(void *, struct bna_rx *))
2785 {
2786         if (type == BNA_SOFT_CLEANUP) {
2787                 /* h/w should not be accessed. Treat as if we are stopped */
2788                 (*cbfn)(rx->bna->bnad, rx);
2789         } else {
2790                 rx->stop_cbfn = cbfn;
2791                 rx->stop_cbarg = rx->bna->bnad;
2792
2793                 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2794
2795                 bfa_fsm_send_event(rx, RX_E_STOP);
2796         }
2797 }
2798
2799 void
2800 bna_rx_cleanup_complete(struct bna_rx *rx)
2801 {
2802         bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2803 }
2804
2805 void
2806 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2807 {
2808         struct bna_rxf *rxf = &rx->rxf;
2809
2810         if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2811                 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2812                 rxf->vlan_strip_pending = true;
2813                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2814         }
2815 }
2816
2817 void
2818 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2819 {
2820         struct bna_rxf *rxf = &rx->rxf;
2821
2822         if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2823                 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2824                 rxf->vlan_strip_pending = true;
2825                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2826         }
2827 }
2828
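/*
 * Apply promiscuous / all-multicast mode changes.  The request is
 * rejected if it conflicts with a promiscuous or default mode already
 * owned by another Rx; otherwise the RxF flags are updated and a
 * reconfiguration is triggered when hardware needs to be reprogrammed.
 */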
2829 enum bna_cb_status
2830 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2831                 enum bna_rxmode bitmask)
2832 {
2833         struct bna_rxf *rxf = &rx->rxf;
2834         int need_hw_config = 0;
2835
2836         /* Error checks */
2837
2838         if (is_promisc_enable(new_mode, bitmask)) {
2839                 /* If promisc mode is already enabled elsewhere in the system */
2840                 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2841                         (rx->bna->promisc_rid != rxf->rx->rid))
2842                         goto err_return;
2843
2844                 /* If default mode is already enabled in the system */
2845                 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2846                         goto err_return;
2847
2848                 /* Trying to enable promiscuous and default mode together */
2849                 if (is_default_enable(new_mode, bitmask))
2850                         goto err_return;
2851         }
2852
2853         if (is_default_enable(new_mode, bitmask)) {
2854                 /* If default mode is already enabled elsewhere in the system */
2855                 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2856                         (rx->bna->default_mode_rid != rxf->rx->rid))
2857                         goto err_return;
2859
2860                 /* If promiscuous mode is already enabled in the system */
2861                 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2862                         goto err_return;
2863         }
2864
2865         /* Process the commands */
2866
2867         if (is_promisc_enable(new_mode, bitmask)) {
2868                 if (bna_rxf_promisc_enable(rxf))
2869                         need_hw_config = 1;
2870         } else if (is_promisc_disable(new_mode, bitmask)) {
2871                 if (bna_rxf_promisc_disable(rxf))
2872                         need_hw_config = 1;
2873         }
2874
2875         if (is_allmulti_enable(new_mode, bitmask)) {
2876                 if (bna_rxf_allmulti_enable(rxf))
2877                         need_hw_config = 1;
2878         } else if (is_allmulti_disable(new_mode, bitmask)) {
2879                 if (bna_rxf_allmulti_disable(rxf))
2880                         need_hw_config = 1;
2881         }
2882
2883         /* Trigger h/w if needed */
2884
2885         if (need_hw_config) {
2886                 rxf->cam_fltr_cbfn = NULL;
2887                 rxf->cam_fltr_cbarg = rx->bna->bnad;
2888                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2889         }
2890
2891         return BNA_CB_SUCCESS;
2892
2893 err_return:
2894         return BNA_CB_FAIL;
2895 }
2896
2897 void
2898 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2899 {
2900         struct bna_rxf *rxf = &rx->rxf;
2901
2902         if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2903                 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2904                 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2905                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2906         }
2907 }
2908
2909 void
2910 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2911 {
2912         struct bna_rxp *rxp;
2913         struct list_head *qe;
2914
2915         list_for_each(qe, &rx->rxp_q) {
2916                 rxp = (struct bna_rxp *)qe;
2917                 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2918                 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2919         }
2920 }
2921
2922 void
2923 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2924 {
2925         int i, j;
2926
2927         for (i = 0; i < BNA_LOAD_T_MAX; i++)
2928                 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2929                         bna->rx_mod.dim_vector[i][j] = vector[i][j];
2930 }
2931
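/*
 * Dynamic interrupt moderation: bucket the packet rate observed on this
 * CQ into one of the BNA_LOAD_T_* levels, bias the lookup by the
 * small/large packet mix, and program the coalescing timeout picked
 * from the dim_vector table into the IB.
 */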
2932 void
2933 bna_rx_dim_update(struct bna_ccb *ccb)
2934 {
2935         struct bna *bna = ccb->cq->rx->bna;
2936         u32 load, bias;
2937         u32 pkt_rt, small_rt, large_rt;
2938         u8 coalescing_timeo;
2939
2940         if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2941                 (ccb->pkt_rate.large_pkt_cnt == 0))
2942                 return;
2943
2944         /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2945
2946         small_rt = ccb->pkt_rate.small_pkt_cnt;
2947         large_rt = ccb->pkt_rate.large_pkt_cnt;
2948
2949         pkt_rt = small_rt + large_rt;
2950
2951         if (pkt_rt < BNA_PKT_RATE_10K)
2952                 load = BNA_LOAD_T_LOW_4;
2953         else if (pkt_rt < BNA_PKT_RATE_20K)
2954                 load = BNA_LOAD_T_LOW_3;
2955         else if (pkt_rt < BNA_PKT_RATE_30K)
2956                 load = BNA_LOAD_T_LOW_2;
2957         else if (pkt_rt < BNA_PKT_RATE_40K)
2958                 load = BNA_LOAD_T_LOW_1;
2959         else if (pkt_rt < BNA_PKT_RATE_50K)
2960                 load = BNA_LOAD_T_HIGH_1;
2961         else if (pkt_rt < BNA_PKT_RATE_60K)
2962                 load = BNA_LOAD_T_HIGH_2;
2963         else if (pkt_rt < BNA_PKT_RATE_80K)
2964                 load = BNA_LOAD_T_HIGH_3;
2965         else
2966                 load = BNA_LOAD_T_HIGH_4;
2967
2968         if (small_rt > (large_rt << 1))
2969                 bias = 0;
2970         else
2971                 bias = 1;
2972
2973         ccb->pkt_rate.small_pkt_cnt = 0;
2974         ccb->pkt_rate.large_pkt_cnt = 0;
2975
2976         coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2977         ccb->rx_coalescing_timeo = coalescing_timeo;
2978
2979         /* Set it to IB */
2980         bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2981 }
2982
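/* Coalescing timeout values, indexed by [load][bias], used for DIM reconfiguration */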
2983 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2984         {12, 12},
2985         {6, 10},
2986         {5, 10},
2987         {4, 8},
2988         {3, 6},
2989         {3, 6},
2990         {2, 4},
2991         {1, 2},
2992 };
2993
2994 /* TX */
2995
2996 #define call_tx_stop_cbfn(tx)                                           \
2997 do {                                                                    \
2998         if ((tx)->stop_cbfn) {                                          \
2999                 void (*cbfn)(void *, struct bna_tx *);          \
3000                 void *cbarg;                                            \
3001                 cbfn = (tx)->stop_cbfn;                                 \
3002                 cbarg = (tx)->stop_cbarg;                               \
3003                 (tx)->stop_cbfn = NULL;                                 \
3004                 (tx)->stop_cbarg = NULL;                                \
3005                 cbfn(cbarg, (tx));                                      \
3006         }                                                               \
3007 } while (0)
3008
3009 #define call_tx_prio_change_cbfn(tx)                                    \
3010 do {                                                                    \
3011         if ((tx)->prio_change_cbfn) {                                   \
3012                 void (*cbfn)(struct bnad *, struct bna_tx *);   \
3013                 cbfn = (tx)->prio_change_cbfn;                          \
3014                 (tx)->prio_change_cbfn = NULL;                          \
3015                 cbfn((tx)->bna->bnad, (tx));                            \
3016         }                                                               \
3017 } while (0)
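
/*
 * Both macros above implement one-shot callbacks: the saved function pointer
 * is snapshotted and cleared before it is invoked, so each armed notification
 * fires at most once, and the callback itself may safely arm a new one.
 */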
3018
3019 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
3020 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
3021 static void bna_tx_enet_stop(struct bna_tx *tx);
3022
3023 enum bna_tx_event {
3024         TX_E_START                      = 1,
3025         TX_E_STOP                       = 2,
3026         TX_E_FAIL                       = 3,
3027         TX_E_STARTED                    = 4,
3028         TX_E_STOPPED                    = 5,
3029         TX_E_PRIO_CHANGE                = 6,
3030         TX_E_CLEANUP_DONE               = 7,
3031         TX_E_BW_UPDATE                  = 8,
3032 };
3033
3034 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
3035 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
3036 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
3037 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
3038 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
3039                         enum bna_tx_event);
3040 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3041                         enum bna_tx_event);
3042 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
3043                         enum bna_tx_event);
3044 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
3045 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
3046                         enum bna_tx_event);
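
/*
 * TX FSM at a glance (TX_E_ prefixes dropped; see the handlers below for the
 * full transition set):
 *
 *   stopped           --START---------> start_wait
 *   start_wait        --STARTED-------> started, or prio_stop_wait if a
 *                                       priority/BW change arrived in between
 *   started           --STOP----------> stop_wait
 *   started           --PRIO_CHANGE/
 *                       BW_UPDATE-----> prio_stop_wait
 *   started           --FAIL----------> failed
 *   stop_wait         --STOPPED/FAIL--> cleanup_wait
 *   cleanup_wait      --CLEANUP_DONE--> stopped
 *   prio_stop_wait    --STOPPED-------> prio_cleanup_wait
 *   prio_cleanup_wait --CLEANUP_DONE--> start_wait
 *   failed            --START---------> quiesce_wait
 *   quiesce_wait      --CLEANUP_DONE--> start_wait
 *
 * Cleanup completion is reported asynchronously through
 * bna_tx_cleanup_complete(), which raises TX_E_CLEANUP_DONE.
 */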
3047
3048 static void
3049 bna_tx_sm_stopped_entry(struct bna_tx *tx)
3050 {
3051         call_tx_stop_cbfn(tx);
3052 }
3053
3054 static void
3055 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3056 {
3057         switch (event) {
3058         case TX_E_START:
3059                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3060                 break;
3061
3062         case TX_E_STOP:
3063                 call_tx_stop_cbfn(tx);
3064                 break;
3065
3066         case TX_E_FAIL:
3067                 /* No-op */
3068                 break;
3069
3070         case TX_E_PRIO_CHANGE:
3071                 call_tx_prio_change_cbfn(tx);
3072                 break;
3073
3074         case TX_E_BW_UPDATE:
3075                 /* No-op */
3076                 break;
3077
3078         default:
3079                 bfa_sm_fault(event);
3080         }
3081 }
3082
3083 static void
3084 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
3085 {
3086         bna_bfi_tx_enet_start(tx);
3087 }
3088
3089 static void
3090 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
3091 {
3092         switch (event) {
3093         case TX_E_STOP:
3094                 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3095                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3096                 break;
3097
3098         case TX_E_FAIL:
3099                 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3100                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3101                 break;
3102
3103         case TX_E_STARTED:
3104                 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
3105                         tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
3106                                 BNA_TX_F_BW_UPDATED);
3107                         bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3108                 } else
3109                         bfa_fsm_set_state(tx, bna_tx_sm_started);
3110                 break;
3111
3112         case TX_E_PRIO_CHANGE:
3113                 tx->flags |= BNA_TX_F_PRIO_CHANGED;
3114                 break;
3115
3116         case TX_E_BW_UPDATE:
3117                 tx->flags |= BNA_TX_F_BW_UPDATED;
3118                 break;
3119
3120         default:
3121                 bfa_sm_fault(event);
3122         }
3123 }
3124
3125 static void
3126 bna_tx_sm_started_entry(struct bna_tx *tx)
3127 {
3128         struct bna_txq *txq;
3129         struct list_head                 *qe;
3130         int is_regular = (tx->type == BNA_TX_T_REGULAR);
3131
3132         list_for_each(qe, &tx->txq_q) {
3133                 txq = (struct bna_txq *)qe;
3134                 txq->tcb->priority = txq->priority;
3135                 /* Start IB */
3136                 bna_ib_start(tx->bna, &txq->ib, is_regular);
3137         }
3138         tx->tx_resume_cbfn(tx->bna->bnad, tx);
3139 }
3140
3141 static void
3142 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3143 {
3144         switch (event) {
3145         case TX_E_STOP:
3146                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3147                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3148                 bna_tx_enet_stop(tx);
3149                 break;
3150
3151         case TX_E_FAIL:
3152                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3153                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3154                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3155                 break;
3156
3157         case TX_E_PRIO_CHANGE:
3158         case TX_E_BW_UPDATE:
3159                 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3160                 break;
3161
3162         default:
3163                 bfa_sm_fault(event);
3164         }
3165 }
3166
3167 static void
3168 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3169 {
3170 }
3171
3172 static void
3173 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3174 {
3175         switch (event) {
3176         case TX_E_FAIL:
3177         case TX_E_STOPPED:
3178                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3179                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3180                 break;
3181
3182         case TX_E_STARTED:
3183                 /*
3184                  * We got here via the start_wait -> stop_wait transition
3185                  * on the TX_E_STOP event.
3186                  */
3187                 bna_tx_enet_stop(tx);
3188                 break;
3189
3190         case TX_E_PRIO_CHANGE:
3191         case TX_E_BW_UPDATE:
3192                 /* No-op */
3193                 break;
3194
3195         default:
3196                 bfa_sm_fault(event);
3197         }
3198 }
3199
3200 static void
3201 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3202 {
3203 }
3204
3205 static void
3206 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3207 {
3208         switch (event) {
3209         case TX_E_FAIL:
3210         case TX_E_PRIO_CHANGE:
3211         case TX_E_BW_UPDATE:
3212                 /* No-op */
3213                 break;
3214
3215         case TX_E_CLEANUP_DONE:
3216                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3217                 break;
3218
3219         default:
3220                 bfa_sm_fault(event);
3221         }
3222 }
3223
3224 static void
3225 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3226 {
3227         tx->tx_stall_cbfn(tx->bna->bnad, tx);
3228         bna_tx_enet_stop(tx);
3229 }
3230
3231 static void
3232 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3233 {
3234         switch (event) {
3235         case TX_E_STOP:
3236                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3237                 break;
3238
3239         case TX_E_FAIL:
3240                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3241                 call_tx_prio_change_cbfn(tx);
3242                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3243                 break;
3244
3245         case TX_E_STOPPED:
3246                 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3247                 break;
3248
3249         case TX_E_PRIO_CHANGE:
3250         case TX_E_BW_UPDATE:
3251                 /* No-op */
3252                 break;
3253
3254         default:
3255                 bfa_sm_fault(event);
3256         }
3257 }
3258
3259 static void
3260 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3261 {
3262         call_tx_prio_change_cbfn(tx);
3263         tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3264 }
3265
3266 static void
3267 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3268 {
3269         switch (event) {
3270         case TX_E_STOP:
3271                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3272                 break;
3273
3274         case TX_E_FAIL:
3275                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3276                 break;
3277
3278         case TX_E_PRIO_CHANGE:
3279         case TX_E_BW_UPDATE:
3280                 /* No-op */
3281                 break;
3282
3283         case TX_E_CLEANUP_DONE:
3284                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3285                 break;
3286
3287         default:
3288                 bfa_sm_fault(event);
3289         }
3290 }
3291
3292 static void
3293 bna_tx_sm_failed_entry(struct bna_tx *tx)
3294 {
3295 }
3296
3297 static void
3298 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3299 {
3300         switch (event) {
3301         case TX_E_START:
3302                 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3303                 break;
3304
3305         case TX_E_STOP:
3306                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3307                 break;
3308
3309         case TX_E_FAIL:
3310                 /* No-op */
3311                 break;
3312
3313         case TX_E_CLEANUP_DONE:
3314                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3315                 break;
3316
3317         default:
3318                 bfa_sm_fault(event);
3319         }
3320 }
3321
3322 static void
3323 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3324 {
3325 }
3326
3327 static void
3328 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3329 {
3330         switch (event) {
3331         case TX_E_STOP:
3332                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3333                 break;
3334
3335         case TX_E_FAIL:
3336                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3337                 break;
3338
3339         case TX_E_CLEANUP_DONE:
3340                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3341                 break;
3342
3343         case TX_E_BW_UPDATE:
3344                 /* No-op */
3345                 break;
3346
3347         default:
3348                 bfa_sm_fault(event);
3349         }
3350 }
3351
3352 static void
3353 bna_bfi_tx_enet_start(struct bna_tx *tx)
3354 {
3355         struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3356         struct bna_txq *txq = NULL;
3357         struct list_head *qe;
3358         int i;
3359
3360         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3361                 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3362         cfg_req->mh.num_entries = htons(
3363                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3364
3365         cfg_req->num_queues = tx->num_txq;
3366         for (i = 0, qe = bfa_q_first(&tx->txq_q);
3367                 i < tx->num_txq;
3368                 i++, qe = bfa_q_next(qe)) {
3369                 txq = (struct bna_txq *)qe;
3370
3371                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3372                 cfg_req->q_cfg[i].q.priority = txq->priority;
3373
3374                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3375                         txq->ib.ib_seg_host_addr.lsb;
3376                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3377                         txq->ib.ib_seg_host_addr.msb;
3378                 cfg_req->q_cfg[i].ib.intr.msix_index =
3379                         htons((u16)txq->ib.intr_vector);
3380         }
3381
3382         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3383         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3384         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3385         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3386         cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3387                                 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3388         cfg_req->ib_cfg.coalescing_timeout =
3389                         htonl((u32)txq->ib.coalescing_timeo);
3390         cfg_req->ib_cfg.inter_pkt_timeout =
3391                         htonl((u32)txq->ib.interpkt_timeo);
3392         cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3393
3394         cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3395         cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3396         cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3397         cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3398
3399         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3400                 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3401         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3402 }
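
/*
 * The TX_CFG_SET request built above carries, per TXQ, the queue page table,
 * priority and IB (interrupt block) settings, and is posted to firmware
 * through the message queue by bfa_msgq_cmd_post().  The reply is handled in
 * bna_bfi_tx_enet_start_rsp(), which copies the response out of the message
 * queue, programs the per-queue doorbell addresses and raises TX_E_STARTED.
 * The stop path below mirrors this with TX_CFG_CLR and TX_E_STOPPED.
 */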
3403
3404 static void
3405 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3406 {
3407         struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3408
3409         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3410                 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3411         req->mh.num_entries = htons(
3412                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3413         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3414                 &req->mh);
3415         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3416 }
3417
3418 static void
3419 bna_tx_enet_stop(struct bna_tx *tx)
3420 {
3421         struct bna_txq *txq;
3422         struct list_head                 *qe;
3423
3424         /* Stop IB */
3425         list_for_each(qe, &tx->txq_q) {
3426                 txq = (struct bna_txq *)qe;
3427                 bna_ib_stop(tx->bna, &txq->ib);
3428         }
3429
3430         bna_bfi_tx_enet_stop(tx);
3431 }
3432
3433 static void
3434 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3435                 struct bna_mem_descr *qpt_mem,
3436                 struct bna_mem_descr *swqpt_mem,
3437                 struct bna_mem_descr *page_mem)
3438 {
3439         u8 *kva;
3440         u64 dma;
3441         struct bna_dma_addr bna_dma;
3442         int i;
3443
3444         txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3445         txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3446         txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3447         txq->qpt.page_count = page_count;
3448         txq->qpt.page_size = page_size;
3449
3450         txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3451         txq->tcb->sw_q = page_mem->kva;
3452
3453         kva = page_mem->kva;
3454         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3455
3456         for (i = 0; i < page_count; i++) {
3457                 txq->tcb->sw_qpt[i] = kva;
3458                 kva += PAGE_SIZE;
3459
3460                 BNA_SET_DMA_ADDR(dma, &bna_dma);
3461                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3462                         bna_dma.lsb;
3463                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3464                         bna_dma.msb;
3465                 dma += PAGE_SIZE;
3466         }
3467 }
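
/*
 * The queue page table (QPT) set up above hands the hardware an array of DMA
 * addresses, one per PAGE_SIZE chunk of the TXQ ring, stored at
 * txq->qpt.kv_qpt_ptr.  Software keeps a parallel table of kernel virtual
 * addresses in tcb->sw_qpt[], with tcb->sw_q pointing at the first page, so
 * entry i of both tables describes the same page of work items.
 */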
3468
3469 static struct bna_tx *
3470 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3471 {
3472         struct list_head        *qe = NULL;
3473         struct bna_tx *tx = NULL;
3474
3475         if (list_empty(&tx_mod->tx_free_q))
3476                 return NULL;
3477         if (type == BNA_TX_T_REGULAR) {
3478                 bfa_q_deq(&tx_mod->tx_free_q, &qe);
3479         } else {
3480                 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3481         }
3482         tx = (struct bna_tx *)qe;
3483         bfa_q_qe_init(&tx->qe);
3484         tx->type = type;
3485
3486         return tx;
3487 }
3488
3489 static void
3490 bna_tx_free(struct bna_tx *tx)
3491 {
3492         struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3493         struct bna_txq *txq;
3494         struct list_head *prev_qe;
3495         struct list_head *qe;
3496
3497         while (!list_empty(&tx->txq_q)) {
3498                 bfa_q_deq(&tx->txq_q, &txq);
3499                 bfa_q_qe_init(&txq->qe);
3500                 txq->tcb = NULL;
3501                 txq->tx = NULL;
3502                 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3503         }
3504
3505         list_for_each(qe, &tx_mod->tx_active_q) {
3506                 if (qe == &tx->qe) {
3507                         list_del(&tx->qe);
3508                         bfa_q_qe_init(&tx->qe);
3509                         break;
3510                 }
3511         }
3512
3513         tx->bna = NULL;
3514         tx->priv = NULL;
3515
3516         prev_qe = NULL;
3517         list_for_each(qe, &tx_mod->tx_free_q) {
3518                 if (((struct bna_tx *)qe)->rid < tx->rid)
3519                         prev_qe = qe;
3520                 else {
3521                         break;
3522                 }
3523         }
3524
3525         if (prev_qe == NULL) {
3526                 /* This is the first entry */
3527                 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3528         } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3529                 /* This is the last entry */
3530                 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3531         } else {
3532                 /* Somewhere in the middle */
3533                 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3534                 bfa_q_prev(&tx->qe) = prev_qe;
3535                 bfa_q_next(prev_qe) = &tx->qe;
3536                 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3537         }
3538 }
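
/*
 * bna_tx_free() reinserts the TX into tx_free_q in ascending rid order.
 * Together with bna_tx_get(), which dequeues from the head for
 * BNA_TX_T_REGULAR and from the tail otherwise, this keeps regular TX
 * objects on the lowest free rids and loopback TX objects on the highest.
 */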
3539
3540 static void
3541 bna_tx_start(struct bna_tx *tx)
3542 {
3543         tx->flags |= BNA_TX_F_ENET_STARTED;
3544         if (tx->flags & BNA_TX_F_ENABLED)
3545                 bfa_fsm_send_event(tx, TX_E_START);
3546 }
3547
3548 static void
3549 bna_tx_stop(struct bna_tx *tx)
3550 {
3551         tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3552         tx->stop_cbarg = &tx->bna->tx_mod;
3553
3554         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3555         bfa_fsm_send_event(tx, TX_E_STOP);
3556 }
3557
3558 static void
3559 bna_tx_fail(struct bna_tx *tx)
3560 {
3561         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3562         bfa_fsm_send_event(tx, TX_E_FAIL);
3563 }
3564
3565 void
3566 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3567 {
3568         struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3569         struct bna_txq *txq = NULL;
3570         struct list_head *qe;
3571         int i;
3572
3573         bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3574                 sizeof(struct bfi_enet_tx_cfg_rsp));
3575
3576         tx->hw_id = cfg_rsp->hw_id;
3577
3578         for (i = 0, qe = bfa_q_first(&tx->txq_q);
3579                 i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3580                 txq = (struct bna_txq *)qe;
3581
3582                 /* Set up doorbells */
3583                 txq->tcb->i_dbell->doorbell_addr =
3584                         tx->bna->pcidev.pci_bar_kva
3585                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
3586                 txq->tcb->q_dbell =
3587                         tx->bna->pcidev.pci_bar_kva
3588                         + ntohl(cfg_rsp->q_handles[i].q_dbell);
3589                 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3590
3591                 /* Initialize producer/consumer indexes */
3592                 (*txq->tcb->hw_consumer_index) = 0;
3593                 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3594         }
3595
3596         bfa_fsm_send_event(tx, TX_E_STARTED);
3597 }
3598
3599 void
3600 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3601 {
3602         bfa_fsm_send_event(tx, TX_E_STOPPED);
3603 }
3604
3605 void
3606 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3607 {
3608         struct bna_tx *tx;
3609         struct list_head                *qe;
3610
3611         list_for_each(qe, &tx_mod->tx_active_q) {
3612                 tx = (struct bna_tx *)qe;
3613                 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3614         }
3615 }
3616
3617 void
3618 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3619 {
3620         u32 q_size;
3621         u32 page_count;
3622         struct bna_mem_info *mem_info;
3623
3624         res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3625         mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3626         mem_info->mem_type = BNA_MEM_T_KVA;
3627         mem_info->len = sizeof(struct bna_tcb);
3628         mem_info->num = num_txq;
3629
3630         q_size = txq_depth * BFI_TXQ_WI_SIZE;
3631         q_size = ALIGN(q_size, PAGE_SIZE);
3632         page_count = q_size >> PAGE_SHIFT;
3633
3634         res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3635         mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3636         mem_info->mem_type = BNA_MEM_T_DMA;
3637         mem_info->len = page_count * sizeof(struct bna_dma_addr);
3638         mem_info->num = num_txq;
3639
3640         res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3641         mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3642         mem_info->mem_type = BNA_MEM_T_KVA;
3643         mem_info->len = page_count * sizeof(void *);
3644         mem_info->num = num_txq;
3645
3646         res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3647         mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3648         mem_info->mem_type = BNA_MEM_T_DMA;
3649         mem_info->len = PAGE_SIZE * page_count;
3650         mem_info->num = num_txq;
3651
3652         res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3653         mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3654         mem_info->mem_type = BNA_MEM_T_DMA;
3655         mem_info->len = BFI_IBIDX_SIZE;
3656         mem_info->num = num_txq;
3657
3658         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3659         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3660                         BNA_INTR_T_MSIX;
3661         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3662 }
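
/*
 * Worked example of the sizing above, assuming (purely for illustration)
 * txq_depth = 2048, BFI_TXQ_WI_SIZE = 64 and PAGE_SIZE = 4096:
 * q_size = 2048 * 64 = 131072 bytes, which is already page aligned, so
 * page_count = 32.  Each TXQ then needs a 32-entry QPT
 * (32 * sizeof(struct bna_dma_addr)), a 32-pointer software QPT and
 * 32 * 4096 bytes of DMA-able page memory.
 */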
3663
3664 struct bna_tx *
3665 bna_tx_create(struct bna *bna, struct bnad *bnad,
3666                 struct bna_tx_config *tx_cfg,
3667                 const struct bna_tx_event_cbfn *tx_cbfn,
3668                 struct bna_res_info *res_info, void *priv)
3669 {
3670         struct bna_intr_info *intr_info;
3671         struct bna_tx_mod *tx_mod = &bna->tx_mod;
3672         struct bna_tx *tx;
3673         struct bna_txq *txq;
3674         struct list_head *qe;
3675         int page_count;
3676         int i;
3677
3678         intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3679         page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3680                                         PAGE_SIZE;
3681
3682         /*
3683          * Get resources
3684          */
3685
3686         if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3687                 return NULL;
3688
3689         /* Tx */
3690
3691         tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3692         if (!tx)
3693                 return NULL;
3694         tx->bna = bna;
3695         tx->priv = priv;
3696
3697         /* TxQs */
3698
3699         INIT_LIST_HEAD(&tx->txq_q);
3700         for (i = 0; i < tx_cfg->num_txq; i++) {
3701                 if (list_empty(&tx_mod->txq_free_q))
3702                         goto err_return;
3703
3704                 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3705                 bfa_q_qe_init(&txq->qe);
3706                 list_add_tail(&txq->qe, &tx->txq_q);
3707                 txq->tx = tx;
3708         }
3709
3710         /*
3711          * Initialize
3712          */
3713
3714         /* Tx */
3715
3716         tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3717         tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3718         /* The following callbacks are mandatory */
3719         tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3720         tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3721         tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3722
3723         list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3724
3725         tx->num_txq = tx_cfg->num_txq;
3726
3727         tx->flags = 0;
3728         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3729                 switch (tx->type) {
3730                 case BNA_TX_T_REGULAR:
3731                         if (!(tx->bna->tx_mod.flags &
3732                                 BNA_TX_MOD_F_ENET_LOOPBACK))
3733                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3734                         break;
3735                 case BNA_TX_T_LOOPBACK:
3736                         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3737                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3738                         break;
3739                 }
3740         }
3741
3742         /* TxQ */
3743
3744         i = 0;
3745         list_for_each(qe, &tx->txq_q) {
3746                 txq = (struct bna_txq *)qe;
3747                 txq->tcb = (struct bna_tcb *)
3748                 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3749                 txq->tx_packets = 0;
3750                 txq->tx_bytes = 0;
3751
3752                 /* IB */
3753                 txq->ib.ib_seg_host_addr.lsb =
3754                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3755                 txq->ib.ib_seg_host_addr.msb =
3756                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3757                 txq->ib.ib_seg_host_addr_kva =
3758                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3759                 txq->ib.intr_type = intr_info->intr_type;
3760                 txq->ib.intr_vector = (intr_info->num == 1) ?
3761                                         intr_info->idl[0].vector :
3762                                         intr_info->idl[i].vector;
3763                 if (intr_info->intr_type == BNA_INTR_T_INTX)
3764                         txq->ib.intr_vector = BIT(txq->ib.intr_vector);
3765                 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3766                 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3767                 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3768
3769                 /* TCB */
3770
3771                 txq->tcb->q_depth = tx_cfg->txq_depth;
3772                 txq->tcb->unmap_q = (void *)
3773                 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3774                 txq->tcb->hw_consumer_index =
3775                         (u32 *)txq->ib.ib_seg_host_addr_kva;
3776                 txq->tcb->i_dbell = &txq->ib.door_bell;
3777                 txq->tcb->intr_type = txq->ib.intr_type;
3778                 txq->tcb->intr_vector = txq->ib.intr_vector;
3779                 txq->tcb->txq = txq;
3780                 txq->tcb->bnad = bnad;
3781                 txq->tcb->id = i;
3782
3783                 /* QPT, SWQPT, Pages */
3784                 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3785                         &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3786                         &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3787                         &res_info[BNA_TX_RES_MEM_T_PAGE].
3788                                   res_u.mem_info.mdl[i]);
3789
3790                 /* Callback to bnad for setting up TCB */
3791                 if (tx->tcb_setup_cbfn)
3792                         (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3793
3794                 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3795                         txq->priority = txq->tcb->id;
3796                 else
3797                         txq->priority = tx_mod->default_prio;
3798
3799                 i++;
3800         }
3801
3802         tx->txf_vlan_id = 0;
3803
3804         bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3805
3806         tx_mod->rid_mask |= BIT(tx->rid);
3807
3808         return tx;
3809
3810 err_return:
3811         bna_tx_free(tx);
3812         return NULL;
3813 }
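
/*
 * Illustrative bring-up sketch: the order in which a caller such as bnad
 * would typically use the entry points above.  The wrapper name is
 * hypothetical, the resource allocation between bna_tx_res_req() and
 * bna_tx_create() is elided (it lives outside this file), and locking is
 * omitted for brevity.
 */
static __maybe_unused struct bna_tx *
example_tx_setup(struct bna *bna, struct bnad *bnad,
                struct bna_tx_config *tx_cfg,
                const struct bna_tx_event_cbfn *tx_cbfn,
                struct bna_res_info *res_info, void *priv)
{
        struct bna_tx *tx;

        /* Describe the memory and interrupt resources needed per TXQ */
        bna_tx_res_req(tx_cfg->num_txq, tx_cfg->txq_depth, res_info);

        /* ... caller allocates everything described in res_info ... */

        /* Create the TX object; it starts out in the stopped state */
        tx = bna_tx_create(bna, bnad, tx_cfg, tx_cbfn, res_info, priv);
        if (!tx)
                return NULL;

        /* Enable it; TX_E_START is sent once the enet side has started */
        bna_tx_enable(tx);

        return tx;
}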
3814
3815 void
3816 bna_tx_destroy(struct bna_tx *tx)
3817 {
3818         struct bna_txq *txq;
3819         struct list_head *qe;
3820
3821         list_for_each(qe, &tx->txq_q) {
3822                 txq = (struct bna_txq *)qe;
3823                 if (tx->tcb_destroy_cbfn)
3824                         (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3825         }
3826
3827         tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
3828         bna_tx_free(tx);
3829 }
3830
3831 void
3832 bna_tx_enable(struct bna_tx *tx)
3833 {
3834         if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3835                 return;
3836
3837         tx->flags |= BNA_TX_F_ENABLED;
3838
3839         if (tx->flags & BNA_TX_F_ENET_STARTED)
3840                 bfa_fsm_send_event(tx, TX_E_START);
3841 }
3842
3843 void
3844 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3845                 void (*cbfn)(void *, struct bna_tx *))
3846 {
3847         if (type == BNA_SOFT_CLEANUP) {
3848                 (*cbfn)(tx->bna->bnad, tx);
3849                 return;
3850         }
3851
3852         tx->stop_cbfn = cbfn;
3853         tx->stop_cbarg = tx->bna->bnad;
3854
3855         tx->flags &= ~BNA_TX_F_ENABLED;
3856
3857         bfa_fsm_send_event(tx, TX_E_STOP);
3858 }
3859
3860 void
3861 bna_tx_cleanup_complete(struct bna_tx *tx)
3862 {
3863         bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3864 }
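
/*
 * Illustrative teardown sketch: hard-disable a TX, wait for its stop
 * callback, then destroy it.  The example_* names and the file-scope
 * completion are hypothetical and locking is omitted; note that the stopped
 * state is only reached after the driver's tx_cleanup_cbfn handler has
 * called bna_tx_cleanup_complete().
 */
static DECLARE_COMPLETION(example_tx_stop_done);

static void __maybe_unused
example_tx_disabled(void *arg, struct bna_tx *tx)
{
        /* arg is tx->bna->bnad here (see bna_tx_disable()) */
        complete(&example_tx_stop_done);
}

static void __maybe_unused
example_tx_teardown(struct bna_tx *tx)
{
        bna_tx_disable(tx, BNA_HARD_CLEANUP, example_tx_disabled);
        wait_for_completion(&example_tx_stop_done);
        bna_tx_destroy(tx);
}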
3865
3866 static void
3867 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3868 {
3869         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3870
3871         bfa_wc_down(&tx_mod->tx_stop_wc);
3872 }
3873
3874 static void
3875 bna_tx_mod_cb_tx_stopped_all(void *arg)
3876 {
3877         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3878
3879         if (tx_mod->stop_cbfn)
3880                 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3881         tx_mod->stop_cbfn = NULL;
3882 }
3883
3884 void
3885 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3886                 struct bna_res_info *res_info)
3887 {
3888         int i;
3889
3890         tx_mod->bna = bna;
3891         tx_mod->flags = 0;
3892
3893         tx_mod->tx = (struct bna_tx *)
3894                 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3895         tx_mod->txq = (struct bna_txq *)
3896                 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3897
3898         INIT_LIST_HEAD(&tx_mod->tx_free_q);
3899         INIT_LIST_HEAD(&tx_mod->tx_active_q);
3900
3901         INIT_LIST_HEAD(&tx_mod->txq_free_q);
3902
3903         for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3904                 tx_mod->tx[i].rid = i;
3905                 bfa_q_qe_init(&tx_mod->tx[i].qe);
3906                 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3907                 bfa_q_qe_init(&tx_mod->txq[i].qe);
3908                 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3909         }
3910
3911         tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3912         tx_mod->default_prio = 0;
3913         tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3914         tx_mod->iscsi_prio = -1;
3915 }
3916
3917 void
3918 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3919 {
3920         struct list_head                *qe;
3921         int i;
3922
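        /*
         * The loops below only walk the free lists to count their entries;
         * the counts are not used.
         */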
3923         i = 0;
3924         list_for_each(qe, &tx_mod->tx_free_q)
3925                 i++;
3926
3927         i = 0;
3928         list_for_each(qe, &tx_mod->txq_free_q)
3929                 i++;
3930
3931         tx_mod->bna = NULL;
3932 }
3933
3934 void
3935 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3936 {
3937         struct bna_tx *tx;
3938         struct list_head                *qe;
3939
3940         tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3941         if (type == BNA_TX_T_LOOPBACK)
3942                 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3943
3944         list_for_each(qe, &tx_mod->tx_active_q) {
3945                 tx = (struct bna_tx *)qe;
3946                 if (tx->type == type)
3947                         bna_tx_start(tx);
3948         }
3949 }
3950
3951 void
3952 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3953 {
3954         struct bna_tx *tx;
3955         struct list_head                *qe;
3956
3957         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3958         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3959
3960         tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3961
3962         bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3963
3964         list_for_each(qe, &tx_mod->tx_active_q) {
3965                 tx = (struct bna_tx *)qe;
3966                 if (tx->type == type) {
3967                         bfa_wc_up(&tx_mod->tx_stop_wc);
3968                         bna_tx_stop(tx);
3969                 }
3970         }
3971
3972         bfa_wc_wait(&tx_mod->tx_stop_wc);
3973 }
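
/*
 * Stopping is coordinated with a wait counter: bfa_wc_init() registers
 * bna_tx_mod_cb_tx_stopped_all() as the resume callback and takes an initial
 * reference, every TX of the matching type takes another via bfa_wc_up()
 * before bna_tx_stop() is issued, and each per-TX stop callback
 * (bna_tx_mod_cb_tx_stopped()) drops one.  bfa_wc_wait() releases the
 * reference held since init, so tx_mod->stop_cbfn runs once the last TX
 * reports stopped (or right away, if no TX matched).
 */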
3974
3975 void
3976 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3977 {
3978         struct bna_tx *tx;
3979         struct list_head                *qe;
3980
3981         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3982         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3983
3984         list_for_each(qe, &tx_mod->tx_active_q) {
3985                 tx = (struct bna_tx *)qe;
3986                 bna_tx_fail(tx);
3987         }
3988 }
3989
3990 void
3991 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3992 {
3993         struct bna_txq *txq;
3994         struct list_head *qe;
3995
3996         list_for_each(qe, &tx->txq_q) {
3997                 txq = (struct bna_txq *)qe;
3998                 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3999         }
4000 }