bna: remove RXF_E_PAUSE and RXF_E_RESUME events
drivers/net/ethernet/brocade/bna/bna_tx_rx.c (cascardo/linux.git)
1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12   */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include "bna.h"
20 #include "bfi.h"
21
22 /* IB */
23 static void
24 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
25 {
26         ib->coalescing_timeo = coalescing_timeo;
27         ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
28                                 (u32)ib->coalescing_timeo, 0);
29 }
30
31 /* RXF */
32
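/*
 * Soft-reset helpers: rather than touching hardware, these only re-arm the
 * "pending" markers so that the next bna_rxf_cfg_apply() pass replays the
 * VLAN filter, VLAN strip and RSS configuration to the firmware.
 */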
33 #define bna_rxf_vlan_cfg_soft_reset(rxf)                                \
34 do {                                                                    \
35         (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;           \
36         (rxf)->vlan_strip_pending = true;                               \
37 } while (0)
38
39 #define bna_rxf_rss_cfg_soft_reset(rxf)                                 \
40 do {                                                                    \
41         if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)                  \
42                 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |           \
43                                 BNA_RSS_F_CFG_PENDING |                 \
44                                 BNA_RSS_F_STATUS_PENDING);              \
45 } while (0)
46
47 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
48 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
49 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
50 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
54                                         enum bna_cleanup_type cleanup);
55 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
56                                         enum bna_cleanup_type cleanup);
57 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
58                                         enum bna_cleanup_type cleanup);
59
60 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
61                         enum bna_rxf_event);
62 bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
63                         enum bna_rxf_event);
64 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
65                         enum bna_rxf_event);
66 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
67                         enum bna_rxf_event);
68 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
69                         enum bna_rxf_event);
70
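/*
 * RXF state machine, summarized from the handlers below (not an exhaustive
 * transition table):
 *
 *   stopped   --RXF_E_START-->   cfg_wait (or paused, if BNA_RXF_F_PAUSED)
 *   cfg_wait  --RXF_E_FW_RESP--> started, once no more config is pending
 *   started   --RXF_E_CONFIG-->  cfg_wait
 *   cfg_wait  --RXF_E_STOP-->    last_resp_wait --> stopped
 *   any state --RXF_E_FAIL-->    stopped, after resetting pending config
 */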
71 static void
72 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
73 {
74         call_rxf_stop_cbfn(rxf);
75 }
76
77 static void
78 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
79 {
80         switch (event) {
81         case RXF_E_START:
82                 if (rxf->flags & BNA_RXF_F_PAUSED) {
83                         bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
84                         call_rxf_start_cbfn(rxf);
85                 } else
86                         bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
87                 break;
88
89         case RXF_E_STOP:
90                 call_rxf_stop_cbfn(rxf);
91                 break;
92
93         case RXF_E_FAIL:
94                 /* No-op */
95                 break;
96
97         case RXF_E_CONFIG:
98                 call_rxf_cam_fltr_cbfn(rxf);
99                 break;
100
101         default:
102                 bfa_sm_fault(event);
103         }
104 }
105
106 static void
107 bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
108 {
109 }
110
111 static void
112 bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
113 {
114         switch (event) {
115         case RXF_E_STOP:
116         case RXF_E_FAIL:
117                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
118                 break;
119
120         case RXF_E_CONFIG:
121                 call_rxf_cam_fltr_cbfn(rxf);
122                 break;
123
124         default:
125                 bfa_sm_fault(event);
126         }
127 }
128
129 static void
130 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
131 {
132         if (!bna_rxf_cfg_apply(rxf)) {
133                 /* No more pending config updates */
134                 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
135         }
136 }
137
138 static void
139 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
140 {
141         switch (event) {
142         case RXF_E_STOP:
143                 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
144                 break;
145
146         case RXF_E_FAIL:
147                 bna_rxf_cfg_reset(rxf);
148                 call_rxf_start_cbfn(rxf);
149                 call_rxf_cam_fltr_cbfn(rxf);
150                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
151                 break;
152
153         case RXF_E_CONFIG:
154                 /* No-op */
155                 break;
156
157         case RXF_E_FW_RESP:
158                 if (!bna_rxf_cfg_apply(rxf)) {
159                         /* No more pending config updates */
160                         bfa_fsm_set_state(rxf, bna_rxf_sm_started);
161                 }
162                 break;
163
164         default:
165                 bfa_sm_fault(event);
166         }
167 }
168
169 static void
170 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
171 {
172         call_rxf_start_cbfn(rxf);
173         call_rxf_cam_fltr_cbfn(rxf);
174 }
175
176 static void
177 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
178 {
179         switch (event) {
180         case RXF_E_STOP:
181         case RXF_E_FAIL:
182                 bna_rxf_cfg_reset(rxf);
183                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
184                 break;
185
186         case RXF_E_CONFIG:
187                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
188                 break;
189
190         default:
191                 bfa_sm_fault(event);
192         }
193 }
194
195 static void
196 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
197 {
198 }
199
200 static void
201 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
202 {
203         switch (event) {
204         case RXF_E_FAIL:
205         case RXF_E_FW_RESP:
206                 bna_rxf_cfg_reset(rxf);
207                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
208                 break;
209
210         default:
211                 bfa_sm_fault(event);
212         }
213 }
214
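/*
 * The bna_bfi_*() request helpers that follow all share one pattern: build
 * the request in rxf->bfi_enet_cmd, fill in the message-queue header for
 * this rx id, and post it on the bna message queue.  The firmware completion
 * arrives later through the bna_bfi_rxf_*_rsp() handlers further down, which
 * feed RXF_E_FW_RESP back into the RXF state machine.
 */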
215 static void
216 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
217                 enum bfi_enet_h2i_msgs req_type)
218 {
219         struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
220
221         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
222         req->mh.num_entries = htons(
223                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
224         ether_addr_copy(req->mac_addr, mac->addr);
225         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
226                 sizeof(struct bfi_enet_ucast_req), &req->mh);
227         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
228 }
229
230 static void
231 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
232 {
233         struct bfi_enet_mcast_add_req *req =
234                 &rxf->bfi_enet_cmd.mcast_add_req;
235
236         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
237                 0, rxf->rx->rid);
238         req->mh.num_entries = htons(
239                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
240         ether_addr_copy(req->mac_addr, mac->addr);
241         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
242                 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
243         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
244 }
245
246 static void
247 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
248 {
249         struct bfi_enet_mcast_del_req *req =
250                 &rxf->bfi_enet_cmd.mcast_del_req;
251
252         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
253                 0, rxf->rx->rid);
254         req->mh.num_entries = htons(
255                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
256         req->handle = htons(handle);
257         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
258                 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
259         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
260 }
261
262 static void
263 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
264 {
265         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
266
267         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
268                 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
269         req->mh.num_entries = htons(
270                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
271         req->enable = status;
272         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
273                 sizeof(struct bfi_enet_enable_req), &req->mh);
274         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
275 }
276
277 static void
278 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
279 {
280         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
281
282         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
283                 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
284         req->mh.num_entries = htons(
285                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
286         req->enable = status;
287         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
288                 sizeof(struct bfi_enet_enable_req), &req->mh);
289         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
290 }
291
292 static void
293 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
294 {
295         struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
296         int i;
297         int j;
298
299         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
300                 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
301         req->mh.num_entries = htons(
302                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
303         req->block_idx = block_idx;
304         for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
305                 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
306                 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
307                         req->bit_mask[i] =
308                                 htonl(rxf->vlan_filter_table[j]);
309                 else
310                         req->bit_mask[i] = 0xFFFFFFFF;
311         }
312         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
313                 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
314         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
315 }
316
317 static void
318 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
319 {
320         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
321
322         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
323                 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
324         req->mh.num_entries = htons(
325                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
326         req->enable = rxf->vlan_strip_status;
327         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
328                 sizeof(struct bfi_enet_enable_req), &req->mh);
329         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
330 }
331
332 static void
333 bna_bfi_rit_cfg(struct bna_rxf *rxf)
334 {
335         struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
336
337         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
338                 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
339         req->mh.num_entries = htons(
340                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
341         req->size = htons(rxf->rit_size);
342         memcpy(&req->table[0], rxf->rit, rxf->rit_size);
343         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
344                 sizeof(struct bfi_enet_rit_req), &req->mh);
345         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
346 }
347
348 static void
349 bna_bfi_rss_cfg(struct bna_rxf *rxf)
350 {
351         struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
352         int i;
353
354         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
355                 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
356         req->mh.num_entries = htons(
357                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
358         req->cfg.type = rxf->rss_cfg.hash_type;
359         req->cfg.mask = rxf->rss_cfg.hash_mask;
360         for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
361                 req->cfg.key[i] =
362                         htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
363         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
364                 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
365         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
366 }
367
368 static void
369 bna_bfi_rss_enable(struct bna_rxf *rxf)
370 {
371         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
372
373         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
374                 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
375         req->mh.num_entries = htons(
376                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
377         req->enable = rxf->rss_status;
378         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
379                 sizeof(struct bfi_enet_enable_req), &req->mh);
380         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
381 }
382
383 /* This function gets the multicast MAC that has already been added to CAM */
384 static struct bna_mac *
385 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
386 {
387         struct bna_mac *mac;
388         struct list_head *qe;
389
390         list_for_each(qe, &rxf->mcast_active_q) {
391                 mac = (struct bna_mac *)qe;
392                 if (ether_addr_equal(mac->addr, mac_addr))
393                         return mac;
394         }
395
396         list_for_each(qe, &rxf->mcast_pending_del_q) {
397                 mac = (struct bna_mac *)qe;
398                 if (ether_addr_equal(mac->addr, mac_addr))
399                         return mac;
400         }
401
402         return NULL;
403 }
404
405 static struct bna_mcam_handle *
406 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
407 {
408         struct bna_mcam_handle *mchandle;
409         struct list_head *qe;
410
411         list_for_each(qe, &rxf->mcast_handle_q) {
412                 mchandle = (struct bna_mcam_handle *)qe;
413                 if (mchandle->handle == handle)
414                         return mchandle;
415         }
416
417         return NULL;
418 }
419
420 static void
421 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
422 {
423         struct bna_mac *mcmac;
424         struct bna_mcam_handle *mchandle;
425
426         mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
427         mchandle = bna_rxf_mchandle_get(rxf, handle);
428         if (mchandle == NULL) {
429                 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
430                 mchandle->handle = handle;
431                 mchandle->refcnt = 0;
432                 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
433         }
434         mchandle->refcnt++;
435         mcmac->handle = mchandle;
436 }
437
438 static int
439 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
440                 enum bna_cleanup_type cleanup)
441 {
442         struct bna_mcam_handle *mchandle;
443         int ret = 0;
444
445         mchandle = mac->handle;
446         if (mchandle == NULL)
447                 return ret;
448
449         mchandle->refcnt--;
450         if (mchandle->refcnt == 0) {
451                 if (cleanup == BNA_HARD_CLEANUP) {
452                         bna_bfi_mcast_del_req(rxf, mchandle->handle);
453                         ret = 1;
454                 }
455                 list_del(&mchandle->qe);
456                 bfa_q_qe_init(&mchandle->qe);
457                 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
458         }
459         mac->handle = NULL;
460
461         return ret;
462 }
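/*
 * Multicast MACs share CAM handles: each bna_mcam_handle carries a refcnt,
 * and the delete request is only posted to the firmware once the last MAC
 * using the handle is gone, and only for BNA_HARD_CLEANUP.  A return of 1
 * tells the caller that a firmware request is now outstanding.
 */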
463
464 static int
465 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
466 {
467         struct bna_mac *mac = NULL;
468         struct list_head *qe;
469         int ret;
470
471         /* First delete multicast entries to maintain the count */
472         while (!list_empty(&rxf->mcast_pending_del_q)) {
473                 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
474                 bfa_q_qe_init(qe);
475                 mac = (struct bna_mac *)qe;
476                 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
477                 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
478                 if (ret)
479                         return ret;
480         }
481
482         /* Add multicast entries */
483         if (!list_empty(&rxf->mcast_pending_add_q)) {
484                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
485                 bfa_q_qe_init(qe);
486                 mac = (struct bna_mac *)qe;
487                 list_add_tail(&mac->qe, &rxf->mcast_active_q);
488                 bna_bfi_mcast_add_req(rxf, mac);
489                 return 1;
490         }
491
492         return 0;
493 }
494
495 static int
496 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
497 {
498         u8 vlan_pending_bitmask;
499         int block_idx = 0;
500
501         if (rxf->vlan_pending_bitmask) {
502                 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
503                 while (!(vlan_pending_bitmask & 0x1)) {
504                         block_idx++;
505                         vlan_pending_bitmask >>= 1;
506                 }
507                 rxf->vlan_pending_bitmask &= ~BIT(block_idx);
508                 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
509                 return 1;
510         }
511
512         return 0;
513 }
514
515 static int
516 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
517 {
518         struct list_head *qe;
519         struct bna_mac *mac;
520         int ret;
521
522         /* Throw away delete-pending mcast entries */
523         while (!list_empty(&rxf->mcast_pending_del_q)) {
524                 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
525                 bfa_q_qe_init(qe);
526                 mac = (struct bna_mac *)qe;
527                 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
528                 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
529                 if (ret)
530                         return ret;
531         }
532
533         /* Move active mcast entries to pending_add_q */
534         while (!list_empty(&rxf->mcast_active_q)) {
535                 bfa_q_deq(&rxf->mcast_active_q, &qe);
536                 bfa_q_qe_init(qe);
537                 list_add_tail(qe, &rxf->mcast_pending_add_q);
538                 mac = (struct bna_mac *)qe;
539                 if (bna_rxf_mcast_del(rxf, mac, cleanup))
540                         return 1;
541         }
542
543         return 0;
544 }
545
546 static int
547 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
548 {
549         if (rxf->rss_pending) {
550                 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
551                         rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
552                         bna_bfi_rit_cfg(rxf);
553                         return 1;
554                 }
555
556                 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
557                         rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
558                         bna_bfi_rss_cfg(rxf);
559                         return 1;
560                 }
561
562                 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
563                         rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
564                         bna_bfi_rss_enable(rxf);
565                         return 1;
566                 }
567         }
568
569         return 0;
570 }
571
572 static int
573 bna_rxf_cfg_apply(struct bna_rxf *rxf)
574 {
575         if (bna_rxf_ucast_cfg_apply(rxf))
576                 return 1;
577
578         if (bna_rxf_mcast_cfg_apply(rxf))
579                 return 1;
580
581         if (bna_rxf_promisc_cfg_apply(rxf))
582                 return 1;
583
584         if (bna_rxf_allmulti_cfg_apply(rxf))
585                 return 1;
586
587         if (bna_rxf_vlan_cfg_apply(rxf))
588                 return 1;
589
590         if (bna_rxf_vlan_strip_cfg_apply(rxf))
591                 return 1;
592
593         if (bna_rxf_rss_cfg_apply(rxf))
594                 return 1;
595
596         return 0;
597 }
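/*
 * Each *_cfg_apply() helper posts at most one firmware request and returns 1
 * when it does so.  bna_rxf_cfg_apply() therefore applies pending
 * configuration one request at a time; the cfg_wait state re-invokes it on
 * every RXF_E_FW_RESP until everything has been flushed out.
 */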
598
599 static void
600 bna_rxf_cfg_reset(struct bna_rxf *rxf)
601 {
602         bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
603         bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
604         bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
605         bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
606         bna_rxf_vlan_cfg_soft_reset(rxf);
607         bna_rxf_rss_cfg_soft_reset(rxf);
608 }
609
610 static void
611 bna_rit_init(struct bna_rxf *rxf, int rit_size)
612 {
613         struct bna_rx *rx = rxf->rx;
614         struct bna_rxp *rxp;
615         struct list_head *qe;
616         int offset = 0;
617
618         rxf->rit_size = rit_size;
619         list_for_each(qe, &rx->rxp_q) {
620                 rxp = (struct bna_rxp *)qe;
621                 rxf->rit[offset] = rxp->cq.ccb->id;
622                 offset++;
623         }
625 }
626
627 void
628 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
629 {
630         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
631 }
632
633 void
634 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
635                         struct bfi_msgq_mhdr *msghdr)
636 {
637         struct bfi_enet_rsp *rsp =
638                 container_of(msghdr, struct bfi_enet_rsp, mh);
639
640         if (rsp->error) {
641                 /* Clear ucast from cache */
642                 rxf->ucast_active_set = 0;
643         }
644
645         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
646 }
647
648 void
649 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
650                         struct bfi_msgq_mhdr *msghdr)
651 {
652         struct bfi_enet_mcast_add_req *req =
653                 &rxf->bfi_enet_cmd.mcast_add_req;
654         struct bfi_enet_mcast_add_rsp *rsp =
655                 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
656
657         bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
658                 ntohs(rsp->handle));
659         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
660 }
661
662 static void
663 bna_rxf_init(struct bna_rxf *rxf,
664                 struct bna_rx *rx,
665                 struct bna_rx_config *q_config,
666                 struct bna_res_info *res_info)
667 {
668         rxf->rx = rx;
669
670         INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
671         INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
672         rxf->ucast_pending_set = 0;
673         rxf->ucast_active_set = 0;
674         INIT_LIST_HEAD(&rxf->ucast_active_q);
675         rxf->ucast_pending_mac = NULL;
676
677         INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
678         INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
679         INIT_LIST_HEAD(&rxf->mcast_active_q);
680         INIT_LIST_HEAD(&rxf->mcast_handle_q);
681
682         if (q_config->paused)
683                 rxf->flags |= BNA_RXF_F_PAUSED;
684
685         rxf->rit = (u8 *)
686                 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
687         bna_rit_init(rxf, q_config->num_paths);
688
689         rxf->rss_status = q_config->rss_status;
690         if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
691                 rxf->rss_cfg = q_config->rss_config;
692                 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
693                 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
694                 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
695         }
696
697         rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
698         memset(rxf->vlan_filter_table, 0,
699                         (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
700         rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
701         rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
702
703         rxf->vlan_strip_status = q_config->vlan_strip_status;
704
705         bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
706 }
707
708 static void
709 bna_rxf_uninit(struct bna_rxf *rxf)
710 {
711         struct bna_mac *mac;
712
713         rxf->ucast_pending_set = 0;
714         rxf->ucast_active_set = 0;
715
716         while (!list_empty(&rxf->ucast_pending_add_q)) {
717                 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
718                 bfa_q_qe_init(&mac->qe);
719                 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
720         }
721
722         if (rxf->ucast_pending_mac) {
723                 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
724                 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
725                                     rxf->ucast_pending_mac);
726                 rxf->ucast_pending_mac = NULL;
727         }
728
729         while (!list_empty(&rxf->mcast_pending_add_q)) {
730                 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
731                 bfa_q_qe_init(&mac->qe);
732                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
733         }
734
735         rxf->rxmode_pending = 0;
736         rxf->rxmode_pending_bitmask = 0;
737         if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
738                 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
739         if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
740                 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
741
742         rxf->rss_pending = 0;
743         rxf->vlan_strip_pending = false;
744
745         rxf->flags = 0;
746
747         rxf->rx = NULL;
748 }
749
750 static void
751 bna_rx_cb_rxf_started(struct bna_rx *rx)
752 {
753         bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
754 }
755
756 static void
757 bna_rxf_start(struct bna_rxf *rxf)
758 {
759         rxf->start_cbfn = bna_rx_cb_rxf_started;
760         rxf->start_cbarg = rxf->rx;
761         bfa_fsm_send_event(rxf, RXF_E_START);
762 }
763
764 static void
765 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
766 {
767         bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
768 }
769
770 static void
771 bna_rxf_stop(struct bna_rxf *rxf)
772 {
773         rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
774         rxf->stop_cbarg = rxf->rx;
775         bfa_fsm_send_event(rxf, RXF_E_STOP);
776 }
777
778 static void
779 bna_rxf_fail(struct bna_rxf *rxf)
780 {
781         bfa_fsm_send_event(rxf, RXF_E_FAIL);
782 }
783
784 enum bna_cb_status
785 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
786 {
787         struct bna_rxf *rxf = &rx->rxf;
788
789         if (rxf->ucast_pending_mac == NULL) {
790                 rxf->ucast_pending_mac =
791                         bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
792                 if (rxf->ucast_pending_mac == NULL)
793                         return BNA_CB_UCAST_CAM_FULL;
794                 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
795         }
796
797         ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
798         rxf->ucast_pending_set = 1;
799         rxf->cam_fltr_cbfn = NULL;
800         rxf->cam_fltr_cbarg = rx->bna->bnad;
801
802         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
803
804         return BNA_CB_SUCCESS;
805 }
806
807 enum bna_cb_status
808 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
809                  void (*cbfn)(struct bnad *, struct bna_rx *))
810 {
811         struct bna_rxf *rxf = &rx->rxf;
812         struct bna_mac *mac;
813
814         /* Check if already added or pending addition */
815         if (bna_mac_find(&rxf->mcast_active_q, addr) ||
816                 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
817                 if (cbfn)
818                         cbfn(rx->bna->bnad, rx);
819                 return BNA_CB_SUCCESS;
820         }
821
822         mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
823         if (mac == NULL)
824                 return BNA_CB_MCAST_LIST_FULL;
825         bfa_q_qe_init(&mac->qe);
826         ether_addr_copy(mac->addr, addr);
827         list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
828
829         rxf->cam_fltr_cbfn = cbfn;
830         rxf->cam_fltr_cbarg = rx->bna->bnad;
831
832         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
833
834         return BNA_CB_SUCCESS;
835 }
836
837 enum bna_cb_status
838 bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
839 {
840         struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
841         struct bna_rxf *rxf = &rx->rxf;
842         struct list_head list_head;
843         struct list_head *qe;
844         u8 *mcaddr;
845         struct bna_mac *mac, *del_mac;
846         int i;
847
848         /* Purge the pending_add_q */
849         while (!list_empty(&rxf->ucast_pending_add_q)) {
850                 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
851                 bfa_q_qe_init(qe);
852                 mac = (struct bna_mac *)qe;
853                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
854         }
855
856         /* Schedule active_q entries for deletion */
857         while (!list_empty(&rxf->ucast_active_q)) {
858                 bfa_q_deq(&rxf->ucast_active_q, &qe);
859                 mac = (struct bna_mac *)qe;
860                 bfa_q_qe_init(&mac->qe);
861
862                 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
863                 memcpy(del_mac, mac, sizeof(*del_mac));
864                 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
865                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
866         }
867
868         /* Allocate nodes */
869         INIT_LIST_HEAD(&list_head);
870         for (i = 0, mcaddr = uclist; i < count; i++) {
871                 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
872                 if (mac == NULL)
873                         goto err_return;
874                 bfa_q_qe_init(&mac->qe);
875                 ether_addr_copy(mac->addr, mcaddr);
876                 list_add_tail(&mac->qe, &list_head);
877                 mcaddr += ETH_ALEN;
878         }
879
880         /* Add the new entries */
881         while (!list_empty(&list_head)) {
882                 bfa_q_deq(&list_head, &qe);
883                 mac = (struct bna_mac *)qe;
884                 bfa_q_qe_init(&mac->qe);
885                 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
886         }
887
888         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
889
890         return BNA_CB_SUCCESS;
891
892 err_return:
893         while (!list_empty(&list_head)) {
894                 bfa_q_deq(&list_head, &qe);
895                 mac = (struct bna_mac *)qe;
896                 bfa_q_qe_init(&mac->qe);
897                 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
898         }
899
900         return BNA_CB_UCAST_CAM_FULL;
901 }
902
903 enum bna_cb_status
904 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
905 {
906         struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
907         struct bna_rxf *rxf = &rx->rxf;
908         struct list_head list_head;
909         struct list_head *qe;
910         u8 *mcaddr;
911         struct bna_mac *mac, *del_mac;
912         int i;
913
914         /* Purge the pending_add_q */
915         while (!list_empty(&rxf->mcast_pending_add_q)) {
916                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
917                 bfa_q_qe_init(qe);
918                 mac = (struct bna_mac *)qe;
919                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
920         }
921
922         /* Schedule active_q entries for deletion */
923         while (!list_empty(&rxf->mcast_active_q)) {
924                 bfa_q_deq(&rxf->mcast_active_q, &qe);
925                 mac = (struct bna_mac *)qe;
926                 bfa_q_qe_init(&mac->qe);
927
928                 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
929
930                 memcpy(del_mac, mac, sizeof(*del_mac));
931                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
932                 mac->handle = NULL;
933                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
934         }
935
936         /* Allocate nodes */
937         INIT_LIST_HEAD(&list_head);
938         for (i = 0, mcaddr = mclist; i < count; i++) {
939                 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
940                 if (mac == NULL)
941                         goto err_return;
942                 bfa_q_qe_init(&mac->qe);
943                 ether_addr_copy(mac->addr, mcaddr);
944                 list_add_tail(&mac->qe, &list_head);
945
946                 mcaddr += ETH_ALEN;
947         }
948
949         /* Add the new entries */
950         while (!list_empty(&list_head)) {
951                 bfa_q_deq(&list_head, &qe);
952                 mac = (struct bna_mac *)qe;
953                 bfa_q_qe_init(&mac->qe);
954                 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
955         }
956
957         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
958
959         return BNA_CB_SUCCESS;
960
961 err_return:
962         while (!list_empty(&list_head)) {
963                 bfa_q_deq(&list_head, &qe);
964                 mac = (struct bna_mac *)qe;
965                 bfa_q_qe_init(&mac->qe);
966                 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
967         }
968
969         return BNA_CB_MCAST_LIST_FULL;
970 }
971
972 void
973 bna_rx_mcast_delall(struct bna_rx *rx)
974 {
975         struct bna_rxf *rxf = &rx->rxf;
976         struct list_head *qe;
977         struct bna_mac *mac, *del_mac;
978         int need_hw_config = 0;
979
980         /* Purge all entries from pending_add_q */
981         while (!list_empty(&rxf->mcast_pending_add_q)) {
982                 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
983                 mac = (struct bna_mac *)qe;
984                 bfa_q_qe_init(&mac->qe);
985                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
986         }
987
988         /* Schedule all entries in active_q for deletion */
989         while (!list_empty(&rxf->mcast_active_q)) {
990                 bfa_q_deq(&rxf->mcast_active_q, &qe);
991                 mac = (struct bna_mac *)qe;
992                 bfa_q_qe_init(&mac->qe);
993
994                 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
995
996                 memcpy(del_mac, mac, sizeof(*del_mac));
997                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
998                 mac->handle = NULL;
999                 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1000                 need_hw_config = 1;
1001         }
1002
1003         if (need_hw_config)
1004                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1005 }
1006
1007 void
1008 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1009 {
1010         struct bna_rxf *rxf = &rx->rxf;
1011         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1012         int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
1013         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1014
1015         rxf->vlan_filter_table[index] |= bit;
1016         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1017                 rxf->vlan_pending_bitmask |= BIT(group_id);
1018                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1019         }
1020 }
1021
1022 void
1023 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1024 {
1025         struct bna_rxf *rxf = &rx->rxf;
1026         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1027         int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
1028         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1029
1030         rxf->vlan_filter_table[index] &= ~bit;
1031         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1032                 rxf->vlan_pending_bitmask |= BIT(group_id);
1033                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1034         }
1035 }
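/*
 * In the two helpers above, vlan_id is split three ways: a word index and a
 * bit within rxf->vlan_filter_table, plus a block group id that marks which
 * slice of the table must be re-sent to the firmware.  Assuming the table's
 * 32-bit words (see the sizing in bna_rxf_init()), vlan_id 100 would land in
 * word 3, bit 4.
 */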
1036
1037 static int
1038 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1039 {
1040         struct bna_mac *mac = NULL;
1041         struct list_head *qe;
1042
1043         /* Delete MAC addresses previously added */
1044         if (!list_empty(&rxf->ucast_pending_del_q)) {
1045                 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1046                 bfa_q_qe_init(qe);
1047                 mac = (struct bna_mac *)qe;
1048                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1049                 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
1050                 return 1;
1051         }
1052
1053         /* Set default unicast MAC */
1054         if (rxf->ucast_pending_set) {
1055                 rxf->ucast_pending_set = 0;
1056                 ether_addr_copy(rxf->ucast_active_mac.addr,
1057                                 rxf->ucast_pending_mac->addr);
1058                 rxf->ucast_active_set = 1;
1059                 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1060                         BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1061                 return 1;
1062         }
1063
1064         /* Add additional MAC entries */
1065         if (!list_empty(&rxf->ucast_pending_add_q)) {
1066                 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1067                 bfa_q_qe_init(qe);
1068                 mac = (struct bna_mac *)qe;
1069                 list_add_tail(&mac->qe, &rxf->ucast_active_q);
1070                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1071                 return 1;
1072         }
1073
1074         return 0;
1075 }
1076
1077 static int
1078 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1079 {
1080         struct list_head *qe;
1081         struct bna_mac *mac;
1082
1083         /* Throw away delete-pending ucast entries */
1084         while (!list_empty(&rxf->ucast_pending_del_q)) {
1085                 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1086                 bfa_q_qe_init(qe);
1087                 mac = (struct bna_mac *)qe;
1088                 if (cleanup == BNA_SOFT_CLEANUP)
1089                         bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1090                                             mac);
1091                 else {
1092                         bna_bfi_ucast_req(rxf, mac,
1093                                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1094                         bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1095                                             mac);
1096                         return 1;
1097                 }
1098         }
1099
1100         /* Move active ucast entries to pending_add_q */
1101         while (!list_empty(&rxf->ucast_active_q)) {
1102                 bfa_q_deq(&rxf->ucast_active_q, &qe);
1103                 bfa_q_qe_init(qe);
1104                 list_add_tail(qe, &rxf->ucast_pending_add_q);
1105                 if (cleanup == BNA_HARD_CLEANUP) {
1106                         mac = (struct bna_mac *)qe;
1107                         bna_bfi_ucast_req(rxf, mac,
1108                                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1109                         return 1;
1110                 }
1111         }
1112
1113         if (rxf->ucast_active_set) {
1114                 rxf->ucast_pending_set = 1;
1115                 rxf->ucast_active_set = 0;
1116                 if (cleanup == BNA_HARD_CLEANUP) {
1117                         bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1118                                 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1119                         return 1;
1120                 }
1121         }
1122
1123         return 0;
1124 }
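/*
 * Cleanup semantics, as used by the *_cfg_reset() helpers: BNA_SOFT_CLEANUP
 * only requeues the configuration locally (so it is re-applied on the next
 * start), while BNA_HARD_CLEANUP additionally posts delete/clear requests to
 * the firmware, one per call, before returning 1.
 */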
1125
1126 static int
1127 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1128 {
1129         struct bna *bna = rxf->rx->bna;
1130
1131         /* Enable/disable promiscuous mode */
1132         if (is_promisc_enable(rxf->rxmode_pending,
1133                                 rxf->rxmode_pending_bitmask)) {
1134                 /* move promisc configuration from pending -> active */
1135                 promisc_inactive(rxf->rxmode_pending,
1136                                 rxf->rxmode_pending_bitmask);
1137                 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1138                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1139                 return 1;
1140         } else if (is_promisc_disable(rxf->rxmode_pending,
1141                                 rxf->rxmode_pending_bitmask)) {
1142                 /* move promisc configuration from pending -> active */
1143                 promisc_inactive(rxf->rxmode_pending,
1144                                 rxf->rxmode_pending_bitmask);
1145                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1146                 bna->promisc_rid = BFI_INVALID_RID;
1147                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1148                 return 1;
1149         }
1150
1151         return 0;
1152 }
1153
1154 static int
1155 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1156 {
1157         struct bna *bna = rxf->rx->bna;
1158
1159         /* Clear pending promisc mode disable */
1160         if (is_promisc_disable(rxf->rxmode_pending,
1161                                 rxf->rxmode_pending_bitmask)) {
1162                 promisc_inactive(rxf->rxmode_pending,
1163                                 rxf->rxmode_pending_bitmask);
1164                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1165                 bna->promisc_rid = BFI_INVALID_RID;
1166                 if (cleanup == BNA_HARD_CLEANUP) {
1167                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1168                         return 1;
1169                 }
1170         }
1171
1172         /* Move promisc mode config from active -> pending */
1173         if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1174                 promisc_enable(rxf->rxmode_pending,
1175                                 rxf->rxmode_pending_bitmask);
1176                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1177                 if (cleanup == BNA_HARD_CLEANUP) {
1178                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1179                         return 1;
1180                 }
1181         }
1182
1183         return 0;
1184 }
1185
1186 static int
1187 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1188 {
1189         /* Enable/disable allmulti mode */
1190         if (is_allmulti_enable(rxf->rxmode_pending,
1191                                 rxf->rxmode_pending_bitmask)) {
1192                 /* move allmulti configuration from pending -> active */
1193                 allmulti_inactive(rxf->rxmode_pending,
1194                                 rxf->rxmode_pending_bitmask);
1195                 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1196                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1197                 return 1;
1198         } else if (is_allmulti_disable(rxf->rxmode_pending,
1199                                         rxf->rxmode_pending_bitmask)) {
1200                 /* move allmulti configuration from pending -> active */
1201                 allmulti_inactive(rxf->rxmode_pending,
1202                                 rxf->rxmode_pending_bitmask);
1203                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1204                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1205                 return 1;
1206         }
1207
1208         return 0;
1209 }
1210
1211 static int
1212 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1213 {
1214         /* Clear pending allmulti mode disable */
1215         if (is_allmulti_disable(rxf->rxmode_pending,
1216                                 rxf->rxmode_pending_bitmask)) {
1217                 allmulti_inactive(rxf->rxmode_pending,
1218                                 rxf->rxmode_pending_bitmask);
1219                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1220                 if (cleanup == BNA_HARD_CLEANUP) {
1221                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1222                         return 1;
1223                 }
1224         }
1225
1226         /* Move allmulti mode config from active -> pending */
1227         if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1228                 allmulti_enable(rxf->rxmode_pending,
1229                                 rxf->rxmode_pending_bitmask);
1230                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1231                 if (cleanup == BNA_HARD_CLEANUP) {
1232                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1233                         return 1;
1234                 }
1235         }
1236
1237         return 0;
1238 }
1239
1240 static int
1241 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1242 {
1243         struct bna *bna = rxf->rx->bna;
1244         int ret = 0;
1245
1246         if (is_promisc_enable(rxf->rxmode_pending,
1247                                 rxf->rxmode_pending_bitmask) ||
1248                 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1249                 /* Do nothing if pending enable or already enabled */
1250         } else if (is_promisc_disable(rxf->rxmode_pending,
1251                                         rxf->rxmode_pending_bitmask)) {
1252                 /* Turn off pending disable command */
1253                 promisc_inactive(rxf->rxmode_pending,
1254                         rxf->rxmode_pending_bitmask);
1255         } else {
1256                 /* Schedule enable */
1257                 promisc_enable(rxf->rxmode_pending,
1258                                 rxf->rxmode_pending_bitmask);
1259                 bna->promisc_rid = rxf->rx->rid;
1260                 ret = 1;
1261         }
1262
1263         return ret;
1264 }
1265
1266 static int
1267 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1268 {
1269         struct bna *bna = rxf->rx->bna;
1270         int ret = 0;
1271
1272         if (is_promisc_disable(rxf->rxmode_pending,
1273                                 rxf->rxmode_pending_bitmask) ||
1274                 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1275                 /* Do nothing if pending disable or already disabled */
1276         } else if (is_promisc_enable(rxf->rxmode_pending,
1277                                         rxf->rxmode_pending_bitmask)) {
1278                 /* Turn off pending enable command */
1279                 promisc_inactive(rxf->rxmode_pending,
1280                                 rxf->rxmode_pending_bitmask);
1281                 bna->promisc_rid = BFI_INVALID_RID;
1282         } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1283                 /* Schedule disable */
1284                 promisc_disable(rxf->rxmode_pending,
1285                                 rxf->rxmode_pending_bitmask);
1286                 ret = 1;
1287         }
1288
1289         return ret;
1290 }
1291
1292 static int
1293 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1294 {
1295         int ret = 0;
1296
1297         if (is_allmulti_enable(rxf->rxmode_pending,
1298                         rxf->rxmode_pending_bitmask) ||
1299                         (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1300                 /* Do nothing if pending enable or already enabled */
1301         } else if (is_allmulti_disable(rxf->rxmode_pending,
1302                                         rxf->rxmode_pending_bitmask)) {
1303                 /* Turn off pending disable command */
1304                 allmulti_inactive(rxf->rxmode_pending,
1305                         rxf->rxmode_pending_bitmask);
1306         } else {
1307                 /* Schedule enable */
1308                 allmulti_enable(rxf->rxmode_pending,
1309                                 rxf->rxmode_pending_bitmask);
1310                 ret = 1;
1311         }
1312
1313         return ret;
1314 }
1315
1316 static int
1317 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1318 {
1319         int ret = 0;
1320
1321         if (is_allmulti_disable(rxf->rxmode_pending,
1322                                 rxf->rxmode_pending_bitmask) ||
1323                 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1324                 /* Do nothing if pending disable or already disabled */
1325         } else if (is_allmulti_enable(rxf->rxmode_pending,
1326                                         rxf->rxmode_pending_bitmask)) {
1327                 /* Turn off pending enable command */
1328                 allmulti_inactive(rxf->rxmode_pending,
1329                                 rxf->rxmode_pending_bitmask);
1330         } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1331                 /* Schedule disable */
1332                 allmulti_disable(rxf->rxmode_pending,
1333                                 rxf->rxmode_pending_bitmask);
1334                 ret = 1;
1335         }
1336
1337         return ret;
1338 }
1339
1340 static int
1341 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1342 {
1343         if (rxf->vlan_strip_pending) {
1344                 rxf->vlan_strip_pending = false;
1345                 bna_bfi_vlan_strip_enable(rxf);
1346                 return 1;
1347         }
1348
1349         return 0;
1350 }
1351
1352 /* RX */
1353
1354 #define BNA_GET_RXQS(qcfg)      (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1355         (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1356
1357 #define SIZE_TO_PAGES(size)     (((size) >> PAGE_SHIFT) + ((((size) &\
1358         (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
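/* SIZE_TO_PAGES() rounds size up to whole pages, i.e. DIV_ROUND_UP(size, PAGE_SIZE). */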
1359
1360 #define call_rx_stop_cbfn(rx)                                           \
1361 do {                                                                \
1362         if ((rx)->stop_cbfn) {                                          \
1363                 void (*cbfn)(void *, struct bna_rx *);    \
1364                 void *cbarg;                                        \
1365                 cbfn = (rx)->stop_cbfn;                          \
1366                 cbarg = (rx)->stop_cbarg;                              \
1367                 (rx)->stop_cbfn = NULL;                                 \
1368                 (rx)->stop_cbarg = NULL;                                \
1369                 cbfn(cbarg, rx);                                        \
1370         }                                                              \
1371 } while (0)
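/*
 * call_rx_stop_cbfn() clears rx->stop_cbfn/stop_cbarg before invoking the
 * callback, so the stop completion can fire at most once even if the macro
 * is reached again from another state.
 */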
1372
1373 #define call_rx_stall_cbfn(rx)                                          \
1374 do {                                                                    \
1375         if ((rx)->rx_stall_cbfn)                                        \
1376                 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));             \
1377 } while (0)
1378
1379 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt)                        \
1380 do {                                                                    \
1381         struct bna_dma_addr cur_q_addr =                                \
1382                 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));      \
1383         (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;        \
1384         (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;        \
1385         (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;              \
1386         (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;              \
1387         (bfi_q)->pages = htons((u16)(bna_qpt)->page_count);     \
1388         (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1389 } while (0)
1390
1391 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1392 static void bna_rx_enet_stop(struct bna_rx *rx);
1393 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1394
1395 bfa_fsm_state_decl(bna_rx, stopped,
1396         struct bna_rx, enum bna_rx_event);
1397 bfa_fsm_state_decl(bna_rx, start_wait,
1398         struct bna_rx, enum bna_rx_event);
1399 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1400         struct bna_rx, enum bna_rx_event);
1401 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1402         struct bna_rx, enum bna_rx_event);
1403 bfa_fsm_state_decl(bna_rx, started,
1404         struct bna_rx, enum bna_rx_event);
1405 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1406         struct bna_rx, enum bna_rx_event);
1407 bfa_fsm_state_decl(bna_rx, stop_wait,
1408         struct bna_rx, enum bna_rx_event);
1409 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1410         struct bna_rx, enum bna_rx_event);
1411 bfa_fsm_state_decl(bna_rx, failed,
1412         struct bna_rx, enum bna_rx_event);
1413 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1414         struct bna_rx, enum bna_rx_event);
1415
1416 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1417 {
1418         call_rx_stop_cbfn(rx);
1419 }
1420
1421 static void bna_rx_sm_stopped(struct bna_rx *rx,
1422                                 enum bna_rx_event event)
1423 {
1424         switch (event) {
1425         case RX_E_START:
1426                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1427                 break;
1428
1429         case RX_E_STOP:
1430                 call_rx_stop_cbfn(rx);
1431                 break;
1432
1433         case RX_E_FAIL:
1434                 /* no-op */
1435                 break;
1436
1437         default:
1438                 bfa_sm_fault(event);
1439                 break;
1440         }
1441 }
1442
1443 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1444 {
1445         bna_bfi_rx_enet_start(rx);
1446 }
1447
1448 static void
1449 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1450 {
1451 }
1452
1453 static void
1454 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1455 {
1456         switch (event) {
1457         case RX_E_FAIL:
1458         case RX_E_STOPPED:
1459                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1460                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1461                 break;
1462
1463         case RX_E_STARTED:
1464                 bna_rx_enet_stop(rx);
1465                 break;
1466
1467         default:
1468                 bfa_sm_fault(event);
1469                 break;
1470         }
1471 }
1472
1473 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1474                                 enum bna_rx_event event)
1475 {
1476         switch (event) {
1477         case RX_E_STOP:
1478                 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1479                 break;
1480
1481         case RX_E_FAIL:
1482                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1483                 break;
1484
1485         case RX_E_STARTED:
1486                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1487                 break;
1488
1489         default:
1490                 bfa_sm_fault(event);
1491                 break;
1492         }
1493 }
1494
1495 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1496 {
1497         rx->rx_post_cbfn(rx->bna->bnad, rx);
1498         bna_rxf_start(&rx->rxf);
1499 }
1500
1501 static void
1502 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1503 {
1504 }
1505
1506 static void
1507 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1508 {
1509         switch (event) {
1510         case RX_E_FAIL:
1511                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1512                 bna_rxf_fail(&rx->rxf);
1513                 call_rx_stall_cbfn(rx);
1514                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1515                 break;
1516
1517         case RX_E_RXF_STARTED:
1518                 bna_rxf_stop(&rx->rxf);
1519                 break;
1520
1521         case RX_E_RXF_STOPPED:
1522                 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1523                 call_rx_stall_cbfn(rx);
1524                 bna_rx_enet_stop(rx);
1525                 break;
1526
1527         default:
1528                 bfa_sm_fault(event);
1529                 break;
1530         }
1531
1532 }
1533
1534 static void
1535 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1536 {
1537 }
1538
1539 static void
1540 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1541 {
1542         switch (event) {
1543         case RX_E_FAIL:
1544         case RX_E_STOPPED:
1545                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1546                 break;
1547
1548         case RX_E_STARTED:
1549                 bna_rx_enet_stop(rx);
1550                 break;
1551
1552         default:
1553                 bfa_sm_fault(event);
1554         }
1555 }
1556
1557 static void
1558 bna_rx_sm_started_entry(struct bna_rx *rx)
1559 {
1560         struct bna_rxp *rxp;
1561         struct list_head *qe_rxp;
1562         int is_regular = (rx->type == BNA_RX_T_REGULAR);
1563
1564         /* Start IB */
1565         list_for_each(qe_rxp, &rx->rxp_q) {
1566                 rxp = (struct bna_rxp *)qe_rxp;
1567                 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1568         }
1569
1570         bna_ethport_cb_rx_started(&rx->bna->ethport);
1571 }
1572
1573 static void
1574 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1575 {
1576         switch (event) {
1577         case RX_E_STOP:
1578                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1579                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1580                 bna_rxf_stop(&rx->rxf);
1581                 break;
1582
1583         case RX_E_FAIL:
1584                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1585                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1586                 bna_rxf_fail(&rx->rxf);
1587                 call_rx_stall_cbfn(rx);
1588                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1589                 break;
1590
1591         default:
1592                 bfa_sm_fault(event);
1593                 break;
1594         }
1595 }
1596
1597 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1598                                 enum bna_rx_event event)
1599 {
1600         switch (event) {
1601         case RX_E_STOP:
1602                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1603                 break;
1604
1605         case RX_E_FAIL:
1606                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1607                 bna_rxf_fail(&rx->rxf);
1608                 call_rx_stall_cbfn(rx);
1609                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1610                 break;
1611
1612         case RX_E_RXF_STARTED:
1613                 bfa_fsm_set_state(rx, bna_rx_sm_started);
1614                 break;
1615
1616         default:
1617                 bfa_sm_fault(event);
1618                 break;
1619         }
1620 }
1621
1622 static void
1623 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1624 {
1625 }
1626
1627 static void
1628 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1629 {
1630         switch (event) {
1631         case RX_E_FAIL:
1632         case RX_E_RXF_STOPPED:
1633                 /* No-op */
1634                 break;
1635
1636         case RX_E_CLEANUP_DONE:
1637                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1638                 break;
1639
1640         default:
1641                 bfa_sm_fault(event);
1642                 break;
1643         }
1644 }
1645
1646 static void
1647 bna_rx_sm_failed_entry(struct bna_rx *rx)
1648 {
1649 }
1650
1651 static void
1652 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1653 {
1654         switch (event) {
1655         case RX_E_START:
1656                 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1657                 break;
1658
1659         case RX_E_STOP:
1660                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1661                 break;
1662
1663         case RX_E_FAIL:
1664         case RX_E_RXF_STARTED:
1665         case RX_E_RXF_STOPPED:
1666                 /* No-op */
1667                 break;
1668
1669         case RX_E_CLEANUP_DONE:
1670                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1671                 break;
1672
1673         default:
1674                 bfa_sm_fault(event);
1675                 break;
1676         }
}
1677
1678 static void
1679 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1680 {
1681 }
1682
1683 static void
1684 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1685 {
1686         switch (event) {
1687         case RX_E_STOP:
1688                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1689                 break;
1690
1691         case RX_E_FAIL:
1692                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1693                 break;
1694
1695         case RX_E_CLEANUP_DONE:
1696                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1697                 break;
1698
1699         default:
1700                 bfa_sm_fault(event);
1701                 break;
1702         }
1703 }
1704
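/*
 * Build a BFI_ENET_H2I_RX_CFG_SET_REQ for this Rx: one queue set per
 * path (large/single RxQ, optional small RxQ, CQ and IB index segment
 * address) plus the IB interrupt and coalescing settings, then post it
 * on the message queue.
 */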
1705 static void
1706 bna_bfi_rx_enet_start(struct bna_rx *rx)
1707 {
1708         struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1709         struct bna_rxp *rxp = NULL;
1710         struct bna_rxq *q0 = NULL, *q1 = NULL;
1711         struct list_head *rxp_qe;
1712         int i;
1713
1714         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1715                 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1716         cfg_req->mh.num_entries = htons(
1717                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1718
1719         cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1720         cfg_req->num_queue_sets = rx->num_paths;
1721         for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1722                 i < rx->num_paths;
1723                 i++, rxp_qe = bfa_q_next(rxp_qe)) {
1724                 rxp = (struct bna_rxp *)rxp_qe;
1725
1726                 GET_RXQS(rxp, q0, q1);
1727                 switch (rxp->type) {
1728                 case BNA_RXP_SLR:
1729                 case BNA_RXP_HDS:
1730                         /* Small RxQ */
1731                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1732                                                 &q1->qpt);
1733                         cfg_req->q_cfg[i].qs.rx_buffer_size =
1734                                 htons((u16)q1->buffer_size);
1735                         /* Fall through */
1736
1737                 case BNA_RXP_SINGLE:
1738                         /* Large/Single RxQ */
1739                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1740                                                 &q0->qpt);
1741                         if (q0->multi_buffer)
1742                                 /* Multi-buffer is enabled by allocating
1743                                  * a new Rx with a new set of resources;
1744                                  * q0->buffer_size should already be
1745                                  * initialized to the fragment size.
1746                                  */
1747                                 cfg_req->rx_cfg.multi_buffer =
1748                                         BNA_STATUS_T_ENABLED;
1749                         else
1750                                 q0->buffer_size =
1751                                         bna_enet_mtu_get(&rx->bna->enet);
1752                         cfg_req->q_cfg[i].ql.rx_buffer_size =
1753                                 htons((u16)q0->buffer_size);
1754                         break;
1755
1756                 default:
1757                         BUG_ON(1);
1758                 }
1759
1760                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1761                                         &rxp->cq.qpt);
1762
1763                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1764                         rxp->cq.ib.ib_seg_host_addr.lsb;
1765                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1766                         rxp->cq.ib.ib_seg_host_addr.msb;
1767                 cfg_req->q_cfg[i].ib.intr.msix_index =
1768                         htons((u16)rxp->cq.ib.intr_vector);
1769         }
1770
1771         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1772         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1773         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1774         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1775         cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1776                                 ? BNA_STATUS_T_ENABLED :
1777                                 BNA_STATUS_T_DISABLED;
1778         cfg_req->ib_cfg.coalescing_timeout =
1779                         htonl((u32)rxp->cq.ib.coalescing_timeo);
1780         cfg_req->ib_cfg.inter_pkt_timeout =
1781                         htonl((u32)rxp->cq.ib.interpkt_timeo);
1782         cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1783
1784         switch (rxp->type) {
1785         case BNA_RXP_SLR:
1786                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1787                 break;
1788
1789         case BNA_RXP_HDS:
1790                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1791                 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1792                 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1793                 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1794                 break;
1795
1796         case BNA_RXP_SINGLE:
1797                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1798                 break;
1799
1800         default:
1801                 BUG_ON(1);
1802         }
1803         cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1804
1805         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1806                 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1807         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1808 }
1809
1810 static void
1811 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1812 {
1813         struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1814
1815         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1816                 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1817         req->mh.num_entries = htons(
1818                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1819         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1820                 &req->mh);
1821         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1822 }
1823
1824 static void
1825 bna_rx_enet_stop(struct bna_rx *rx)
1826 {
1827         struct bna_rxp *rxp;
1828         struct list_head                 *qe_rxp;
1829
1830         /* Stop IB */
1831         list_for_each(qe_rxp, &rx->rxp_q) {
1832                 rxp = (struct bna_rxp *)qe_rxp;
1833                 bna_ib_stop(rx->bna, &rxp->cq.ib);
1834         }
1835
1836         bna_bfi_rx_enet_stop(rx);
1837 }
1838
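/*
 * Quick feasibility check: the Rx module must still have a free Rx,
 * an RxP for every path and one RxQ per path (two per path for
 * SLR/HDS rxp types).
 */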
1839 static int
1840 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1841 {
1842         if ((rx_mod->rx_free_count == 0) ||
1843                 (rx_mod->rxp_free_count == 0) ||
1844                 (rx_mod->rxq_free_count == 0))
1845                 return 0;
1846
1847         if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1848                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1849                         (rx_mod->rxq_free_count < rx_cfg->num_paths))
1850                                 return 0;
1851         } else {
1852                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1853                         (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1854                         return 0;
1855         }
1856
1857         return 1;
1858 }
1859
1860 static struct bna_rxq *
1861 bna_rxq_get(struct bna_rx_mod *rx_mod)
1862 {
1863         struct bna_rxq *rxq = NULL;
1864         struct list_head        *qe = NULL;
1865
1866         bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1867         rx_mod->rxq_free_count--;
1868         rxq = (struct bna_rxq *)qe;
1869         bfa_q_qe_init(&rxq->qe);
1870
1871         return rxq;
1872 }
1873
1874 static void
1875 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1876 {
1877         bfa_q_qe_init(&rxq->qe);
1878         list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1879         rx_mod->rxq_free_count++;
1880 }
1881
1882 static struct bna_rxp *
1883 bna_rxp_get(struct bna_rx_mod *rx_mod)
1884 {
1885         struct list_head        *qe = NULL;
1886         struct bna_rxp *rxp = NULL;
1887
1888         bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1889         rx_mod->rxp_free_count--;
1890         rxp = (struct bna_rxp *)qe;
1891         bfa_q_qe_init(&rxp->qe);
1892
1893         return rxp;
1894 }
1895
1896 static void
1897 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1898 {
1899         bfa_q_qe_init(&rxp->qe);
1900         list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1901         rx_mod->rxp_free_count++;
1902 }
1903
1904 static struct bna_rx *
1905 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1906 {
1907         struct list_head        *qe = NULL;
1908         struct bna_rx *rx = NULL;
1909
1910         if (type == BNA_RX_T_REGULAR) {
1911                 bfa_q_deq(&rx_mod->rx_free_q, &qe);
1912         } else
1913                 bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1914
1915         rx_mod->rx_free_count--;
1916         rx = (struct bna_rx *)qe;
1917         bfa_q_qe_init(&rx->qe);
1918         list_add_tail(&rx->qe, &rx_mod->rx_active_q);
1919         rx->type = type;
1920
1921         return rx;
1922 }
1923
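/*
 * Return an Rx to the free list, keeping the list sorted by rid so
 * that bna_rx_get() hands out the lowest rid from the head for regular
 * Rx and the highest rid from the tail for loopback Rx.
 */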
1924 static void
1925 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1926 {
1927         struct list_head *prev_qe = NULL;
1928         struct list_head *qe;
1929
1930         bfa_q_qe_init(&rx->qe);
1931
1932         list_for_each(qe, &rx_mod->rx_free_q) {
1933                 if (((struct bna_rx *)qe)->rid < rx->rid)
1934                         prev_qe = qe;
1935                 else
1936                         break;
1937         }
1938
1939         if (prev_qe == NULL) {
1940                 /* This is the first entry */
1941                 bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
1942         } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
1943                 /* This is the last entry */
1944                 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
1945         } else {
1946                 /* Somewhere in the middle */
1947                 bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
1948                 bfa_q_prev(&rx->qe) = prev_qe;
1949                 bfa_q_next(prev_qe) = &rx->qe;
1950                 bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
1951         }
1952
1953         rx_mod->rx_free_count++;
1954 }
1955
1956 static void
1957 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1958                 struct bna_rxq *q1)
1959 {
1960         switch (rxp->type) {
1961         case BNA_RXP_SINGLE:
1962                 rxp->rxq.single.only = q0;
1963                 rxp->rxq.single.reserved = NULL;
1964                 break;
1965         case BNA_RXP_SLR:
1966                 rxp->rxq.slr.large = q0;
1967                 rxp->rxq.slr.small = q1;
1968                 break;
1969         case BNA_RXP_HDS:
1970                 rxp->rxq.hds.data = q0;
1971                 rxp->rxq.hds.hdr = q1;
1972                 break;
1973         default:
1974                 break;
1975         }
1976 }
1977
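/*
 * Populate an RxQ queue page table: store the DMA address of each
 * PAGE_SIZE chunk of queue memory in the hardware QPT and keep the
 * matching kernel virtual addresses in the shadow (sw) QPT. The same
 * scheme is used for the CQ in bna_rxp_cqpt_setup() below.
 */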
1978 static void
1979 bna_rxq_qpt_setup(struct bna_rxq *rxq,
1980                 struct bna_rxp *rxp,
1981                 u32 page_count,
1982                 u32 page_size,
1983                 struct bna_mem_descr *qpt_mem,
1984                 struct bna_mem_descr *swqpt_mem,
1985                 struct bna_mem_descr *page_mem)
1986 {
1987         u8 *kva;
1988         u64 dma;
1989         struct bna_dma_addr bna_dma;
1990         int     i;
1991
1992         rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1993         rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1994         rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1995         rxq->qpt.page_count = page_count;
1996         rxq->qpt.page_size = page_size;
1997
1998         rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1999         rxq->rcb->sw_q = page_mem->kva;
2000
2001         kva = page_mem->kva;
2002         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2003
2004         for (i = 0; i < rxq->qpt.page_count; i++) {
2005                 rxq->rcb->sw_qpt[i] = kva;
2006                 kva += PAGE_SIZE;
2007
2008                 BNA_SET_DMA_ADDR(dma, &bna_dma);
2009                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2010                         bna_dma.lsb;
2011                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2012                         bna_dma.msb;
2013                 dma += PAGE_SIZE;
2014         }
2015 }
2016
2017 static void
2018 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
2019                 u32 page_count,
2020                 u32 page_size,
2021                 struct bna_mem_descr *qpt_mem,
2022                 struct bna_mem_descr *swqpt_mem,
2023                 struct bna_mem_descr *page_mem)
2024 {
2025         u8 *kva;
2026         u64 dma;
2027         struct bna_dma_addr bna_dma;
2028         int     i;
2029
2030         rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2031         rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2032         rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2033         rxp->cq.qpt.page_count = page_count;
2034         rxp->cq.qpt.page_size = page_size;
2035
2036         rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2037         rxp->cq.ccb->sw_q = page_mem->kva;
2038
2039         kva = page_mem->kva;
2040         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2041
2042         for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2043                 rxp->cq.ccb->sw_qpt[i] = kva;
2044                 kva += PAGE_SIZE;
2045
2046                 BNA_SET_DMA_ADDR(dma, &bna_dma);
2047                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2048                         bna_dma.lsb;
2049                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2050                         bna_dma.msb;
2051                 dma += PAGE_SIZE;
2052         }
2053 }
2054
2055 static void
2056 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2057 {
2058         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2059
2060         bfa_wc_down(&rx_mod->rx_stop_wc);
2061 }
2062
2063 static void
2064 bna_rx_mod_cb_rx_stopped_all(void *arg)
2065 {
2066         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2067
2068         if (rx_mod->stop_cbfn)
2069                 rx_mod->stop_cbfn(&rx_mod->bna->enet);
2070         rx_mod->stop_cbfn = NULL;
2071 }
2072
2073 static void
2074 bna_rx_start(struct bna_rx *rx)
2075 {
2076         rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2077         if (rx->rx_flags & BNA_RX_F_ENABLED)
2078                 bfa_fsm_send_event(rx, RX_E_START);
2079 }
2080
2081 static void
2082 bna_rx_stop(struct bna_rx *rx)
2083 {
2084         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2085         if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2086                 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2087         else {
2088                 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2089                 rx->stop_cbarg = &rx->bna->rx_mod;
2090                 bfa_fsm_send_event(rx, RX_E_STOP);
2091         }
2092 }
2093
2094 static void
2095 bna_rx_fail(struct bna_rx *rx)
2096 {
2097         /* Indicate that Enet is no longer started and fail the Rx */
2098         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2099         bfa_fsm_send_event(rx, RX_E_FAIL);
2100 }
2101
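/*
 * Module-level start/stop/fail fan out to every active Rx of the given
 * type. bna_rx_mod_stop() counts the individual stops with a wait
 * counter so that bna_enet_cb_rx_stopped() runs only after every
 * matching Rx has stopped.
 */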
2102 void
2103 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2104 {
2105         struct bna_rx *rx;
2106         struct list_head *qe;
2107
2108         rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2109         if (type == BNA_RX_T_LOOPBACK)
2110                 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2111
2112         list_for_each(qe, &rx_mod->rx_active_q) {
2113                 rx = (struct bna_rx *)qe;
2114                 if (rx->type == type)
2115                         bna_rx_start(rx);
2116         }
2117 }
2118
2119 void
2120 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2121 {
2122         struct bna_rx *rx;
2123         struct list_head *qe;
2124
2125         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2126         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2127
2128         rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2129
2130         bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2131
2132         list_for_each(qe, &rx_mod->rx_active_q) {
2133                 rx = (struct bna_rx *)qe;
2134                 if (rx->type == type) {
2135                         bfa_wc_up(&rx_mod->rx_stop_wc);
2136                         bna_rx_stop(rx);
2137                 }
2138         }
2139
2140         bfa_wc_wait(&rx_mod->rx_stop_wc);
2141 }
2142
2143 void
2144 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2145 {
2146         struct bna_rx *rx;
2147         struct list_head *qe;
2148
2149         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2150         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2151
2152         list_for_each(qe, &rx_mod->rx_active_q) {
2153                 rx = (struct bna_rx *)qe;
2154                 bna_rx_fail(rx);
2155         }
2156 }
2157
2158 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2159                         struct bna_res_info *res_info)
2160 {
2161         int     index;
2162         struct bna_rx *rx_ptr;
2163         struct bna_rxp *rxp_ptr;
2164         struct bna_rxq *rxq_ptr;
2165
2166         rx_mod->bna = bna;
2167         rx_mod->flags = 0;
2168
2169         rx_mod->rx = (struct bna_rx *)
2170                 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2171         rx_mod->rxp = (struct bna_rxp *)
2172                 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2173         rx_mod->rxq = (struct bna_rxq *)
2174                 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2175
2176         /* Initialize the queues */
2177         INIT_LIST_HEAD(&rx_mod->rx_free_q);
2178         rx_mod->rx_free_count = 0;
2179         INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2180         rx_mod->rxq_free_count = 0;
2181         INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2182         rx_mod->rxp_free_count = 0;
2183         INIT_LIST_HEAD(&rx_mod->rx_active_q);
2184
2185         /* Build RX queues */
2186         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2187                 rx_ptr = &rx_mod->rx[index];
2188
2189                 bfa_q_qe_init(&rx_ptr->qe);
2190                 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2191                 rx_ptr->bna = NULL;
2192                 rx_ptr->rid = index;
2193                 rx_ptr->stop_cbfn = NULL;
2194                 rx_ptr->stop_cbarg = NULL;
2195
2196                 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2197                 rx_mod->rx_free_count++;
2198         }
2199
2200         /* build RX-path queue */
2201         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2202                 rxp_ptr = &rx_mod->rxp[index];
2203                 bfa_q_qe_init(&rxp_ptr->qe);
2204                 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2205                 rx_mod->rxp_free_count++;
2206         }
2207
2208         /* build RXQ queue */
2209         for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2210                 rxq_ptr = &rx_mod->rxq[index];
2211                 bfa_q_qe_init(&rxq_ptr->qe);
2212                 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2213                 rx_mod->rxq_free_count++;
2214         }
2215 }
2216
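/*
 * Tear down the Rx module. The loops below only walk the free lists
 * and count their entries; the counts are not used afterwards.
 */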
2217 void
2218 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2219 {
2220         struct list_head                *qe;
2221         int i;
2222
2223         i = 0;
2224         list_for_each(qe, &rx_mod->rx_free_q)
2225                 i++;
2226
2227         i = 0;
2228         list_for_each(qe, &rx_mod->rxp_free_q)
2229                 i++;
2230
2231         i = 0;
2232         list_for_each(qe, &rx_mod->rxq_free_q)
2233                 i++;
2234
2235         rx_mod->bna = NULL;
2236 }
2237
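/*
 * Handle the firmware response to the Rx config request: record the
 * hardware queue ids, map the CQ and RxQ doorbells off the PCI BAR,
 * reset the producer/consumer indexes and send RX_E_STARTED to the FSM.
 */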
2238 void
2239 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2240 {
2241         struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2242         struct bna_rxp *rxp = NULL;
2243         struct bna_rxq *q0 = NULL, *q1 = NULL;
2244         struct list_head *rxp_qe;
2245         int i;
2246
2247         bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2248                 sizeof(struct bfi_enet_rx_cfg_rsp));
2249
2250         rx->hw_id = cfg_rsp->hw_id;
2251
2252         for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2253                 i < rx->num_paths;
2254                 i++, rxp_qe = bfa_q_next(rxp_qe)) {
2255                 rxp = (struct bna_rxp *)rxp_qe;
2256                 GET_RXQS(rxp, q0, q1);
2257
2258                 /* Setup doorbells */
2259                 rxp->cq.ccb->i_dbell->doorbell_addr =
2260                         rx->bna->pcidev.pci_bar_kva
2261                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
2262                 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2263                 q0->rcb->q_dbell =
2264                         rx->bna->pcidev.pci_bar_kva
2265                         + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2266                 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2267                 if (q1) {
2268                         q1->rcb->q_dbell =
2269                         rx->bna->pcidev.pci_bar_kva
2270                         + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2271                         q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2272                 }
2273
2274                 /* Initialize producer/consumer indexes */
2275                 (*rxp->cq.ccb->hw_producer_index) = 0;
2276                 rxp->cq.ccb->producer_index = 0;
2277                 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2278                 if (q1)
2279                         q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2280         }
2281
2282         bfa_fsm_send_event(rx, RX_E_STARTED);
2283 }
2284
2285 void
2286 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2287 {
2288         bfa_fsm_send_event(rx, RX_E_STOPPED);
2289 }
2290
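/*
 * Compute the memory and interrupt resources needed for an Rx with the
 * given config. Queue depths are rounded up to a power of two (e.g. a
 * q0_depth of 1000 becomes 1024 entries), queue sizes are page aligned
 * to derive per-path page counts, and header-queue resources are only
 * requested for non-SINGLE rxp types.
 */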
2291 void
2292 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2293 {
2294         u32 cq_size, hq_size, dq_size;
2295         u32 cpage_count, hpage_count, dpage_count;
2296         struct bna_mem_info *mem_info;
2297         u32 cq_depth;
2298         u32 hq_depth;
2299         u32 dq_depth;
2300
2301         dq_depth = q_cfg->q0_depth;
2302         hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2303         cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
2304
2305         cq_size = cq_depth * BFI_CQ_WI_SIZE;
2306         cq_size = ALIGN(cq_size, PAGE_SIZE);
2307         cpage_count = SIZE_TO_PAGES(cq_size);
2308
2309         dq_depth = roundup_pow_of_two(dq_depth);
2310         dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2311         dq_size = ALIGN(dq_size, PAGE_SIZE);
2312         dpage_count = SIZE_TO_PAGES(dq_size);
2313
2314         if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2315                 hq_depth = roundup_pow_of_two(hq_depth);
2316                 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2317                 hq_size = ALIGN(hq_size, PAGE_SIZE);
2318                 hpage_count = SIZE_TO_PAGES(hq_size);
2319         } else
2320                 hpage_count = 0;
2321
2322         res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2323         mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2324         mem_info->mem_type = BNA_MEM_T_KVA;
2325         mem_info->len = sizeof(struct bna_ccb);
2326         mem_info->num = q_cfg->num_paths;
2327
2328         res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2329         mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2330         mem_info->mem_type = BNA_MEM_T_KVA;
2331         mem_info->len = sizeof(struct bna_rcb);
2332         mem_info->num = BNA_GET_RXQS(q_cfg);
2333
2334         res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2335         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2336         mem_info->mem_type = BNA_MEM_T_DMA;
2337         mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2338         mem_info->num = q_cfg->num_paths;
2339
2340         res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2341         mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2342         mem_info->mem_type = BNA_MEM_T_KVA;
2343         mem_info->len = cpage_count * sizeof(void *);
2344         mem_info->num = q_cfg->num_paths;
2345
2346         res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2347         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2348         mem_info->mem_type = BNA_MEM_T_DMA;
2349         mem_info->len = PAGE_SIZE * cpage_count;
2350         mem_info->num = q_cfg->num_paths;
2351
2352         res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2353         mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2354         mem_info->mem_type = BNA_MEM_T_DMA;
2355         mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2356         mem_info->num = q_cfg->num_paths;
2357
2358         res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2359         mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2360         mem_info->mem_type = BNA_MEM_T_KVA;
2361         mem_info->len = dpage_count * sizeof(void *);
2362         mem_info->num = q_cfg->num_paths;
2363
2364         res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2365         mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2366         mem_info->mem_type = BNA_MEM_T_DMA;
2367         mem_info->len = PAGE_SIZE * dpage_count;
2368         mem_info->num = q_cfg->num_paths;
2369
2370         res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2371         mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2372         mem_info->mem_type = BNA_MEM_T_DMA;
2373         mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2374         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2375
2376         res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2377         mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2378         mem_info->mem_type = BNA_MEM_T_KVA;
2379         mem_info->len = hpage_count * sizeof(void *);
2380         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2381
2382         res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2383         mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2384         mem_info->mem_type = BNA_MEM_T_DMA;
2385         mem_info->len = PAGE_SIZE * hpage_count;
2386         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2387
2388         res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2389         mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2390         mem_info->mem_type = BNA_MEM_T_DMA;
2391         mem_info->len = BFI_IBIDX_SIZE;
2392         mem_info->num = q_cfg->num_paths;
2393
2394         res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2395         mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2396         mem_info->mem_type = BNA_MEM_T_KVA;
2397         mem_info->len = BFI_ENET_RSS_RIT_MAX;
2398         mem_info->num = 1;
2399
2400         res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2401         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2402         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2403 }
2404
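/*
 * Assemble an Rx from the pre-allocated module resources: claim an Rx,
 * one RxP per path and one or two RxQs per RxP, wire up the RCBs, CCB,
 * IB index segment and queue page tables, initialize the RxF, and
 * leave the FSM in stopped; the Rx only starts once it is enabled and
 * the enet has been started.
 */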
2405 struct bna_rx *
2406 bna_rx_create(struct bna *bna, struct bnad *bnad,
2407                 struct bna_rx_config *rx_cfg,
2408                 const struct bna_rx_event_cbfn *rx_cbfn,
2409                 struct bna_res_info *res_info,
2410                 void *priv)
2411 {
2412         struct bna_rx_mod *rx_mod = &bna->rx_mod;
2413         struct bna_rx *rx;
2414         struct bna_rxp *rxp;
2415         struct bna_rxq *q0;
2416         struct bna_rxq *q1;
2417         struct bna_intr_info *intr_info;
2418         struct bna_mem_descr *hqunmap_mem;
2419         struct bna_mem_descr *dqunmap_mem;
2420         struct bna_mem_descr *ccb_mem;
2421         struct bna_mem_descr *rcb_mem;
2422         struct bna_mem_descr *cqpt_mem;
2423         struct bna_mem_descr *cswqpt_mem;
2424         struct bna_mem_descr *cpage_mem;
2425         struct bna_mem_descr *hqpt_mem;
2426         struct bna_mem_descr *dqpt_mem;
2427         struct bna_mem_descr *hsqpt_mem;
2428         struct bna_mem_descr *dsqpt_mem;
2429         struct bna_mem_descr *hpage_mem;
2430         struct bna_mem_descr *dpage_mem;
2431         u32 dpage_count, hpage_count;
2432         u32 hq_idx, dq_idx, rcb_idx;
2433         u32 cq_depth, i;
2434         u32 page_count;
2435
2436         if (!bna_rx_res_check(rx_mod, rx_cfg))
2437                 return NULL;
2438
2439         intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2440         ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2441         rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2442         dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2443         hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2444         cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2445         cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2446         cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2447         hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2448         dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2449         hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2450         dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2451         hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2452         dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2453
2454         page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2455                         PAGE_SIZE;
2456
2457         dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2458                         PAGE_SIZE;
2459
2460         hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2461                         PAGE_SIZE;
2462
2463         rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2464         rx->bna = bna;
2465         rx->rx_flags = 0;
2466         INIT_LIST_HEAD(&rx->rxp_q);
2467         rx->stop_cbfn = NULL;
2468         rx->stop_cbarg = NULL;
2469         rx->priv = priv;
2470
2471         rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2472         rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2473         rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2474         rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2475         rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2476         /* Following callbacks are mandatory */
2477         rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2478         rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2479
2480         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2481                 switch (rx->type) {
2482                 case BNA_RX_T_REGULAR:
2483                         if (!(rx->bna->rx_mod.flags &
2484                                 BNA_RX_MOD_F_ENET_LOOPBACK))
2485                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2486                         break;
2487                 case BNA_RX_T_LOOPBACK:
2488                         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2489                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2490                         break;
2491                 }
2492         }
2493
2494         rx->num_paths = rx_cfg->num_paths;
2495         for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2496                         i < rx->num_paths; i++) {
2497                 rxp = bna_rxp_get(rx_mod);
2498                 list_add_tail(&rxp->qe, &rx->rxp_q);
2499                 rxp->type = rx_cfg->rxp_type;
2500                 rxp->rx = rx;
2501                 rxp->cq.rx = rx;
2502
2503                 q0 = bna_rxq_get(rx_mod);
2504                 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2505                         q1 = NULL;
2506                 else
2507                         q1 = bna_rxq_get(rx_mod);
2508
2509                 if (1 == intr_info->num)
2510                         rxp->vector = intr_info->idl[0].vector;
2511                 else
2512                         rxp->vector = intr_info->idl[i].vector;
2513
2514                 /* Setup IB */
2515
2516                 rxp->cq.ib.ib_seg_host_addr.lsb =
2517                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2518                 rxp->cq.ib.ib_seg_host_addr.msb =
2519                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2520                 rxp->cq.ib.ib_seg_host_addr_kva =
2521                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2522                 rxp->cq.ib.intr_type = intr_info->intr_type;
2523                 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2524                         rxp->cq.ib.intr_vector = rxp->vector;
2525                 else
2526                         rxp->cq.ib.intr_vector = BIT(rxp->vector);
2527                 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2528                 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2529                 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2530
2531                 bna_rxp_add_rxqs(rxp, q0, q1);
2532
2533                 /* Setup large Q */
2534
2535                 q0->rx = rx;
2536                 q0->rxp = rxp;
2537
2538                 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2539                 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2540                 rcb_idx++; dq_idx++;
2541                 q0->rcb->q_depth = rx_cfg->q0_depth;
2542                 q0->q_depth = rx_cfg->q0_depth;
2543                 q0->multi_buffer = rx_cfg->q0_multi_buf;
2544                 q0->buffer_size = rx_cfg->q0_buf_size;
2545                 q0->num_vecs = rx_cfg->q0_num_vecs;
2546                 q0->rcb->rxq = q0;
2547                 q0->rcb->bnad = bna->bnad;
2548                 q0->rcb->id = 0;
2549                 q0->rx_packets = q0->rx_bytes = 0;
2550                 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2551
2552                 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2553                         &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2554
2555                 if (rx->rcb_setup_cbfn)
2556                         rx->rcb_setup_cbfn(bnad, q0->rcb);
2557
2558                 /* Setup small Q */
2559
2560                 if (q1) {
2561                         q1->rx = rx;
2562                         q1->rxp = rxp;
2563
2564                         q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2565                         q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2566                         rcb_idx++; hq_idx++;
2567                         q1->rcb->q_depth = rx_cfg->q1_depth;
2568                         q1->q_depth = rx_cfg->q1_depth;
2569                         q1->multi_buffer = BNA_STATUS_T_DISABLED;
2570                         q1->num_vecs = 1;
2571                         q1->rcb->rxq = q1;
2572                         q1->rcb->bnad = bna->bnad;
2573                         q1->rcb->id = 1;
2574                         q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2575                                         rx_cfg->hds_config.forced_offset
2576                                         : rx_cfg->q1_buf_size;
2577                         q1->rx_packets = q1->rx_bytes = 0;
2578                         q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2579
2580                         bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2581                                 &hqpt_mem[i], &hsqpt_mem[i],
2582                                 &hpage_mem[i]);
2583
2584                         if (rx->rcb_setup_cbfn)
2585                                 rx->rcb_setup_cbfn(bnad, q1->rcb);
2586                 }
2587
2588                 /* Setup CQ */
2589
2590                 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2591                 cq_depth = rx_cfg->q0_depth +
2592                         ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2593                          0 : rx_cfg->q1_depth);
2594                 /* If multi-buffer is enabled, the sum of q0_depth
2595                  * and q1_depth need not be a power of 2.
2596                  */
2597                 cq_depth = roundup_pow_of_two(cq_depth);
2598                 rxp->cq.ccb->q_depth = cq_depth;
2599                 rxp->cq.ccb->cq = &rxp->cq;
2600                 rxp->cq.ccb->rcb[0] = q0->rcb;
2601                 q0->rcb->ccb = rxp->cq.ccb;
2602                 if (q1) {
2603                         rxp->cq.ccb->rcb[1] = q1->rcb;
2604                         q1->rcb->ccb = rxp->cq.ccb;
2605                 }
2606                 rxp->cq.ccb->hw_producer_index =
2607                         (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2608                 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2609                 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2610                 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2611                 rxp->cq.ccb->rx_coalescing_timeo =
2612                         rxp->cq.ib.coalescing_timeo;
2613                 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2614                 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2615                 rxp->cq.ccb->bnad = bna->bnad;
2616                 rxp->cq.ccb->id = i;
2617
2618                 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2619                         &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2620
2621                 if (rx->ccb_setup_cbfn)
2622                         rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2623         }
2624
2625         rx->hds_cfg = rx_cfg->hds_config;
2626
2627         bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2628
2629         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2630
2631         rx_mod->rid_mask |= BIT(rx->rid);
2632
2633         return rx;
2634 }
2635
2636 void
2637 bna_rx_destroy(struct bna_rx *rx)
2638 {
2639         struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2640         struct bna_rxq *q0 = NULL;
2641         struct bna_rxq *q1 = NULL;
2642         struct bna_rxp *rxp;
2643         struct list_head *qe;
2644
2645         bna_rxf_uninit(&rx->rxf);
2646
2647         while (!list_empty(&rx->rxp_q)) {
2648                 bfa_q_deq(&rx->rxp_q, &rxp);
2649                 GET_RXQS(rxp, q0, q1);
2650                 if (rx->rcb_destroy_cbfn)
2651                         rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2652                 q0->rcb = NULL;
2653                 q0->rxp = NULL;
2654                 q0->rx = NULL;
2655                 bna_rxq_put(rx_mod, q0);
2656
2657                 if (q1) {
2658                         if (rx->rcb_destroy_cbfn)
2659                                 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2660                         q1->rcb = NULL;
2661                         q1->rxp = NULL;
2662                         q1->rx = NULL;
2663                         bna_rxq_put(rx_mod, q1);
2664                 }
2665                 rxp->rxq.slr.large = NULL;
2666                 rxp->rxq.slr.small = NULL;
2667
2668                 if (rx->ccb_destroy_cbfn)
2669                         rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2670                 rxp->cq.ccb = NULL;
2671                 rxp->rx = NULL;
2672                 bna_rxp_put(rx_mod, rxp);
2673         }
2674
2675         list_for_each(qe, &rx_mod->rx_active_q) {
2676                 if (qe == &rx->qe) {
2677                         list_del(&rx->qe);
2678                         bfa_q_qe_init(&rx->qe);
2679                         break;
2680                 }
2681         }
2682
2683         rx_mod->rid_mask &= ~BIT(rx->rid);
2684
2685         rx->bna = NULL;
2686         rx->priv = NULL;
2687         bna_rx_put(rx_mod, rx);
2688 }
2689
2690 void
2691 bna_rx_enable(struct bna_rx *rx)
2692 {
2693         if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2694                 return;
2695
2696         rx->rx_flags |= BNA_RX_F_ENABLED;
2697         if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2698                 bfa_fsm_send_event(rx, RX_E_START);
2699 }
2700
2701 void
2702 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2703                 void (*cbfn)(void *, struct bna_rx *))
2704 {
2705         if (type == BNA_SOFT_CLEANUP) {
2706                 /* h/w should not be accessed. Treat it as if we are stopped */
2707                 (*cbfn)(rx->bna->bnad, rx);
2708         } else {
2709                 rx->stop_cbfn = cbfn;
2710                 rx->stop_cbarg = rx->bna->bnad;
2711
2712                 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2713
2714                 bfa_fsm_send_event(rx, RX_E_STOP);
2715         }
2716 }
2717
2718 void
2719 bna_rx_cleanup_complete(struct bna_rx *rx)
2720 {
2721         bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2722 }
2723
2724 void
2725 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2726 {
2727         struct bna_rxf *rxf = &rx->rxf;
2728
2729         if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2730                 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2731                 rxf->vlan_strip_pending = true;
2732                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2733         }
2734 }
2735
2736 void
2737 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2738 {
2739         struct bna_rxf *rxf = &rx->rxf;
2740
2741         if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2742                 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2743                 rxf->vlan_strip_pending = true;
2744                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2745         }
2746 }
2747
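/*
 * Apply promiscuous/default/all-multicast mode changes for this Rx.
 * Only one Rx in the system may own promiscuous mode and one may own
 * default mode, and the two are mutually exclusive; invalid
 * combinations return BNA_CB_FAIL without touching the hardware.
 */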
2748 enum bna_cb_status
2749 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2750                 enum bna_rxmode bitmask)
2751 {
2752         struct bna_rxf *rxf = &rx->rxf;
2753         int need_hw_config = 0;
2754
2755         /* Error checks */
2756
2757         if (is_promisc_enable(new_mode, bitmask)) {
2758                 /* If promisc mode is already enabled elsewhere in the system */
2759                 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2760                         (rx->bna->promisc_rid != rxf->rx->rid))
2761                         goto err_return;
2762
2763                 /* If default mode is already enabled in the system */
2764                 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2765                         goto err_return;
2766
2767                 /* Trying to enable promiscuous and default mode together */
2768                 if (is_default_enable(new_mode, bitmask))
2769                         goto err_return;
2770         }
2771
2772         if (is_default_enable(new_mode, bitmask)) {
2773                 /* If default mode is already enabled elsewhere in the system */
2774                 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2775                         (rx->bna->default_mode_rid != rxf->rx->rid)) {
2776                                 goto err_return;
2777                 }
2778
2779                 /* If promiscuous mode is already enabled in the system */
2780                 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2781                         goto err_return;
2782         }
2783
2784         /* Process the commands */
2785
2786         if (is_promisc_enable(new_mode, bitmask)) {
2787                 if (bna_rxf_promisc_enable(rxf))
2788                         need_hw_config = 1;
2789         } else if (is_promisc_disable(new_mode, bitmask)) {
2790                 if (bna_rxf_promisc_disable(rxf))
2791                         need_hw_config = 1;
2792         }
2793
2794         if (is_allmulti_enable(new_mode, bitmask)) {
2795                 if (bna_rxf_allmulti_enable(rxf))
2796                         need_hw_config = 1;
2797         } else if (is_allmulti_disable(new_mode, bitmask)) {
2798                 if (bna_rxf_allmulti_disable(rxf))
2799                         need_hw_config = 1;
2800         }
2801
2802         /* Trigger h/w if needed */
2803
2804         if (need_hw_config) {
2805                 rxf->cam_fltr_cbfn = NULL;
2806                 rxf->cam_fltr_cbarg = rx->bna->bnad;
2807                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2808         }
2809
2810         return BNA_CB_SUCCESS;
2811
2812 err_return:
2813         return BNA_CB_FAIL;
2814 }
2815
2816 void
2817 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2818 {
2819         struct bna_rxf *rxf = &rx->rxf;
2820
2821         if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2822                 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2823                 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2824                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2825         }
2826 }
2827
2828 void
2829 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2830 {
2831         struct bna_rxp *rxp;
2832         struct list_head *qe;
2833
2834         list_for_each(qe, &rx->rxp_q) {
2835                 rxp = (struct bna_rxp *)qe;
2836                 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2837                 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2838         }
2839 }
2840
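/*
 * Replace the [load][bias] -> coalescing timeout table consulted by
 * bna_rx_dim_update(). Illustrative call (hypothetical caller context):
 *
 *	bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
 */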
2841 void
2842 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2843 {
2844         int i, j;
2845
2846         for (i = 0; i < BNA_LOAD_T_MAX; i++)
2847                 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2848                         bna->rx_mod.dim_vector[i][j] = vector[i][j];
2849 }
2850
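/*
 * Dynamic interrupt moderation: classify the packet rate seen on this
 * CQ since the last update into a load level, derive a bias from the
 * small/large packet mix, and program the coalescing timeout from the
 * dim_vector table into the IB.
 */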
2851 void
2852 bna_rx_dim_update(struct bna_ccb *ccb)
2853 {
2854         struct bna *bna = ccb->cq->rx->bna;
2855         u32 load, bias;
2856         u32 pkt_rt, small_rt, large_rt;
2857         u8 coalescing_timeo;
2858
2859         if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2860                 (ccb->pkt_rate.large_pkt_cnt == 0))
2861                 return;
2862
2863         /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2864
2865         small_rt = ccb->pkt_rate.small_pkt_cnt;
2866         large_rt = ccb->pkt_rate.large_pkt_cnt;
2867
2868         pkt_rt = small_rt + large_rt;
2869
2870         if (pkt_rt < BNA_PKT_RATE_10K)
2871                 load = BNA_LOAD_T_LOW_4;
2872         else if (pkt_rt < BNA_PKT_RATE_20K)
2873                 load = BNA_LOAD_T_LOW_3;
2874         else if (pkt_rt < BNA_PKT_RATE_30K)
2875                 load = BNA_LOAD_T_LOW_2;
2876         else if (pkt_rt < BNA_PKT_RATE_40K)
2877                 load = BNA_LOAD_T_LOW_1;
2878         else if (pkt_rt < BNA_PKT_RATE_50K)
2879                 load = BNA_LOAD_T_HIGH_1;
2880         else if (pkt_rt < BNA_PKT_RATE_60K)
2881                 load = BNA_LOAD_T_HIGH_2;
2882         else if (pkt_rt < BNA_PKT_RATE_80K)
2883                 load = BNA_LOAD_T_HIGH_3;
2884         else
2885                 load = BNA_LOAD_T_HIGH_4;
2886
2887         if (small_rt > (large_rt << 1))
2888                 bias = 0;
2889         else
2890                 bias = 1;
2891
2892         ccb->pkt_rate.small_pkt_cnt = 0;
2893         ccb->pkt_rate.large_pkt_cnt = 0;
2894
2895         coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2896         ccb->rx_coalescing_timeo = coalescing_timeo;
2897
2898         /* Set it to IB */
2899         bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2900 }
2901
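/*
 * Default DIM table, indexed as [load][bias] by bna_rx_dim_update();
 * each entry is a coalescing timeout programmed into the IB.
 */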
2902 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2903         {12, 12},
2904         {6, 10},
2905         {5, 10},
2906         {4, 8},
2907         {3, 6},
2908         {3, 6},
2909         {2, 4},
2910         {1, 2},
2911 };
2912
2913 /* TX */
2914
2915 #define call_tx_stop_cbfn(tx)                                           \
2916 do {                                                                    \
2917         if ((tx)->stop_cbfn) {                                          \
2918                 void (*cbfn)(void *, struct bna_tx *);          \
2919                 void *cbarg;                                            \
2920                 cbfn = (tx)->stop_cbfn;                                 \
2921                 cbarg = (tx)->stop_cbarg;                               \
2922                 (tx)->stop_cbfn = NULL;                                 \
2923                 (tx)->stop_cbarg = NULL;                                \
2924                 cbfn(cbarg, (tx));                                      \
2925         }                                                               \
2926 } while (0)
2927
2928 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2929 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2930 static void bna_tx_enet_stop(struct bna_tx *tx);
2931
2932 enum bna_tx_event {
2933         TX_E_START                      = 1,
2934         TX_E_STOP                       = 2,
2935         TX_E_FAIL                       = 3,
2936         TX_E_STARTED                    = 4,
2937         TX_E_STOPPED                    = 5,
2938         TX_E_PRIO_CHANGE                = 6,
2939         TX_E_CLEANUP_DONE               = 7,
2940         TX_E_BW_UPDATE                  = 8,
2941 };
2942
2943 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2944 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2945 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2946 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2947 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2948                         enum bna_tx_event);
2949 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2950                         enum bna_tx_event);
2951 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2952                         enum bna_tx_event);
2953 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2954 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2955                         enum bna_tx_event);
2956
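/*
 * The Tx FSM below mirrors the Rx FSM above, with additional
 * prio_stop_wait / prio_cleanup_wait states that are entered when a
 * TX_E_PRIO_CHANGE or TX_E_BW_UPDATE arrives while the Tx is starting
 * or started.
 */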
2957 static void
2958 bna_tx_sm_stopped_entry(struct bna_tx *tx)
2959 {
2960         call_tx_stop_cbfn(tx);
2961 }
2962
2963 static void
2964 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2965 {
2966         switch (event) {
2967         case TX_E_START:
2968                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2969                 break;
2970
2971         case TX_E_STOP:
2972                 call_tx_stop_cbfn(tx);
2973                 break;
2974
2975         case TX_E_FAIL:
2976                 /* No-op */
2977                 break;
2978
2979         case TX_E_PRIO_CHANGE:
2980                 break;
2981
2982         case TX_E_BW_UPDATE:
2983                 /* No-op */
2984                 break;
2985
2986         default:
2987                 bfa_sm_fault(event);
2988         }
2989 }
2990
2991 static void
2992 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2993 {
2994         bna_bfi_tx_enet_start(tx);
2995 }
2996
2997 static void
2998 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2999 {
3000         switch (event) {
3001         case TX_E_STOP:
3002                 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3003                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3004                 break;
3005
3006         case TX_E_FAIL:
3007                 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3008                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3009                 break;
3010
3011         case TX_E_STARTED:
3012                 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
3013                         tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
3014                                 BNA_TX_F_BW_UPDATED);
3015                         bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3016                 } else
3017                         bfa_fsm_set_state(tx, bna_tx_sm_started);
3018                 break;
3019
3020         case TX_E_PRIO_CHANGE:
3021                 tx->flags |= BNA_TX_F_PRIO_CHANGED;
3022                 break;
3023
3024         case TX_E_BW_UPDATE:
3025                 tx->flags |= BNA_TX_F_BW_UPDATED;
3026                 break;
3027
3028         default:
3029                 bfa_sm_fault(event);
3030         }
3031 }
3032
3033 static void
3034 bna_tx_sm_started_entry(struct bna_tx *tx)
3035 {
3036         struct bna_txq *txq;
3037         struct list_head *qe;
3038         int is_regular = (tx->type == BNA_TX_T_REGULAR);
3039
3040         list_for_each(qe, &tx->txq_q) {
3041                 txq = (struct bna_txq *)qe;
3042                 txq->tcb->priority = txq->priority;
3043                 /* Start IB */
3044                 bna_ib_start(tx->bna, &txq->ib, is_regular);
3045         }
3046         tx->tx_resume_cbfn(tx->bna->bnad, tx);
3047 }
3048
3049 static void
3050 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3051 {
3052         switch (event) {
3053         case TX_E_STOP:
3054                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3055                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3056                 bna_tx_enet_stop(tx);
3057                 break;
3058
3059         case TX_E_FAIL:
3060                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3061                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3062                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3063                 break;
3064
3065         case TX_E_PRIO_CHANGE:
3066         case TX_E_BW_UPDATE:
3067                 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3068                 break;
3069
3070         default:
3071                 bfa_sm_fault(event);
3072         }
3073 }
3074
3075 static void
3076 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3077 {
3078 }
3079
3080 static void
3081 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3082 {
3083         switch (event) {
3084         case TX_E_FAIL:
3085         case TX_E_STOPPED:
3086                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3087                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3088                 break;
3089
3090         case TX_E_STARTED:
3091                 /*
3092                  * We are here due to the start_wait -> stop_wait
3093                  * transition on the TX_E_STOP event.
3094                  */
3095                 bna_tx_enet_stop(tx);
3096                 break;
3097
3098         case TX_E_PRIO_CHANGE:
3099         case TX_E_BW_UPDATE:
3100                 /* No-op */
3101                 break;
3102
3103         default:
3104                 bfa_sm_fault(event);
3105         }
3106 }
3107
3108 static void
3109 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3110 {
3111 }
3112
3113 static void
3114 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3115 {
3116         switch (event) {
3117         case TX_E_FAIL:
3118         case TX_E_PRIO_CHANGE:
3119         case TX_E_BW_UPDATE:
3120                 /* No-op */
3121                 break;
3122
3123         case TX_E_CLEANUP_DONE:
3124                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3125                 break;
3126
3127         default:
3128                 bfa_sm_fault(event);
3129         }
3130 }
3131
3132 static void
3133 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3134 {
3135         tx->tx_stall_cbfn(tx->bna->bnad, tx);
3136         bna_tx_enet_stop(tx);
3137 }
3138
3139 static void
3140 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3141 {
3142         switch (event) {
3143         case TX_E_STOP:
3144                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3145                 break;
3146
3147         case TX_E_FAIL:
3148                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3149                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3150                 break;
3151
3152         case TX_E_STOPPED:
3153                 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3154                 break;
3155
3156         case TX_E_PRIO_CHANGE:
3157         case TX_E_BW_UPDATE:
3158                 /* No-op */
3159                 break;
3160
3161         default:
3162                 bfa_sm_fault(event);
3163         }
3164 }
3165
3166 static void
3167 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3168 {
3169         tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3170 }
3171
3172 static void
3173 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3174 {
3175         switch (event) {
3176         case TX_E_STOP:
3177                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3178                 break;
3179
3180         case TX_E_FAIL:
3181                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3182                 break;
3183
3184         case TX_E_PRIO_CHANGE:
3185         case TX_E_BW_UPDATE:
3186                 /* No-op */
3187                 break;
3188
3189         case TX_E_CLEANUP_DONE:
3190                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3191                 break;
3192
3193         default:
3194                 bfa_sm_fault(event);
3195         }
3196 }
3197
3198 static void
3199 bna_tx_sm_failed_entry(struct bna_tx *tx)
3200 {
3201 }
3202
3203 static void
3204 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3205 {
3206         switch (event) {
3207         case TX_E_START:
3208                 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3209                 break;
3210
3211         case TX_E_STOP:
3212                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3213                 break;
3214
3215         case TX_E_FAIL:
3216                 /* No-op */
3217                 break;
3218
3219         case TX_E_CLEANUP_DONE:
3220                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3221                 break;
3222
3223         default:
3224                 bfa_sm_fault(event);
3225         }
3226 }
3227
3228 static void
3229 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3230 {
3231 }
3232
3233 static void
3234 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3235 {
3236         switch (event) {
3237         case TX_E_STOP:
3238                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3239                 break;
3240
3241         case TX_E_FAIL:
3242                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3243                 break;
3244
3245         case TX_E_CLEANUP_DONE:
3246                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3247                 break;
3248
3249         case TX_E_BW_UPDATE:
3250                 /* No-op */
3251                 break;
3252
3253         default:
3254                 bfa_sm_fault(event);
3255         }
3256 }
3257
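/*
 * Build a BFI_ENET_H2I_TX_CFG_SET_REQ describing every TxQ (queue page
 * table, priority, IB index segment address, MSI-X vector) together with
 * the interrupt-block and VLAN settings, and post it to the firmware over
 * the message queue. The reply is handled by bna_bfi_tx_enet_start_rsp().
 */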
3258 static void
3259 bna_bfi_tx_enet_start(struct bna_tx *tx)
3260 {
3261         struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3262         struct bna_txq *txq = NULL;
3263         struct list_head *qe;
3264         int i;
3265
3266         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3267                 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3268         cfg_req->mh.num_entries = htons(
3269                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3270
3271         cfg_req->num_queues = tx->num_txq;
3272         for (i = 0, qe = bfa_q_first(&tx->txq_q);
3273                 i < tx->num_txq;
3274                 i++, qe = bfa_q_next(qe)) {
3275                 txq = (struct bna_txq *)qe;
3276
3277                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3278                 cfg_req->q_cfg[i].q.priority = txq->priority;
3279
3280                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3281                         txq->ib.ib_seg_host_addr.lsb;
3282                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3283                         txq->ib.ib_seg_host_addr.msb;
3284                 cfg_req->q_cfg[i].ib.intr.msix_index =
3285                         htons((u16)txq->ib.intr_vector);
3286         }
3287
3288         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3289         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3290         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3291         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3292         cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3293                                 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3294         cfg_req->ib_cfg.coalescing_timeout =
3295                         htonl((u32)txq->ib.coalescing_timeo);
3296         cfg_req->ib_cfg.inter_pkt_timeout =
3297                         htonl((u32)txq->ib.interpkt_timeo);
3298         cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3299
3300         cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3301         cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3302         cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3303         cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3304
3305         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3306                 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3307         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3308 }
3309
3310 static void
3311 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3312 {
3313         struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3314
3315         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3316                 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3317         req->mh.num_entries = htons(
3318                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3319         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3320                 &req->mh);
3321         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3322 }
3323
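/* Quiesce the IB of every TxQ, then ask the firmware to tear the Tx down. */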
3324 static void
3325 bna_tx_enet_stop(struct bna_tx *tx)
3326 {
3327         struct bna_txq *txq;
3328         struct list_head *qe;
3329
3330         /* Stop IB */
3331         list_for_each(qe, &tx->txq_q) {
3332                 txq = (struct bna_txq *)qe;
3333                 bna_ib_stop(tx->bna, &txq->ib);
3334         }
3335
3336         bna_bfi_tx_enet_stop(tx);
3337 }
3338
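/*
 * Populate the queue page tables for one TxQ: the hardware QPT
 * (qpt.kv_qpt_ptr) receives the DMA address of each page of the send
 * queue, while the software QPT (tcb->sw_qpt) keeps the matching kernel
 * virtual addresses so the driver can walk the same pages.
 */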
3339 static void
3340 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3341                 struct bna_mem_descr *qpt_mem,
3342                 struct bna_mem_descr *swqpt_mem,
3343                 struct bna_mem_descr *page_mem)
3344 {
3345         u8 *kva;
3346         u64 dma;
3347         struct bna_dma_addr bna_dma;
3348         int i;
3349
3350         txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3351         txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3352         txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3353         txq->qpt.page_count = page_count;
3354         txq->qpt.page_size = page_size;
3355
3356         txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3357         txq->tcb->sw_q = page_mem->kva;
3358
3359         kva = page_mem->kva;
3360         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3361
3362         for (i = 0; i < page_count; i++) {
3363                 txq->tcb->sw_qpt[i] = kva;
3364                 kva += PAGE_SIZE;
3365
3366                 BNA_SET_DMA_ADDR(dma, &bna_dma);
3367                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3368                         bna_dma.lsb;
3369                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3370                         bna_dma.msb;
3371                 dma += PAGE_SIZE;
3372         }
3373 }
3374
3375 static struct bna_tx *
3376 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3377 {
3378         struct list_head *qe = NULL;
3379         struct bna_tx *tx = NULL;
3380
3381         if (list_empty(&tx_mod->tx_free_q))
3382                 return NULL;
3383         if (type == BNA_TX_T_REGULAR) {
3384                 bfa_q_deq(&tx_mod->tx_free_q, &qe);
3385         } else {
3386                 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3387         }
3388         tx = (struct bna_tx *)qe;
3389         bfa_q_qe_init(&tx->qe);
3390         tx->type = type;
3391
3392         return tx;
3393 }
3394
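/*
 * Release a Tx: return its TxQs to txq_free_q, unlink it from tx_active_q
 * and re-insert it into tx_free_q ordered by rid. bna_tx_get() dequeues
 * regular Tx objects from the head and loopback Tx objects from the tail
 * of that sorted list.
 */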
3395 static void
3396 bna_tx_free(struct bna_tx *tx)
3397 {
3398         struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3399         struct bna_txq *txq;
3400         struct list_head *prev_qe;
3401         struct list_head *qe;
3402
3403         while (!list_empty(&tx->txq_q)) {
3404                 bfa_q_deq(&tx->txq_q, &txq);
3405                 bfa_q_qe_init(&txq->qe);
3406                 txq->tcb = NULL;
3407                 txq->tx = NULL;
3408                 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3409         }
3410
3411         list_for_each(qe, &tx_mod->tx_active_q) {
3412                 if (qe == &tx->qe) {
3413                         list_del(&tx->qe);
3414                         bfa_q_qe_init(&tx->qe);
3415                         break;
3416                 }
3417         }
3418
3419         tx->bna = NULL;
3420         tx->priv = NULL;
3421
3422         prev_qe = NULL;
3423         list_for_each(qe, &tx_mod->tx_free_q) {
3424                 if (((struct bna_tx *)qe)->rid < tx->rid)
3425                         prev_qe = qe;
3426                 else {
3427                         break;
3428                 }
3429         }
3430
3431         if (prev_qe == NULL) {
3432                 /* This is the first entry */
3433                 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3434         } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3435                 /* This is the last entry */
3436                 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3437         } else {
3438                 /* Somewhere in the middle */
3439                 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3440                 bfa_q_prev(&tx->qe) = prev_qe;
3441                 bfa_q_next(prev_qe) = &tx->qe;
3442                 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3443         }
3444 }
3445
3446 static void
3447 bna_tx_start(struct bna_tx *tx)
3448 {
3449         tx->flags |= BNA_TX_F_ENET_STARTED;
3450         if (tx->flags & BNA_TX_F_ENABLED)
3451                 bfa_fsm_send_event(tx, TX_E_START);
3452 }
3453
3454 static void
3455 bna_tx_stop(struct bna_tx *tx)
3456 {
3457         tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3458         tx->stop_cbarg = &tx->bna->tx_mod;
3459
3460         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3461         bfa_fsm_send_event(tx, TX_E_STOP);
3462 }
3463
3464 static void
3465 bna_tx_fail(struct bna_tx *tx)
3466 {
3467         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3468         bfa_fsm_send_event(tx, TX_E_FAIL);
3469 }
3470
3471 void
3472 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3473 {
3474         struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3475         struct bna_txq *txq = NULL;
3476         struct list_head *qe;
3477         int i;
3478
3479         bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3480                 sizeof(struct bfi_enet_tx_cfg_rsp));
3481
3482         tx->hw_id = cfg_rsp->hw_id;
3483
3484         for (i = 0, qe = bfa_q_first(&tx->txq_q);
3485                 i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3486                 txq = (struct bna_txq *)qe;
3487
3488                 /* Set up doorbells */
3489                 txq->tcb->i_dbell->doorbell_addr =
3490                         tx->bna->pcidev.pci_bar_kva
3491                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
3492                 txq->tcb->q_dbell =
3493                         tx->bna->pcidev.pci_bar_kva
3494                         + ntohl(cfg_rsp->q_handles[i].q_dbell);
3495                 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3496
3497                 /* Initialize producer/consumer indexes */
3498                 (*txq->tcb->hw_consumer_index) = 0;
3499                 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3500         }
3501
3502         bfa_fsm_send_event(tx, TX_E_STARTED);
3503 }
3504
3505 void
3506 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3507 {
3508         bfa_fsm_send_event(tx, TX_E_STOPPED);
3509 }
3510
3511 void
3512 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3513 {
3514         struct bna_tx *tx;
3515         struct list_head *qe;
3516
3517         list_for_each(qe, &tx_mod->tx_active_q) {
3518                 tx = (struct bna_tx *)qe;
3519                 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3520         }
3521 }
3522
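/*
 * Report the memory and interrupt resources needed for num_txq Tx queues:
 * one TCB, one hardware QPT, one software QPT, the send-queue pages and an
 * IB index segment per queue, plus one MSI-X vector each. The page count
 * is txq_depth * BFI_TXQ_WI_SIZE rounded up to PAGE_SIZE; purely as an
 * illustration (assuming 64-byte work items and 4 KiB pages), a 2048-entry
 * queue would need 128 KiB, i.e. 32 pages.
 */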
3523 void
3524 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3525 {
3526         u32 q_size;
3527         u32 page_count;
3528         struct bna_mem_info *mem_info;
3529
3530         res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3531         mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3532         mem_info->mem_type = BNA_MEM_T_KVA;
3533         mem_info->len = sizeof(struct bna_tcb);
3534         mem_info->num = num_txq;
3535
3536         q_size = txq_depth * BFI_TXQ_WI_SIZE;
3537         q_size = ALIGN(q_size, PAGE_SIZE);
3538         page_count = q_size >> PAGE_SHIFT;
3539
3540         res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3541         mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3542         mem_info->mem_type = BNA_MEM_T_DMA;
3543         mem_info->len = page_count * sizeof(struct bna_dma_addr);
3544         mem_info->num = num_txq;
3545
3546         res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3547         mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3548         mem_info->mem_type = BNA_MEM_T_KVA;
3549         mem_info->len = page_count * sizeof(void *);
3550         mem_info->num = num_txq;
3551
3552         res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3553         mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3554         mem_info->mem_type = BNA_MEM_T_DMA;
3555         mem_info->len = PAGE_SIZE * page_count;
3556         mem_info->num = num_txq;
3557
3558         res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3559         mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3560         mem_info->mem_type = BNA_MEM_T_DMA;
3561         mem_info->len = BFI_IBIDX_SIZE;
3562         mem_info->num = num_txq;
3563
3564         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3565         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3566                         BNA_INTR_T_MSIX;
3567         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3568 }
3569
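/*
 * Allocate and initialize a Tx object from the previously provisioned
 * resources: claim a bna_tx and num_txq bna_txq structures, wire up the
 * bnad callbacks, and for every TxQ set up its IB, TCB and queue page
 * table before parking the FSM in the stopped state. Returns NULL if the
 * interrupt resources do not match the requested queue count or if no
 * free Tx/TxQ objects are left.
 */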
3570 struct bna_tx *
3571 bna_tx_create(struct bna *bna, struct bnad *bnad,
3572                 struct bna_tx_config *tx_cfg,
3573                 const struct bna_tx_event_cbfn *tx_cbfn,
3574                 struct bna_res_info *res_info, void *priv)
3575 {
3576         struct bna_intr_info *intr_info;
3577         struct bna_tx_mod *tx_mod = &bna->tx_mod;
3578         struct bna_tx *tx;
3579         struct bna_txq *txq;
3580         struct list_head *qe;
3581         int page_count;
3582         int i;
3583
3584         intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3585         page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3586                                         PAGE_SIZE;
3587
3588         /*
3589          * Get resources
3590          */
3591
3592         if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3593                 return NULL;
3594
3595         /* Tx */
3596
3597         tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3598         if (!tx)
3599                 return NULL;
3600         tx->bna = bna;
3601         tx->priv = priv;
3602
3603         /* TxQs */
3604
3605         INIT_LIST_HEAD(&tx->txq_q);
3606         for (i = 0; i < tx_cfg->num_txq; i++) {
3607                 if (list_empty(&tx_mod->txq_free_q))
3608                         goto err_return;
3609
3610                 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3611                 bfa_q_qe_init(&txq->qe);
3612                 list_add_tail(&txq->qe, &tx->txq_q);
3613                 txq->tx = tx;
3614         }
3615
3616         /*
3617          * Initialize
3618          */
3619
3620         /* Tx */
3621
3622         tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3623         tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3624         /* Following callbacks are mandatory */
3625         tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3626         tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3627         tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3628
3629         list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3630
3631         tx->num_txq = tx_cfg->num_txq;
3632
3633         tx->flags = 0;
3634         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3635                 switch (tx->type) {
3636                 case BNA_TX_T_REGULAR:
3637                         if (!(tx->bna->tx_mod.flags &
3638                                 BNA_TX_MOD_F_ENET_LOOPBACK))
3639                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3640                         break;
3641                 case BNA_TX_T_LOOPBACK:
3642                         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3643                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3644                         break;
3645                 }
3646         }
3647
3648         /* TxQ */
3649
3650         i = 0;
3651         list_for_each(qe, &tx->txq_q) {
3652                 txq = (struct bna_txq *)qe;
3653                 txq->tcb = (struct bna_tcb *)
3654                 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3655                 txq->tx_packets = 0;
3656                 txq->tx_bytes = 0;
3657
3658                 /* IB */
3659                 txq->ib.ib_seg_host_addr.lsb =
3660                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3661                 txq->ib.ib_seg_host_addr.msb =
3662                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3663                 txq->ib.ib_seg_host_addr_kva =
3664                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3665                 txq->ib.intr_type = intr_info->intr_type;
3666                 txq->ib.intr_vector = (intr_info->num == 1) ?
3667                                         intr_info->idl[0].vector :
3668                                         intr_info->idl[i].vector;
3669                 if (intr_info->intr_type == BNA_INTR_T_INTX)
3670                         txq->ib.intr_vector = BIT(txq->ib.intr_vector);
3671                 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3672                 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3673                 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3674
3675                 /* TCB */
3676
3677                 txq->tcb->q_depth = tx_cfg->txq_depth;
3678                 txq->tcb->unmap_q = (void *)
3679                 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3680                 txq->tcb->hw_consumer_index =
3681                         (u32 *)txq->ib.ib_seg_host_addr_kva;
3682                 txq->tcb->i_dbell = &txq->ib.door_bell;
3683                 txq->tcb->intr_type = txq->ib.intr_type;
3684                 txq->tcb->intr_vector = txq->ib.intr_vector;
3685                 txq->tcb->txq = txq;
3686                 txq->tcb->bnad = bnad;
3687                 txq->tcb->id = i;
3688
3689                 /* QPT, SWQPT, Pages */
3690                 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3691                         &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3692                         &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3693                         &res_info[BNA_TX_RES_MEM_T_PAGE].
3694                                   res_u.mem_info.mdl[i]);
3695
3696                 /* Callback to bnad for setting up TCB */
3697                 if (tx->tcb_setup_cbfn)
3698                         (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3699
3700                 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3701                         txq->priority = txq->tcb->id;
3702                 else
3703                         txq->priority = tx_mod->default_prio;
3704
3705                 i++;
3706         }
3707
3708         tx->txf_vlan_id = 0;
3709
3710         bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3711
3712         tx_mod->rid_mask |= BIT(tx->rid);
3713
3714         return tx;
3715
3716 err_return:
3717         bna_tx_free(tx);
3718         return NULL;
3719 }
3720
3721 void
3722 bna_tx_destroy(struct bna_tx *tx)
3723 {
3724         struct bna_txq *txq;
3725         struct list_head *qe;
3726
3727         list_for_each(qe, &tx->txq_q) {
3728                 txq = (struct bna_txq *)qe;
3729                 if (tx->tcb_destroy_cbfn)
3730                         (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3731         }
3732
3733         tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
3734         bna_tx_free(tx);
3735 }
3736
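/*
 * Enable/disable entry points used by the bnad driver. Enable only kicks
 * the FSM when the Tx is currently stopped and the enet layer has already
 * been started; disable with BNA_SOFT_CLEANUP just invokes the callback,
 * while a hard cleanup records the callback and pushes the FSM through
 * its stop path.
 */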
3737 void
3738 bna_tx_enable(struct bna_tx *tx)
3739 {
3740         if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3741                 return;
3742
3743         tx->flags |= BNA_TX_F_ENABLED;
3744
3745         if (tx->flags & BNA_TX_F_ENET_STARTED)
3746                 bfa_fsm_send_event(tx, TX_E_START);
3747 }
3748
3749 void
3750 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3751                 void (*cbfn)(void *, struct bna_tx *))
3752 {
3753         if (type == BNA_SOFT_CLEANUP) {
3754                 (*cbfn)(tx->bna->bnad, tx);
3755                 return;
3756         }
3757
3758         tx->stop_cbfn = cbfn;
3759         tx->stop_cbarg = tx->bna->bnad;
3760
3761         tx->flags &= ~BNA_TX_F_ENABLED;
3762
3763         bfa_fsm_send_event(tx, TX_E_STOP);
3764 }
3765
3766 void
3767 bna_tx_cleanup_complete(struct bna_tx *tx)
3768 {
3769         bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3770 }
3771
3772 static void
3773 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3774 {
3775         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3776
3777         bfa_wc_down(&tx_mod->tx_stop_wc);
3778 }
3779
3780 static void
3781 bna_tx_mod_cb_tx_stopped_all(void *arg)
3782 {
3783         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3784
3785         if (tx_mod->stop_cbfn)
3786                 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3787         tx_mod->stop_cbfn = NULL;
3788 }
3789
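/*
 * Tx module bring-up: carve the bna_tx and bna_txq arrays out of the
 * module-level resources and seed the tx_free_q / txq_free_q lists that
 * bna_tx_create() later draws from; rid simply mirrors the array index.
 */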
3790 void
3791 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3792                 struct bna_res_info *res_info)
3793 {
3794         int i;
3795
3796         tx_mod->bna = bna;
3797         tx_mod->flags = 0;
3798
3799         tx_mod->tx = (struct bna_tx *)
3800                 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3801         tx_mod->txq = (struct bna_txq *)
3802                 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3803
3804         INIT_LIST_HEAD(&tx_mod->tx_free_q);
3805         INIT_LIST_HEAD(&tx_mod->tx_active_q);
3806
3807         INIT_LIST_HEAD(&tx_mod->txq_free_q);
3808
3809         for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3810                 tx_mod->tx[i].rid = i;
3811                 bfa_q_qe_init(&tx_mod->tx[i].qe);
3812                 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3813                 bfa_q_qe_init(&tx_mod->txq[i].qe);
3814                 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3815         }
3816
3817         tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3818         tx_mod->default_prio = 0;
3819         tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3820         tx_mod->iscsi_prio = -1;
3821 }
3822
3823 void
3824 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3825 {
3826         struct list_head *qe;
3827         int i;
3828
3829         i = 0;
3830         list_for_each(qe, &tx_mod->tx_free_q)
3831                 i++;
3832
3833         i = 0;
3834         list_for_each(qe, &tx_mod->txq_free_q)
3835                 i++;
3836
3837         tx_mod->bna = NULL;
3838 }
3839
3840 void
3841 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3842 {
3843         struct bna_tx *tx;
3844         struct list_head *qe;
3845
3846         tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3847         if (type == BNA_TX_T_LOOPBACK)
3848                 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3849
3850         list_for_each(qe, &tx_mod->tx_active_q) {
3851                 tx = (struct bna_tx *)qe;
3852                 if (tx->type == type)
3853                         bna_tx_start(tx);
3854         }
3855 }
3856
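/*
 * Stop all Tx objects of the given type. A bfa_wc wait-counter is bumped
 * once per Tx being stopped and dropped from bna_tx_mod_cb_tx_stopped();
 * when it reaches zero, bna_tx_mod_cb_tx_stopped_all() fires the recorded
 * enet stop callback, so the caller gets exactly one completion for the
 * whole module.
 */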
3857 void
3858 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3859 {
3860         struct bna_tx *tx;
3861         struct list_head *qe;
3862
3863         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3864         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3865
3866         tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3867
3868         bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3869
3870         list_for_each(qe, &tx_mod->tx_active_q) {
3871                 tx = (struct bna_tx *)qe;
3872                 if (tx->type == type) {
3873                         bfa_wc_up(&tx_mod->tx_stop_wc);
3874                         bna_tx_stop(tx);
3875                 }
3876         }
3877
3878         bfa_wc_wait(&tx_mod->tx_stop_wc);
3879 }
3880
3881 void
3882 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3883 {
3884         struct bna_tx *tx;
3885         struct list_head *qe;
3886
3887         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3888         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3889
3890         list_for_each(qe, &tx_mod->tx_active_q) {
3891                 tx = (struct bna_tx *)qe;
3892                 bna_tx_fail(tx);
3893         }
3894 }
3895
3896 void
3897 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3898 {
3899         struct bna_txq *txq;
3900         struct list_head *qe;
3901
3902         list_for_each(qe, &tx->txq_q) {
3903                 txq = (struct bna_txq *)qe;
3904                 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3905         }
3906 }