bna: get rid of private macros for manipulation with lists
cascardo/linux.git: drivers/net/ethernet/brocade/bna/bna_tx_rx.c
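This file was converted from the driver's former private queue macros (such as bfa_q_deq()) to the standard linux/list.h helpers. A minimal sketch of the conversion pattern, using a hypothetical pending_q/active_q pair of struct bna_mac lists rather than code taken verbatim from this file:

	/* old style, private queue macros */
	bfa_q_deq(&pending_q, &qe);
	mac = (struct bna_mac *)qe;
	list_add_tail(&mac->qe, &active_q);

	/* new style, linux/list.h helpers as used throughout below */
	mac = list_first_entry(&pending_q, struct bna_mac, qe);
	list_move_tail(&mac->qe, &active_q);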
1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12   */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include "bna.h"
20 #include "bfi.h"
21
22 /* IB */
23 static void
24 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
25 {
26         ib->coalescing_timeo = coalescing_timeo;
27         ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
28                                 (u32)ib->coalescing_timeo, 0);
29 }
30
31 /* RXF */
32
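/*
 * The RXF (Rx function/filter) block caches unicast/multicast CAM entries,
 * promiscuous/allmulti mode, VLAN filter/strip settings and the RSS
 * configuration, and pushes pending changes to firmware one request at a
 * time.  Each *_cfg_apply() helper below returns 1 when it posted a request
 * and 0 when nothing is pending.
 */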
33 #define bna_rxf_vlan_cfg_soft_reset(rxf)                                \
34 do {                                                                    \
35         (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;           \
36         (rxf)->vlan_strip_pending = true;                               \
37 } while (0)
38
39 #define bna_rxf_rss_cfg_soft_reset(rxf)                                 \
40 do {                                                                    \
41         if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)                  \
42                 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |           \
43                                 BNA_RSS_F_CFG_PENDING |                 \
44                                 BNA_RSS_F_STATUS_PENDING);              \
45 } while (0)
46
47 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
48 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
49 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
50 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
54                                         enum bna_cleanup_type cleanup);
55 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
56                                         enum bna_cleanup_type cleanup);
57 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
58                                         enum bna_cleanup_type cleanup);
59
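/*
 * RXF state machine: stopped -> cfg_wait -> started.  cfg_wait keeps
 * re-applying pending configuration on every firmware response until nothing
 * is left; last_resp_wait is entered when a stop request arrives while a
 * firmware response is still outstanding.
 */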
60 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
61                         enum bna_rxf_event);
62 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
63                         enum bna_rxf_event);
64 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
65                         enum bna_rxf_event);
66 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
67                         enum bna_rxf_event);
68
69 static void
70 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
71 {
72         call_rxf_stop_cbfn(rxf);
73 }
74
75 static void
76 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
77 {
78         switch (event) {
79         case RXF_E_START:
80                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
81                 break;
82
83         case RXF_E_STOP:
84                 call_rxf_stop_cbfn(rxf);
85                 break;
86
87         case RXF_E_FAIL:
88                 /* No-op */
89                 break;
90
91         case RXF_E_CONFIG:
92                 call_rxf_cam_fltr_cbfn(rxf);
93                 break;
94
95         default:
96                 bfa_sm_fault(event);
97         }
98 }
99
100 static void
101 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
102 {
103         if (!bna_rxf_cfg_apply(rxf)) {
104                 /* No more pending config updates */
105                 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
106         }
107 }
108
109 static void
110 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
111 {
112         switch (event) {
113         case RXF_E_STOP:
114                 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
115                 break;
116
117         case RXF_E_FAIL:
118                 bna_rxf_cfg_reset(rxf);
119                 call_rxf_start_cbfn(rxf);
120                 call_rxf_cam_fltr_cbfn(rxf);
121                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
122                 break;
123
124         case RXF_E_CONFIG:
125                 /* No-op */
126                 break;
127
128         case RXF_E_FW_RESP:
129                 if (!bna_rxf_cfg_apply(rxf)) {
130                         /* No more pending config updates */
131                         bfa_fsm_set_state(rxf, bna_rxf_sm_started);
132                 }
133                 break;
134
135         default:
136                 bfa_sm_fault(event);
137         }
138 }
139
140 static void
141 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
142 {
143         call_rxf_start_cbfn(rxf);
144         call_rxf_cam_fltr_cbfn(rxf);
145 }
146
147 static void
148 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
149 {
150         switch (event) {
151         case RXF_E_STOP:
152         case RXF_E_FAIL:
153                 bna_rxf_cfg_reset(rxf);
154                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
155                 break;
156
157         case RXF_E_CONFIG:
158                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
159                 break;
160
161         default:
162                 bfa_sm_fault(event);
163         }
164 }
165
166 static void
167 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
168 {
169 }
170
171 static void
172 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
173 {
174         switch (event) {
175         case RXF_E_FAIL:
176         case RXF_E_FW_RESP:
177                 bna_rxf_cfg_reset(rxf);
178                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
179                 break;
180
181         default:
182                 bfa_sm_fault(event);
183         }
184 }
185
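/*
 * Firmware request builders: each fills the RxF's preallocated BFI command
 * buffer (rxf->bfi_enet_cmd), sets up the message queue command and posts it
 * on the enet message queue.
 */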
186 static void
187 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
188                 enum bfi_enet_h2i_msgs req_type)
189 {
190         struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
191
192         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
193         req->mh.num_entries = htons(
194                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
195         ether_addr_copy(req->mac_addr, mac->addr);
196         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
197                 sizeof(struct bfi_enet_ucast_req), &req->mh);
198         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
199 }
200
201 static void
202 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
203 {
204         struct bfi_enet_mcast_add_req *req =
205                 &rxf->bfi_enet_cmd.mcast_add_req;
206
207         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
208                 0, rxf->rx->rid);
209         req->mh.num_entries = htons(
210                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
211         ether_addr_copy(req->mac_addr, mac->addr);
212         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
213                 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
214         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
215 }
216
217 static void
218 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
219 {
220         struct bfi_enet_mcast_del_req *req =
221                 &rxf->bfi_enet_cmd.mcast_del_req;
222
223         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
224                 0, rxf->rx->rid);
225         req->mh.num_entries = htons(
226                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
227         req->handle = htons(handle);
228         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
229                 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
230         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
231 }
232
233 static void
234 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
235 {
236         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
237
238         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
239                 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
240         req->mh.num_entries = htons(
241                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
242         req->enable = status;
243         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
244                 sizeof(struct bfi_enet_enable_req), &req->mh);
245         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
246 }
247
248 static void
249 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
250 {
251         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
252
253         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
254                 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
255         req->mh.num_entries = htons(
256                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
257         req->enable = status;
258         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
259                 sizeof(struct bfi_enet_enable_req), &req->mh);
260         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
261 }
262
263 static void
264 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
265 {
266         struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
267         int i;
268         int j;
269
270         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
271                 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
272         req->mh.num_entries = htons(
273                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
274         req->block_idx = block_idx;
275         for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
276                 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
277                 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
278                         req->bit_mask[i] =
279                                 htonl(rxf->vlan_filter_table[j]);
280                 else
281                         req->bit_mask[i] = 0xFFFFFFFF;
282         }
283         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
284                 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
285         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
286 }
287
288 static void
289 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
290 {
291         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
292
293         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
294                 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
295         req->mh.num_entries = htons(
296                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
297         req->enable = rxf->vlan_strip_status;
298         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
299                 sizeof(struct bfi_enet_enable_req), &req->mh);
300         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
301 }
302
303 static void
304 bna_bfi_rit_cfg(struct bna_rxf *rxf)
305 {
306         struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
307
308         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
309                 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
310         req->mh.num_entries = htons(
311                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
312         req->size = htons(rxf->rit_size);
313         memcpy(&req->table[0], rxf->rit, rxf->rit_size);
314         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
315                 sizeof(struct bfi_enet_rit_req), &req->mh);
316         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
317 }
318
319 static void
320 bna_bfi_rss_cfg(struct bna_rxf *rxf)
321 {
322         struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
323         int i;
324
325         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
326                 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
327         req->mh.num_entries = htons(
328                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
329         req->cfg.type = rxf->rss_cfg.hash_type;
330         req->cfg.mask = rxf->rss_cfg.hash_mask;
331         for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
332                 req->cfg.key[i] =
333                         htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
334         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
335                 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
336         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
337 }
338
339 static void
340 bna_bfi_rss_enable(struct bna_rxf *rxf)
341 {
342         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
343
344         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
345                 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
346         req->mh.num_entries = htons(
347                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
348         req->enable = rxf->rss_status;
349         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
350                 sizeof(struct bfi_enet_enable_req), &req->mh);
351         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
352 }
353
354 /* This function gets the multicast MAC that has already been added to CAM */
355 static struct bna_mac *
356 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
357 {
358         struct bna_mac *mac;
359         struct list_head *qe;
360
361         list_for_each(qe, &rxf->mcast_active_q) {
362                 mac = (struct bna_mac *)qe;
363                 if (ether_addr_equal(mac->addr, mac_addr))
364                         return mac;
365         }
366
367         list_for_each(qe, &rxf->mcast_pending_del_q) {
368                 mac = (struct bna_mac *)qe;
369                 if (ether_addr_equal(mac->addr, mac_addr))
370                         return mac;
371         }
372
373         return NULL;
374 }
375
376 static struct bna_mcam_handle *
377 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
378 {
379         struct bna_mcam_handle *mchandle;
380         struct list_head *qe;
381
382         list_for_each(qe, &rxf->mcast_handle_q) {
383                 mchandle = (struct bna_mcam_handle *)qe;
384                 if (mchandle->handle == handle)
385                         return mchandle;
386         }
387
388         return NULL;
389 }
390
391 static void
392 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
393 {
394         struct bna_mac *mcmac;
395         struct bna_mcam_handle *mchandle;
396
397         mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
398         mchandle = bna_rxf_mchandle_get(rxf, handle);
399         if (mchandle == NULL) {
400                 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
401                 mchandle->handle = handle;
402                 mchandle->refcnt = 0;
403                 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
404         }
405         mchandle->refcnt++;
406         mcmac->handle = mchandle;
407 }
408
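/*
 * Drop a reference on the MCAM handle attached to @mac.  When the last
 * reference goes away, release the handle and, for BNA_HARD_CLEANUP, ask the
 * firmware to delete the entry.  Returns 1 if a firmware request was posted.
 */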
409 static int
410 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
411                 enum bna_cleanup_type cleanup)
412 {
413         struct bna_mcam_handle *mchandle;
414         int ret = 0;
415
416         mchandle = mac->handle;
417         if (mchandle == NULL)
418                 return ret;
419
420         mchandle->refcnt--;
421         if (mchandle->refcnt == 0) {
422                 if (cleanup == BNA_HARD_CLEANUP) {
423                         bna_bfi_mcast_del_req(rxf, mchandle->handle);
424                         ret = 1;
425                 }
426                 list_del(&mchandle->qe);
427                 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
428         }
429         mac->handle = NULL;
430
431         return ret;
432 }
433
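/*
 * Process pending multicast deletions first (to keep the CAM entry count
 * bounded), then issue at most one pending addition per call.  Returns 1
 * while a firmware request is outstanding.
 */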
434 static int
435 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
436 {
437         struct bna_mac *mac = NULL;
438         int ret;
439
440         /* First delete multicast entries to maintain the count */
441         while (!list_empty(&rxf->mcast_pending_del_q)) {
442                 mac = list_first_entry(&rxf->mcast_pending_del_q,
443                                        struct bna_mac, qe);
444                 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
445                 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
446                 if (ret)
447                         return ret;
448         }
449
450         /* Add multicast entries */
451         if (!list_empty(&rxf->mcast_pending_add_q)) {
452                 mac = list_first_entry(&rxf->mcast_pending_add_q,
453                                        struct bna_mac, qe);
454                 list_move_tail(&mac->qe, &rxf->mcast_active_q);
455                 bna_bfi_mcast_add_req(rxf, mac);
456                 return 1;
457         }
458
459         return 0;
460 }
461
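/*
 * Each bit in vlan_pending_bitmask marks one block of the VLAN filter table
 * that still has to be sent to firmware; push the lowest pending block per
 * call.
 */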
462 static int
463 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
464 {
465         u8 vlan_pending_bitmask;
466         int block_idx = 0;
467
468         if (rxf->vlan_pending_bitmask) {
469                 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
470                 while (!(vlan_pending_bitmask & 0x1)) {
471                         block_idx++;
472                         vlan_pending_bitmask >>= 1;
473                 }
474                 rxf->vlan_pending_bitmask &= ~BIT(block_idx);
475                 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
476                 return 1;
477         }
478
479         return 0;
480 }
481
482 static int
483 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
484 {
485         struct bna_mac *mac;
486         int ret;
487
488         /* Throw away delete pending mcast entries */
489         while (!list_empty(&rxf->mcast_pending_del_q)) {
490                 mac = list_first_entry(&rxf->mcast_pending_del_q,
491                                        struct bna_mac, qe);
492                 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
493                 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
494                 if (ret)
495                         return ret;
496         }
497
498         /* Move active mcast entries to pending_add_q */
499         while (!list_empty(&rxf->mcast_active_q)) {
500                 mac = list_first_entry(&rxf->mcast_active_q,
501                                        struct bna_mac, qe);
502                 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
503                 if (bna_rxf_mcast_del(rxf, mac, cleanup))
504                         return 1;
505         }
506
507         return 0;
508 }
509
510 static int
511 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
512 {
513         if (rxf->rss_pending) {
514                 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
515                         rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
516                         bna_bfi_rit_cfg(rxf);
517                         return 1;
518                 }
519
520                 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
521                         rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
522                         bna_bfi_rss_cfg(rxf);
523                         return 1;
524                 }
525
526                 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
527                         rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
528                         bna_bfi_rss_enable(rxf);
529                         return 1;
530                 }
531         }
532
533         return 0;
534 }
535
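/*
 * Apply at most one pending configuration change per call.  Returns 1 while
 * more firmware requests need to be issued, 0 once the RxF configuration is
 * fully applied; the cfg_wait state uses this to decide when to move to
 * started.
 */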
536 static int
537 bna_rxf_cfg_apply(struct bna_rxf *rxf)
538 {
539         if (bna_rxf_ucast_cfg_apply(rxf))
540                 return 1;
541
542         if (bna_rxf_mcast_cfg_apply(rxf))
543                 return 1;
544
545         if (bna_rxf_promisc_cfg_apply(rxf))
546                 return 1;
547
548         if (bna_rxf_allmulti_cfg_apply(rxf))
549                 return 1;
550
551         if (bna_rxf_vlan_cfg_apply(rxf))
552                 return 1;
553
554         if (bna_rxf_vlan_strip_cfg_apply(rxf))
555                 return 1;
556
557         if (bna_rxf_rss_cfg_apply(rxf))
558                 return 1;
559
560         return 0;
561 }
562
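/*
 * Soft-reset the cached configuration so that everything is re-applied the
 * next time the RxF (re)starts; no firmware requests are issued here.
 */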
563 static void
564 bna_rxf_cfg_reset(struct bna_rxf *rxf)
565 {
566         bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
567         bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
568         bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
569         bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
570         bna_rxf_vlan_cfg_soft_reset(rxf);
571         bna_rxf_rss_cfg_soft_reset(rxf);
572 }
573
574 static void
575 bna_rit_init(struct bna_rxf *rxf, int rit_size)
576 {
577         struct bna_rx *rx = rxf->rx;
578         struct bna_rxp *rxp;
579         struct list_head *qe;
580         int offset = 0;
581
582         rxf->rit_size = rit_size;
583         list_for_each(qe, &rx->rxp_q) {
584                 rxp = (struct bna_rxp *)qe;
585                 rxf->rit[offset] = rxp->cq.ccb->id;
586                 offset++;
587         }
588
589 }
590
591 void
592 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
593 {
594         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
595 }
596
597 void
598 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
599                         struct bfi_msgq_mhdr *msghdr)
600 {
601         struct bfi_enet_rsp *rsp =
602                 container_of(msghdr, struct bfi_enet_rsp, mh);
603
604         if (rsp->error) {
605                 /* Clear ucast from cache */
606                 rxf->ucast_active_set = 0;
607         }
608
609         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
610 }
611
612 void
613 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
614                         struct bfi_msgq_mhdr *msghdr)
615 {
616         struct bfi_enet_mcast_add_req *req =
617                 &rxf->bfi_enet_cmd.mcast_add_req;
618         struct bfi_enet_mcast_add_rsp *rsp =
619                 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
620
621         bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
622                 ntohs(rsp->handle));
623         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
624 }
625
626 static void
627 bna_rxf_init(struct bna_rxf *rxf,
628                 struct bna_rx *rx,
629                 struct bna_rx_config *q_config,
630                 struct bna_res_info *res_info)
631 {
632         rxf->rx = rx;
633
634         INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
635         INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
636         rxf->ucast_pending_set = 0;
637         rxf->ucast_active_set = 0;
638         INIT_LIST_HEAD(&rxf->ucast_active_q);
639         rxf->ucast_pending_mac = NULL;
640
641         INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
642         INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
643         INIT_LIST_HEAD(&rxf->mcast_active_q);
644         INIT_LIST_HEAD(&rxf->mcast_handle_q);
645
646         rxf->rit = (u8 *)
647                 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
648         bna_rit_init(rxf, q_config->num_paths);
649
650         rxf->rss_status = q_config->rss_status;
651         if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
652                 rxf->rss_cfg = q_config->rss_config;
653                 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
654                 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
655                 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
656         }
657
658         rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
659         memset(rxf->vlan_filter_table, 0,
660                         (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
661         rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
662         rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
663
664         rxf->vlan_strip_status = q_config->vlan_strip_status;
665
666         bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
667 }
668
669 static void
670 bna_rxf_uninit(struct bna_rxf *rxf)
671 {
672         struct bna_mac *mac;
673
674         rxf->ucast_pending_set = 0;
675         rxf->ucast_active_set = 0;
676
677         while (!list_empty(&rxf->ucast_pending_add_q)) {
678                 mac = list_first_entry(&rxf->ucast_pending_add_q,
679                                        struct bna_mac, qe);
680                 list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
681         }
682
683         if (rxf->ucast_pending_mac) {
684                 list_add_tail(&rxf->ucast_pending_mac->qe,
685                               bna_ucam_mod_free_q(rxf->rx->bna));
686                 rxf->ucast_pending_mac = NULL;
687         }
688
689         while (!list_empty(&rxf->mcast_pending_add_q)) {
690                 mac = list_first_entry(&rxf->mcast_pending_add_q,
691                                        struct bna_mac, qe);
692                 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
693         }
694
695         rxf->rxmode_pending = 0;
696         rxf->rxmode_pending_bitmask = 0;
697         if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
698                 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
699         if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
700                 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
701
702         rxf->rss_pending = 0;
703         rxf->vlan_strip_pending = false;
704
705         rxf->rx = NULL;
706 }
707
708 static void
709 bna_rx_cb_rxf_started(struct bna_rx *rx)
710 {
711         bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
712 }
713
714 static void
715 bna_rxf_start(struct bna_rxf *rxf)
716 {
717         rxf->start_cbfn = bna_rx_cb_rxf_started;
718         rxf->start_cbarg = rxf->rx;
719         bfa_fsm_send_event(rxf, RXF_E_START);
720 }
721
722 static void
723 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
724 {
725         bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
726 }
727
728 static void
729 bna_rxf_stop(struct bna_rxf *rxf)
730 {
731         rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
732         rxf->stop_cbarg = rxf->rx;
733         bfa_fsm_send_event(rxf, RXF_E_STOP);
734 }
735
736 static void
737 bna_rxf_fail(struct bna_rxf *rxf)
738 {
739         bfa_fsm_send_event(rxf, RXF_E_FAIL);
740 }
741
742 enum bna_cb_status
743 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
744 {
745         struct bna_rxf *rxf = &rx->rxf;
746
747         if (rxf->ucast_pending_mac == NULL) {
748                 rxf->ucast_pending_mac =
749                         bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
750                 if (rxf->ucast_pending_mac == NULL)
751                         return BNA_CB_UCAST_CAM_FULL;
752         }
753
754         ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
755         rxf->ucast_pending_set = 1;
756         rxf->cam_fltr_cbfn = NULL;
757         rxf->cam_fltr_cbarg = rx->bna->bnad;
758
759         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
760
761         return BNA_CB_SUCCESS;
762 }
763
764 enum bna_cb_status
765 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
766                  void (*cbfn)(struct bnad *, struct bna_rx *))
767 {
768         struct bna_rxf *rxf = &rx->rxf;
769         struct bna_mac *mac;
770
771         /* Check if already added or pending addition */
772         if (bna_mac_find(&rxf->mcast_active_q, addr) ||
773                 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
774                 if (cbfn)
775                         cbfn(rx->bna->bnad, rx);
776                 return BNA_CB_SUCCESS;
777         }
778
779         mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
780         if (mac == NULL)
781                 return BNA_CB_MCAST_LIST_FULL;
782         ether_addr_copy(mac->addr, addr);
783         list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
784
785         rxf->cam_fltr_cbfn = cbfn;
786         rxf->cam_fltr_cbarg = rx->bna->bnad;
787
788         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
789
790         return BNA_CB_SUCCESS;
791 }
792
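/*
 * Replace the additional-unicast address list: purge entries still pending
 * addition, schedule every active entry for deletion, then queue the
 * caller's list for addition and kick the state machine.
 */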
793 enum bna_cb_status
794 bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
795 {
796         struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
797         struct bna_rxf *rxf = &rx->rxf;
798         struct list_head list_head;
799         u8 *mcaddr;
800         struct bna_mac *mac, *del_mac;
801         int i;
802
803         /* Purge the pending_add_q */
804         while (!list_empty(&rxf->ucast_pending_add_q)) {
805                 mac = list_first_entry(&rxf->ucast_pending_add_q,
806                                        struct bna_mac, qe);
807                 list_move_tail(&mac->qe, &ucam_mod->free_q);
808         }
809
810         /* Schedule active_q entries for deletion */
811         while (!list_empty(&rxf->ucast_active_q)) {
812                 mac = list_first_entry(&rxf->ucast_active_q,
813                                        struct bna_mac, qe);
814                 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
815                 ether_addr_copy(del_mac->addr, mac->addr);
816                 del_mac->handle = mac->handle;
817                 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
818                 list_move_tail(&mac->qe, &ucam_mod->free_q);
819         }
820
821         /* Allocate nodes */
822         INIT_LIST_HEAD(&list_head);
823         for (i = 0, mcaddr = uclist; i < count; i++) {
824                 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
825                 if (mac == NULL)
826                         goto err_return;
827                 ether_addr_copy(mac->addr, mcaddr);
828                 list_add_tail(&mac->qe, &list_head);
829                 mcaddr += ETH_ALEN;
830         }
831
832         /* Add the new entries */
833         while (!list_empty(&list_head)) {
834                 mac = list_first_entry(&list_head, struct bna_mac, qe);
835                 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
836         }
837
838         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
839
840         return BNA_CB_SUCCESS;
841
842 err_return:
843         while (!list_empty(&list_head)) {
844                 mac = list_first_entry(&list_head, struct bna_mac, qe);
845                 list_move_tail(&mac->qe, &ucam_mod->free_q);
846         }
847
848         return BNA_CB_UCAST_CAM_FULL;
849 }
850
851 enum bna_cb_status
852 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
853 {
854         struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
855         struct bna_rxf *rxf = &rx->rxf;
856         struct list_head list_head;
857         u8 *mcaddr;
858         struct bna_mac *mac, *del_mac;
859         int i;
860
861         /* Purge the pending_add_q */
862         while (!list_empty(&rxf->mcast_pending_add_q)) {
863                 mac = list_first_entry(&rxf->mcast_pending_add_q,
864                                        struct bna_mac, qe);
865                 list_move_tail(&mac->qe, &mcam_mod->free_q);
866         }
867
868         /* Schedule active_q entries for deletion */
869         while (!list_empty(&rxf->mcast_active_q)) {
870                 mac = list_first_entry(&rxf->mcast_active_q,
871                                        struct bna_mac, qe);
872                 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
873                 ether_addr_copy(del_mac->addr, mac->addr);
874                 del_mac->handle = mac->handle;
875                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
876                 mac->handle = NULL;
877                 list_move_tail(&mac->qe, &mcam_mod->free_q);
878         }
879
880         /* Allocate nodes */
881         INIT_LIST_HEAD(&list_head);
882         for (i = 0, mcaddr = mclist; i < count; i++) {
883                 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
884                 if (mac == NULL)
885                         goto err_return;
886                 ether_addr_copy(mac->addr, mcaddr);
887                 list_add_tail(&mac->qe, &list_head);
888
889                 mcaddr += ETH_ALEN;
890         }
891
892         /* Add the new entries */
893         while (!list_empty(&list_head)) {
894                 mac = list_first_entry(&list_head, struct bna_mac, qe);
895                 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
896         }
897
898         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
899
900         return BNA_CB_SUCCESS;
901
902 err_return:
903         while (!list_empty(&list_head)) {
904                 mac = list_first_entry(&list_head, struct bna_mac, qe);
905                 list_move_tail(&mac->qe, &mcam_mod->free_q);
906         }
907
908         return BNA_CB_MCAST_LIST_FULL;
909 }
910
911 void
912 bna_rx_mcast_delall(struct bna_rx *rx)
913 {
914         struct bna_rxf *rxf = &rx->rxf;
915         struct bna_mac *mac, *del_mac;
916         int need_hw_config = 0;
917
918         /* Purge all entries from pending_add_q */
919         while (!list_empty(&rxf->mcast_pending_add_q)) {
920                 mac = list_first_entry(&rxf->mcast_pending_add_q,
921                                        struct bna_mac, qe);
922                 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
923         }
924
925         /* Schedule all entries in active_q for deletion */
926         while (!list_empty(&rxf->mcast_active_q)) {
927                 mac = list_first_entry(&rxf->mcast_active_q,
928                                        struct bna_mac, qe);
929                 list_del(&mac->qe);
930                 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
931                 memcpy(del_mac, mac, sizeof(*del_mac));
932                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
933                 mac->handle = NULL;
934                 list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
935                 need_hw_config = 1;
936         }
937
938         if (need_hw_config)
939                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
940 }
941
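/*
 * The VLAN filter table is an array of 32-bit words and vlan_pending_bitmask
 * has one bit per filter block.  As a worked example (assuming
 * BFI_VLAN_WORD_SHIFT == 5 and BFI_VLAN_BLOCK_SHIFT == 9, i.e. 32-bit words
 * and 512-ID blocks), vlan_id 100 maps to word 100 >> 5 = 3,
 * bit BIT(100 & 0x1f) = BIT(4) and block 100 >> 9 = 0.
 */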
942 void
943 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
944 {
945         struct bna_rxf *rxf = &rx->rxf;
946         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
947         int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
948         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
949
950         rxf->vlan_filter_table[index] |= bit;
951         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
952                 rxf->vlan_pending_bitmask |= BIT(group_id);
953                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
954         }
955 }
956
957 void
958 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
959 {
960         struct bna_rxf *rxf = &rx->rxf;
961         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
962         int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
963         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
964
965         rxf->vlan_filter_table[index] &= ~bit;
966         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
967                 rxf->vlan_pending_bitmask |= BIT(group_id);
968                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
969         }
970 }
971
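/*
 * Unicast counterpart of the mcast apply path: pending deletions first, then
 * the pending default-MAC set, then one pending addition per call.
 */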
972 static int
973 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
974 {
975         struct bna_mac *mac = NULL;
976
977         /* Delete MAC addresses previously added */
978         if (!list_empty(&rxf->ucast_pending_del_q)) {
979                 mac = list_first_entry(&rxf->ucast_pending_del_q,
980                                        struct bna_mac, qe);
981                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
982                 list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
983                 return 1;
984         }
985
986         /* Set default unicast MAC */
987         if (rxf->ucast_pending_set) {
988                 rxf->ucast_pending_set = 0;
989                 ether_addr_copy(rxf->ucast_active_mac.addr,
990                                 rxf->ucast_pending_mac->addr);
991                 rxf->ucast_active_set = 1;
992                 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
993                         BFI_ENET_H2I_MAC_UCAST_SET_REQ);
994                 return 1;
995         }
996
997         /* Add additional MAC entries */
998         if (!list_empty(&rxf->ucast_pending_add_q)) {
999                 mac = list_first_entry(&rxf->ucast_pending_add_q,
1000                                        struct bna_mac, qe);
1001                 list_move_tail(&mac->qe, &rxf->ucast_active_q);
1002                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1003                 return 1;
1004         }
1005
1006         return 0;
1007 }
1008
1009 static int
1010 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1011 {
1012         struct bna_mac *mac;
1013
1014         /* Throw away delete pending ucast entries */
1015         while (!list_empty(&rxf->ucast_pending_del_q)) {
1016                 mac = list_first_entry(&rxf->ucast_pending_del_q,
1017                                        struct bna_mac, qe);
1018                 if (cleanup == BNA_SOFT_CLEANUP)
1019                         list_move_tail(&mac->qe,
1020                                        bna_ucam_mod_del_q(rxf->rx->bna));
1021                 else {
1022                         bna_bfi_ucast_req(rxf, mac,
1023                                           BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1024                         list_move_tail(&mac->qe,
1025                                        bna_ucam_mod_del_q(rxf->rx->bna));
1026                         return 1;
1027                 }
1028         }
1029
1030         /* Move active ucast entries to pending_add_q */
1031         while (!list_empty(&rxf->ucast_active_q)) {
1032                 mac = list_first_entry(&rxf->ucast_active_q,
1033                                        struct bna_mac, qe);
1034                 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
1035                 if (cleanup == BNA_HARD_CLEANUP) {
1036                         bna_bfi_ucast_req(rxf, mac,
1037                                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1038                         return 1;
1039                 }
1040         }
1041
1042         if (rxf->ucast_active_set) {
1043                 rxf->ucast_pending_set = 1;
1044                 rxf->ucast_active_set = 0;
1045                 if (cleanup == BNA_HARD_CLEANUP) {
1046                         bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1047                                 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1048                         return 1;
1049                 }
1050         }
1051
1052         return 0;
1053 }
1054
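/*
 * Promiscuous and allmulti state is tracked as pending enable/disable bits
 * in rxmode_pending/rxmode_pending_bitmask.  The apply helpers move one
 * pending change into rxmode_active and post the matching firmware request;
 * the reset helpers push active mode back to pending so it is re-applied (or
 * cleared in hardware for BNA_HARD_CLEANUP) on restart.
 */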
1055 static int
1056 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1057 {
1058         struct bna *bna = rxf->rx->bna;
1059
1060         /* Enable/disable promiscuous mode */
1061         if (is_promisc_enable(rxf->rxmode_pending,
1062                                 rxf->rxmode_pending_bitmask)) {
1063                 /* move promisc configuration from pending -> active */
1064                 promisc_inactive(rxf->rxmode_pending,
1065                                 rxf->rxmode_pending_bitmask);
1066                 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1067                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1068                 return 1;
1069         } else if (is_promisc_disable(rxf->rxmode_pending,
1070                                 rxf->rxmode_pending_bitmask)) {
1071                 /* move promisc configuration from pending -> active */
1072                 promisc_inactive(rxf->rxmode_pending,
1073                                 rxf->rxmode_pending_bitmask);
1074                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1075                 bna->promisc_rid = BFI_INVALID_RID;
1076                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1077                 return 1;
1078         }
1079
1080         return 0;
1081 }
1082
1083 static int
1084 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1085 {
1086         struct bna *bna = rxf->rx->bna;
1087
1088         /* Clear pending promisc mode disable */
1089         if (is_promisc_disable(rxf->rxmode_pending,
1090                                 rxf->rxmode_pending_bitmask)) {
1091                 promisc_inactive(rxf->rxmode_pending,
1092                                 rxf->rxmode_pending_bitmask);
1093                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1094                 bna->promisc_rid = BFI_INVALID_RID;
1095                 if (cleanup == BNA_HARD_CLEANUP) {
1096                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1097                         return 1;
1098                 }
1099         }
1100
1101         /* Move promisc mode config from active -> pending */
1102         if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1103                 promisc_enable(rxf->rxmode_pending,
1104                                 rxf->rxmode_pending_bitmask);
1105                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1106                 if (cleanup == BNA_HARD_CLEANUP) {
1107                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1108                         return 1;
1109                 }
1110         }
1111
1112         return 0;
1113 }
1114
1115 static int
1116 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1117 {
1118         /* Enable/disable allmulti mode */
1119         if (is_allmulti_enable(rxf->rxmode_pending,
1120                                 rxf->rxmode_pending_bitmask)) {
1121                 /* move allmulti configuration from pending -> active */
1122                 allmulti_inactive(rxf->rxmode_pending,
1123                                 rxf->rxmode_pending_bitmask);
1124                 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1125                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1126                 return 1;
1127         } else if (is_allmulti_disable(rxf->rxmode_pending,
1128                                         rxf->rxmode_pending_bitmask)) {
1129                 /* move allmulti configuration from pending -> active */
1130                 allmulti_inactive(rxf->rxmode_pending,
1131                                 rxf->rxmode_pending_bitmask);
1132                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1133                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1134                 return 1;
1135         }
1136
1137         return 0;
1138 }
1139
1140 static int
1141 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1142 {
1143         /* Clear pending allmulti mode disable */
1144         if (is_allmulti_disable(rxf->rxmode_pending,
1145                                 rxf->rxmode_pending_bitmask)) {
1146                 allmulti_inactive(rxf->rxmode_pending,
1147                                 rxf->rxmode_pending_bitmask);
1148                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1149                 if (cleanup == BNA_HARD_CLEANUP) {
1150                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1151                         return 1;
1152                 }
1153         }
1154
1155         /* Move allmulti mode config from active -> pending */
1156         if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1157                 allmulti_enable(rxf->rxmode_pending,
1158                                 rxf->rxmode_pending_bitmask);
1159                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1160                 if (cleanup == BNA_HARD_CLEANUP) {
1161                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1162                         return 1;
1163                 }
1164         }
1165
1166         return 0;
1167 }
1168
1169 static int
1170 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1171 {
1172         struct bna *bna = rxf->rx->bna;
1173         int ret = 0;
1174
1175         if (is_promisc_enable(rxf->rxmode_pending,
1176                                 rxf->rxmode_pending_bitmask) ||
1177                 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1178                 /* Do nothing if pending enable or already enabled */
1179         } else if (is_promisc_disable(rxf->rxmode_pending,
1180                                         rxf->rxmode_pending_bitmask)) {
1181                 /* Turn off pending disable command */
1182                 promisc_inactive(rxf->rxmode_pending,
1183                         rxf->rxmode_pending_bitmask);
1184         } else {
1185                 /* Schedule enable */
1186                 promisc_enable(rxf->rxmode_pending,
1187                                 rxf->rxmode_pending_bitmask);
1188                 bna->promisc_rid = rxf->rx->rid;
1189                 ret = 1;
1190         }
1191
1192         return ret;
1193 }
1194
1195 static int
1196 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1197 {
1198         struct bna *bna = rxf->rx->bna;
1199         int ret = 0;
1200
1201         if (is_promisc_disable(rxf->rxmode_pending,
1202                                 rxf->rxmode_pending_bitmask) ||
1203                 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1204                 /* Do nothing if pending disable or already disabled */
1205         } else if (is_promisc_enable(rxf->rxmode_pending,
1206                                         rxf->rxmode_pending_bitmask)) {
1207                 /* Turn off pending enable command */
1208                 promisc_inactive(rxf->rxmode_pending,
1209                                 rxf->rxmode_pending_bitmask);
1210                 bna->promisc_rid = BFI_INVALID_RID;
1211         } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1212                 /* Schedule disable */
1213                 promisc_disable(rxf->rxmode_pending,
1214                                 rxf->rxmode_pending_bitmask);
1215                 ret = 1;
1216         }
1217
1218         return ret;
1219 }
1220
1221 static int
1222 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1223 {
1224         int ret = 0;
1225
1226         if (is_allmulti_enable(rxf->rxmode_pending,
1227                         rxf->rxmode_pending_bitmask) ||
1228                         (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1229                 /* Do nothing if pending enable or already enabled */
1230         } else if (is_allmulti_disable(rxf->rxmode_pending,
1231                                         rxf->rxmode_pending_bitmask)) {
1232                 /* Turn off pending disable command */
1233                 allmulti_inactive(rxf->rxmode_pending,
1234                         rxf->rxmode_pending_bitmask);
1235         } else {
1236                 /* Schedule enable */
1237                 allmulti_enable(rxf->rxmode_pending,
1238                                 rxf->rxmode_pending_bitmask);
1239                 ret = 1;
1240         }
1241
1242         return ret;
1243 }
1244
1245 static int
1246 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1247 {
1248         int ret = 0;
1249
1250         if (is_allmulti_disable(rxf->rxmode_pending,
1251                                 rxf->rxmode_pending_bitmask) ||
1252                 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1253                 /* Do nothing if pending disable or already disabled */
1254         } else if (is_allmulti_enable(rxf->rxmode_pending,
1255                                         rxf->rxmode_pending_bitmask)) {
1256                 /* Turn off pending enable command */
1257                 allmulti_inactive(rxf->rxmode_pending,
1258                                 rxf->rxmode_pending_bitmask);
1259         } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1260                 /* Schedule disable */
1261                 allmulti_disable(rxf->rxmode_pending,
1262                                 rxf->rxmode_pending_bitmask);
1263                 ret = 1;
1264         }
1265
1266         return ret;
1267 }
1268
1269 static int
1270 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1271 {
1272         if (rxf->vlan_strip_pending) {
1273                 rxf->vlan_strip_pending = false;
1274                 bna_bfi_vlan_strip_enable(rxf);
1275                 return 1;
1276         }
1277
1278         return 0;
1279 }
1280
1281 /* RX */
1282
1283 #define BNA_GET_RXQS(qcfg)      (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1284         (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1285
1286 #define SIZE_TO_PAGES(size)     (((size) >> PAGE_SHIFT) + ((((size) &\
1287         (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1288
1289 #define call_rx_stop_cbfn(rx)                                           \
1290 do {                                                                \
1291         if ((rx)->stop_cbfn) {                                          \
1292                 void (*cbfn)(void *, struct bna_rx *);    \
1293                 void *cbarg;                                        \
1294                 cbfn = (rx)->stop_cbfn;                          \
1295                 cbarg = (rx)->stop_cbarg;                              \
1296                 (rx)->stop_cbfn = NULL;                                 \
1297                 (rx)->stop_cbarg = NULL;                                \
1298                 cbfn(cbarg, rx);                                        \
1299         }                                                              \
1300 } while (0)
1301
1302 #define call_rx_stall_cbfn(rx)                                          \
1303 do {                                                                    \
1304         if ((rx)->rx_stall_cbfn)                                        \
1305                 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));             \
1306 } while (0)
1307
1308 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt)                        \
1309 do {                                                                    \
1310         struct bna_dma_addr cur_q_addr =                                \
1311                 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));      \
1312         (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;        \
1313         (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;        \
1314         (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;              \
1315         (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;              \
1316         (bfi_q)->pages = htons((u16)(bna_qpt)->page_count);     \
1317         (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1318 } while (0)
1319
1320 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1321 static void bna_rx_enet_stop(struct bna_rx *rx);
1322 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1323
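/*
 * Rx state machine: stopped -> start_wait -> rxf_start_wait -> started.
 * Stopping goes through rxf_stop_wait, stop_wait and cleanup_wait (waiting
 * for the driver's cleanup callback); start_stop_wait covers a stop that
 * arrives while the enet start is still in flight.  After a failure the Rx
 * sits in failed until cleanup completes, and quiesce_wait defers a restart
 * request until then.
 */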
1324 bfa_fsm_state_decl(bna_rx, stopped,
1325         struct bna_rx, enum bna_rx_event);
1326 bfa_fsm_state_decl(bna_rx, start_wait,
1327         struct bna_rx, enum bna_rx_event);
1328 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1329         struct bna_rx, enum bna_rx_event);
1330 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1331         struct bna_rx, enum bna_rx_event);
1332 bfa_fsm_state_decl(bna_rx, started,
1333         struct bna_rx, enum bna_rx_event);
1334 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1335         struct bna_rx, enum bna_rx_event);
1336 bfa_fsm_state_decl(bna_rx, stop_wait,
1337         struct bna_rx, enum bna_rx_event);
1338 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1339         struct bna_rx, enum bna_rx_event);
1340 bfa_fsm_state_decl(bna_rx, failed,
1341         struct bna_rx, enum bna_rx_event);
1342 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1343         struct bna_rx, enum bna_rx_event);
1344
1345 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1346 {
1347         call_rx_stop_cbfn(rx);
1348 }
1349
1350 static void bna_rx_sm_stopped(struct bna_rx *rx,
1351                                 enum bna_rx_event event)
1352 {
1353         switch (event) {
1354         case RX_E_START:
1355                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1356                 break;
1357
1358         case RX_E_STOP:
1359                 call_rx_stop_cbfn(rx);
1360                 break;
1361
1362         case RX_E_FAIL:
1363                 /* no-op */
1364                 break;
1365
1366         default:
1367                 bfa_sm_fault(event);
1368                 break;
1369         }
1370 }
1371
1372 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1373 {
1374         bna_bfi_rx_enet_start(rx);
1375 }
1376
1377 static void
1378 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1379 {
1380 }
1381
1382 static void
1383 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1384 {
1385         switch (event) {
1386         case RX_E_FAIL:
1387         case RX_E_STOPPED:
1388                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1389                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1390                 break;
1391
1392         case RX_E_STARTED:
1393                 bna_rx_enet_stop(rx);
1394                 break;
1395
1396         default:
1397                 bfa_sm_fault(event);
1398                 break;
1399         }
1400 }
1401
1402 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1403                                 enum bna_rx_event event)
1404 {
1405         switch (event) {
1406         case RX_E_STOP:
1407                 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1408                 break;
1409
1410         case RX_E_FAIL:
1411                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1412                 break;
1413
1414         case RX_E_STARTED:
1415                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1416                 break;
1417
1418         default:
1419                 bfa_sm_fault(event);
1420                 break;
1421         }
1422 }
1423
1424 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1425 {
1426         rx->rx_post_cbfn(rx->bna->bnad, rx);
1427         bna_rxf_start(&rx->rxf);
1428 }
1429
1430 static void
1431 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1432 {
1433 }
1434
1435 static void
1436 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1437 {
1438         switch (event) {
1439         case RX_E_FAIL:
1440                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1441                 bna_rxf_fail(&rx->rxf);
1442                 call_rx_stall_cbfn(rx);
1443                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1444                 break;
1445
1446         case RX_E_RXF_STARTED:
1447                 bna_rxf_stop(&rx->rxf);
1448                 break;
1449
1450         case RX_E_RXF_STOPPED:
1451                 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1452                 call_rx_stall_cbfn(rx);
1453                 bna_rx_enet_stop(rx);
1454                 break;
1455
1456         default:
1457                 bfa_sm_fault(event);
1458                 break;
1459         }
1460
1461 }
1462
1463 static void
1464 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1465 {
1466 }
1467
1468 static void
1469 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1470 {
1471         switch (event) {
1472         case RX_E_FAIL:
1473         case RX_E_STOPPED:
1474                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1475                 break;
1476
1477         case RX_E_STARTED:
1478                 bna_rx_enet_stop(rx);
1479                 break;
1480
1481         default:
1482                 bfa_sm_fault(event);
1483         }
1484 }
1485
1486 static void
1487 bna_rx_sm_started_entry(struct bna_rx *rx)
1488 {
1489         struct bna_rxp *rxp;
1490         struct list_head *qe_rxp;
1491         int is_regular = (rx->type == BNA_RX_T_REGULAR);
1492
1493         /* Start IB */
1494         list_for_each(qe_rxp, &rx->rxp_q) {
1495                 rxp = (struct bna_rxp *)qe_rxp;
1496                 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1497         }
1498
1499         bna_ethport_cb_rx_started(&rx->bna->ethport);
1500 }
1501
1502 static void
1503 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1504 {
1505         switch (event) {
1506         case RX_E_STOP:
1507                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1508                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1509                 bna_rxf_stop(&rx->rxf);
1510                 break;
1511
1512         case RX_E_FAIL:
1513                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1514                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1515                 bna_rxf_fail(&rx->rxf);
1516                 call_rx_stall_cbfn(rx);
1517                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1518                 break;
1519
1520         default:
1521                 bfa_sm_fault(event);
1522                 break;
1523         }
1524 }
1525
1526 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1527                                 enum bna_rx_event event)
1528 {
1529         switch (event) {
1530         case RX_E_STOP:
1531                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1532                 break;
1533
1534         case RX_E_FAIL:
1535                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1536                 bna_rxf_fail(&rx->rxf);
1537                 call_rx_stall_cbfn(rx);
1538                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1539                 break;
1540
1541         case RX_E_RXF_STARTED:
1542                 bfa_fsm_set_state(rx, bna_rx_sm_started);
1543                 break;
1544
1545         default:
1546                 bfa_sm_fault(event);
1547                 break;
1548         }
1549 }
1550
1551 static void
1552 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1553 {
1554 }
1555
1556 static void
1557 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1558 {
1559         switch (event) {
1560         case RX_E_FAIL:
1561         case RX_E_RXF_STOPPED:
1562                 /* No-op */
1563                 break;
1564
1565         case RX_E_CLEANUP_DONE:
1566                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1567                 break;
1568
1569         default:
1570                 bfa_sm_fault(event);
1571                 break;
1572         }
1573 }
1574
1575 static void
1576 bna_rx_sm_failed_entry(struct bna_rx *rx)
1577 {
1578 }
1579
1580 static void
1581 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1582 {
1583         switch (event) {
1584         case RX_E_START:
1585                 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1586                 break;
1587
1588         case RX_E_STOP:
1589                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1590                 break;
1591
1592         case RX_E_FAIL:
1593         case RX_E_RXF_STARTED:
1594         case RX_E_RXF_STOPPED:
1595                 /* No-op */
1596                 break;
1597
1598         case RX_E_CLEANUP_DONE:
1599                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1600                 break;
1601
1602         default:
1603                 bfa_sm_fault(event);
1604                 break;
1605         }
}
1606
1607 static void
1608 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1609 {
1610 }
1611
1612 static void
1613 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1614 {
1615         switch (event) {
1616         case RX_E_STOP:
1617                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1618                 break;
1619
1620         case RX_E_FAIL:
1621                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1622                 break;
1623
1624         case RX_E_CLEANUP_DONE:
1625                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1626                 break;
1627
1628         default:
1629                 bfa_sm_fault(event);
1630                 break;
1631         }
1632 }
1633
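/*
 * Build a BFI_ENET_H2I_RX_CFG_SET_REQ describing every RX path of this RX
 * object (RxQ/CQ page tables, buffer sizes, IB index address, interrupt
 * vector and coalescing parameters) and post it on the message queue.  The
 * firmware reply is handled by bna_bfi_rx_enet_start_rsp(), which raises
 * RX_E_STARTED on the RX state machine.
 */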
1634 static void
1635 bna_bfi_rx_enet_start(struct bna_rx *rx)
1636 {
1637         struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1638         struct bna_rxp *rxp = NULL;
1639         struct bna_rxq *q0 = NULL, *q1 = NULL;
1640         int i;
1641
1642         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1643                 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1644         cfg_req->mh.num_entries = htons(
1645                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1646
1647         cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1648         cfg_req->num_queue_sets = rx->num_paths;
1649         for (i = 0; i < rx->num_paths; i++) {
1650                 rxp = rxp ? list_next_entry(rxp, qe)
1651                         : list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
1652                 GET_RXQS(rxp, q0, q1);
1653                 switch (rxp->type) {
1654                 case BNA_RXP_SLR:
1655                 case BNA_RXP_HDS:
1656                         /* Small RxQ */
1657                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1658                                                 &q1->qpt);
1659                         cfg_req->q_cfg[i].qs.rx_buffer_size =
1660                                 htons((u16)q1->buffer_size);
1661                         /* Fall through */
1662
1663                 case BNA_RXP_SINGLE:
1664                         /* Large/Single RxQ */
1665                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1666                                                 &q0->qpt);
1667                         if (q0->multi_buffer)
1668                                 /* multi-buffer is enabled by allocating
1669                                  * a new rx with a new set of resources;
1670                                  * q0->buffer_size should be initialized
1671                                  * to the fragment size.
1672                                  */
1673                                 cfg_req->rx_cfg.multi_buffer =
1674                                         BNA_STATUS_T_ENABLED;
1675                         else
1676                                 q0->buffer_size =
1677                                         bna_enet_mtu_get(&rx->bna->enet);
1678                         cfg_req->q_cfg[i].ql.rx_buffer_size =
1679                                 htons((u16)q0->buffer_size);
1680                         break;
1681
1682                 default:
1683                         BUG_ON(1);
1684                 }
1685
1686                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1687                                         &rxp->cq.qpt);
1688
1689                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1690                         rxp->cq.ib.ib_seg_host_addr.lsb;
1691                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1692                         rxp->cq.ib.ib_seg_host_addr.msb;
1693                 cfg_req->q_cfg[i].ib.intr.msix_index =
1694                         htons((u16)rxp->cq.ib.intr_vector);
1695         }
1696
1697         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1698         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1699         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1700         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1701         cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1702                                 ? BNA_STATUS_T_ENABLED :
1703                                 BNA_STATUS_T_DISABLED;
1704         cfg_req->ib_cfg.coalescing_timeout =
1705                         htonl((u32)rxp->cq.ib.coalescing_timeo);
1706         cfg_req->ib_cfg.inter_pkt_timeout =
1707                         htonl((u32)rxp->cq.ib.interpkt_timeo);
1708         cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1709
1710         switch (rxp->type) {
1711         case BNA_RXP_SLR:
1712                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1713                 break;
1714
1715         case BNA_RXP_HDS:
1716                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1717                 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1718                 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1719                 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1720                 break;
1721
1722         case BNA_RXP_SINGLE:
1723                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1724                 break;
1725
1726         default:
1727                 BUG_ON(1);
1728         }
1729         cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1730
1731         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1732                 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1733         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1734 }
1735
1736 static void
1737 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1738 {
1739         struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1740
1741         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1742                 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1743         req->mh.num_entries = htons(
1744                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1745         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1746                 &req->mh);
1747         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1748 }
1749
1750 static void
1751 bna_rx_enet_stop(struct bna_rx *rx)
1752 {
1753         struct bna_rxp *rxp;
1754         struct list_head                 *qe_rxp;
1755
1756         /* Stop IB */
1757         list_for_each(qe_rxp, &rx->rxp_q) {
1758                 rxp = (struct bna_rxp *)qe_rxp;
1759                 bna_ib_stop(rx->bna, &rxp->cq.ib);
1760         }
1761
1762         bna_bfi_rx_enet_stop(rx);
1763 }
1764
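/*
 * Check whether the free pools can back this configuration: one RXP per
 * path, plus one RXQ per path for BNA_RXP_SINGLE, or two RXQs per path
 * (large plus small/header queue) for the SLR/HDS types.
 */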
1765 static int
1766 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1767 {
1768         if ((rx_mod->rx_free_count == 0) ||
1769                 (rx_mod->rxp_free_count == 0) ||
1770                 (rx_mod->rxq_free_count == 0))
1771                 return 0;
1772
1773         if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1774                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1775                         (rx_mod->rxq_free_count < rx_cfg->num_paths))
1776                                 return 0;
1777         } else {
1778                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1779                         (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1780                         return 0;
1781         }
1782
1783         return 1;
1784 }
1785
1786 static struct bna_rxq *
1787 bna_rxq_get(struct bna_rx_mod *rx_mod)
1788 {
1789         struct bna_rxq *rxq = NULL;
1790
1791         rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
1792         list_del(&rxq->qe);
1793         rx_mod->rxq_free_count--;
1794
1795         return rxq;
1796 }
1797
1798 static void
1799 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1800 {
1801         list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1802         rx_mod->rxq_free_count++;
1803 }
1804
1805 static struct bna_rxp *
1806 bna_rxp_get(struct bna_rx_mod *rx_mod)
1807 {
1808         struct bna_rxp *rxp = NULL;
1809
1810         rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
1811         list_del(&rxp->qe);
1812         rx_mod->rxp_free_count--;
1813
1814         return rxp;
1815 }
1816
1817 static void
1818 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1819 {
1820         list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1821         rx_mod->rxp_free_count++;
1822 }
1823
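/*
 * RX objects are handed out from opposite ends of rx_free_q: regular RXs
 * from the head (lowest rid), loopback RXs from the tail.  bna_rx_put()
 * re-inserts a freed RX behind the last entry with a smaller rid, so the
 * free list stays sorted by rid.
 */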
1824 static struct bna_rx *
1825 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1826 {
1827         struct bna_rx *rx = NULL;
1828
1829         BUG_ON(list_empty(&rx_mod->rx_free_q));
1830         if (type == BNA_RX_T_REGULAR)
1831                 rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
1832         else
1833                 rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
1834
1835         rx_mod->rx_free_count--;
1836         list_move_tail(&rx->qe, &rx_mod->rx_active_q);
1837         rx->type = type;
1838
1839         return rx;
1840 }
1841
1842 static void
1843 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1844 {
1845         struct list_head *qe;
1846
1847         list_for_each_prev(qe, &rx_mod->rx_free_q)
1848                 if (((struct bna_rx *)qe)->rid < rx->rid)
1849                         break;
1850
1851         list_add(&rx->qe, qe);
1852         rx_mod->rx_free_count++;
1853 }
1854
1855 static void
1856 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1857                 struct bna_rxq *q1)
1858 {
1859         switch (rxp->type) {
1860         case BNA_RXP_SINGLE:
1861                 rxp->rxq.single.only = q0;
1862                 rxp->rxq.single.reserved = NULL;
1863                 break;
1864         case BNA_RXP_SLR:
1865                 rxp->rxq.slr.large = q0;
1866                 rxp->rxq.slr.small = q1;
1867                 break;
1868         case BNA_RXP_HDS:
1869                 rxp->rxq.hds.data = q0;
1870                 rxp->rxq.hds.hdr = q1;
1871                 break;
1872         default:
1873                 break;
1874         }
1875 }
1876
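/*
 * Populate the RxQ queue page table: the table at qpt.kv_qpt_ptr holds the
 * DMA address of each PAGE_SIZE page of the queue (reached by hardware via
 * hw_qpt_ptr), while rcb->sw_qpt keeps the matching kernel virtual
 * addresses for the driver.  bna_rxp_cqpt_setup() below does the same for
 * the completion queue.
 */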
1877 static void
1878 bna_rxq_qpt_setup(struct bna_rxq *rxq,
1879                 struct bna_rxp *rxp,
1880                 u32 page_count,
1881                 u32 page_size,
1882                 struct bna_mem_descr *qpt_mem,
1883                 struct bna_mem_descr *swqpt_mem,
1884                 struct bna_mem_descr *page_mem)
1885 {
1886         u8 *kva;
1887         u64 dma;
1888         struct bna_dma_addr bna_dma;
1889         int     i;
1890
1891         rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1892         rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1893         rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1894         rxq->qpt.page_count = page_count;
1895         rxq->qpt.page_size = page_size;
1896
1897         rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1898         rxq->rcb->sw_q = page_mem->kva;
1899
1900         kva = page_mem->kva;
1901         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1902
1903         for (i = 0; i < rxq->qpt.page_count; i++) {
1904                 rxq->rcb->sw_qpt[i] = kva;
1905                 kva += PAGE_SIZE;
1906
1907                 BNA_SET_DMA_ADDR(dma, &bna_dma);
1908                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1909                         bna_dma.lsb;
1910                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1911                         bna_dma.msb;
1912                 dma += PAGE_SIZE;
1913         }
1914 }
1915
1916 static void
1917 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1918                 u32 page_count,
1919                 u32 page_size,
1920                 struct bna_mem_descr *qpt_mem,
1921                 struct bna_mem_descr *swqpt_mem,
1922                 struct bna_mem_descr *page_mem)
1923 {
1924         u8 *kva;
1925         u64 dma;
1926         struct bna_dma_addr bna_dma;
1927         int     i;
1928
1929         rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1930         rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1931         rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
1932         rxp->cq.qpt.page_count = page_count;
1933         rxp->cq.qpt.page_size = page_size;
1934
1935         rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
1936         rxp->cq.ccb->sw_q = page_mem->kva;
1937
1938         kva = page_mem->kva;
1939         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1940
1941         for (i = 0; i < rxp->cq.qpt.page_count; i++) {
1942                 rxp->cq.ccb->sw_qpt[i] = kva;
1943                 kva += PAGE_SIZE;
1944
1945                 BNA_SET_DMA_ADDR(dma, &bna_dma);
1946                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
1947                         bna_dma.lsb;
1948                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
1949                         bna_dma.msb;
1950                 dma += PAGE_SIZE;
1951         }
1952 }
1953
1954 static void
1955 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
1956 {
1957         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1958
1959         bfa_wc_down(&rx_mod->rx_stop_wc);
1960 }
1961
1962 static void
1963 bna_rx_mod_cb_rx_stopped_all(void *arg)
1964 {
1965         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1966
1967         if (rx_mod->stop_cbfn)
1968                 rx_mod->stop_cbfn(&rx_mod->bna->enet);
1969         rx_mod->stop_cbfn = NULL;
1970 }
1971
1972 static void
1973 bna_rx_start(struct bna_rx *rx)
1974 {
1975         rx->rx_flags |= BNA_RX_F_ENET_STARTED;
1976         if (rx->rx_flags & BNA_RX_F_ENABLED)
1977                 bfa_fsm_send_event(rx, RX_E_START);
1978 }
1979
1980 static void
1981 bna_rx_stop(struct bna_rx *rx)
1982 {
1983         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
1984         if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
1985                 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
1986         else {
1987                 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
1988                 rx->stop_cbarg = &rx->bna->rx_mod;
1989                 bfa_fsm_send_event(rx, RX_E_STOP);
1990         }
1991 }
1992
1993 static void
1994 bna_rx_fail(struct bna_rx *rx)
1995 {
1996         /* Indicate that Enet is no longer started and has failed */
1997         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
1998         bfa_fsm_send_event(rx, RX_E_FAIL);
1999 }
2000
2001 void
2002 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2003 {
2004         struct bna_rx *rx;
2005         struct list_head *qe;
2006
2007         rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2008         if (type == BNA_RX_T_LOOPBACK)
2009                 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2010
2011         list_for_each(qe, &rx_mod->rx_active_q) {
2012                 rx = (struct bna_rx *)qe;
2013                 if (rx->type == type)
2014                         bna_rx_start(rx);
2015         }
2016 }
2017
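/*
 * Stop every RX of the given type using the bfa wait-counter: bfa_wc_up()
 * is taken once per matching RX, each RX releases it again through
 * bna_rx_mod_cb_rx_stopped() once it has stopped, and bfa_wc_wait()
 * completes the handshake so that bna_rx_mod_cb_rx_stopped_all() (and with
 * it the registered stop_cbfn) runs exactly once, even when no RX of this
 * type is currently active.
 */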
2018 void
2019 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2020 {
2021         struct bna_rx *rx;
2022         struct list_head *qe;
2023
2024         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2025         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2026
2027         rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2028
2029         bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2030
2031         list_for_each(qe, &rx_mod->rx_active_q) {
2032                 rx = (struct bna_rx *)qe;
2033                 if (rx->type == type) {
2034                         bfa_wc_up(&rx_mod->rx_stop_wc);
2035                         bna_rx_stop(rx);
2036                 }
2037         }
2038
2039         bfa_wc_wait(&rx_mod->rx_stop_wc);
2040 }
2041
2042 void
2043 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2044 {
2045         struct bna_rx *rx;
2046         struct list_head *qe;
2047
2048         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2049         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2050
2051         list_for_each(qe, &rx_mod->rx_active_q) {
2052                 rx = (struct bna_rx *)qe;
2053                 bna_rx_fail(rx);
2054         }
2055 }
2056
2057 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2058                         struct bna_res_info *res_info)
2059 {
2060         int     index;
2061         struct bna_rx *rx_ptr;
2062         struct bna_rxp *rxp_ptr;
2063         struct bna_rxq *rxq_ptr;
2064
2065         rx_mod->bna = bna;
2066         rx_mod->flags = 0;
2067
2068         rx_mod->rx = (struct bna_rx *)
2069                 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2070         rx_mod->rxp = (struct bna_rxp *)
2071                 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2072         rx_mod->rxq = (struct bna_rxq *)
2073                 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2074
2075         /* Initialize the queues */
2076         INIT_LIST_HEAD(&rx_mod->rx_free_q);
2077         rx_mod->rx_free_count = 0;
2078         INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2079         rx_mod->rxq_free_count = 0;
2080         INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2081         rx_mod->rxp_free_count = 0;
2082         INIT_LIST_HEAD(&rx_mod->rx_active_q);
2083
2084         /* Build RX queues */
2085         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2086                 rx_ptr = &rx_mod->rx[index];
2087
2088                 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2089                 rx_ptr->bna = NULL;
2090                 rx_ptr->rid = index;
2091                 rx_ptr->stop_cbfn = NULL;
2092                 rx_ptr->stop_cbarg = NULL;
2093
2094                 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2095                 rx_mod->rx_free_count++;
2096         }
2097
2098         /* build RX-path queue */
2099         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2100                 rxp_ptr = &rx_mod->rxp[index];
2101                 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2102                 rx_mod->rxp_free_count++;
2103         }
2104
2105         /* build RXQ queue */
2106         for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2107                 rxq_ptr = &rx_mod->rxq[index];
2108                 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2109                 rx_mod->rxq_free_count++;
2110         }
2111 }
2112
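/*
 * Note: the list walks below only count the entries left on the free
 * queues and the counts are never checked, so they appear to be leftovers
 * of earlier sanity checks.  The only real work here is detaching the
 * module from its bna instance.
 */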
2113 void
2114 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2115 {
2116         struct list_head                *qe;
2117         int i;
2118
2119         i = 0;
2120         list_for_each(qe, &rx_mod->rx_free_q)
2121                 i++;
2122
2123         i = 0;
2124         list_for_each(qe, &rx_mod->rxp_free_q)
2125                 i++;
2126
2127         i = 0;
2128         list_for_each(qe, &rx_mod->rxq_free_q)
2129                 i++;
2130
2131         rx_mod->bna = NULL;
2132 }
2133
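/*
 * Handle the firmware reply to the RX config request: record the hardware
 * queue ids, turn the per-queue-set doorbell offsets into kernel virtual
 * addresses relative to the PCI BAR, reset the producer/consumer indexes
 * and kick the state machine with RX_E_STARTED.
 */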
2134 void
2135 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2136 {
2137         struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2138         struct bna_rxp *rxp = NULL;
2139         struct bna_rxq *q0 = NULL, *q1 = NULL;
2140         int i;
2141
2142         bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2143                 sizeof(struct bfi_enet_rx_cfg_rsp));
2144
2145         rx->hw_id = cfg_rsp->hw_id;
2146
2147         for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
2148              i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
2149                 GET_RXQS(rxp, q0, q1);
2150
2151                 /* Setup doorbells */
2152                 rxp->cq.ccb->i_dbell->doorbell_addr =
2153                         rx->bna->pcidev.pci_bar_kva
2154                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
2155                 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2156                 q0->rcb->q_dbell =
2157                         rx->bna->pcidev.pci_bar_kva
2158                         + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2159                 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2160                 if (q1) {
2161                         q1->rcb->q_dbell =
2162                         rx->bna->pcidev.pci_bar_kva
2163                         + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2164                         q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2165                 }
2166
2167                 /* Initialize producer/consumer indexes */
2168                 (*rxp->cq.ccb->hw_producer_index) = 0;
2169                 rxp->cq.ccb->producer_index = 0;
2170                 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2171                 if (q1)
2172                         q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2173         }
2174
2175         bfa_fsm_send_event(rx, RX_E_STARTED);
2176 }
2177
2178 void
2179 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2180 {
2181         bfa_fsm_send_event(rx, RX_E_STOPPED);
2182 }
2183
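/*
 * Translate the RX configuration into memory and interrupt resource
 * requests.  Queue depths are rounded up to a power of two, the work-item
 * arrays are padded to whole pages, and the resulting page counts size
 * both the DMA page tables and the shadow (KVA) page tables; header-queue
 * resources are requested only for non-SINGLE RXP types.
 */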
2184 void
2185 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2186 {
2187         u32 cq_size, hq_size, dq_size;
2188         u32 cpage_count, hpage_count, dpage_count;
2189         struct bna_mem_info *mem_info;
2190         u32 cq_depth;
2191         u32 hq_depth;
2192         u32 dq_depth;
2193
2194         dq_depth = q_cfg->q0_depth;
2195         hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2196         cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
2197
2198         cq_size = cq_depth * BFI_CQ_WI_SIZE;
2199         cq_size = ALIGN(cq_size, PAGE_SIZE);
2200         cpage_count = SIZE_TO_PAGES(cq_size);
2201
2202         dq_depth = roundup_pow_of_two(dq_depth);
2203         dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2204         dq_size = ALIGN(dq_size, PAGE_SIZE);
2205         dpage_count = SIZE_TO_PAGES(dq_size);
2206
2207         if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2208                 hq_depth = roundup_pow_of_two(hq_depth);
2209                 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2210                 hq_size = ALIGN(hq_size, PAGE_SIZE);
2211                 hpage_count = SIZE_TO_PAGES(hq_size);
2212         } else
2213                 hpage_count = 0;
2214
2215         res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2216         mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2217         mem_info->mem_type = BNA_MEM_T_KVA;
2218         mem_info->len = sizeof(struct bna_ccb);
2219         mem_info->num = q_cfg->num_paths;
2220
2221         res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2222         mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2223         mem_info->mem_type = BNA_MEM_T_KVA;
2224         mem_info->len = sizeof(struct bna_rcb);
2225         mem_info->num = BNA_GET_RXQS(q_cfg);
2226
2227         res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2228         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2229         mem_info->mem_type = BNA_MEM_T_DMA;
2230         mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2231         mem_info->num = q_cfg->num_paths;
2232
2233         res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2234         mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2235         mem_info->mem_type = BNA_MEM_T_KVA;
2236         mem_info->len = cpage_count * sizeof(void *);
2237         mem_info->num = q_cfg->num_paths;
2238
2239         res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2240         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2241         mem_info->mem_type = BNA_MEM_T_DMA;
2242         mem_info->len = PAGE_SIZE * cpage_count;
2243         mem_info->num = q_cfg->num_paths;
2244
2245         res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2246         mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2247         mem_info->mem_type = BNA_MEM_T_DMA;
2248         mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2249         mem_info->num = q_cfg->num_paths;
2250
2251         res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2252         mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2253         mem_info->mem_type = BNA_MEM_T_KVA;
2254         mem_info->len = dpage_count * sizeof(void *);
2255         mem_info->num = q_cfg->num_paths;
2256
2257         res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2258         mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2259         mem_info->mem_type = BNA_MEM_T_DMA;
2260         mem_info->len = PAGE_SIZE * dpage_count;
2261         mem_info->num = q_cfg->num_paths;
2262
2263         res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2264         mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2265         mem_info->mem_type = BNA_MEM_T_DMA;
2266         mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2267         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2268
2269         res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2270         mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2271         mem_info->mem_type = BNA_MEM_T_KVA;
2272         mem_info->len = hpage_count * sizeof(void *);
2273         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2274
2275         res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2276         mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2277         mem_info->mem_type = BNA_MEM_T_DMA;
2278         mem_info->len = PAGE_SIZE * hpage_count;
2279         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2280
2281         res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2282         mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2283         mem_info->mem_type = BNA_MEM_T_DMA;
2284         mem_info->len = BFI_IBIDX_SIZE;
2285         mem_info->num = q_cfg->num_paths;
2286
2287         res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2288         mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2289         mem_info->mem_type = BNA_MEM_T_KVA;
2290         mem_info->len = BFI_ENET_RSS_RIT_MAX;
2291         mem_info->num = 1;
2292
2293         res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2294         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2295         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2296 }
2297
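/*
 * Assemble an RX object from the pre-allocated pools and the resources in
 * res_info: one RXP (with its CQ and IB) per path, one or two RXQs per
 * path depending on rxp_type, RCB/CCB control blocks carved out of the KVA
 * memory descriptors, and queue page tables built from the DMA pages.  The
 * RXF is initialized last and the state machine starts out stopped.
 */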
2298 struct bna_rx *
2299 bna_rx_create(struct bna *bna, struct bnad *bnad,
2300                 struct bna_rx_config *rx_cfg,
2301                 const struct bna_rx_event_cbfn *rx_cbfn,
2302                 struct bna_res_info *res_info,
2303                 void *priv)
2304 {
2305         struct bna_rx_mod *rx_mod = &bna->rx_mod;
2306         struct bna_rx *rx;
2307         struct bna_rxp *rxp;
2308         struct bna_rxq *q0;
2309         struct bna_rxq *q1;
2310         struct bna_intr_info *intr_info;
2311         struct bna_mem_descr *hqunmap_mem;
2312         struct bna_mem_descr *dqunmap_mem;
2313         struct bna_mem_descr *ccb_mem;
2314         struct bna_mem_descr *rcb_mem;
2315         struct bna_mem_descr *cqpt_mem;
2316         struct bna_mem_descr *cswqpt_mem;
2317         struct bna_mem_descr *cpage_mem;
2318         struct bna_mem_descr *hqpt_mem;
2319         struct bna_mem_descr *dqpt_mem;
2320         struct bna_mem_descr *hsqpt_mem;
2321         struct bna_mem_descr *dsqpt_mem;
2322         struct bna_mem_descr *hpage_mem;
2323         struct bna_mem_descr *dpage_mem;
2324         u32 dpage_count, hpage_count;
2325         u32 hq_idx, dq_idx, rcb_idx;
2326         u32 cq_depth, i;
2327         u32 page_count;
2328
2329         if (!bna_rx_res_check(rx_mod, rx_cfg))
2330                 return NULL;
2331
2332         intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2333         ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2334         rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2335         dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2336         hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2337         cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2338         cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2339         cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2340         hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2341         dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2342         hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2343         dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2344         hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2345         dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2346
2347         page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2348                         PAGE_SIZE;
2349
2350         dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2351                         PAGE_SIZE;
2352
2353         hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2354                         PAGE_SIZE;
2355
2356         rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2357         rx->bna = bna;
2358         rx->rx_flags = 0;
2359         INIT_LIST_HEAD(&rx->rxp_q);
2360         rx->stop_cbfn = NULL;
2361         rx->stop_cbarg = NULL;
2362         rx->priv = priv;
2363
2364         rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2365         rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2366         rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2367         rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2368         rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2369         /* Following callbacks are mandatory */
2370         rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2371         rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2372
2373         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2374                 switch (rx->type) {
2375                 case BNA_RX_T_REGULAR:
2376                         if (!(rx->bna->rx_mod.flags &
2377                                 BNA_RX_MOD_F_ENET_LOOPBACK))
2378                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2379                         break;
2380                 case BNA_RX_T_LOOPBACK:
2381                         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2382                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2383                         break;
2384                 }
2385         }
2386
2387         rx->num_paths = rx_cfg->num_paths;
2388         for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2389                         i < rx->num_paths; i++) {
2390                 rxp = bna_rxp_get(rx_mod);
2391                 list_add_tail(&rxp->qe, &rx->rxp_q);
2392                 rxp->type = rx_cfg->rxp_type;
2393                 rxp->rx = rx;
2394                 rxp->cq.rx = rx;
2395
2396                 q0 = bna_rxq_get(rx_mod);
2397                 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2398                         q1 = NULL;
2399                 else
2400                         q1 = bna_rxq_get(rx_mod);
2401
2402                 if (1 == intr_info->num)
2403                         rxp->vector = intr_info->idl[0].vector;
2404                 else
2405                         rxp->vector = intr_info->idl[i].vector;
2406
2407                 /* Setup IB */
2408
2409                 rxp->cq.ib.ib_seg_host_addr.lsb =
2410                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2411                 rxp->cq.ib.ib_seg_host_addr.msb =
2412                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2413                 rxp->cq.ib.ib_seg_host_addr_kva =
2414                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2415                 rxp->cq.ib.intr_type = intr_info->intr_type;
2416                 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2417                         rxp->cq.ib.intr_vector = rxp->vector;
2418                 else
2419                         rxp->cq.ib.intr_vector = BIT(rxp->vector);
2420                 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2421                 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2422                 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2423
2424                 bna_rxp_add_rxqs(rxp, q0, q1);
2425
2426                 /* Setup large Q */
2427
2428                 q0->rx = rx;
2429                 q0->rxp = rxp;
2430
2431                 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2432                 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2433                 rcb_idx++; dq_idx++;
2434                 q0->rcb->q_depth = rx_cfg->q0_depth;
2435                 q0->q_depth = rx_cfg->q0_depth;
2436                 q0->multi_buffer = rx_cfg->q0_multi_buf;
2437                 q0->buffer_size = rx_cfg->q0_buf_size;
2438                 q0->num_vecs = rx_cfg->q0_num_vecs;
2439                 q0->rcb->rxq = q0;
2440                 q0->rcb->bnad = bna->bnad;
2441                 q0->rcb->id = 0;
2442                 q0->rx_packets = q0->rx_bytes = 0;
2443                 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2444
2445                 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2446                         &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2447
2448                 if (rx->rcb_setup_cbfn)
2449                         rx->rcb_setup_cbfn(bnad, q0->rcb);
2450
2451                 /* Setup small Q */
2452
2453                 if (q1) {
2454                         q1->rx = rx;
2455                         q1->rxp = rxp;
2456
2457                         q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2458                         q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2459                         rcb_idx++; hq_idx++;
2460                         q1->rcb->q_depth = rx_cfg->q1_depth;
2461                         q1->q_depth = rx_cfg->q1_depth;
2462                         q1->multi_buffer = BNA_STATUS_T_DISABLED;
2463                         q1->num_vecs = 1;
2464                         q1->rcb->rxq = q1;
2465                         q1->rcb->bnad = bna->bnad;
2466                         q1->rcb->id = 1;
2467                         q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2468                                         rx_cfg->hds_config.forced_offset
2469                                         : rx_cfg->q1_buf_size;
2470                         q1->rx_packets = q1->rx_bytes = 0;
2471                         q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2472
2473                         bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2474                                 &hqpt_mem[i], &hsqpt_mem[i],
2475                                 &hpage_mem[i]);
2476
2477                         if (rx->rcb_setup_cbfn)
2478                                 rx->rcb_setup_cbfn(bnad, q1->rcb);
2479                 }
2480
2481                 /* Setup CQ */
2482
2483                 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2484                 cq_depth = rx_cfg->q0_depth +
2485                         ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2486                          0 : rx_cfg->q1_depth);
2487                 /* if multi-buffer is enabled, the sum of q0_depth
2488                  * and q1_depth need not be a power of 2
2489                  */
2490                 cq_depth = roundup_pow_of_two(cq_depth);
2491                 rxp->cq.ccb->q_depth = cq_depth;
2492                 rxp->cq.ccb->cq = &rxp->cq;
2493                 rxp->cq.ccb->rcb[0] = q0->rcb;
2494                 q0->rcb->ccb = rxp->cq.ccb;
2495                 if (q1) {
2496                         rxp->cq.ccb->rcb[1] = q1->rcb;
2497                         q1->rcb->ccb = rxp->cq.ccb;
2498                 }
2499                 rxp->cq.ccb->hw_producer_index =
2500                         (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2501                 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2502                 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2503                 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2504                 rxp->cq.ccb->rx_coalescing_timeo =
2505                         rxp->cq.ib.coalescing_timeo;
2506                 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2507                 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2508                 rxp->cq.ccb->bnad = bna->bnad;
2509                 rxp->cq.ccb->id = i;
2510
2511                 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2512                         &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2513
2514                 if (rx->ccb_setup_cbfn)
2515                         rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2516         }
2517
2518         rx->hds_cfg = rx_cfg->hds_config;
2519
2520         bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2521
2522         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2523
2524         rx_mod->rid_mask |= BIT(rx->rid);
2525
2526         return rx;
2527 }
2528
2529 void
2530 bna_rx_destroy(struct bna_rx *rx)
2531 {
2532         struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2533         struct bna_rxq *q0 = NULL;
2534         struct bna_rxq *q1 = NULL;
2535         struct bna_rxp *rxp;
2536         struct list_head *qe;
2537
2538         bna_rxf_uninit(&rx->rxf);
2539
2540         while (!list_empty(&rx->rxp_q)) {
2541                 rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
2542                 list_del(&rxp->qe);
2543                 GET_RXQS(rxp, q0, q1);
2544                 if (rx->rcb_destroy_cbfn)
2545                         rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2546                 q0->rcb = NULL;
2547                 q0->rxp = NULL;
2548                 q0->rx = NULL;
2549                 bna_rxq_put(rx_mod, q0);
2550
2551                 if (q1) {
2552                         if (rx->rcb_destroy_cbfn)
2553                                 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2554                         q1->rcb = NULL;
2555                         q1->rxp = NULL;
2556                         q1->rx = NULL;
2557                         bna_rxq_put(rx_mod, q1);
2558                 }
2559                 rxp->rxq.slr.large = NULL;
2560                 rxp->rxq.slr.small = NULL;
2561
2562                 if (rx->ccb_destroy_cbfn)
2563                         rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2564                 rxp->cq.ccb = NULL;
2565                 rxp->rx = NULL;
2566                 bna_rxp_put(rx_mod, rxp);
2567         }
2568
2569         list_for_each(qe, &rx_mod->rx_active_q)
2570                 if (qe == &rx->qe) {
2571                         list_del(&rx->qe);
2572                         break;
2573                 }
2574
2575         rx_mod->rid_mask &= ~BIT(rx->rid);
2576
2577         rx->bna = NULL;
2578         rx->priv = NULL;
2579         bna_rx_put(rx_mod, rx);
2580 }
2581
2582 void
2583 bna_rx_enable(struct bna_rx *rx)
2584 {
2585         if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2586                 return;
2587
2588         rx->rx_flags |= BNA_RX_F_ENABLED;
2589         if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2590                 bfa_fsm_send_event(rx, RX_E_START);
2591 }
2592
2593 void
2594 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2595                 void (*cbfn)(void *, struct bna_rx *))
2596 {
2597         if (type == BNA_SOFT_CLEANUP) {
2598                 /* h/w should not be accessed. Treat as if we're already stopped */
2599                 (*cbfn)(rx->bna->bnad, rx);
2600         } else {
2601                 rx->stop_cbfn = cbfn;
2602                 rx->stop_cbarg = rx->bna->bnad;
2603
2604                 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2605
2606                 bfa_fsm_send_event(rx, RX_E_STOP);
2607         }
2608 }
2609
2610 void
2611 bna_rx_cleanup_complete(struct bna_rx *rx)
2612 {
2613         bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2614 }
2615
2616 void
2617 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2618 {
2619         struct bna_rxf *rxf = &rx->rxf;
2620
2621         if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2622                 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2623                 rxf->vlan_strip_pending = true;
2624                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2625         }
2626 }
2627
2628 void
2629 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2630 {
2631         struct bna_rxf *rxf = &rx->rxf;
2632
2633         if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2634                 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2635                 rxf->vlan_strip_pending = true;
2636                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2637         }
2638 }
2639
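/*
 * Apply a receive-mode change.  Promiscuous and default modes are mutually
 * exclusive and each may be owned by at most one RX in the system, so the
 * request is validated against bna->promisc_rid and bna->default_mode_rid
 * before anything is changed.  RXF_E_CONFIG is sent only when one of the
 * enable/disable helpers reports that driver state actually changed.
 */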
2640 enum bna_cb_status
2641 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2642                 enum bna_rxmode bitmask)
2643 {
2644         struct bna_rxf *rxf = &rx->rxf;
2645         int need_hw_config = 0;
2646
2647         /* Error checks */
2648
2649         if (is_promisc_enable(new_mode, bitmask)) {
2650                 /* If promisc mode is already enabled elsewhere in the system */
2651                 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2652                         (rx->bna->promisc_rid != rxf->rx->rid))
2653                         goto err_return;
2654
2655                 /* If default mode is already enabled in the system */
2656                 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2657                         goto err_return;
2658
2659                 /* Trying to enable promiscuous and default mode together */
2660                 if (is_default_enable(new_mode, bitmask))
2661                         goto err_return;
2662         }
2663
2664         if (is_default_enable(new_mode, bitmask)) {
2665                 /* If default mode is already enabled elsewhere in the system */
2666                 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2667                         (rx->bna->default_mode_rid != rxf->rx->rid)) {
2668                                 goto err_return;
2669                 }
2670
2671                 /* If promiscuous mode is already enabled in the system */
2672                 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2673                         goto err_return;
2674         }
2675
2676         /* Process the commands */
2677
2678         if (is_promisc_enable(new_mode, bitmask)) {
2679                 if (bna_rxf_promisc_enable(rxf))
2680                         need_hw_config = 1;
2681         } else if (is_promisc_disable(new_mode, bitmask)) {
2682                 if (bna_rxf_promisc_disable(rxf))
2683                         need_hw_config = 1;
2684         }
2685
2686         if (is_allmulti_enable(new_mode, bitmask)) {
2687                 if (bna_rxf_allmulti_enable(rxf))
2688                         need_hw_config = 1;
2689         } else if (is_allmulti_disable(new_mode, bitmask)) {
2690                 if (bna_rxf_allmulti_disable(rxf))
2691                         need_hw_config = 1;
2692         }
2693
2694         /* Trigger h/w if needed */
2695
2696         if (need_hw_config) {
2697                 rxf->cam_fltr_cbfn = NULL;
2698                 rxf->cam_fltr_cbarg = rx->bna->bnad;
2699                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2700         }
2701
2702         return BNA_CB_SUCCESS;
2703
2704 err_return:
2705         return BNA_CB_FAIL;
2706 }
2707
2708 void
2709 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2710 {
2711         struct bna_rxf *rxf = &rx->rxf;
2712
2713         if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2714                 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2715                 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2716                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2717         }
2718 }
2719
2720 void
2721 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2722 {
2723         struct bna_rxp *rxp;
2724         struct list_head *qe;
2725
2726         list_for_each(qe, &rx->rxp_q) {
2727                 rxp = (struct bna_rxp *)qe;
2728                 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2729                 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2730         }
2731 }
2732
2733 void
2734 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2735 {
2736         int i, j;
2737
2738         for (i = 0; i < BNA_LOAD_T_MAX; i++)
2739                 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2740                         bna->rx_mod.dim_vector[i][j] = vector[i][j];
2741 }
2742
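/*
 * Dynamic interrupt moderation: classify the packet rate seen on this CCB
 * since the last call into one of the BNA_LOAD_T_* buckets, pick bias 0
 * when small packets dominate (small_rt > 2 * large_rt) and bias 1
 * otherwise, then program the coalescing timeout found in
 * rx_mod.dim_vector[load][bias] into the IB.
 */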
2743 void
2744 bna_rx_dim_update(struct bna_ccb *ccb)
2745 {
2746         struct bna *bna = ccb->cq->rx->bna;
2747         u32 load, bias;
2748         u32 pkt_rt, small_rt, large_rt;
2749         u8 coalescing_timeo;
2750
2751         if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2752                 (ccb->pkt_rate.large_pkt_cnt == 0))
2753                 return;
2754
2755         /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2756
2757         small_rt = ccb->pkt_rate.small_pkt_cnt;
2758         large_rt = ccb->pkt_rate.large_pkt_cnt;
2759
2760         pkt_rt = small_rt + large_rt;
2761
2762         if (pkt_rt < BNA_PKT_RATE_10K)
2763                 load = BNA_LOAD_T_LOW_4;
2764         else if (pkt_rt < BNA_PKT_RATE_20K)
2765                 load = BNA_LOAD_T_LOW_3;
2766         else if (pkt_rt < BNA_PKT_RATE_30K)
2767                 load = BNA_LOAD_T_LOW_2;
2768         else if (pkt_rt < BNA_PKT_RATE_40K)
2769                 load = BNA_LOAD_T_LOW_1;
2770         else if (pkt_rt < BNA_PKT_RATE_50K)
2771                 load = BNA_LOAD_T_HIGH_1;
2772         else if (pkt_rt < BNA_PKT_RATE_60K)
2773                 load = BNA_LOAD_T_HIGH_2;
2774         else if (pkt_rt < BNA_PKT_RATE_80K)
2775                 load = BNA_LOAD_T_HIGH_3;
2776         else
2777                 load = BNA_LOAD_T_HIGH_4;
2778
2779         if (small_rt > (large_rt << 1))
2780                 bias = 0;
2781         else
2782                 bias = 1;
2783
2784         ccb->pkt_rate.small_pkt_cnt = 0;
2785         ccb->pkt_rate.large_pkt_cnt = 0;
2786
2787         coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2788         ccb->rx_coalescing_timeo = coalescing_timeo;
2789
2790         /* Set it to IB */
2791         bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2792 }
2793
2794 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2795         {12, 12},
2796         {6, 10},
2797         {5, 10},
2798         {4, 8},
2799         {3, 6},
2800         {3, 6},
2801         {2, 4},
2802         {1, 2},
2803 };
2804
2805 /* TX */
2806
2807 #define call_tx_stop_cbfn(tx)                                           \
2808 do {                                                                    \
2809         if ((tx)->stop_cbfn) {                                          \
2810                 void (*cbfn)(void *, struct bna_tx *);          \
2811                 void *cbarg;                                            \
2812                 cbfn = (tx)->stop_cbfn;                                 \
2813                 cbarg = (tx)->stop_cbarg;                               \
2814                 (tx)->stop_cbfn = NULL;                                 \
2815                 (tx)->stop_cbarg = NULL;                                \
2816                 cbfn(cbarg, (tx));                                      \
2817         }                                                               \
2818 } while (0)
2819
2820 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2821 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2822 static void bna_tx_enet_stop(struct bna_tx *tx);
2823
2824 enum bna_tx_event {
2825         TX_E_START                      = 1,
2826         TX_E_STOP                       = 2,
2827         TX_E_FAIL                       = 3,
2828         TX_E_STARTED                    = 4,
2829         TX_E_STOPPED                    = 5,
2830         TX_E_CLEANUP_DONE               = 7,
2831         TX_E_BW_UPDATE                  = 8,
2832 };
2833
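/*
 * TX state machine.  The normal lifecycle is stopped -> start_wait ->
 * started -> stop_wait -> cleanup_wait -> stopped; TX_E_BW_UPDATE detours
 * through prio_stop_wait/prio_cleanup_wait and restarts the TX, while a
 * TX_E_FAIL parks it in the failed state until it is either stopped or
 * restarted via quiesce_wait.  bfa_fsm_state_decl() declares the per-state
 * entry and event-handler functions defined below.
 */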
2834 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2835 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2836 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2837 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2838 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2839                         enum bna_tx_event);
2840 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2841                         enum bna_tx_event);
2842 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2843                         enum bna_tx_event);
2844 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2845 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2846                         enum bna_tx_event);
2847
2848 static void
2849 bna_tx_sm_stopped_entry(struct bna_tx *tx)
2850 {
2851         call_tx_stop_cbfn(tx);
2852 }
2853
2854 static void
2855 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2856 {
2857         switch (event) {
2858         case TX_E_START:
2859                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2860                 break;
2861
2862         case TX_E_STOP:
2863                 call_tx_stop_cbfn(tx);
2864                 break;
2865
2866         case TX_E_FAIL:
2867                 /* No-op */
2868                 break;
2869
2870         case TX_E_BW_UPDATE:
2871                 /* No-op */
2872                 break;
2873
2874         default:
2875                 bfa_sm_fault(event);
2876         }
2877 }
2878
2879 static void
2880 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2881 {
2882         bna_bfi_tx_enet_start(tx);
2883 }
2884
2885 static void
2886 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2887 {
2888         switch (event) {
2889         case TX_E_STOP:
2890                 tx->flags &= ~BNA_TX_F_BW_UPDATED;
2891                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2892                 break;
2893
2894         case TX_E_FAIL:
2895                 tx->flags &= ~BNA_TX_F_BW_UPDATED;
2896                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2897                 break;
2898
2899         case TX_E_STARTED:
2900                 if (tx->flags & BNA_TX_F_BW_UPDATED) {
2901                         tx->flags &= ~BNA_TX_F_BW_UPDATED;
2902                         bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2903                 } else
2904                         bfa_fsm_set_state(tx, bna_tx_sm_started);
2905                 break;
2906
2907         case TX_E_BW_UPDATE:
2908                 tx->flags |= BNA_TX_F_BW_UPDATED;
2909                 break;
2910
2911         default:
2912                 bfa_sm_fault(event);
2913         }
2914 }
2915
2916 static void
2917 bna_tx_sm_started_entry(struct bna_tx *tx)
2918 {
2919         struct bna_txq *txq;
2920         struct list_head                 *qe;
2921         int is_regular = (tx->type == BNA_TX_T_REGULAR);
2922
2923         list_for_each(qe, &tx->txq_q) {
2924                 txq = (struct bna_txq *)qe;
2925                 txq->tcb->priority = txq->priority;
2926                 /* Start IB */
2927                 bna_ib_start(tx->bna, &txq->ib, is_regular);
2928         }
2929         tx->tx_resume_cbfn(tx->bna->bnad, tx);
2930 }
2931
2932 static void
2933 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
2934 {
2935         switch (event) {
2936         case TX_E_STOP:
2937                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2938                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
2939                 bna_tx_enet_stop(tx);
2940                 break;
2941
2942         case TX_E_FAIL:
2943                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
2944                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
2945                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2946                 break;
2947
2948         case TX_E_BW_UPDATE:
2949                 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2950                 break;
2951
2952         default:
2953                 bfa_sm_fault(event);
2954         }
2955 }
2956
2957 static void
2958 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
2959 {
2960 }
2961
2962 static void
2963 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
2964 {
2965         switch (event) {
2966         case TX_E_FAIL:
2967         case TX_E_STOPPED:
2968                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
2969                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2970                 break;
2971
2972         case TX_E_STARTED:
2973                 /**
2974                 /*
2975                  * TX_E_STOP event
2976                  */
2977                 bna_tx_enet_stop(tx);
2978                 break;
2979
2980         case TX_E_BW_UPDATE:
2981                 /* No-op */
2982                 break;
2983
2984         default:
2985                 bfa_sm_fault(event);
2986         }
2987 }
2988
2989 static void
2990 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
2991 {
2992 }
2993
2994 static void
2995 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
2996 {
2997         switch (event) {
2998         case TX_E_FAIL:
2999         case TX_E_BW_UPDATE:
3000                 /* No-op */
3001                 break;
3002
3003         case TX_E_CLEANUP_DONE:
3004                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3005                 break;
3006
3007         default:
3008                 bfa_sm_fault(event);
3009         }
3010 }
3011
3012 static void
3013 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3014 {
3015         tx->tx_stall_cbfn(tx->bna->bnad, tx);
3016         bna_tx_enet_stop(tx);
3017 }
3018
3019 static void
3020 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3021 {
3022         switch (event) {
3023         case TX_E_STOP:
3024                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3025                 break;
3026
3027         case TX_E_FAIL:
3028                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3029                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3030                 break;
3031
3032         case TX_E_STOPPED:
3033                 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3034                 break;
3035
3036         case TX_E_BW_UPDATE:
3037                 /* No-op */
3038                 break;
3039
3040         default:
3041                 bfa_sm_fault(event);
3042         }
3043 }
3044
3045 static void
3046 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3047 {
3048         tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3049 }
3050
3051 static void
3052 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3053 {
3054         switch (event) {
3055         case TX_E_STOP:
3056                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3057                 break;
3058
3059         case TX_E_FAIL:
3060                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3061                 break;
3062
3063         case TX_E_BW_UPDATE:
3064                 /* No-op */
3065                 break;
3066
3067         case TX_E_CLEANUP_DONE:
3068                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3069                 break;
3070
3071         default:
3072                 bfa_sm_fault(event);
3073         }
3074 }
3075
3076 static void
3077 bna_tx_sm_failed_entry(struct bna_tx *tx)
3078 {
3079 }
3080
3081 static void
3082 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3083 {
3084         switch (event) {
3085         case TX_E_START:
3086                 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3087                 break;
3088
3089         case TX_E_STOP:
3090                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3091                 break;
3092
3093         case TX_E_FAIL:
3094                 /* No-op */
3095                 break;
3096
3097         case TX_E_CLEANUP_DONE:
3098                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3099                 break;
3100
3101         default:
3102                 bfa_sm_fault(event);
3103         }
3104 }
3105
3106 static void
3107 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3108 {
3109 }
3110
3111 static void
3112 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3113 {
3114         switch (event) {
3115         case TX_E_STOP:
3116                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3117                 break;
3118
3119         case TX_E_FAIL:
3120                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3121                 break;
3122
3123         case TX_E_CLEANUP_DONE:
3124                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3125                 break;
3126
3127         case TX_E_BW_UPDATE:
3128                 /* No-op */
3129                 break;
3130
3131         default:
3132                 bfa_sm_fault(event);
3133         }
3134 }
3135
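     /*
      * Build a TX_CFG_SET request for the firmware: one queue/IB entry per
      * TxQ plus the interrupt-block and VLAN settings, then post it on the
      * message queue.
      */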
3136 static void
3137 bna_bfi_tx_enet_start(struct bna_tx *tx)
3138 {
3139         struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3140         struct bna_txq *txq = NULL;
3141         int i;
3142
3143         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3144                 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3145         cfg_req->mh.num_entries = htons(
3146                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3147
3148         cfg_req->num_queues = tx->num_txq;
3149         for (i = 0; i < tx->num_txq; i++) {
3150                 txq = txq ? list_next_entry(txq, qe)
3151                         : list_first_entry(&tx->txq_q, struct bna_txq, qe);
3152                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3153                 cfg_req->q_cfg[i].q.priority = txq->priority;
3154
3155                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3156                         txq->ib.ib_seg_host_addr.lsb;
3157                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3158                         txq->ib.ib_seg_host_addr.msb;
3159                 cfg_req->q_cfg[i].ib.intr.msix_index =
3160                         htons((u16)txq->ib.intr_vector);
3161         }
3162
3163         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3164         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3165         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3166         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3167         cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3168                                 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3169         cfg_req->ib_cfg.coalescing_timeout =
3170                         htonl((u32)txq->ib.coalescing_timeo);
3171         cfg_req->ib_cfg.inter_pkt_timeout =
3172                         htonl((u32)txq->ib.interpkt_timeo);
3173         cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3174
3175         cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3176         cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3177         cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3178         cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3179
3180         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3181                 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3182         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3183 }
3184
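     /* Post a TX_CFG_CLR request asking the firmware to clear this Tx. */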
3185 static void
3186 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3187 {
3188         struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3189
3190         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3191                 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3192         req->mh.num_entries = htons(
3193                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3194         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3195                 &req->mh);
3196         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3197 }
3198
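     /* Stop the IB of every TxQ, then request the firmware to stop the Tx. */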
3199 static void
3200 bna_tx_enet_stop(struct bna_tx *tx)
3201 {
3202         struct bna_txq *txq;
3203         struct list_head *qe;
3204
3205         /* Stop IB */
3206         list_for_each(qe, &tx->txq_q) {
3207                 txq = (struct bna_txq *)qe;
3208                 bna_ib_stop(tx->bna, &txq->ib);
3209         }
3210
3211         bna_bfi_tx_enet_stop(tx);
3212 }
3213
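     /*
      * Set up the queue page table (QPT) of a TxQ: record the DMA address
      * of the QPT itself and fill it with the DMA address of every queue
      * page; the software QPT keeps the matching kernel virtual addresses.
      */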
3214 static void
3215 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3216                 struct bna_mem_descr *qpt_mem,
3217                 struct bna_mem_descr *swqpt_mem,
3218                 struct bna_mem_descr *page_mem)
3219 {
3220         u8 *kva;
3221         u64 dma;
3222         struct bna_dma_addr bna_dma;
3223         int i;
3224
3225         txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3226         txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3227         txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3228         txq->qpt.page_count = page_count;
3229         txq->qpt.page_size = page_size;
3230
3231         txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3232         txq->tcb->sw_q = page_mem->kva;
3233
3234         kva = page_mem->kva;
3235         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3236
3237         for (i = 0; i < page_count; i++) {
3238                 txq->tcb->sw_qpt[i] = kva;
3239                 kva += PAGE_SIZE;
3240
3241                 BNA_SET_DMA_ADDR(dma, &bna_dma);
3242                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3243                         bna_dma.lsb;
3244                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3245                         bna_dma.msb;
3246                 dma += PAGE_SIZE;
3247         }
3248 }
3249
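     /*
      * Take a Tx object from the free list: regular Tx objects come from
      * the head of the list, loopback ones from the tail.
      */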
3250 static struct bna_tx *
3251 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3252 {
3253         struct bna_tx *tx = NULL;
3254
3255         if (list_empty(&tx_mod->tx_free_q))
3256                 return NULL;
3257         if (type == BNA_TX_T_REGULAR)
3258                 tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
3259         else
3260                 tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
3261         list_del(&tx->qe);
3262         tx->type = type;
3263
3264         return tx;
3265 }
3266
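     /*
      * Return all TxQs of the Tx to the TxQ free list, remove the Tx from
      * the active list and put it back on the free list, keeping the free
      * list sorted by rid.
      */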
3267 static void
3268 bna_tx_free(struct bna_tx *tx)
3269 {
3270         struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3271         struct bna_txq *txq;
3272         struct list_head *qe;
3273
3274         while (!list_empty(&tx->txq_q)) {
3275                 txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
3276                 txq->tcb = NULL;
3277                 txq->tx = NULL;
3278                 list_move_tail(&txq->qe, &tx_mod->txq_free_q);
3279         }
3280
3281         list_for_each(qe, &tx_mod->tx_active_q) {
3282                 if (qe == &tx->qe) {
3283                         list_del(&tx->qe);
3284                         break;
3285                 }
3286         }
3287
3288         tx->bna = NULL;
3289         tx->priv = NULL;
3290
3291         list_for_each_prev(qe, &tx_mod->tx_free_q)
3292                 if (((struct bna_tx *)qe)->rid < tx->rid)
3293                         break;
3294
3295         list_add(&tx->qe, qe);
3296 }
3297
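     /* Note that the enet has started and kick the FSM if Tx is enabled. */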
3298 static void
3299 bna_tx_start(struct bna_tx *tx)
3300 {
3301         tx->flags |= BNA_TX_F_ENET_STARTED;
3302         if (tx->flags & BNA_TX_F_ENABLED)
3303                 bfa_fsm_send_event(tx, TX_E_START);
3304 }
3305
3306 static void
3307 bna_tx_stop(struct bna_tx *tx)
3308 {
3309         tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3310         tx->stop_cbarg = &tx->bna->tx_mod;
3311
3312         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3313         bfa_fsm_send_event(tx, TX_E_STOP);
3314 }
3315
3316 static void
3317 bna_tx_fail(struct bna_tx *tx)
3318 {
3319         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3320         bfa_fsm_send_event(tx, TX_E_FAIL);
3321 }
3322
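     /*
      * Handle the firmware's TX_CFG_SET response: save the hardware ids,
      * set up the doorbell addresses of every TxQ and reset the producer/
      * consumer indexes before moving the FSM to the started state.
      */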
3323 void
3324 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3325 {
3326         struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3327         struct bna_txq *txq = NULL;
3328         int i;
3329
3330         bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3331                 sizeof(struct bfi_enet_tx_cfg_rsp));
3332
3333         tx->hw_id = cfg_rsp->hw_id;
3334
3335         for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
3336              i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
3337                 /* Setup doorbells */
3338                 txq->tcb->i_dbell->doorbell_addr =
3339                         tx->bna->pcidev.pci_bar_kva
3340                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
3341                 txq->tcb->q_dbell =
3342                         tx->bna->pcidev.pci_bar_kva
3343                         + ntohl(cfg_rsp->q_handles[i].q_dbell);
3344                 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3345
3346                 /* Initialize producer/consumer indexes */
3347                 (*txq->tcb->hw_consumer_index) = 0;
3348                 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3349         }
3350
3351         bfa_fsm_send_event(tx, TX_E_STARTED);
3352 }
3353
3354 void
3355 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3356 {
3357         bfa_fsm_send_event(tx, TX_E_STOPPED);
3358 }
3359
3360 void
3361 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3362 {
3363         struct bna_tx *tx;
3364         struct list_head *qe;
3365
3366         list_for_each(qe, &tx_mod->tx_active_q) {
3367                 tx = (struct bna_tx *)qe;
3368                 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3369         }
3370 }
3371
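     /*
      * Fill in the memory and interrupt resources needed for num_txq
      * queues: TCBs, queue page tables (hardware and software), queue
      * pages, IB index segments and one MSI-X vector per TxQ.
      */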
3372 void
3373 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3374 {
3375         u32 q_size;
3376         u32 page_count;
3377         struct bna_mem_info *mem_info;
3378
3379         res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3380         mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3381         mem_info->mem_type = BNA_MEM_T_KVA;
3382         mem_info->len = sizeof(struct bna_tcb);
3383         mem_info->num = num_txq;
3384
3385         q_size = txq_depth * BFI_TXQ_WI_SIZE;
3386         q_size = ALIGN(q_size, PAGE_SIZE);
3387         page_count = q_size >> PAGE_SHIFT;
3388
3389         res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3390         mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3391         mem_info->mem_type = BNA_MEM_T_DMA;
3392         mem_info->len = page_count * sizeof(struct bna_dma_addr);
3393         mem_info->num = num_txq;
3394
3395         res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3396         mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3397         mem_info->mem_type = BNA_MEM_T_KVA;
3398         mem_info->len = page_count * sizeof(void *);
3399         mem_info->num = num_txq;
3400
3401         res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3402         mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3403         mem_info->mem_type = BNA_MEM_T_DMA;
3404         mem_info->len = PAGE_SIZE * page_count;
3405         mem_info->num = num_txq;
3406
3407         res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3408         mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3409         mem_info->mem_type = BNA_MEM_T_DMA;
3410         mem_info->len = BFI_IBIDX_SIZE;
3411         mem_info->num = num_txq;
3412
3413         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3414         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3415                         BNA_INTR_T_MSIX;
3416         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3417 }
3418
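     /*
      * Create a Tx object: take a Tx and the requested number of TxQs from
      * the free lists, wire up the callbacks, IBs, TCBs and queue page
      * tables from the pre-allocated resources, and leave the FSM stopped.
      */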
3419 struct bna_tx *
3420 bna_tx_create(struct bna *bna, struct bnad *bnad,
3421                 struct bna_tx_config *tx_cfg,
3422                 const struct bna_tx_event_cbfn *tx_cbfn,
3423                 struct bna_res_info *res_info, void *priv)
3424 {
3425         struct bna_intr_info *intr_info;
3426         struct bna_tx_mod *tx_mod = &bna->tx_mod;
3427         struct bna_tx *tx;
3428         struct bna_txq *txq;
3429         struct list_head *qe;
3430         int page_count;
3431         int i;
3432
3433         intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3434         page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3435                                         PAGE_SIZE;
3436
3437         /*
3438          * Get resources
3439          */
3440
3441         if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3442                 return NULL;
3443
3444         /* Tx */
3445
3446         tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3447         if (!tx)
3448                 return NULL;
3449         tx->bna = bna;
3450         tx->priv = priv;
3451
3452         /* TxQs */
3453
3454         INIT_LIST_HEAD(&tx->txq_q);
3455         for (i = 0; i < tx_cfg->num_txq; i++) {
3456                 if (list_empty(&tx_mod->txq_free_q))
3457                         goto err_return;
3458
3459                 txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
3460                 list_move_tail(&txq->qe, &tx->txq_q);
3461                 txq->tx = tx;
3462         }
3463
3464         /*
3465          * Initialize
3466          */
3467
3468         /* Tx */
3469
3470         tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3471         tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3472         /* Following callbacks are mandatory */
3473         tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3474         tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3475         tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3476
3477         list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3478
3479         tx->num_txq = tx_cfg->num_txq;
3480
3481         tx->flags = 0;
3482         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3483                 switch (tx->type) {
3484                 case BNA_TX_T_REGULAR:
3485                         if (!(tx->bna->tx_mod.flags &
3486                                 BNA_TX_MOD_F_ENET_LOOPBACK))
3487                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3488                         break;
3489                 case BNA_TX_T_LOOPBACK:
3490                         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3491                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3492                         break;
3493                 }
3494         }
3495
3496         /* TxQ */
3497
3498         i = 0;
3499         list_for_each(qe, &tx->txq_q) {
3500                 txq = (struct bna_txq *)qe;
3501                 txq->tcb = (struct bna_tcb *)
3502                 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3503                 txq->tx_packets = 0;
3504                 txq->tx_bytes = 0;
3505
3506                 /* IB */
3507                 txq->ib.ib_seg_host_addr.lsb =
3508                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3509                 txq->ib.ib_seg_host_addr.msb =
3510                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3511                 txq->ib.ib_seg_host_addr_kva =
3512                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3513                 txq->ib.intr_type = intr_info->intr_type;
3514                 txq->ib.intr_vector = (intr_info->num == 1) ?
3515                                         intr_info->idl[0].vector :
3516                                         intr_info->idl[i].vector;
3517                 if (intr_info->intr_type == BNA_INTR_T_INTX)
3518                         txq->ib.intr_vector = BIT(txq->ib.intr_vector);
3519                 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3520                 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3521                 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3522
3523                 /* TCB */
3524
3525                 txq->tcb->q_depth = tx_cfg->txq_depth;
3526                 txq->tcb->unmap_q = (void *)
3527                 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3528                 txq->tcb->hw_consumer_index =
3529                         (u32 *)txq->ib.ib_seg_host_addr_kva;
3530                 txq->tcb->i_dbell = &txq->ib.door_bell;
3531                 txq->tcb->intr_type = txq->ib.intr_type;
3532                 txq->tcb->intr_vector = txq->ib.intr_vector;
3533                 txq->tcb->txq = txq;
3534                 txq->tcb->bnad = bnad;
3535                 txq->tcb->id = i;
3536
3537                 /* QPT, SWQPT, Pages */
3538                 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3539                         &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3540                         &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3541                         &res_info[BNA_TX_RES_MEM_T_PAGE].
3542                                   res_u.mem_info.mdl[i]);
3543
3544                 /* Callback to bnad for setting up TCB */
3545                 if (tx->tcb_setup_cbfn)
3546                         (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3547
3548                 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3549                         txq->priority = txq->tcb->id;
3550                 else
3551                         txq->priority = tx_mod->default_prio;
3552
3553                 i++;
3554         }
3555
3556         tx->txf_vlan_id = 0;
3557
3558         bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3559
3560         tx_mod->rid_mask |= BIT(tx->rid);
3561
3562         return tx;
3563
3564 err_return:
3565         bna_tx_free(tx);
3566         return NULL;
3567 }
3568
3569 void
3570 bna_tx_destroy(struct bna_tx *tx)
3571 {
3572         struct bna_txq *txq;
3573         struct list_head *qe;
3574
3575         list_for_each(qe, &tx->txq_q) {
3576                 txq = (struct bna_txq *)qe;
3577                 if (tx->tcb_destroy_cbfn)
3578                         (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3579         }
3580
3581         tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
3582         bna_tx_free(tx);
3583 }
3584
3585 void
3586 bna_tx_enable(struct bna_tx *tx)
3587 {
3588         if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3589                 return;
3590
3591         tx->flags |= BNA_TX_F_ENABLED;
3592
3593         if (tx->flags & BNA_TX_F_ENET_STARTED)
3594                 bfa_fsm_send_event(tx, TX_E_START);
3595 }
3596
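     /*
      * Disable a Tx. A soft cleanup only invokes the callback; otherwise
      * the stop callback is recorded and the FSM is driven via TX_E_STOP.
      */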
3597 void
3598 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3599                 void (*cbfn)(void *, struct bna_tx *))
3600 {
3601         if (type == BNA_SOFT_CLEANUP) {
3602                 (*cbfn)(tx->bna->bnad, tx);
3603                 return;
3604         }
3605
3606         tx->stop_cbfn = cbfn;
3607         tx->stop_cbarg = tx->bna->bnad;
3608
3609         tx->flags &= ~BNA_TX_F_ENABLED;
3610
3611         bfa_fsm_send_event(tx, TX_E_STOP);
3612 }
3613
3614 void
3615 bna_tx_cleanup_complete(struct bna_tx *tx)
3616 {
3617         bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3618 }
3619
3620 static void
3621 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3622 {
3623         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3624
3625         bfa_wc_down(&tx_mod->tx_stop_wc);
3626 }
3627
3628 static void
3629 bna_tx_mod_cb_tx_stopped_all(void *arg)
3630 {
3631         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3632
3633         if (tx_mod->stop_cbfn)
3634                 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3635         tx_mod->stop_cbfn = NULL;
3636 }
3637
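     /*
      * Initialize the Tx module: carve the Tx and TxQ arrays out of the
      * pre-allocated resources and put every element on its free list.
      */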
3638 void
3639 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3640                 struct bna_res_info *res_info)
3641 {
3642         int i;
3643
3644         tx_mod->bna = bna;
3645         tx_mod->flags = 0;
3646
3647         tx_mod->tx = (struct bna_tx *)
3648                 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3649         tx_mod->txq = (struct bna_txq *)
3650                 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3651
3652         INIT_LIST_HEAD(&tx_mod->tx_free_q);
3653         INIT_LIST_HEAD(&tx_mod->tx_active_q);
3654
3655         INIT_LIST_HEAD(&tx_mod->txq_free_q);
3656
3657         for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3658                 tx_mod->tx[i].rid = i;
3659                 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3660                 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3661         }
3662
3663         tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3664         tx_mod->default_prio = 0;
3665         tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3666         tx_mod->iscsi_prio = -1;
3667 }
3668
3669 void
3670 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3671 {
3672         struct list_head *qe;
3673         int i;
3674
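             /* The loops below only walk the free lists; the counts are unused. */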
3675         i = 0;
3676         list_for_each(qe, &tx_mod->tx_free_q)
3677                 i++;
3678
3679         i = 0;
3680         list_for_each(qe, &tx_mod->txq_free_q)
3681                 i++;
3682
3683         tx_mod->bna = NULL;
3684 }
3685
3686 void
3687 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3688 {
3689         struct bna_tx *tx;
3690         struct list_head *qe;
3691
3692         tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3693         if (type == BNA_TX_T_LOOPBACK)
3694                 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3695
3696         list_for_each(qe, &tx_mod->tx_active_q) {
3697                 tx = (struct bna_tx *)qe;
3698                 if (tx->type == type)
3699                         bna_tx_start(tx);
3700         }
3701 }
3702
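     /*
      * Stop every active Tx of the given type. A wait counter tracks the
      * outstanding stops; when all of them complete, the recorded enet
      * stop callback is invoked.
      */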
3703 void
3704 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3705 {
3706         struct bna_tx *tx;
3707         struct list_head *qe;
3708
3709         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3710         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3711
3712         tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3713
3714         bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3715
3716         list_for_each(qe, &tx_mod->tx_active_q) {
3717                 tx = (struct bna_tx *)qe;
3718                 if (tx->type == type) {
3719                         bfa_wc_up(&tx_mod->tx_stop_wc);
3720                         bna_tx_stop(tx);
3721                 }
3722         }
3723
3724         bfa_wc_wait(&tx_mod->tx_stop_wc);
3725 }
3726
3727 void
3728 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3729 {
3730         struct bna_tx *tx;
3731         struct list_head *qe;
3732
3733         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3734         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3735
3736         list_for_each(qe, &tx_mod->tx_active_q) {
3737                 tx = (struct bna_tx *)qe;
3738                 bna_tx_fail(tx);
3739         }
3740 }
3741
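     /* Apply a new interrupt coalescing timeout to the IB of every TxQ. */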
3742 void
3743 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3744 {
3745         struct bna_txq *txq;
3746         struct list_head *qe;
3747
3748         list_for_each(qe, &tx->txq_q) {
3749                 txq = (struct bna_txq *)qe;
3750                 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3751         }
3752 }