/* Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}
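/*
 * Note: BNA_DOORBELL_IB_INT_ACK() folds the coalescing timeout into the
 * cached doorbell-acknowledge word, so later interrupt acknowledgements
 * re-arm the interrupt block with the currently configured timeout
 * without having to recompute it.
 */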
#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)
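/*
 * Note: a "soft reset" touches no hardware state; it only marks every
 * VLAN block, VLAN stripping and (when RSS is enabled) all RSS
 * sub-configurations as pending again, so the next configuration-apply
 * pass replays them to the firmware.
 */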
47 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
48 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
49 static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
50 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
54 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
55 enum bna_cleanup_type cleanup);
56 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
57 enum bna_cleanup_type cleanup);
58 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
59 enum bna_cleanup_type cleanup);
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);
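/*
 * Overview (derived from the handlers below): the RxF state machine
 * idles in "stopped" or "paused", loops through "cfg_wait" while
 * configuration requests are outstanding to the firmware, and rests in
 * "started" once everything has been applied.  "fltr_clr_wait" and
 * "last_resp_wait" drain pending CAM-filter work and the final firmware
 * response on the way back to "stopped"/"paused".
 */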
75 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
77 call_rxf_stop_cbfn(rxf);
81 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
85 if (rxf->flags & BNA_RXF_F_PAUSED) {
86 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
87 call_rxf_start_cbfn(rxf);
89 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
93 call_rxf_stop_cbfn(rxf);
101 call_rxf_cam_fltr_cbfn(rxf);
105 rxf->flags |= BNA_RXF_F_PAUSED;
106 call_rxf_pause_cbfn(rxf);
110 rxf->flags &= ~BNA_RXF_F_PAUSED;
111 call_rxf_resume_cbfn(rxf);
120 bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
122 call_rxf_pause_cbfn(rxf);
126 bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
131 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
135 call_rxf_cam_fltr_cbfn(rxf);
139 rxf->flags &= ~BNA_RXF_F_PAUSED;
140 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
149 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
151 if (!bna_rxf_cfg_apply(rxf)) {
152 /* No more pending config updates */
153 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
158 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
162 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
166 bna_rxf_cfg_reset(rxf);
167 call_rxf_start_cbfn(rxf);
168 call_rxf_cam_fltr_cbfn(rxf);
169 call_rxf_resume_cbfn(rxf);
170 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
178 rxf->flags |= BNA_RXF_F_PAUSED;
179 call_rxf_start_cbfn(rxf);
180 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
184 if (!bna_rxf_cfg_apply(rxf)) {
185 /* No more pending config updates */
186 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
196 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
198 call_rxf_start_cbfn(rxf);
199 call_rxf_cam_fltr_cbfn(rxf);
200 call_rxf_resume_cbfn(rxf);
204 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
209 bna_rxf_cfg_reset(rxf);
210 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
214 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
218 rxf->flags |= BNA_RXF_F_PAUSED;
219 if (!bna_rxf_fltr_clear(rxf))
220 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
222 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
231 bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
236 bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
240 bna_rxf_cfg_reset(rxf);
241 call_rxf_pause_cbfn(rxf);
242 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
246 if (!bna_rxf_fltr_clear(rxf)) {
247 /* No more pending CAM entries to clear */
248 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
258 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
263 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
268 bna_rxf_cfg_reset(rxf);
269 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
278 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
279 enum bfi_enet_h2i_msgs req_type)
281 struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
283 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
284 req->mh.num_entries = htons(
285 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
286 ether_addr_copy(req->mac_addr, mac->addr);
287 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
288 sizeof(struct bfi_enet_ucast_req), &req->mh);
289 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
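/*
 * The bna_bfi_*() helpers that follow share one pattern: build a BFI
 * message header for the ENET message class, record how many message
 * queue entries the request occupies, fill in the payload, and post the
 * command on the device message queue.  Completion is reported back to
 * the RxF state machine as an RXF_E_FW_RESP event.
 */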
293 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
295 struct bfi_enet_mcast_add_req *req =
296 &rxf->bfi_enet_cmd.mcast_add_req;
298 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
300 req->mh.num_entries = htons(
301 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
302 ether_addr_copy(req->mac_addr, mac->addr);
303 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
304 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
305 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
309 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
311 struct bfi_enet_mcast_del_req *req =
312 &rxf->bfi_enet_cmd.mcast_del_req;
314 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
316 req->mh.num_entries = htons(
317 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
318 req->handle = htons(handle);
319 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
320 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
321 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
325 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
327 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
329 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
330 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
331 req->mh.num_entries = htons(
332 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
333 req->enable = status;
334 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
335 sizeof(struct bfi_enet_enable_req), &req->mh);
336 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
340 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
342 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
344 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
345 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
346 req->mh.num_entries = htons(
347 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
348 req->enable = status;
349 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
350 sizeof(struct bfi_enet_enable_req), &req->mh);
351 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
355 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
357 struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
361 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
362 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
363 req->mh.num_entries = htons(
364 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
365 req->block_idx = block_idx;
366 for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
367 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
374 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
375 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
376 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
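/*
 * Note: when VLAN filtering is disabled every bit of the block is set,
 * so the firmware accepts all VLAN IDs in that block instead of
 * consulting the per-VLAN filter table.
 */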
380 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
382 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
384 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
385 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
386 req->mh.num_entries = htons(
387 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
388 req->enable = rxf->vlan_strip_status;
389 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
390 sizeof(struct bfi_enet_enable_req), &req->mh);
391 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
395 bna_bfi_rit_cfg(struct bna_rxf *rxf)
397 struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
399 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
400 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
401 req->mh.num_entries = htons(
402 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
403 req->size = htons(rxf->rit_size);
404 memcpy(&req->table[0], rxf->rit, rxf->rit_size);
405 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
406 sizeof(struct bfi_enet_rit_req), &req->mh);
407 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
411 bna_bfi_rss_cfg(struct bna_rxf *rxf)
413 struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
416 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
417 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
418 req->mh.num_entries = htons(
419 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
420 req->cfg.type = rxf->rss_cfg.hash_type;
421 req->cfg.mask = rxf->rss_cfg.hash_mask;
422 for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
424 htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
425 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
426 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
427 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
431 bna_bfi_rss_enable(struct bna_rxf *rxf)
433 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
435 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
436 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
437 req->mh.num_entries = htons(
438 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
439 req->enable = rxf->rss_status;
440 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
441 sizeof(struct bfi_enet_enable_req), &req->mh);
442 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
445 /* This function gets the multicast MAC that has already been added to CAM */
446 static struct bna_mac *
447 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
450 struct list_head *qe;
452 list_for_each(qe, &rxf->mcast_active_q) {
453 mac = (struct bna_mac *)qe;
454 if (ether_addr_equal(mac->addr, mac_addr))
458 list_for_each(qe, &rxf->mcast_pending_del_q) {
459 mac = (struct bna_mac *)qe;
460 if (ether_addr_equal(mac->addr, mac_addr))
467 static struct bna_mcam_handle *
468 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
470 struct bna_mcam_handle *mchandle;
471 struct list_head *qe;
473 list_for_each(qe, &rxf->mcast_handle_q) {
474 mchandle = (struct bna_mcam_handle *)qe;
475 if (mchandle->handle == handle)
483 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
485 struct bna_mac *mcmac;
486 struct bna_mcam_handle *mchandle;
488 mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
489 mchandle = bna_rxf_mchandle_get(rxf, handle);
490 if (mchandle == NULL) {
491 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
492 mchandle->handle = handle;
493 mchandle->refcnt = 0;
494 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
497 mcmac->handle = mchandle;
501 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
502 enum bna_cleanup_type cleanup)
504 struct bna_mcam_handle *mchandle;
507 mchandle = mac->handle;
508 if (mchandle == NULL)
512 if (mchandle->refcnt == 0) {
513 if (cleanup == BNA_HARD_CLEANUP) {
514 bna_bfi_mcast_del_req(rxf, mchandle->handle);
517 list_del(&mchandle->qe);
518 bfa_q_qe_init(&mchandle->qe);
519 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
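/*
 * Multicast CAM handles are reference counted: several local multicast
 * entries may resolve to the same firmware handle, and the handle (plus
 * the firmware CAM entry, on a hard cleanup) is released only when the
 * last reference is dropped.
 */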
527 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
529 struct bna_mac *mac = NULL;
530 struct list_head *qe;
533 /* First delete multicast entries to maintain the count */
534 while (!list_empty(&rxf->mcast_pending_del_q)) {
535 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
537 mac = (struct bna_mac *)qe;
538 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
539 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
544 /* Add multicast entries */
545 if (!list_empty(&rxf->mcast_pending_add_q)) {
546 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
548 mac = (struct bna_mac *)qe;
549 list_add_tail(&mac->qe, &rxf->mcast_active_q);
550 bna_bfi_mcast_add_req(rxf, mac);
558 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
560 u8 vlan_pending_bitmask;
563 if (rxf->vlan_pending_bitmask) {
564 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
565 while (!(vlan_pending_bitmask & 0x1)) {
567 vlan_pending_bitmask >>= 1;
569 rxf->vlan_pending_bitmask &= ~BIT(block_idx);
570 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
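/*
 * Each bit of vlan_pending_bitmask represents one block of
 * BFI_ENET_VLAN_BLOCK_SIZE VLAN IDs whose filter words still need to be
 * written to the firmware.  Only the lowest pending block is flushed per
 * call; the state machine re-enters this path on the next firmware
 * response until the bitmask is empty.
 */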
578 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
580 struct list_head *qe;
584 /* Throw away delete pending mcast entries */
585 while (!list_empty(&rxf->mcast_pending_del_q)) {
586 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
588 mac = (struct bna_mac *)qe;
589 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
590 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
595 /* Move active mcast entries to pending_add_q */
596 while (!list_empty(&rxf->mcast_active_q)) {
597 bfa_q_deq(&rxf->mcast_active_q, &qe);
599 list_add_tail(qe, &rxf->mcast_pending_add_q);
600 mac = (struct bna_mac *)qe;
601 if (bna_rxf_mcast_del(rxf, mac, cleanup))
609 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
611 if (rxf->rss_pending) {
612 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
613 rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
614 bna_bfi_rit_cfg(rxf);
618 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
619 rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
620 bna_bfi_rss_cfg(rxf);
624 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
625 rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
626 bna_bfi_rss_enable(rxf);
635 bna_rxf_cfg_apply(struct bna_rxf *rxf)
637 if (bna_rxf_ucast_cfg_apply(rxf))
640 if (bna_rxf_mcast_cfg_apply(rxf))
643 if (bna_rxf_promisc_cfg_apply(rxf))
646 if (bna_rxf_allmulti_cfg_apply(rxf))
649 if (bna_rxf_vlan_cfg_apply(rxf))
652 if (bna_rxf_vlan_strip_cfg_apply(rxf))
655 if (bna_rxf_rss_cfg_apply(rxf))
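/*
 * bna_rxf_cfg_apply() posts at most one firmware request per invocation
 * and reports whether more work remains; the cfg_wait state calls it
 * again on every RXF_E_FW_RESP until nothing is pending, at which point
 * the RxF moves to the started state.
 */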
661 /* Only software reset */
663 bna_rxf_fltr_clear(struct bna_rxf *rxf)
665 if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
668 if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
671 if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
674 if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
681 bna_rxf_cfg_reset(struct bna_rxf *rxf)
683 bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
684 bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
685 bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
686 bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
687 bna_rxf_vlan_cfg_soft_reset(rxf);
688 bna_rxf_rss_cfg_soft_reset(rxf);
692 bna_rit_init(struct bna_rxf *rxf, int rit_size)
694 struct bna_rx *rx = rxf->rx;
696 struct list_head *qe;
699 rxf->rit_size = rit_size;
700 list_for_each(qe, &rx->rxp_q) {
701 rxp = (struct bna_rxp *)qe;
702 rxf->rit[offset] = rxp->cq.ccb->id;
709 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
711 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
715 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
716 struct bfi_msgq_mhdr *msghdr)
718 struct bfi_enet_rsp *rsp =
719 container_of(msghdr, struct bfi_enet_rsp, mh);
722 /* Clear ucast from cache */
723 rxf->ucast_active_set = 0;
726 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
730 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
731 struct bfi_msgq_mhdr *msghdr)
733 struct bfi_enet_mcast_add_req *req =
734 &rxf->bfi_enet_cmd.mcast_add_req;
735 struct bfi_enet_mcast_add_rsp *rsp =
736 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
738 bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
740 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
744 bna_rxf_init(struct bna_rxf *rxf,
746 struct bna_rx_config *q_config,
747 struct bna_res_info *res_info)
751 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
752 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
753 rxf->ucast_pending_set = 0;
754 rxf->ucast_active_set = 0;
755 INIT_LIST_HEAD(&rxf->ucast_active_q);
756 rxf->ucast_pending_mac = NULL;
758 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
759 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
760 INIT_LIST_HEAD(&rxf->mcast_active_q);
761 INIT_LIST_HEAD(&rxf->mcast_handle_q);
763 if (q_config->paused)
764 rxf->flags |= BNA_RXF_F_PAUSED;
767 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
768 bna_rit_init(rxf, q_config->num_paths);
770 rxf->rss_status = q_config->rss_status;
771 if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
772 rxf->rss_cfg = q_config->rss_config;
773 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
774 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
775 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
778 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
779 memset(rxf->vlan_filter_table, 0,
780 (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
781 rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
782 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
784 rxf->vlan_strip_status = q_config->vlan_strip_status;
786 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
790 bna_rxf_uninit(struct bna_rxf *rxf)
794 rxf->ucast_pending_set = 0;
795 rxf->ucast_active_set = 0;
797 while (!list_empty(&rxf->ucast_pending_add_q)) {
798 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
799 bfa_q_qe_init(&mac->qe);
800 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
803 if (rxf->ucast_pending_mac) {
804 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
805 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
806 rxf->ucast_pending_mac);
807 rxf->ucast_pending_mac = NULL;
810 while (!list_empty(&rxf->mcast_pending_add_q)) {
811 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
812 bfa_q_qe_init(&mac->qe);
813 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
816 rxf->rxmode_pending = 0;
817 rxf->rxmode_pending_bitmask = 0;
818 if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
819 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
820 if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
821 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
823 rxf->rss_pending = 0;
824 rxf->vlan_strip_pending = false;
832 bna_rx_cb_rxf_started(struct bna_rx *rx)
834 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
838 bna_rxf_start(struct bna_rxf *rxf)
840 rxf->start_cbfn = bna_rx_cb_rxf_started;
841 rxf->start_cbarg = rxf->rx;
842 bfa_fsm_send_event(rxf, RXF_E_START);
846 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
848 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
852 bna_rxf_stop(struct bna_rxf *rxf)
854 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
855 rxf->stop_cbarg = rxf->rx;
856 bfa_fsm_send_event(rxf, RXF_E_STOP);
860 bna_rxf_fail(struct bna_rxf *rxf)
862 bfa_fsm_send_event(rxf, RXF_E_FAIL);
866 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
868 struct bna_rxf *rxf = &rx->rxf;
870 if (rxf->ucast_pending_mac == NULL) {
871 rxf->ucast_pending_mac =
872 bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
873 if (rxf->ucast_pending_mac == NULL)
874 return BNA_CB_UCAST_CAM_FULL;
875 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
878 ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
879 rxf->ucast_pending_set = 1;
880 rxf->cam_fltr_cbfn = NULL;
881 rxf->cam_fltr_cbarg = rx->bna->bnad;
883 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
885 return BNA_CB_SUCCESS;
889 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
890 void (*cbfn)(struct bnad *, struct bna_rx *))
892 struct bna_rxf *rxf = &rx->rxf;
895 /* Check if already added or pending addition */
896 if (bna_mac_find(&rxf->mcast_active_q, addr) ||
897 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
899 cbfn(rx->bna->bnad, rx);
900 return BNA_CB_SUCCESS;
903 mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
905 return BNA_CB_MCAST_LIST_FULL;
906 bfa_q_qe_init(&mac->qe);
907 ether_addr_copy(mac->addr, addr);
908 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
910 rxf->cam_fltr_cbfn = cbfn;
911 rxf->cam_fltr_cbarg = rx->bna->bnad;
913 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
915 return BNA_CB_SUCCESS;
919 bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
921 struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
922 struct bna_rxf *rxf = &rx->rxf;
923 struct list_head list_head;
924 struct list_head *qe;
926 struct bna_mac *mac, *del_mac;
929 /* Purge the pending_add_q */
930 while (!list_empty(&rxf->ucast_pending_add_q)) {
931 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
933 mac = (struct bna_mac *)qe;
934 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
937 /* Schedule active_q entries for deletion */
938 while (!list_empty(&rxf->ucast_active_q)) {
939 bfa_q_deq(&rxf->ucast_active_q, &qe);
940 mac = (struct bna_mac *)qe;
941 bfa_q_qe_init(&mac->qe);
943 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
944 memcpy(del_mac, mac, sizeof(*del_mac));
945 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
946 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
950 INIT_LIST_HEAD(&list_head);
951 for (i = 0, mcaddr = uclist; i < count; i++) {
952 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
955 bfa_q_qe_init(&mac->qe);
956 ether_addr_copy(mac->addr, mcaddr);
957 list_add_tail(&mac->qe, &list_head);
961 /* Add the new entries */
962 while (!list_empty(&list_head)) {
963 bfa_q_deq(&list_head, &qe);
964 mac = (struct bna_mac *)qe;
965 bfa_q_qe_init(&mac->qe);
966 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
969 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
971 return BNA_CB_SUCCESS;
974 while (!list_empty(&list_head)) {
975 bfa_q_deq(&list_head, &qe);
976 mac = (struct bna_mac *)qe;
977 bfa_q_qe_init(&mac->qe);
978 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
981 return BNA_CB_UCAST_CAM_FULL;
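/*
 * The list-set helpers above and below replace the whole address list in
 * one shot: pending additions are purged, active entries are queued for
 * deletion, and the new list is staged only once every node has been
 * allocated, so a CAM-full condition rolls back without leaving a
 * half-applied list behind.
 */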
985 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
987 struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
988 struct bna_rxf *rxf = &rx->rxf;
989 struct list_head list_head;
990 struct list_head *qe;
992 struct bna_mac *mac, *del_mac;
995 /* Purge the pending_add_q */
996 while (!list_empty(&rxf->mcast_pending_add_q)) {
997 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
999 mac = (struct bna_mac *)qe;
1000 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1003 /* Schedule active_q entries for deletion */
1004 while (!list_empty(&rxf->mcast_active_q)) {
1005 bfa_q_deq(&rxf->mcast_active_q, &qe);
1006 mac = (struct bna_mac *)qe;
1007 bfa_q_qe_init(&mac->qe);
1009 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
1011 memcpy(del_mac, mac, sizeof(*del_mac));
1012 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1014 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1017 /* Allocate nodes */
1018 INIT_LIST_HEAD(&list_head);
1019 for (i = 0, mcaddr = mclist; i < count; i++) {
1020 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
1023 bfa_q_qe_init(&mac->qe);
1024 ether_addr_copy(mac->addr, mcaddr);
1025 list_add_tail(&mac->qe, &list_head);
1030 /* Add the new entries */
1031 while (!list_empty(&list_head)) {
1032 bfa_q_deq(&list_head, &qe);
1033 mac = (struct bna_mac *)qe;
1034 bfa_q_qe_init(&mac->qe);
1035 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1038 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1040 return BNA_CB_SUCCESS;
1043 while (!list_empty(&list_head)) {
1044 bfa_q_deq(&list_head, &qe);
1045 mac = (struct bna_mac *)qe;
1046 bfa_q_qe_init(&mac->qe);
1047 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1050 return BNA_CB_MCAST_LIST_FULL;
1054 bna_rx_mcast_delall(struct bna_rx *rx)
1056 struct bna_rxf *rxf = &rx->rxf;
1057 struct list_head *qe;
1058 struct bna_mac *mac, *del_mac;
1059 int need_hw_config = 0;
1061 /* Purge all entries from pending_add_q */
1062 while (!list_empty(&rxf->mcast_pending_add_q)) {
1063 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1064 mac = (struct bna_mac *)qe;
1065 bfa_q_qe_init(&mac->qe);
1066 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1069 /* Schedule all entries in active_q for deletion */
1070 while (!list_empty(&rxf->mcast_active_q)) {
1071 bfa_q_deq(&rxf->mcast_active_q, &qe);
1072 mac = (struct bna_mac *)qe;
1073 bfa_q_qe_init(&mac->qe);
1075 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
1077 memcpy(del_mac, mac, sizeof(*del_mac));
1078 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1080 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1085 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1089 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1091 struct bna_rxf *rxf = &rx->rxf;
1092 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1093 int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
1094 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1096 rxf->vlan_filter_table[index] |= bit;
1097 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1098 rxf->vlan_pending_bitmask |= BIT(group_id);
1099 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1104 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1106 struct bna_rxf *rxf = &rx->rxf;
1107 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1108 int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
1109 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1111 rxf->vlan_filter_table[index] &= ~bit;
1112 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1113 rxf->vlan_pending_bitmask |= BIT(group_id);
1114 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
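/*
 * Worked example (assuming 32-bit filter words, BFI_VLAN_WORD_SHIFT == 5
 * and BFI_VLAN_BLOCK_SHIFT == 9, i.e. 512 VLAN IDs per block): adding
 * VLAN 1000 sets bit 8 (1000 & 31) of word 31 (1000 >> 5) in
 * vlan_filter_table and marks block 1 (1000 >> 9) as pending, so only
 * that block is re-sent to the firmware.
 */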
1119 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1121 struct bna_mac *mac = NULL;
1122 struct list_head *qe;
	/* Delete MAC addresses previously added */
1125 if (!list_empty(&rxf->ucast_pending_del_q)) {
1126 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1128 mac = (struct bna_mac *)qe;
1129 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1130 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
1134 /* Set default unicast MAC */
1135 if (rxf->ucast_pending_set) {
1136 rxf->ucast_pending_set = 0;
1137 ether_addr_copy(rxf->ucast_active_mac.addr,
1138 rxf->ucast_pending_mac->addr);
1139 rxf->ucast_active_set = 1;
1140 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1141 BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1145 /* Add additional MAC entries */
1146 if (!list_empty(&rxf->ucast_pending_add_q)) {
1147 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1149 mac = (struct bna_mac *)qe;
1150 list_add_tail(&mac->qe, &rxf->ucast_active_q);
1151 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1159 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1161 struct list_head *qe;
1162 struct bna_mac *mac;
1164 /* Throw away delete pending ucast entries */
1165 while (!list_empty(&rxf->ucast_pending_del_q)) {
1166 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1168 mac = (struct bna_mac *)qe;
1169 if (cleanup == BNA_SOFT_CLEANUP)
1170 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1173 bna_bfi_ucast_req(rxf, mac,
1174 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1175 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1181 /* Move active ucast entries to pending_add_q */
1182 while (!list_empty(&rxf->ucast_active_q)) {
1183 bfa_q_deq(&rxf->ucast_active_q, &qe);
1185 list_add_tail(qe, &rxf->ucast_pending_add_q);
1186 if (cleanup == BNA_HARD_CLEANUP) {
1187 mac = (struct bna_mac *)qe;
1188 bna_bfi_ucast_req(rxf, mac,
1189 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1194 if (rxf->ucast_active_set) {
1195 rxf->ucast_pending_set = 1;
1196 rxf->ucast_active_set = 0;
1197 if (cleanup == BNA_HARD_CLEANUP) {
1198 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1199 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1208 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1210 struct bna *bna = rxf->rx->bna;
1212 /* Enable/disable promiscuous mode */
1213 if (is_promisc_enable(rxf->rxmode_pending,
1214 rxf->rxmode_pending_bitmask)) {
1215 /* move promisc configuration from pending -> active */
1216 promisc_inactive(rxf->rxmode_pending,
1217 rxf->rxmode_pending_bitmask);
1218 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1219 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1221 } else if (is_promisc_disable(rxf->rxmode_pending,
1222 rxf->rxmode_pending_bitmask)) {
1223 /* move promisc configuration from pending -> active */
1224 promisc_inactive(rxf->rxmode_pending,
1225 rxf->rxmode_pending_bitmask);
1226 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1227 bna->promisc_rid = BFI_INVALID_RID;
1228 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1236 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1238 struct bna *bna = rxf->rx->bna;
1240 /* Clear pending promisc mode disable */
1241 if (is_promisc_disable(rxf->rxmode_pending,
1242 rxf->rxmode_pending_bitmask)) {
1243 promisc_inactive(rxf->rxmode_pending,
1244 rxf->rxmode_pending_bitmask);
1245 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1246 bna->promisc_rid = BFI_INVALID_RID;
1247 if (cleanup == BNA_HARD_CLEANUP) {
1248 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1253 /* Move promisc mode config from active -> pending */
1254 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1255 promisc_enable(rxf->rxmode_pending,
1256 rxf->rxmode_pending_bitmask);
1257 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1258 if (cleanup == BNA_HARD_CLEANUP) {
1259 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1268 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1270 /* Enable/disable allmulti mode */
1271 if (is_allmulti_enable(rxf->rxmode_pending,
1272 rxf->rxmode_pending_bitmask)) {
1273 /* move allmulti configuration from pending -> active */
1274 allmulti_inactive(rxf->rxmode_pending,
1275 rxf->rxmode_pending_bitmask);
1276 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1277 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1279 } else if (is_allmulti_disable(rxf->rxmode_pending,
1280 rxf->rxmode_pending_bitmask)) {
1281 /* move allmulti configuration from pending -> active */
1282 allmulti_inactive(rxf->rxmode_pending,
1283 rxf->rxmode_pending_bitmask);
1284 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1285 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1293 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1295 /* Clear pending allmulti mode disable */
1296 if (is_allmulti_disable(rxf->rxmode_pending,
1297 rxf->rxmode_pending_bitmask)) {
1298 allmulti_inactive(rxf->rxmode_pending,
1299 rxf->rxmode_pending_bitmask);
1300 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1301 if (cleanup == BNA_HARD_CLEANUP) {
1302 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1307 /* Move allmulti mode config from active -> pending */
1308 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1309 allmulti_enable(rxf->rxmode_pending,
1310 rxf->rxmode_pending_bitmask);
1311 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1312 if (cleanup == BNA_HARD_CLEANUP) {
1313 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1322 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1324 struct bna *bna = rxf->rx->bna;
1327 if (is_promisc_enable(rxf->rxmode_pending,
1328 rxf->rxmode_pending_bitmask) ||
1329 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1330 /* Do nothing if pending enable or already enabled */
1331 } else if (is_promisc_disable(rxf->rxmode_pending,
1332 rxf->rxmode_pending_bitmask)) {
1333 /* Turn off pending disable command */
1334 promisc_inactive(rxf->rxmode_pending,
1335 rxf->rxmode_pending_bitmask);
1337 /* Schedule enable */
1338 promisc_enable(rxf->rxmode_pending,
1339 rxf->rxmode_pending_bitmask);
1340 bna->promisc_rid = rxf->rx->rid;
1348 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1350 struct bna *bna = rxf->rx->bna;
1353 if (is_promisc_disable(rxf->rxmode_pending,
1354 rxf->rxmode_pending_bitmask) ||
1355 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1356 /* Do nothing if pending disable or already disabled */
1357 } else if (is_promisc_enable(rxf->rxmode_pending,
1358 rxf->rxmode_pending_bitmask)) {
1359 /* Turn off pending enable command */
1360 promisc_inactive(rxf->rxmode_pending,
1361 rxf->rxmode_pending_bitmask);
1362 bna->promisc_rid = BFI_INVALID_RID;
1363 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1364 /* Schedule disable */
1365 promisc_disable(rxf->rxmode_pending,
1366 rxf->rxmode_pending_bitmask);
1374 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1378 if (is_allmulti_enable(rxf->rxmode_pending,
1379 rxf->rxmode_pending_bitmask) ||
1380 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1381 /* Do nothing if pending enable or already enabled */
1382 } else if (is_allmulti_disable(rxf->rxmode_pending,
1383 rxf->rxmode_pending_bitmask)) {
1384 /* Turn off pending disable command */
1385 allmulti_inactive(rxf->rxmode_pending,
1386 rxf->rxmode_pending_bitmask);
1388 /* Schedule enable */
1389 allmulti_enable(rxf->rxmode_pending,
1390 rxf->rxmode_pending_bitmask);
1398 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1402 if (is_allmulti_disable(rxf->rxmode_pending,
1403 rxf->rxmode_pending_bitmask) ||
1404 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1405 /* Do nothing if pending disable or already disabled */
1406 } else if (is_allmulti_enable(rxf->rxmode_pending,
1407 rxf->rxmode_pending_bitmask)) {
1408 /* Turn off pending enable command */
1409 allmulti_inactive(rxf->rxmode_pending,
1410 rxf->rxmode_pending_bitmask);
1411 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1412 /* Schedule disable */
1413 allmulti_disable(rxf->rxmode_pending,
1414 rxf->rxmode_pending_bitmask);
1422 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1424 if (rxf->vlan_strip_pending) {
1425 rxf->vlan_strip_pending = false;
1426 bna_bfi_vlan_strip_enable(rxf);
1435 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1436 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1438 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1439 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
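/*
 * SIZE_TO_PAGES() rounds a byte count up to whole pages; with 4 KiB
 * pages, for example, SIZE_TO_PAGES(6000) == 2 and SIZE_TO_PAGES(8192)
 * == 2.  It is used below to size the completion, data and header queue
 * page tables.
 */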
#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)
1460 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
1462 struct bna_dma_addr cur_q_addr = \
1463 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1464 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1465 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1466 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1467 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1468 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1469 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1472 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1473 static void bna_rx_enet_stop(struct bna_rx *rx);
1474 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1476 bfa_fsm_state_decl(bna_rx, stopped,
1477 struct bna_rx, enum bna_rx_event);
1478 bfa_fsm_state_decl(bna_rx, start_wait,
1479 struct bna_rx, enum bna_rx_event);
1480 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1481 struct bna_rx, enum bna_rx_event);
1482 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1483 struct bna_rx, enum bna_rx_event);
1484 bfa_fsm_state_decl(bna_rx, started,
1485 struct bna_rx, enum bna_rx_event);
1486 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1487 struct bna_rx, enum bna_rx_event);
1488 bfa_fsm_state_decl(bna_rx, stop_wait,
1489 struct bna_rx, enum bna_rx_event);
1490 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1491 struct bna_rx, enum bna_rx_event);
1492 bfa_fsm_state_decl(bna_rx, failed,
1493 struct bna_rx, enum bna_rx_event);
1494 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1495 struct bna_rx, enum bna_rx_event);
1497 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1499 call_rx_stop_cbfn(rx);
1502 static void bna_rx_sm_stopped(struct bna_rx *rx,
1503 enum bna_rx_event event)
1507 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1511 call_rx_stop_cbfn(rx);
1519 bfa_sm_fault(event);
1524 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1526 bna_bfi_rx_enet_start(rx);
1530 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1535 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1540 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1541 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1545 bna_rx_enet_stop(rx);
1549 bfa_sm_fault(event);
1554 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1555 enum bna_rx_event event)
1559 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1563 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1567 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1571 bfa_sm_fault(event);
1576 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1578 rx->rx_post_cbfn(rx->bna->bnad, rx);
1579 bna_rxf_start(&rx->rxf);
1583 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1588 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1592 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1593 bna_rxf_fail(&rx->rxf);
1594 call_rx_stall_cbfn(rx);
1595 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1598 case RX_E_RXF_STARTED:
1599 bna_rxf_stop(&rx->rxf);
1602 case RX_E_RXF_STOPPED:
1603 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1604 call_rx_stall_cbfn(rx);
1605 bna_rx_enet_stop(rx);
1609 bfa_sm_fault(event);
1616 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1621 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1626 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1630 bna_rx_enet_stop(rx);
1634 bfa_sm_fault(event);
1639 bna_rx_sm_started_entry(struct bna_rx *rx)
1641 struct bna_rxp *rxp;
1642 struct list_head *qe_rxp;
1643 int is_regular = (rx->type == BNA_RX_T_REGULAR);
1646 list_for_each(qe_rxp, &rx->rxp_q) {
1647 rxp = (struct bna_rxp *)qe_rxp;
1648 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1651 bna_ethport_cb_rx_started(&rx->bna->ethport);
1655 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1659 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1660 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1661 bna_rxf_stop(&rx->rxf);
1665 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1666 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1667 bna_rxf_fail(&rx->rxf);
1668 call_rx_stall_cbfn(rx);
1669 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1673 bfa_sm_fault(event);
1678 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1679 enum bna_rx_event event)
1683 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1687 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1688 bna_rxf_fail(&rx->rxf);
1689 call_rx_stall_cbfn(rx);
1690 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1693 case RX_E_RXF_STARTED:
1694 bfa_fsm_set_state(rx, bna_rx_sm_started);
1698 bfa_sm_fault(event);
1704 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1709 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1713 case RX_E_RXF_STOPPED:
1717 case RX_E_CLEANUP_DONE:
1718 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1722 bfa_sm_fault(event);
1728 bna_rx_sm_failed_entry(struct bna_rx *rx)
1733 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1737 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1741 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1745 case RX_E_RXF_STARTED:
1746 case RX_E_RXF_STOPPED:
1750 case RX_E_CLEANUP_DONE:
1751 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1755 bfa_sm_fault(event);
1760 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1765 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1769 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1773 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1776 case RX_E_CLEANUP_DONE:
1777 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1781 bfa_sm_fault(event);
1787 bna_bfi_rx_enet_start(struct bna_rx *rx)
1789 struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1790 struct bna_rxp *rxp = NULL;
1791 struct bna_rxq *q0 = NULL, *q1 = NULL;
1792 struct list_head *rxp_qe;
1795 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1796 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1797 cfg_req->mh.num_entries = htons(
1798 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1800 cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1801 cfg_req->num_queue_sets = rx->num_paths;
1802 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1804 i++, rxp_qe = bfa_q_next(rxp_qe)) {
1805 rxp = (struct bna_rxp *)rxp_qe;
1807 GET_RXQS(rxp, q0, q1);
1808 switch (rxp->type) {
1812 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1814 cfg_req->q_cfg[i].qs.rx_buffer_size =
1815 htons((u16)q1->buffer_size);
1818 case BNA_RXP_SINGLE:
1819 /* Large/Single RxQ */
1820 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1822 if (q0->multi_buffer)
1823 /* multi-buffer is enabled by allocating
1824 * a new rx with new set of resources.
1825 * q0->buffer_size should be initialized to
1828 cfg_req->rx_cfg.multi_buffer =
1829 BNA_STATUS_T_ENABLED;
1832 bna_enet_mtu_get(&rx->bna->enet);
1833 cfg_req->q_cfg[i].ql.rx_buffer_size =
1834 htons((u16)q0->buffer_size);
1841 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1844 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1845 rxp->cq.ib.ib_seg_host_addr.lsb;
1846 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1847 rxp->cq.ib.ib_seg_host_addr.msb;
1848 cfg_req->q_cfg[i].ib.intr.msix_index =
1849 htons((u16)rxp->cq.ib.intr_vector);
1852 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1853 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1854 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1855 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1856 cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1857 ? BNA_STATUS_T_ENABLED :
1858 BNA_STATUS_T_DISABLED;
1859 cfg_req->ib_cfg.coalescing_timeout =
1860 htonl((u32)rxp->cq.ib.coalescing_timeo);
1861 cfg_req->ib_cfg.inter_pkt_timeout =
1862 htonl((u32)rxp->cq.ib.interpkt_timeo);
1863 cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1865 switch (rxp->type) {
1867 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1871 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1872 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1873 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1874 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1877 case BNA_RXP_SINGLE:
1878 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1884 cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1886 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1887 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1888 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1892 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1894 struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1896 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1897 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1898 req->mh.num_entries = htons(
1899 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1900 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1902 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1906 bna_rx_enet_stop(struct bna_rx *rx)
1908 struct bna_rxp *rxp;
1909 struct list_head *qe_rxp;
1912 list_for_each(qe_rxp, &rx->rxp_q) {
1913 rxp = (struct bna_rxp *)qe_rxp;
1914 bna_ib_stop(rx->bna, &rxp->cq.ib);
1917 bna_bfi_rx_enet_stop(rx);
1921 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1923 if ((rx_mod->rx_free_count == 0) ||
1924 (rx_mod->rxp_free_count == 0) ||
1925 (rx_mod->rxq_free_count == 0))
1928 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1929 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1930 (rx_mod->rxq_free_count < rx_cfg->num_paths))
1933 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1934 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1941 static struct bna_rxq *
1942 bna_rxq_get(struct bna_rx_mod *rx_mod)
1944 struct bna_rxq *rxq = NULL;
1945 struct list_head *qe = NULL;
1947 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1948 rx_mod->rxq_free_count--;
1949 rxq = (struct bna_rxq *)qe;
1950 bfa_q_qe_init(&rxq->qe);
1956 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1958 bfa_q_qe_init(&rxq->qe);
1959 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1960 rx_mod->rxq_free_count++;
1963 static struct bna_rxp *
1964 bna_rxp_get(struct bna_rx_mod *rx_mod)
1966 struct list_head *qe = NULL;
1967 struct bna_rxp *rxp = NULL;
1969 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1970 rx_mod->rxp_free_count--;
1971 rxp = (struct bna_rxp *)qe;
1972 bfa_q_qe_init(&rxp->qe);
1978 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1980 bfa_q_qe_init(&rxp->qe);
1981 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1982 rx_mod->rxp_free_count++;
1985 static struct bna_rx *
1986 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1988 struct list_head *qe = NULL;
1989 struct bna_rx *rx = NULL;
1991 if (type == BNA_RX_T_REGULAR) {
1992 bfa_q_deq(&rx_mod->rx_free_q, &qe);
1994 bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1996 rx_mod->rx_free_count--;
1997 rx = (struct bna_rx *)qe;
1998 bfa_q_qe_init(&rx->qe);
1999 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2006 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2008 struct list_head *prev_qe = NULL;
2009 struct list_head *qe;
2011 bfa_q_qe_init(&rx->qe);
2013 list_for_each(qe, &rx_mod->rx_free_q) {
2014 if (((struct bna_rx *)qe)->rid < rx->rid)
2020 if (prev_qe == NULL) {
2021 /* This is the first entry */
2022 bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
2023 } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
2024 /* This is the last entry */
2025 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2027 /* Somewhere in the middle */
2028 bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
2029 bfa_q_prev(&rx->qe) = prev_qe;
2030 bfa_q_next(prev_qe) = &rx->qe;
2031 bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
2034 rx_mod->rx_free_count++;
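/*
 * The Rx free list is kept sorted by rid: bna_rx_get() hands out low
 * rids from the head for regular Rx objects and high rids from the tail
 * for loopback, and bna_rx_put() re-inserts a freed object at its
 * rid-ordered position rather than simply appending it.
 */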
2038 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
2041 switch (rxp->type) {
2042 case BNA_RXP_SINGLE:
2043 rxp->rxq.single.only = q0;
2044 rxp->rxq.single.reserved = NULL;
2047 rxp->rxq.slr.large = q0;
2048 rxp->rxq.slr.small = q1;
2051 rxp->rxq.hds.data = q0;
2052 rxp->rxq.hds.hdr = q1;
2060 bna_rxq_qpt_setup(struct bna_rxq *rxq,
2061 struct bna_rxp *rxp,
2064 struct bna_mem_descr *qpt_mem,
2065 struct bna_mem_descr *swqpt_mem,
2066 struct bna_mem_descr *page_mem)
2070 struct bna_dma_addr bna_dma;
2073 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2074 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2075 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2076 rxq->qpt.page_count = page_count;
2077 rxq->qpt.page_size = page_size;
2079 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2080 rxq->rcb->sw_q = page_mem->kva;
2082 kva = page_mem->kva;
2083 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2085 for (i = 0; i < rxq->qpt.page_count; i++) {
2086 rxq->rcb->sw_qpt[i] = kva;
2089 BNA_SET_DMA_ADDR(dma, &bna_dma);
2090 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2092 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2099 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
2102 struct bna_mem_descr *qpt_mem,
2103 struct bna_mem_descr *swqpt_mem,
2104 struct bna_mem_descr *page_mem)
2108 struct bna_dma_addr bna_dma;
2111 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2112 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2113 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2114 rxp->cq.qpt.page_count = page_count;
2115 rxp->cq.qpt.page_size = page_size;
2117 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2118 rxp->cq.ccb->sw_q = page_mem->kva;
2120 kva = page_mem->kva;
2121 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2123 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2124 rxp->cq.ccb->sw_qpt[i] = kva;
2127 BNA_SET_DMA_ADDR(dma, &bna_dma);
2128 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2130 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2137 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2139 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2141 bfa_wc_down(&rx_mod->rx_stop_wc);
2145 bna_rx_mod_cb_rx_stopped_all(void *arg)
2147 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2149 if (rx_mod->stop_cbfn)
2150 rx_mod->stop_cbfn(&rx_mod->bna->enet);
2151 rx_mod->stop_cbfn = NULL;
2155 bna_rx_start(struct bna_rx *rx)
2157 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2158 if (rx->rx_flags & BNA_RX_F_ENABLED)
2159 bfa_fsm_send_event(rx, RX_E_START);
2163 bna_rx_stop(struct bna_rx *rx)
2165 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2166 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2167 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2169 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2170 rx->stop_cbarg = &rx->bna->rx_mod;
2171 bfa_fsm_send_event(rx, RX_E_STOP);
2176 bna_rx_fail(struct bna_rx *rx)
2178 /* Indicate Enet is not enabled, and failed */
2179 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2180 bfa_fsm_send_event(rx, RX_E_FAIL);
2184 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2187 struct list_head *qe;
2189 rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2190 if (type == BNA_RX_T_LOOPBACK)
2191 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2193 list_for_each(qe, &rx_mod->rx_active_q) {
2194 rx = (struct bna_rx *)qe;
2195 if (rx->type == type)
2201 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2204 struct list_head *qe;
2206 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2207 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2209 rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2211 bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2213 list_for_each(qe, &rx_mod->rx_active_q) {
2214 rx = (struct bna_rx *)qe;
2215 if (rx->type == type) {
2216 bfa_wc_up(&rx_mod->rx_stop_wc);
2221 bfa_wc_wait(&rx_mod->rx_stop_wc);
2225 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2228 struct list_head *qe;
2230 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2231 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2233 list_for_each(qe, &rx_mod->rx_active_q) {
2234 rx = (struct bna_rx *)qe;
2239 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2240 struct bna_res_info *res_info)
2243 struct bna_rx *rx_ptr;
2244 struct bna_rxp *rxp_ptr;
2245 struct bna_rxq *rxq_ptr;
2250 rx_mod->rx = (struct bna_rx *)
2251 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2252 rx_mod->rxp = (struct bna_rxp *)
2253 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2254 rx_mod->rxq = (struct bna_rxq *)
2255 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2257 /* Initialize the queues */
2258 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2259 rx_mod->rx_free_count = 0;
2260 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2261 rx_mod->rxq_free_count = 0;
2262 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2263 rx_mod->rxp_free_count = 0;
2264 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2266 /* Build RX queues */
2267 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2268 rx_ptr = &rx_mod->rx[index];
2270 bfa_q_qe_init(&rx_ptr->qe);
2271 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2273 rx_ptr->rid = index;
2274 rx_ptr->stop_cbfn = NULL;
2275 rx_ptr->stop_cbarg = NULL;
2277 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2278 rx_mod->rx_free_count++;
2281 /* build RX-path queue */
2282 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2283 rxp_ptr = &rx_mod->rxp[index];
2284 bfa_q_qe_init(&rxp_ptr->qe);
2285 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2286 rx_mod->rxp_free_count++;
2289 /* build RXQ queue */
2290 for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2291 rxq_ptr = &rx_mod->rxq[index];
2292 bfa_q_qe_init(&rxq_ptr->qe);
2293 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2294 rx_mod->rxq_free_count++;
2299 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2301 struct list_head *qe;
2305 list_for_each(qe, &rx_mod->rx_free_q)
2309 list_for_each(qe, &rx_mod->rxp_free_q)
2313 list_for_each(qe, &rx_mod->rxq_free_q)
2320 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2322 struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2323 struct bna_rxp *rxp = NULL;
2324 struct bna_rxq *q0 = NULL, *q1 = NULL;
2325 struct list_head *rxp_qe;
2328 bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2329 sizeof(struct bfi_enet_rx_cfg_rsp));
2331 rx->hw_id = cfg_rsp->hw_id;
2333 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2335 i++, rxp_qe = bfa_q_next(rxp_qe)) {
2336 rxp = (struct bna_rxp *)rxp_qe;
2337 GET_RXQS(rxp, q0, q1);
2339 /* Setup doorbells */
2340 rxp->cq.ccb->i_dbell->doorbell_addr =
2341 rx->bna->pcidev.pci_bar_kva
2342 + ntohl(cfg_rsp->q_handles[i].i_dbell);
2343 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2345 rx->bna->pcidev.pci_bar_kva
2346 + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2347 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2350 rx->bna->pcidev.pci_bar_kva
2351 + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2352 q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2355 /* Initialize producer/consumer indexes */
2356 (*rxp->cq.ccb->hw_producer_index) = 0;
2357 rxp->cq.ccb->producer_index = 0;
2358 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2360 q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2363 bfa_fsm_send_event(rx, RX_E_STARTED);
2367 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2369 bfa_fsm_send_event(rx, RX_E_STOPPED);
2373 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2375 u32 cq_size, hq_size, dq_size;
2376 u32 cpage_count, hpage_count, dpage_count;
2377 struct bna_mem_info *mem_info;
2382 dq_depth = q_cfg->q0_depth;
2383 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2384 cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
2386 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2387 cq_size = ALIGN(cq_size, PAGE_SIZE);
2388 cpage_count = SIZE_TO_PAGES(cq_size);
2390 dq_depth = roundup_pow_of_two(dq_depth);
2391 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2392 dq_size = ALIGN(dq_size, PAGE_SIZE);
2393 dpage_count = SIZE_TO_PAGES(dq_size);
2395 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2396 hq_depth = roundup_pow_of_two(hq_depth);
2397 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2398 hq_size = ALIGN(hq_size, PAGE_SIZE);
2399 hpage_count = SIZE_TO_PAGES(hq_size);
2403 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2404 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2405 mem_info->mem_type = BNA_MEM_T_KVA;
2406 mem_info->len = sizeof(struct bna_ccb);
2407 mem_info->num = q_cfg->num_paths;
2409 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2410 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2411 mem_info->mem_type = BNA_MEM_T_KVA;
2412 mem_info->len = sizeof(struct bna_rcb);
2413 mem_info->num = BNA_GET_RXQS(q_cfg);
2415 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2416 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2417 mem_info->mem_type = BNA_MEM_T_DMA;
2418 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2419 mem_info->num = q_cfg->num_paths;
2421 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2422 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2423 mem_info->mem_type = BNA_MEM_T_KVA;
2424 mem_info->len = cpage_count * sizeof(void *);
2425 mem_info->num = q_cfg->num_paths;
2427 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2428 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2429 mem_info->mem_type = BNA_MEM_T_DMA;
2430 mem_info->len = PAGE_SIZE * cpage_count;
2431 mem_info->num = q_cfg->num_paths;
2433 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2434 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2435 mem_info->mem_type = BNA_MEM_T_DMA;
2436 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2437 mem_info->num = q_cfg->num_paths;
2439 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2440 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2441 mem_info->mem_type = BNA_MEM_T_KVA;
2442 mem_info->len = dpage_count * sizeof(void *);
2443 mem_info->num = q_cfg->num_paths;
2445 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2446 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2447 mem_info->mem_type = BNA_MEM_T_DMA;
2448 mem_info->len = PAGE_SIZE * dpage_count;
2449 mem_info->num = q_cfg->num_paths;
2451 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2452 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2453 mem_info->mem_type = BNA_MEM_T_DMA;
2454 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2455 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2457 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2458 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2459 mem_info->mem_type = BNA_MEM_T_KVA;
2460 mem_info->len = hpage_count * sizeof(void *);
2461 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2463 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2464 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2465 mem_info->mem_type = BNA_MEM_T_DMA;
2466 mem_info->len = PAGE_SIZE * hpage_count;
2467 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2469 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2470 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2471 mem_info->mem_type = BNA_MEM_T_DMA;
2472 mem_info->len = BFI_IBIDX_SIZE;
2473 mem_info->num = q_cfg->num_paths;
2475 res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2476 mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2477 mem_info->mem_type = BNA_MEM_T_KVA;
2478 mem_info->len = BFI_ENET_RSS_RIT_MAX;
2481 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2482 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2483 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
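/*
 * bna_rx_create - build an Rx object from the resources pre-allocated in
 * res_info: one RxP (CQ + IB) per path, a data RxQ and, except for
 * BNA_RXP_SINGLE, a second RxQ per path. Initializes the RxF and leaves the
 * Rx in the stopped state.
 */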
2487 bna_rx_create(struct bna *bna, struct bnad *bnad,
2488 struct bna_rx_config *rx_cfg,
2489 const struct bna_rx_event_cbfn *rx_cbfn,
2490 struct bna_res_info *res_info,
2493 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2495 struct bna_rxp *rxp;
2498 struct bna_intr_info *intr_info;
2499 struct bna_mem_descr *hqunmap_mem;
2500 struct bna_mem_descr *dqunmap_mem;
2501 struct bna_mem_descr *ccb_mem;
2502 struct bna_mem_descr *rcb_mem;
2503 struct bna_mem_descr *cqpt_mem;
2504 struct bna_mem_descr *cswqpt_mem;
2505 struct bna_mem_descr *cpage_mem;
2506 struct bna_mem_descr *hqpt_mem;
2507 struct bna_mem_descr *dqpt_mem;
2508 struct bna_mem_descr *hsqpt_mem;
2509 struct bna_mem_descr *dsqpt_mem;
2510 struct bna_mem_descr *hpage_mem;
2511 struct bna_mem_descr *dpage_mem;
2512 u32 dpage_count, hpage_count;
2513 u32 hq_idx, dq_idx, rcb_idx;
2517 if (!bna_rx_res_check(rx_mod, rx_cfg))
2518 return NULL;
2520 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2521 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2522 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2523 dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2524 hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2525 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2526 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2527 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2528 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2529 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2530 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2531 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2532 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2533 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2535 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2536 PAGE_SIZE;
2538 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2539 PAGE_SIZE;
2541 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2542 PAGE_SIZE;
2544 rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2547 INIT_LIST_HEAD(&rx->rxp_q);
2548 rx->stop_cbfn = NULL;
2549 rx->stop_cbarg = NULL;
2552 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2553 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2554 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2555 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2556 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2557 /* The following callbacks are mandatory */
2558 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2559 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2561 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2562 switch (rx->type) {
2563 case BNA_RX_T_REGULAR:
2564 if (!(rx->bna->rx_mod.flags &
2565 BNA_RX_MOD_F_ENET_LOOPBACK))
2566 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2567 break;
2568 case BNA_RX_T_LOOPBACK:
2569 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2570 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2571 break;
2572 }
2573 }
2575 rx->num_paths = rx_cfg->num_paths;
2576 for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2577 i < rx->num_paths; i++) {
2578 rxp = bna_rxp_get(rx_mod);
2579 list_add_tail(&rxp->qe, &rx->rxp_q);
2580 rxp->type = rx_cfg->rxp_type;
2584 q0 = bna_rxq_get(rx_mod);
2585 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2586 q1 = NULL;
2587 else
2588 q1 = bna_rxq_get(rx_mod);
2590 if (1 == intr_info->num)
2591 rxp->vector = intr_info->idl[0].vector;
2592 else
2593 rxp->vector = intr_info->idl[i].vector;
2597 rxp->cq.ib.ib_seg_host_addr.lsb =
2598 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2599 rxp->cq.ib.ib_seg_host_addr.msb =
2600 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2601 rxp->cq.ib.ib_seg_host_addr_kva =
2602 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2603 rxp->cq.ib.intr_type = intr_info->intr_type;
2604 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2605 rxp->cq.ib.intr_vector = rxp->vector;
2606 else
2607 rxp->cq.ib.intr_vector = BIT(rxp->vector);
2608 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2609 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2610 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2612 bna_rxp_add_rxqs(rxp, q0, q1);
2619 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2620 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2621 rcb_idx++; dq_idx++;
2622 q0->rcb->q_depth = rx_cfg->q0_depth;
2623 q0->q_depth = rx_cfg->q0_depth;
2624 q0->multi_buffer = rx_cfg->q0_multi_buf;
2625 q0->buffer_size = rx_cfg->q0_buf_size;
2626 q0->num_vecs = rx_cfg->q0_num_vecs;
2628 q0->rcb->bnad = bna->bnad;
2630 q0->rx_packets = q0->rx_bytes = 0;
2631 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2633 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2634 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2636 if (rx->rcb_setup_cbfn)
2637 rx->rcb_setup_cbfn(bnad, q0->rcb);
2645 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2646 q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2647 rcb_idx++; hq_idx++;
2648 q1->rcb->q_depth = rx_cfg->q1_depth;
2649 q1->q_depth = rx_cfg->q1_depth;
2650 q1->multi_buffer = BNA_STATUS_T_DISABLED;
2653 q1->rcb->bnad = bna->bnad;
2655 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2656 rx_cfg->hds_config.forced_offset
2657 : rx_cfg->q1_buf_size;
2658 q1->rx_packets = q1->rx_bytes = 0;
2659 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2661 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2662 &hqpt_mem[i], &hsqpt_mem[i],
2665 if (rx->rcb_setup_cbfn)
2666 rx->rcb_setup_cbfn(bnad, q1->rcb);
2671 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2672 cq_depth = rx_cfg->q0_depth +
2673 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2674 0 : rx_cfg->q1_depth);
2675 /* if multi-buffer is enabled, the sum of q0_depth
2676 * and q1_depth need not be a power of 2
2677 */
2678 cq_depth = roundup_pow_of_two(cq_depth);
2679 rxp->cq.ccb->q_depth = cq_depth;
2680 rxp->cq.ccb->cq = &rxp->cq;
2681 rxp->cq.ccb->rcb[0] = q0->rcb;
2682 q0->rcb->ccb = rxp->cq.ccb;
2684 rxp->cq.ccb->rcb[1] = q1->rcb;
2685 q1->rcb->ccb = rxp->cq.ccb;
2687 rxp->cq.ccb->hw_producer_index =
2688 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2689 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2690 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2691 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2692 rxp->cq.ccb->rx_coalescing_timeo =
2693 rxp->cq.ib.coalescing_timeo;
2694 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2695 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2696 rxp->cq.ccb->bnad = bna->bnad;
2697 rxp->cq.ccb->id = i;
2699 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2700 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2702 if (rx->ccb_setup_cbfn)
2703 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2706 rx->hds_cfg = rx_cfg->hds_config;
2708 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2710 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2712 rx_mod->rid_mask |= BIT(rx->rid);
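/*
 * bna_rx_destroy - tear down an Rx object: uninitialize the RxF, invoke the
 * rcb/ccb destroy callbacks, return all RxQs and RxPs to the Rx module free
 * lists and clear this Rx from the module's rid_mask.
 */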
2718 bna_rx_destroy(struct bna_rx *rx)
2720 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2721 struct bna_rxq *q0 = NULL;
2722 struct bna_rxq *q1 = NULL;
2723 struct bna_rxp *rxp;
2724 struct list_head *qe;
2726 bna_rxf_uninit(&rx->rxf);
2728 while (!list_empty(&rx->rxp_q)) {
2729 bfa_q_deq(&rx->rxp_q, &rxp);
2730 GET_RXQS(rxp, q0, q1);
2731 if (rx->rcb_destroy_cbfn)
2732 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2736 bna_rxq_put(rx_mod, q0);
2739 if (rx->rcb_destroy_cbfn)
2740 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2744 bna_rxq_put(rx_mod, q1);
2746 rxp->rxq.slr.large = NULL;
2747 rxp->rxq.slr.small = NULL;
2749 if (rx->ccb_destroy_cbfn)
2750 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2753 bna_rxp_put(rx_mod, rxp);
2756 list_for_each(qe, &rx_mod->rx_active_q) {
2757 if (qe == &rx->qe) {
2759 bfa_q_qe_init(&rx->qe);
2764 rx_mod->rid_mask &= ~BIT(rx->rid);
2768 bna_rx_put(rx_mod, rx);
2772 bna_rx_enable(struct bna_rx *rx)
2774 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2775 return;
2777 rx->rx_flags |= BNA_RX_F_ENABLED;
2778 if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2779 bfa_fsm_send_event(rx, RX_E_START);
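/*
 * bna_rx_disable - disable an Rx. For BNA_SOFT_CLEANUP the hardware is not
 * touched and the callback runs immediately; otherwise RX_E_STOP is sent to
 * the Rx state machine and cbfn is invoked once the stop completes.
 */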
2783 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2784 void (*cbfn)(void *, struct bna_rx *))
2786 if (type == BNA_SOFT_CLEANUP) {
2787 /* h/w should not be accessed. Treat it as if we are stopped */
2788 (*cbfn)(rx->bna->bnad, rx);
2789 } else {
2790 rx->stop_cbfn = cbfn;
2791 rx->stop_cbarg = rx->bna->bnad;
2793 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2795 bfa_fsm_send_event(rx, RX_E_STOP);
2800 bna_rx_cleanup_complete(struct bna_rx *rx)
2802 bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2806 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2808 struct bna_rxf *rxf = &rx->rxf;
2810 if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2811 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2812 rxf->vlan_strip_pending = true;
2813 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2818 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2820 struct bna_rxf *rxf = &rx->rxf;
2822 if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2823 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2824 rxf->vlan_strip_pending = true;
2825 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
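/*
 * bna_rx_mode_set - apply promiscuous/all-multicast mode changes. Only one Rx
 * in the system may own promiscuous or default mode at a time; conflicting
 * requests fail with BNA_CB_FAIL, otherwise RXF_E_CONFIG is raised when a
 * hardware update is needed.
 */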
2830 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2831 enum bna_rxmode bitmask)
2833 struct bna_rxf *rxf = &rx->rxf;
2834 int need_hw_config = 0;
2838 if (is_promisc_enable(new_mode, bitmask)) {
2839 /* If promisc mode is already enabled elsewhere in the system */
2840 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2841 (rx->bna->promisc_rid != rxf->rx->rid))
2842 goto err_return;
2844 /* If default mode is already enabled in the system */
2845 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2846 goto err_return;
2848 /* Trying to enable promiscuous and default mode together */
2849 if (is_default_enable(new_mode, bitmask))
2850 goto err_return;
2851 }
2853 if (is_default_enable(new_mode, bitmask)) {
2854 /* If default mode is already enabled elsewhere in the system */
2855 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2856 (rx->bna->default_mode_rid != rxf->rx->rid)) {
2857 goto err_return;
2858 }
2860 /* If promiscuous mode is already enabled in the system */
2861 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2862 goto err_return;
2863 }
2865 /* Process the commands */
2867 if (is_promisc_enable(new_mode, bitmask)) {
2868 if (bna_rxf_promisc_enable(rxf))
2869 need_hw_config = 1;
2870 } else if (is_promisc_disable(new_mode, bitmask)) {
2871 if (bna_rxf_promisc_disable(rxf))
2872 need_hw_config = 1;
2873 }
2875 if (is_allmulti_enable(new_mode, bitmask)) {
2876 if (bna_rxf_allmulti_enable(rxf))
2877 need_hw_config = 1;
2878 } else if (is_allmulti_disable(new_mode, bitmask)) {
2879 if (bna_rxf_allmulti_disable(rxf))
2880 need_hw_config = 1;
2881 }
2883 /* Trigger h/w if needed */
2885 if (need_hw_config) {
2886 rxf->cam_fltr_cbfn = NULL;
2887 rxf->cam_fltr_cbarg = rx->bna->bnad;
2888 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2889 }
2891 return BNA_CB_SUCCESS;
2893 err_return:
2894 return BNA_CB_FAIL;
2898 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2900 struct bna_rxf *rxf = &rx->rxf;
2902 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2903 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2904 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2905 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2910 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2912 struct bna_rxp *rxp;
2913 struct list_head *qe;
2915 list_for_each(qe, &rx->rxp_q) {
2916 rxp = (struct bna_rxp *)qe;
2917 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2918 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2923 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2927 for (i = 0; i < BNA_LOAD_T_MAX; i++)
2928 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2929 bna->rx_mod.dim_vector[i][j] = vector[i][j];
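/*
 * bna_rx_dim_update - dynamic interrupt moderation. Classifies the packet
 * rate seen on this CCB into a load level and a small/large packet bias, then
 * programs the corresponding coalescing timeout from the dim_vector table.
 */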
2933 bna_rx_dim_update(struct bna_ccb *ccb)
2935 struct bna *bna = ccb->cq->rx->bna;
2936 u32 load, bias;
2937 u32 pkt_rt, small_rt, large_rt;
2938 u8 coalescing_timeo;
2940 if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2941 (ccb->pkt_rate.large_pkt_cnt == 0))
2942 return;
2944 /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2946 small_rt = ccb->pkt_rate.small_pkt_cnt;
2947 large_rt = ccb->pkt_rate.large_pkt_cnt;
2949 pkt_rt = small_rt + large_rt;
2951 if (pkt_rt < BNA_PKT_RATE_10K)
2952 load = BNA_LOAD_T_LOW_4;
2953 else if (pkt_rt < BNA_PKT_RATE_20K)
2954 load = BNA_LOAD_T_LOW_3;
2955 else if (pkt_rt < BNA_PKT_RATE_30K)
2956 load = BNA_LOAD_T_LOW_2;
2957 else if (pkt_rt < BNA_PKT_RATE_40K)
2958 load = BNA_LOAD_T_LOW_1;
2959 else if (pkt_rt < BNA_PKT_RATE_50K)
2960 load = BNA_LOAD_T_HIGH_1;
2961 else if (pkt_rt < BNA_PKT_RATE_60K)
2962 load = BNA_LOAD_T_HIGH_2;
2963 else if (pkt_rt < BNA_PKT_RATE_80K)
2964 load = BNA_LOAD_T_HIGH_3;
2965 else
2966 load = BNA_LOAD_T_HIGH_4;
2968 if (small_rt > (large_rt << 1))
2969 bias = BNA_BIAS_T_SMALL;
2970 else
2971 bias = BNA_BIAS_T_LARGE;
2973 ccb->pkt_rate.small_pkt_cnt = 0;
2974 ccb->pkt_rate.large_pkt_cnt = 0;
2976 coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2977 ccb->rx_coalescing_timeo = coalescing_timeo;
2980 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2983 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2996 #define call_tx_stop_cbfn(tx) \
2998 if ((tx)->stop_cbfn) { \
2999 void (*cbfn)(void *, struct bna_tx *); \
3000 void *cbarg; \
3001 cbfn = (tx)->stop_cbfn; \
3002 cbarg = (tx)->stop_cbarg; \
3003 (tx)->stop_cbfn = NULL; \
3004 (tx)->stop_cbarg = NULL; \
3005 cbfn(cbarg, (tx)); \
3009 #define call_tx_prio_change_cbfn(tx) \
3011 if ((tx)->prio_change_cbfn) { \
3012 void (*cbfn)(struct bnad *, struct bna_tx *); \
3013 cbfn = (tx)->prio_change_cbfn; \
3014 (tx)->prio_change_cbfn = NULL; \
3015 cbfn((tx)->bna->bnad, (tx)); \
3019 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
3020 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
3021 static void bna_tx_enet_stop(struct bna_tx *tx);
3029 TX_E_PRIO_CHANGE = 6,
3030 TX_E_CLEANUP_DONE = 7,
3034 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
3035 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
3036 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
3037 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
3038 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
3040 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3042 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
3044 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
3045 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
3049 bna_tx_sm_stopped_entry(struct bna_tx *tx)
3051 call_tx_stop_cbfn(tx);
3055 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3059 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3063 call_tx_stop_cbfn(tx);
3070 case TX_E_PRIO_CHANGE:
3071 call_tx_prio_change_cbfn(tx);
3074 case TX_E_BW_UPDATE:
3079 bfa_sm_fault(event);
3084 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
3086 bna_bfi_tx_enet_start(tx);
3090 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
3094 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3095 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3099 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3100 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3104 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
3105 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
3106 BNA_TX_F_BW_UPDATED);
3107 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3109 bfa_fsm_set_state(tx, bna_tx_sm_started);
3112 case TX_E_PRIO_CHANGE:
3113 tx->flags |= BNA_TX_F_PRIO_CHANGED;
3116 case TX_E_BW_UPDATE:
3117 tx->flags |= BNA_TX_F_BW_UPDATED;
3121 bfa_sm_fault(event);
3126 bna_tx_sm_started_entry(struct bna_tx *tx)
3128 struct bna_txq *txq;
3129 struct list_head *qe;
3130 int is_regular = (tx->type == BNA_TX_T_REGULAR);
3132 list_for_each(qe, &tx->txq_q) {
3133 txq = (struct bna_txq *)qe;
3134 txq->tcb->priority = txq->priority;
3136 bna_ib_start(tx->bna, &txq->ib, is_regular);
3138 tx->tx_resume_cbfn(tx->bna->bnad, tx);
3142 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3146 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3147 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3148 bna_tx_enet_stop(tx);
3152 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3153 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3154 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3157 case TX_E_PRIO_CHANGE:
3158 case TX_E_BW_UPDATE:
3159 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3163 bfa_sm_fault(event);
3168 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3173 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3178 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3179 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3180 break;
3182 case TX_E_STARTED:
3183 /*
3184 * We are here due to start_wait -> stop_wait transition on
3185 * TX_E_STOP event
3186 */
3187 bna_tx_enet_stop(tx);
3188 break;
3190 case TX_E_PRIO_CHANGE:
3191 case TX_E_BW_UPDATE:
3196 bfa_sm_fault(event);
3201 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3206 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3210 case TX_E_PRIO_CHANGE:
3211 case TX_E_BW_UPDATE:
3215 case TX_E_CLEANUP_DONE:
3216 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3220 bfa_sm_fault(event);
3225 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3227 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3228 bna_tx_enet_stop(tx);
3232 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3236 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3240 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3241 call_tx_prio_change_cbfn(tx);
3242 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3246 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3249 case TX_E_PRIO_CHANGE:
3250 case TX_E_BW_UPDATE:
3255 bfa_sm_fault(event);
3260 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3262 call_tx_prio_change_cbfn(tx);
3263 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3267 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3271 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3275 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3278 case TX_E_PRIO_CHANGE:
3279 case TX_E_BW_UPDATE:
3283 case TX_E_CLEANUP_DONE:
3284 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3288 bfa_sm_fault(event);
3293 bna_tx_sm_failed_entry(struct bna_tx *tx)
3298 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3302 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3306 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3313 case TX_E_CLEANUP_DONE:
3314 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3318 bfa_sm_fault(event);
3323 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3328 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3332 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3336 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3339 case TX_E_CLEANUP_DONE:
3340 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3343 case TX_E_BW_UPDATE:
3348 bfa_sm_fault(event);
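/* Build and post the BFI TX_CFG_SET request that starts this Tx in firmware */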
3353 bna_bfi_tx_enet_start(struct bna_tx *tx)
3355 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3356 struct bna_txq *txq = NULL;
3357 struct list_head *qe;
3360 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3361 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3362 cfg_req->mh.num_entries = htons(
3363 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3365 cfg_req->num_queues = tx->num_txq;
3366 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3367 i < tx->num_txq;
3368 i++, qe = bfa_q_next(qe)) {
3369 txq = (struct bna_txq *)qe;
3371 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3372 cfg_req->q_cfg[i].q.priority = txq->priority;
3374 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3375 txq->ib.ib_seg_host_addr.lsb;
3376 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3377 txq->ib.ib_seg_host_addr.msb;
3378 cfg_req->q_cfg[i].ib.intr.msix_index =
3379 htons((u16)txq->ib.intr_vector);
3382 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3383 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3384 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3385 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3386 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3387 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3388 cfg_req->ib_cfg.coalescing_timeout =
3389 htonl((u32)txq->ib.coalescing_timeo);
3390 cfg_req->ib_cfg.inter_pkt_timeout =
3391 htonl((u32)txq->ib.interpkt_timeo);
3392 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3394 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3395 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3396 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3397 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3399 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3400 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3401 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3405 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3407 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3409 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3410 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3411 req->mh.num_entries = htons(
3412 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3413 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3414 &req->mh);
3415 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3419 bna_tx_enet_stop(struct bna_tx *tx)
3421 struct bna_txq *txq;
3422 struct list_head *qe;
3425 list_for_each(qe, &tx->txq_q) {
3426 txq = (struct bna_txq *)qe;
3427 bna_ib_stop(tx->bna, &txq->ib);
3430 bna_bfi_tx_enet_stop(tx);
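/*
 * bna_txq_qpt_setup - populate the TxQ page table: record the DMA address of
 * each queue page in the hardware QPT and the kernel virtual address of each
 * page in the software QPT used by the driver.
 */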
3434 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3435 struct bna_mem_descr *qpt_mem,
3436 struct bna_mem_descr *swqpt_mem,
3437 struct bna_mem_descr *page_mem)
3439 u8 *kva;
3440 u64 dma;
3441 struct bna_dma_addr bna_dma;
3442 int i;
3444 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3445 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3446 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3447 txq->qpt.page_count = page_count;
3448 txq->qpt.page_size = page_size;
3450 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3451 txq->tcb->sw_q = page_mem->kva;
3453 kva = page_mem->kva;
3454 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3456 for (i = 0; i < page_count; i++) {
3457 txq->tcb->sw_qpt[i] = kva;
3458 kva += PAGE_SIZE;
3460 BNA_SET_DMA_ADDR(dma, &bna_dma);
3461 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3462 bna_dma.lsb;
3463 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3464 bna_dma.msb;
3465 dma += PAGE_SIZE;
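/*
 * bna_tx_get - take a Tx object from the free list; a regular Tx is taken
 * from the head of the list and a loopback Tx from the tail.
 */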
3469 static struct bna_tx *
3470 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3472 struct list_head *qe = NULL;
3473 struct bna_tx *tx = NULL;
3475 if (list_empty(&tx_mod->tx_free_q))
3476 return NULL;
3477 if (type == BNA_TX_T_REGULAR) {
3478 bfa_q_deq(&tx_mod->tx_free_q, &qe);
3479 } else {
3480 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3481 }
3482 tx = (struct bna_tx *)qe;
3483 bfa_q_qe_init(&tx->qe);
3490 bna_tx_free(struct bna_tx *tx)
3492 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3493 struct bna_txq *txq;
3494 struct list_head *prev_qe;
3495 struct list_head *qe;
3497 while (!list_empty(&tx->txq_q)) {
3498 bfa_q_deq(&tx->txq_q, &txq);
3499 bfa_q_qe_init(&txq->qe);
3502 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3505 list_for_each(qe, &tx_mod->tx_active_q) {
3506 if (qe == &tx->qe) {
3508 bfa_q_qe_init(&tx->qe);
3517 list_for_each(qe, &tx_mod->tx_free_q) {
3518 if (((struct bna_tx *)qe)->rid < tx->rid)
3519 prev_qe = qe;
3520 else
3521 break;
3525 if (prev_qe == NULL) {
3526 /* This is the first entry */
3527 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3528 } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3529 /* This is the last entry */
3530 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3532 /* Somewhere in the middle */
3533 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3534 bfa_q_prev(&tx->qe) = prev_qe;
3535 bfa_q_next(prev_qe) = &tx->qe;
3536 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3541 bna_tx_start(struct bna_tx *tx)
3543 tx->flags |= BNA_TX_F_ENET_STARTED;
3544 if (tx->flags & BNA_TX_F_ENABLED)
3545 bfa_fsm_send_event(tx, TX_E_START);
3549 bna_tx_stop(struct bna_tx *tx)
3551 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3552 tx->stop_cbarg = &tx->bna->tx_mod;
3554 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3555 bfa_fsm_send_event(tx, TX_E_STOP);
3559 bna_tx_fail(struct bna_tx *tx)
3561 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3562 bfa_fsm_send_event(tx, TX_E_FAIL);
3566 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3568 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3569 struct bna_txq *txq = NULL;
3570 struct list_head *qe;
3573 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3574 sizeof(struct bfi_enet_tx_cfg_rsp));
3576 tx->hw_id = cfg_rsp->hw_id;
3578 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3579 i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3580 txq = (struct bna_txq *)qe;
3582 /* Setup doorbells */
3583 txq->tcb->i_dbell->doorbell_addr =
3584 tx->bna->pcidev.pci_bar_kva
3585 + ntohl(cfg_rsp->q_handles[i].i_dbell);
3586 txq->tcb->q_dbell =
3587 tx->bna->pcidev.pci_bar_kva
3588 + ntohl(cfg_rsp->q_handles[i].q_dbell);
3589 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3591 /* Initialize producer/consumer indexes */
3592 (*txq->tcb->hw_consumer_index) = 0;
3593 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3596 bfa_fsm_send_event(tx, TX_E_STARTED);
3600 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3602 bfa_fsm_send_event(tx, TX_E_STOPPED);
3606 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3609 struct list_head *qe;
3611 list_for_each(qe, &tx_mod->tx_active_q) {
3612 tx = (struct bna_tx *)qe;
3613 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
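/*
 * bna_tx_res_req - fill in res_info with the memory (TCB, QPT, SWQPT, queue
 * pages, IB index) and MSI-X interrupt resources needed for num_txq transmit
 * queues of the given depth.
 */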
3618 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3622 struct bna_mem_info *mem_info;
3624 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3625 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3626 mem_info->mem_type = BNA_MEM_T_KVA;
3627 mem_info->len = sizeof(struct bna_tcb);
3628 mem_info->num = num_txq;
3630 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3631 q_size = ALIGN(q_size, PAGE_SIZE);
3632 page_count = q_size >> PAGE_SHIFT;
3634 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3635 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3636 mem_info->mem_type = BNA_MEM_T_DMA;
3637 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3638 mem_info->num = num_txq;
3640 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3641 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3642 mem_info->mem_type = BNA_MEM_T_KVA;
3643 mem_info->len = page_count * sizeof(void *);
3644 mem_info->num = num_txq;
3646 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3647 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3648 mem_info->mem_type = BNA_MEM_T_DMA;
3649 mem_info->len = PAGE_SIZE * page_count;
3650 mem_info->num = num_txq;
3652 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3653 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3654 mem_info->mem_type = BNA_MEM_T_DMA;
3655 mem_info->len = BFI_IBIDX_SIZE;
3656 mem_info->num = num_txq;
3658 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3659 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3660 BNA_INTR_T_MSIX;
3661 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
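/*
 * bna_tx_create - build a Tx object from the pre-allocated resources: claim
 * TxQs from the free list, set up the per-queue IBs, TCBs and page tables,
 * assign queue priorities, and leave the Tx in the stopped state.
 */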
3665 bna_tx_create(struct bna *bna, struct bnad *bnad,
3666 struct bna_tx_config *tx_cfg,
3667 const struct bna_tx_event_cbfn *tx_cbfn,
3668 struct bna_res_info *res_info, void *priv)
3670 struct bna_intr_info *intr_info;
3671 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3673 struct bna_txq *txq;
3674 struct list_head *qe;
3678 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3679 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3680 PAGE_SIZE;
3686 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3687 return NULL;
3691 tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3692 if (!tx)
3693 return NULL;
3699 INIT_LIST_HEAD(&tx->txq_q);
3700 for (i = 0; i < tx_cfg->num_txq; i++) {
3701 if (list_empty(&tx_mod->txq_free_q))
3704 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3705 bfa_q_qe_init(&txq->qe);
3706 list_add_tail(&txq->qe, &tx->txq_q);
3716 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3717 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3718 /* The following callbacks are mandatory */
3719 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3720 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3721 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3723 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3725 tx->num_txq = tx_cfg->num_txq;
3728 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3729 switch (tx->type) {
3730 case BNA_TX_T_REGULAR:
3731 if (!(tx->bna->tx_mod.flags &
3732 BNA_TX_MOD_F_ENET_LOOPBACK))
3733 tx->flags |= BNA_TX_F_ENET_STARTED;
3734 break;
3735 case BNA_TX_T_LOOPBACK:
3736 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3737 tx->flags |= BNA_TX_F_ENET_STARTED;
3738 break;
3739 }
3740 }
3745 list_for_each(qe, &tx->txq_q) {
3746 txq = (struct bna_txq *)qe;
3747 txq->tcb = (struct bna_tcb *)
3748 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3749 txq->tx_packets = 0;
3753 txq->ib.ib_seg_host_addr.lsb =
3754 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3755 txq->ib.ib_seg_host_addr.msb =
3756 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3757 txq->ib.ib_seg_host_addr_kva =
3758 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3759 txq->ib.intr_type = intr_info->intr_type;
3760 txq->ib.intr_vector = (intr_info->num == 1) ?
3761 intr_info->idl[0].vector :
3762 intr_info->idl[i].vector;
3763 if (intr_info->intr_type == BNA_INTR_T_INTX)
3764 txq->ib.intr_vector = BIT(txq->ib.intr_vector);
3765 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3766 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3767 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3771 txq->tcb->q_depth = tx_cfg->txq_depth;
3772 txq->tcb->unmap_q = (void *)
3773 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3774 txq->tcb->hw_consumer_index =
3775 (u32 *)txq->ib.ib_seg_host_addr_kva;
3776 txq->tcb->i_dbell = &txq->ib.door_bell;
3777 txq->tcb->intr_type = txq->ib.intr_type;
3778 txq->tcb->intr_vector = txq->ib.intr_vector;
3779 txq->tcb->txq = txq;
3780 txq->tcb->bnad = bnad;
3783 /* QPT, SWQPT, Pages */
3784 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3785 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3786 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3787 &res_info[BNA_TX_RES_MEM_T_PAGE].
3788 res_u.mem_info.mdl[i]);
3790 /* Callback to bnad for setting up TCB */
3791 if (tx->tcb_setup_cbfn)
3792 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3794 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3795 txq->priority = txq->tcb->id;
3796 else
3797 txq->priority = tx_mod->default_prio;
3802 tx->txf_vlan_id = 0;
3804 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3806 tx_mod->rid_mask |= BIT(tx->rid);
3816 bna_tx_destroy(struct bna_tx *tx)
3818 struct bna_txq *txq;
3819 struct list_head *qe;
3821 list_for_each(qe, &tx->txq_q) {
3822 txq = (struct bna_txq *)qe;
3823 if (tx->tcb_destroy_cbfn)
3824 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3827 tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
3832 bna_tx_enable(struct bna_tx *tx)
3834 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3835 return;
3837 tx->flags |= BNA_TX_F_ENABLED;
3839 if (tx->flags & BNA_TX_F_ENET_STARTED)
3840 bfa_fsm_send_event(tx, TX_E_START);
3844 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3845 void (*cbfn)(void *, struct bna_tx *))
3847 if (type == BNA_SOFT_CLEANUP) {
3848 (*cbfn)(tx->bna->bnad, tx);
3849 return;
3850 }
3852 tx->stop_cbfn = cbfn;
3853 tx->stop_cbarg = tx->bna->bnad;
3855 tx->flags &= ~BNA_TX_F_ENABLED;
3857 bfa_fsm_send_event(tx, TX_E_STOP);
3861 bna_tx_cleanup_complete(struct bna_tx *tx)
3863 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3867 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3869 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3871 bfa_wc_down(&tx_mod->tx_stop_wc);
3875 bna_tx_mod_cb_tx_stopped_all(void *arg)
3877 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3879 if (tx_mod->stop_cbfn)
3880 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3881 tx_mod->stop_cbfn = NULL;
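/*
 * bna_tx_mod_init - initialize the Tx module: carve the Tx and TxQ arrays out
 * of res_info and place every entry on the corresponding free list.
 */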
3885 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3886 struct bna_res_info *res_info)
3893 tx_mod->tx = (struct bna_tx *)
3894 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3895 tx_mod->txq = (struct bna_txq *)
3896 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3898 INIT_LIST_HEAD(&tx_mod->tx_free_q);
3899 INIT_LIST_HEAD(&tx_mod->tx_active_q);
3901 INIT_LIST_HEAD(&tx_mod->txq_free_q);
3903 for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3904 tx_mod->tx[i].rid = i;
3905 bfa_q_qe_init(&tx_mod->tx[i].qe);
3906 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3907 bfa_q_qe_init(&tx_mod->txq[i].qe);
3908 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3911 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3912 tx_mod->default_prio = 0;
3913 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3914 tx_mod->iscsi_prio = -1;
3918 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3920 struct list_head *qe;
3924 list_for_each(qe, &tx_mod->tx_free_q)
3928 list_for_each(qe, &tx_mod->txq_free_q)
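/*
 * bna_tx_mod_start/stop/fail - propagate ENET start, stop and failure events
 * to every active Tx of the given type; stop uses the tx_stop_wc wait counter
 * so the enet stop callback fires only after all Tx objects have stopped.
 */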
3935 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3938 struct list_head *qe;
3940 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3941 if (type == BNA_TX_T_LOOPBACK)
3942 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3944 list_for_each(qe, &tx_mod->tx_active_q) {
3945 tx = (struct bna_tx *)qe;
3946 if (tx->type == type)
3947 bna_tx_start(tx);
3952 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3955 struct list_head *qe;
3957 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3958 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3960 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3962 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3964 list_for_each(qe, &tx_mod->tx_active_q) {
3965 tx = (struct bna_tx *)qe;
3966 if (tx->type == type) {
3967 bfa_wc_up(&tx_mod->tx_stop_wc);
3968 bna_tx_stop(tx);
3972 bfa_wc_wait(&tx_mod->tx_stop_wc);
3976 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3979 struct list_head *qe;
3981 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3982 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3984 list_for_each(qe, &tx_mod->tx_active_q) {
3985 tx = (struct bna_tx *)qe;
3986 bna_tx_fail(tx);
3991 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3993 struct bna_txq *txq;
3994 struct list_head *qe;
3996 list_for_each(qe, &tx->txq_q) {
3997 txq = (struct bna_txq *)qe;
3998 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);