2 * Linux network driver for QLogic BR-series Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15 * Copyright (c) 2014-2015 QLogic Corporation
24 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
26 ib->coalescing_timeo = coalescing_timeo;
27 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
28 (u32)ib->coalescing_timeo, 0);
33 #define bna_rxf_vlan_cfg_soft_reset(rxf) \
35 (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
36 (rxf)->vlan_strip_pending = true; \
39 #define bna_rxf_rss_cfg_soft_reset(rxf) \
41 if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
42 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
43 BNA_RSS_F_CFG_PENDING | \
44 BNA_RSS_F_STATUS_PENDING); \
47 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
48 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
49 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
50 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
54 enum bna_cleanup_type cleanup);
55 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
56 enum bna_cleanup_type cleanup);
57 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
58 enum bna_cleanup_type cleanup);
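/* RXF state machine: stopped -> cfg_wait -> started. last_resp_wait is
 * entered when a stop or failure arrives while a configuration request
 * is still outstanding, so the final firmware response can be drained
 * before returning to stopped.
 */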
60 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
62 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
64 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
66 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
70 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
72 call_rxf_stop_cbfn(rxf);
76 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
80 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
84 call_rxf_stop_cbfn(rxf);
92 call_rxf_cam_fltr_cbfn(rxf);
101 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
103 if (!bna_rxf_cfg_apply(rxf)) {
104 /* No more pending config updates */
105 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
110 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
114 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
118 bna_rxf_cfg_reset(rxf);
119 call_rxf_start_cbfn(rxf);
120 call_rxf_cam_fltr_cbfn(rxf);
121 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
129 if (!bna_rxf_cfg_apply(rxf)) {
130 /* No more pending config updates */
131 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
141 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
143 call_rxf_start_cbfn(rxf);
144 call_rxf_cam_fltr_cbfn(rxf);
148 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
153 bna_rxf_cfg_reset(rxf);
154 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
158 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
167 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
172 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
177 bna_rxf_cfg_reset(rxf);
178 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
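/* The bna_bfi_*() helpers below all follow the same pattern: build the
 * request in rxf->bfi_enet_cmd, fill in the message header (including
 * the number of command queue entries the request occupies) and post it
 * to the firmware message queue. The firmware reply is delivered to the
 * RXF state machine as an RXF_E_FW_RESP event.
 */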
187 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
188 enum bfi_enet_h2i_msgs req_type)
190 struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
192 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
193 req->mh.num_entries = htons(
194 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
195 ether_addr_copy(req->mac_addr, mac->addr);
196 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
197 sizeof(struct bfi_enet_ucast_req), &req->mh);
198 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
202 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
204 struct bfi_enet_mcast_add_req *req =
205 &rxf->bfi_enet_cmd.mcast_add_req;
207 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
209 req->mh.num_entries = htons(
210 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
211 ether_addr_copy(req->mac_addr, mac->addr);
212 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
213 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
214 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
218 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
220 struct bfi_enet_mcast_del_req *req =
221 &rxf->bfi_enet_cmd.mcast_del_req;
223 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
225 req->mh.num_entries = htons(
226 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
227 req->handle = htons(handle);
228 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
229 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
230 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
234 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
236 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
238 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
239 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
240 req->mh.num_entries = htons(
241 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
242 req->enable = status;
243 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
244 sizeof(struct bfi_enet_enable_req), &req->mh);
245 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
249 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
251 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
253 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
254 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
255 req->mh.num_entries = htons(
256 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
257 req->enable = status;
258 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
259 sizeof(struct bfi_enet_enable_req), &req->mh);
260 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
264 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
266 struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
270 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
271 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
272 req->mh.num_entries = htons(
273 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
274 req->block_idx = block_idx;
275 for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
276 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
277 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
279 htonl(rxf->vlan_filter_table[j]);
281 req->bit_mask[i] = 0xFFFFFFFF;
283 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
284 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
285 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
289 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
291 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
293 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
294 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
295 req->mh.num_entries = htons(
296 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
297 req->enable = rxf->vlan_strip_status;
298 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
299 sizeof(struct bfi_enet_enable_req), &req->mh);
300 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
304 bna_bfi_rit_cfg(struct bna_rxf *rxf)
306 struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
308 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
309 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
310 req->mh.num_entries = htons(
311 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
312 req->size = htons(rxf->rit_size);
313 memcpy(&req->table[0], rxf->rit, rxf->rit_size);
314 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
315 sizeof(struct bfi_enet_rit_req), &req->mh);
316 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
320 bna_bfi_rss_cfg(struct bna_rxf *rxf)
322 struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
325 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
326 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
327 req->mh.num_entries = htons(
328 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
329 req->cfg.type = rxf->rss_cfg.hash_type;
330 req->cfg.mask = rxf->rss_cfg.hash_mask;
331 for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
333 htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
334 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
335 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
336 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
340 bna_bfi_rss_enable(struct bna_rxf *rxf)
342 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
344 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
345 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
346 req->mh.num_entries = htons(
347 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
348 req->enable = rxf->rss_status;
349 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
350 sizeof(struct bfi_enet_enable_req), &req->mh);
351 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
354 /* This function gets the multicast MAC that has already been added to CAM */
355 static struct bna_mac *
356 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
359 struct list_head *qe;
361 list_for_each(qe, &rxf->mcast_active_q) {
362 mac = (struct bna_mac *)qe;
363 if (ether_addr_equal(mac->addr, mac_addr))
367 list_for_each(qe, &rxf->mcast_pending_del_q) {
368 mac = (struct bna_mac *)qe;
369 if (ether_addr_equal(mac->addr, mac_addr))
376 static struct bna_mcam_handle *
377 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
379 struct bna_mcam_handle *mchandle;
380 struct list_head *qe;
382 list_for_each(qe, &rxf->mcast_handle_q) {
383 mchandle = (struct bna_mcam_handle *)qe;
384 if (mchandle->handle == handle)
392 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
394 struct bna_mac *mcmac;
395 struct bna_mcam_handle *mchandle;
397 mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
398 mchandle = bna_rxf_mchandle_get(rxf, handle);
399 if (mchandle == NULL) {
400 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
401 mchandle->handle = handle;
402 mchandle->refcnt = 0;
403 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
406 mcmac->handle = mchandle;
410 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
411 enum bna_cleanup_type cleanup)
413 struct bna_mcam_handle *mchandle;
416 mchandle = mac->handle;
417 if (mchandle == NULL)
421 if (mchandle->refcnt == 0) {
422 if (cleanup == BNA_HARD_CLEANUP) {
423 bna_bfi_mcast_del_req(rxf, mchandle->handle);
426 list_del(&mchandle->qe);
427 bfa_q_qe_init(&mchandle->qe);
428 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
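/* Apply one pending multicast change. Deletes are issued before adds so
 * the number of CAM entries in use never grows past its limit. A nonzero
 * return means a command was posted and the caller must wait for the
 * firmware response before applying the next change.
 */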
436 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
438 struct bna_mac *mac = NULL;
439 struct list_head *qe;
442 /* First delete multicast entries to maintain the count */
443 while (!list_empty(&rxf->mcast_pending_del_q)) {
444 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
446 mac = (struct bna_mac *)qe;
447 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
448 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
453 /* Add multicast entries */
454 if (!list_empty(&rxf->mcast_pending_add_q)) {
455 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
457 mac = (struct bna_mac *)qe;
458 list_add_tail(&mac->qe, &rxf->mcast_active_q);
459 bna_bfi_mcast_add_req(rxf, mac);
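/* Push one pending block of the VLAN filter table to the firmware. The
 * pending bitmask carries one bit per table block; the lowest set bit is
 * cleared and the corresponding block is sent.
 */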
467 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
469 u8 vlan_pending_bitmask;
472 if (rxf->vlan_pending_bitmask) {
473 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
474 while (!(vlan_pending_bitmask & 0x1)) {
476 vlan_pending_bitmask >>= 1;
478 rxf->vlan_pending_bitmask &= ~BIT(block_idx);
479 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
487 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
489 struct list_head *qe;
493 /* Throw away mcast entries pending deletion */
494 while (!list_empty(&rxf->mcast_pending_del_q)) {
495 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
497 mac = (struct bna_mac *)qe;
498 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
499 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
504 /* Move active mcast entries to pending_add_q */
505 while (!list_empty(&rxf->mcast_active_q)) {
506 bfa_q_deq(&rxf->mcast_active_q, &qe);
508 list_add_tail(qe, &rxf->mcast_pending_add_q);
509 mac = (struct bna_mac *)qe;
510 if (bna_rxf_mcast_del(rxf, mac, cleanup))
518 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
520 if (rxf->rss_pending) {
521 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
522 rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
523 bna_bfi_rit_cfg(rxf);
527 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
528 rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
529 bna_bfi_rss_cfg(rxf);
533 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
534 rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
535 bna_bfi_rss_enable(rxf);
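/* Apply pending configuration in a fixed order: unicast, multicast,
 * promiscuous, allmulti, VLAN filter, VLAN strip, RSS. Each helper
 * returns nonzero after posting a firmware command, in which case the
 * remaining steps are resumed on the next RXF_E_FW_RESP.
 */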
544 bna_rxf_cfg_apply(struct bna_rxf *rxf)
546 if (bna_rxf_ucast_cfg_apply(rxf))
549 if (bna_rxf_mcast_cfg_apply(rxf))
552 if (bna_rxf_promisc_cfg_apply(rxf))
555 if (bna_rxf_allmulti_cfg_apply(rxf))
558 if (bna_rxf_vlan_cfg_apply(rxf))
561 if (bna_rxf_vlan_strip_cfg_apply(rxf))
564 if (bna_rxf_rss_cfg_apply(rxf))
571 bna_rxf_cfg_reset(struct bna_rxf *rxf)
573 bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
574 bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
575 bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
576 bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
577 bna_rxf_vlan_cfg_soft_reset(rxf);
578 bna_rxf_rss_cfg_soft_reset(rxf);
582 bna_rit_init(struct bna_rxf *rxf, int rit_size)
584 struct bna_rx *rx = rxf->rx;
586 struct list_head *qe;
589 rxf->rit_size = rit_size;
590 list_for_each(qe, &rx->rxp_q) {
591 rxp = (struct bna_rxp *)qe;
592 rxf->rit[offset] = rxp->cq.ccb->id;
599 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
601 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
605 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
606 struct bfi_msgq_mhdr *msghdr)
608 struct bfi_enet_rsp *rsp =
609 container_of(msghdr, struct bfi_enet_rsp, mh);
612 /* Clear ucast from cache */
613 rxf->ucast_active_set = 0;
616 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
620 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
621 struct bfi_msgq_mhdr *msghdr)
623 struct bfi_enet_mcast_add_req *req =
624 &rxf->bfi_enet_cmd.mcast_add_req;
625 struct bfi_enet_mcast_add_rsp *rsp =
626 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
628 bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
630 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
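/* One-time initialization of the RXF when its Rx is created: reset the
 * unicast/multicast CAM queues, seed the RIT from the Rx paths, latch
 * the RSS and VLAN settings from the Rx configuration, and start the
 * state machine in the stopped state.
 */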
634 bna_rxf_init(struct bna_rxf *rxf,
636 struct bna_rx_config *q_config,
637 struct bna_res_info *res_info)
641 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
642 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
643 rxf->ucast_pending_set = 0;
644 rxf->ucast_active_set = 0;
645 INIT_LIST_HEAD(&rxf->ucast_active_q);
646 rxf->ucast_pending_mac = NULL;
648 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
649 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
650 INIT_LIST_HEAD(&rxf->mcast_active_q);
651 INIT_LIST_HEAD(&rxf->mcast_handle_q);
654 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
655 bna_rit_init(rxf, q_config->num_paths);
657 rxf->rss_status = q_config->rss_status;
658 if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
659 rxf->rss_cfg = q_config->rss_config;
660 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
661 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
662 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
665 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
666 memset(rxf->vlan_filter_table, 0,
667 (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
668 rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
669 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
671 rxf->vlan_strip_status = q_config->vlan_strip_status;
673 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
677 bna_rxf_uninit(struct bna_rxf *rxf)
681 rxf->ucast_pending_set = 0;
682 rxf->ucast_active_set = 0;
684 while (!list_empty(&rxf->ucast_pending_add_q)) {
685 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
686 bfa_q_qe_init(&mac->qe);
687 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
690 if (rxf->ucast_pending_mac) {
691 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
692 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
693 rxf->ucast_pending_mac);
694 rxf->ucast_pending_mac = NULL;
697 while (!list_empty(&rxf->mcast_pending_add_q)) {
698 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
699 bfa_q_qe_init(&mac->qe);
700 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
703 rxf->rxmode_pending = 0;
704 rxf->rxmode_pending_bitmask = 0;
705 if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
706 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
707 if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
708 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
710 rxf->rss_pending = 0;
711 rxf->vlan_strip_pending = false;
717 bna_rx_cb_rxf_started(struct bna_rx *rx)
719 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
723 bna_rxf_start(struct bna_rxf *rxf)
725 rxf->start_cbfn = bna_rx_cb_rxf_started;
726 rxf->start_cbarg = rxf->rx;
727 bfa_fsm_send_event(rxf, RXF_E_START);
731 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
733 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
737 bna_rxf_stop(struct bna_rxf *rxf)
739 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
740 rxf->stop_cbarg = rxf->rx;
741 bfa_fsm_send_event(rxf, RXF_E_STOP);
745 bna_rxf_fail(struct bna_rxf *rxf)
747 bfa_fsm_send_event(rxf, RXF_E_FAIL);
751 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
753 struct bna_rxf *rxf = &rx->rxf;
755 if (rxf->ucast_pending_mac == NULL) {
756 rxf->ucast_pending_mac =
757 bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
758 if (rxf->ucast_pending_mac == NULL)
759 return BNA_CB_UCAST_CAM_FULL;
760 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
763 ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
764 rxf->ucast_pending_set = 1;
765 rxf->cam_fltr_cbfn = NULL;
766 rxf->cam_fltr_cbarg = rx->bna->bnad;
768 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
770 return BNA_CB_SUCCESS;
774 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
775 void (*cbfn)(struct bnad *, struct bna_rx *))
777 struct bna_rxf *rxf = &rx->rxf;
780 /* Check if already added or pending addition */
781 if (bna_mac_find(&rxf->mcast_active_q, addr) ||
782 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
784 cbfn(rx->bna->bnad, rx);
785 return BNA_CB_SUCCESS;
788 mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
790 return BNA_CB_MCAST_LIST_FULL;
791 bfa_q_qe_init(&mac->qe);
792 ether_addr_copy(mac->addr, addr);
793 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
795 rxf->cam_fltr_cbfn = cbfn;
796 rxf->cam_fltr_cbarg = rx->bna->bnad;
798 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
800 return BNA_CB_SUCCESS;
804 bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
806 struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
807 struct bna_rxf *rxf = &rx->rxf;
808 struct list_head list_head;
809 struct list_head *qe;
811 struct bna_mac *mac, *del_mac;
814 /* Purge the pending_add_q */
815 while (!list_empty(&rxf->ucast_pending_add_q)) {
816 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
818 mac = (struct bna_mac *)qe;
819 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
822 /* Schedule active_q entries for deletion */
823 while (!list_empty(&rxf->ucast_active_q)) {
824 bfa_q_deq(&rxf->ucast_active_q, &qe);
825 mac = (struct bna_mac *)qe;
826 bfa_q_qe_init(&mac->qe);
828 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
829 memcpy(del_mac, mac, sizeof(*del_mac));
830 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
831 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
835 INIT_LIST_HEAD(&list_head);
836 for (i = 0, mcaddr = uclist; i < count; i++) {
837 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
840 bfa_q_qe_init(&mac->qe);
841 ether_addr_copy(mac->addr, mcaddr);
842 list_add_tail(&mac->qe, &list_head);
846 /* Add the new entries */
847 while (!list_empty(&list_head)) {
848 bfa_q_deq(&list_head, &qe);
849 mac = (struct bna_mac *)qe;
850 bfa_q_qe_init(&mac->qe);
851 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
854 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
856 return BNA_CB_SUCCESS;
859 while (!list_empty(&list_head)) {
860 bfa_q_deq(&list_head, &qe);
861 mac = (struct bna_mac *)qe;
862 bfa_q_qe_init(&mac->qe);
863 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
866 return BNA_CB_UCAST_CAM_FULL;
870 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
872 struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
873 struct bna_rxf *rxf = &rx->rxf;
874 struct list_head list_head;
875 struct list_head *qe;
877 struct bna_mac *mac, *del_mac;
880 /* Purge the pending_add_q */
881 while (!list_empty(&rxf->mcast_pending_add_q)) {
882 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
884 mac = (struct bna_mac *)qe;
885 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
888 /* Schedule active_q entries for deletion */
889 while (!list_empty(&rxf->mcast_active_q)) {
890 bfa_q_deq(&rxf->mcast_active_q, &qe);
891 mac = (struct bna_mac *)qe;
892 bfa_q_qe_init(&mac->qe);
894 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
896 memcpy(del_mac, mac, sizeof(*del_mac));
897 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
899 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
903 INIT_LIST_HEAD(&list_head);
904 for (i = 0, mcaddr = mclist; i < count; i++) {
905 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
908 bfa_q_qe_init(&mac->qe);
909 ether_addr_copy(mac->addr, mcaddr);
910 list_add_tail(&mac->qe, &list_head);
915 /* Add the new entries */
916 while (!list_empty(&list_head)) {
917 bfa_q_deq(&list_head, &qe);
918 mac = (struct bna_mac *)qe;
919 bfa_q_qe_init(&mac->qe);
920 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
923 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
925 return BNA_CB_SUCCESS;
928 while (!list_empty(&list_head)) {
929 bfa_q_deq(&list_head, &qe);
930 mac = (struct bna_mac *)qe;
931 bfa_q_qe_init(&mac->qe);
932 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
935 return BNA_CB_MCAST_LIST_FULL;
939 bna_rx_mcast_delall(struct bna_rx *rx)
941 struct bna_rxf *rxf = &rx->rxf;
942 struct list_head *qe;
943 struct bna_mac *mac, *del_mac;
944 int need_hw_config = 0;
946 /* Purge all entries from pending_add_q */
947 while (!list_empty(&rxf->mcast_pending_add_q)) {
948 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
949 mac = (struct bna_mac *)qe;
950 bfa_q_qe_init(&mac->qe);
951 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
954 /* Schedule all entries in active_q for deletion */
955 while (!list_empty(&rxf->mcast_active_q)) {
956 bfa_q_deq(&rxf->mcast_active_q, &qe);
957 mac = (struct bna_mac *)qe;
958 bfa_q_qe_init(&mac->qe);
960 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
962 memcpy(del_mac, mac, sizeof(*del_mac));
963 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
965 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
970 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
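/* bna_rx_vlan_add()/bna_rx_vlan_del() update one bit of the VLAN filter
 * table and mark the containing block dirty. For example, assuming the
 * usual layout of 32 VLAN IDs per u32 word (BFI_VLAN_WORD_SHIFT == 5)
 * and 512 IDs per block (BFI_VLAN_BLOCK_SHIFT == 9), VLAN 100 lands in
 * word 3, bit 4, block 0.
 */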
974 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
976 struct bna_rxf *rxf = &rx->rxf;
977 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
978 int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
979 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
981 rxf->vlan_filter_table[index] |= bit;
982 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
983 rxf->vlan_pending_bitmask |= BIT(group_id);
984 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
989 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
991 struct bna_rxf *rxf = &rx->rxf;
992 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
993 int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
994 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
996 rxf->vlan_filter_table[index] &= ~bit;
997 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
998 rxf->vlan_pending_bitmask |= BIT(group_id);
999 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1004 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1006 struct bna_mac *mac = NULL;
1007 struct list_head *qe;
1009 /* Delete MAC addresses previously added */
1010 if (!list_empty(&rxf->ucast_pending_del_q)) {
1011 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1013 mac = (struct bna_mac *)qe;
1014 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1015 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
1019 /* Set default unicast MAC */
1020 if (rxf->ucast_pending_set) {
1021 rxf->ucast_pending_set = 0;
1022 ether_addr_copy(rxf->ucast_active_mac.addr,
1023 rxf->ucast_pending_mac->addr);
1024 rxf->ucast_active_set = 1;
1025 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1026 BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1030 /* Add additional MAC entries */
1031 if (!list_empty(&rxf->ucast_pending_add_q)) {
1032 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1034 mac = (struct bna_mac *)qe;
1035 list_add_tail(&mac->qe, &rxf->ucast_active_q);
1036 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1044 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1046 struct list_head *qe;
1047 struct bna_mac *mac;
1049 /* Throw away ucast entries pending deletion */
1050 while (!list_empty(&rxf->ucast_pending_del_q)) {
1051 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1053 mac = (struct bna_mac *)qe;
1054 if (cleanup == BNA_SOFT_CLEANUP)
1055 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1058 bna_bfi_ucast_req(rxf, mac,
1059 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1060 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1066 /* Move active ucast entries to pending_add_q */
1067 while (!list_empty(&rxf->ucast_active_q)) {
1068 bfa_q_deq(&rxf->ucast_active_q, &qe);
1070 list_add_tail(qe, &rxf->ucast_pending_add_q);
1071 if (cleanup == BNA_HARD_CLEANUP) {
1072 mac = (struct bna_mac *)qe;
1073 bna_bfi_ucast_req(rxf, mac,
1074 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1079 if (rxf->ucast_active_set) {
1080 rxf->ucast_pending_set = 1;
1081 rxf->ucast_active_set = 0;
1082 if (cleanup == BNA_HARD_CLEANUP) {
1083 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1084 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1093 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1095 struct bna *bna = rxf->rx->bna;
1097 /* Enable/disable promiscuous mode */
1098 if (is_promisc_enable(rxf->rxmode_pending,
1099 rxf->rxmode_pending_bitmask)) {
1100 /* move promisc configuration from pending -> active */
1101 promisc_inactive(rxf->rxmode_pending,
1102 rxf->rxmode_pending_bitmask);
1103 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1104 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1106 } else if (is_promisc_disable(rxf->rxmode_pending,
1107 rxf->rxmode_pending_bitmask)) {
1108 /* move promisc configuration from pending -> active */
1109 promisc_inactive(rxf->rxmode_pending,
1110 rxf->rxmode_pending_bitmask);
1111 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1112 bna->promisc_rid = BFI_INVALID_RID;
1113 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1121 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1123 struct bna *bna = rxf->rx->bna;
1125 /* Clear pending promisc mode disable */
1126 if (is_promisc_disable(rxf->rxmode_pending,
1127 rxf->rxmode_pending_bitmask)) {
1128 promisc_inactive(rxf->rxmode_pending,
1129 rxf->rxmode_pending_bitmask);
1130 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1131 bna->promisc_rid = BFI_INVALID_RID;
1132 if (cleanup == BNA_HARD_CLEANUP) {
1133 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1138 /* Move promisc mode config from active -> pending */
1139 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1140 promisc_enable(rxf->rxmode_pending,
1141 rxf->rxmode_pending_bitmask);
1142 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1143 if (cleanup == BNA_HARD_CLEANUP) {
1144 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1153 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1155 /* Enable/disable allmulti mode */
1156 if (is_allmulti_enable(rxf->rxmode_pending,
1157 rxf->rxmode_pending_bitmask)) {
1158 /* move allmulti configuration from pending -> active */
1159 allmulti_inactive(rxf->rxmode_pending,
1160 rxf->rxmode_pending_bitmask);
1161 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1162 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1164 } else if (is_allmulti_disable(rxf->rxmode_pending,
1165 rxf->rxmode_pending_bitmask)) {
1166 /* move allmulti configuration from pending -> active */
1167 allmulti_inactive(rxf->rxmode_pending,
1168 rxf->rxmode_pending_bitmask);
1169 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1170 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1178 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1180 /* Clear pending allmulti mode disable */
1181 if (is_allmulti_disable(rxf->rxmode_pending,
1182 rxf->rxmode_pending_bitmask)) {
1183 allmulti_inactive(rxf->rxmode_pending,
1184 rxf->rxmode_pending_bitmask);
1185 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1186 if (cleanup == BNA_HARD_CLEANUP) {
1187 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1192 /* Move allmulti mode config from active -> pending */
1193 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1194 allmulti_enable(rxf->rxmode_pending,
1195 rxf->rxmode_pending_bitmask);
1196 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1197 if (cleanup == BNA_HARD_CLEANUP) {
1198 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1207 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1209 struct bna *bna = rxf->rx->bna;
1212 if (is_promisc_enable(rxf->rxmode_pending,
1213 rxf->rxmode_pending_bitmask) ||
1214 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1215 /* Do nothing if pending enable or already enabled */
1216 } else if (is_promisc_disable(rxf->rxmode_pending,
1217 rxf->rxmode_pending_bitmask)) {
1218 /* Turn off pending disable command */
1219 promisc_inactive(rxf->rxmode_pending,
1220 rxf->rxmode_pending_bitmask);
1222 /* Schedule enable */
1223 promisc_enable(rxf->rxmode_pending,
1224 rxf->rxmode_pending_bitmask);
1225 bna->promisc_rid = rxf->rx->rid;
1233 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1235 struct bna *bna = rxf->rx->bna;
1238 if (is_promisc_disable(rxf->rxmode_pending,
1239 rxf->rxmode_pending_bitmask) ||
1240 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1241 /* Do nothing if pending disable or already disabled */
1242 } else if (is_promisc_enable(rxf->rxmode_pending,
1243 rxf->rxmode_pending_bitmask)) {
1244 /* Turn off pending enable command */
1245 promisc_inactive(rxf->rxmode_pending,
1246 rxf->rxmode_pending_bitmask);
1247 bna->promisc_rid = BFI_INVALID_RID;
1248 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1249 /* Schedule disable */
1250 promisc_disable(rxf->rxmode_pending,
1251 rxf->rxmode_pending_bitmask);
1259 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1263 if (is_allmulti_enable(rxf->rxmode_pending,
1264 rxf->rxmode_pending_bitmask) ||
1265 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1266 /* Do nothing if pending enable or already enabled */
1267 } else if (is_allmulti_disable(rxf->rxmode_pending,
1268 rxf->rxmode_pending_bitmask)) {
1269 /* Turn off pending disable command */
1270 allmulti_inactive(rxf->rxmode_pending,
1271 rxf->rxmode_pending_bitmask);
1273 /* Schedule enable */
1274 allmulti_enable(rxf->rxmode_pending,
1275 rxf->rxmode_pending_bitmask);
1283 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1287 if (is_allmulti_disable(rxf->rxmode_pending,
1288 rxf->rxmode_pending_bitmask) ||
1289 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1290 /* Do nothing if pending disable or already disabled */
1291 } else if (is_allmulti_enable(rxf->rxmode_pending,
1292 rxf->rxmode_pending_bitmask)) {
1293 /* Turn off pending enable command */
1294 allmulti_inactive(rxf->rxmode_pending,
1295 rxf->rxmode_pending_bitmask);
1296 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1297 /* Schedule disable */
1298 allmulti_disable(rxf->rxmode_pending,
1299 rxf->rxmode_pending_bitmask);
1307 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1309 if (rxf->vlan_strip_pending) {
1310 rxf->vlan_strip_pending = false;
1311 bna_bfi_vlan_strip_enable(rxf);
1320 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1321 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1323 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1324 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
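/* BNA_GET_RXQS(): one RxQ per path for single-queue paths, two per path
 * otherwise. SIZE_TO_PAGES() rounds a byte count up to whole pages;
 * e.g. with 4 KiB pages, SIZE_TO_PAGES(10240) = 2 + 1 = 3.
 */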
1326 #define call_rx_stop_cbfn(rx) \
1328 if ((rx)->stop_cbfn) { \
1329 void (*cbfn)(void *, struct bna_rx *); \
1331 cbfn = (rx)->stop_cbfn; \
1332 cbarg = (rx)->stop_cbarg; \
1333 (rx)->stop_cbfn = NULL; \
1334 (rx)->stop_cbarg = NULL; \
1339 #define call_rx_stall_cbfn(rx) \
1341 if ((rx)->rx_stall_cbfn) \
1342 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
1345 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
1347 struct bna_dma_addr cur_q_addr = \
1348 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1349 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1350 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1351 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1352 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1353 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1354 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1357 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1358 static void bna_rx_enet_stop(struct bna_rx *rx);
1359 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
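/* Rx state machine: stopped -> start_wait -> rxf_start_wait -> started.
 * Stopping passes through rxf_stop_wait and stop_wait; cleanup_wait
 * holds the Rx until the driver finishes cleaning up its queues, while
 * failed and quiesce_wait park the Rx after a failure until it is
 * cleaned up or restarted.
 */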
1361 bfa_fsm_state_decl(bna_rx, stopped,
1362 struct bna_rx, enum bna_rx_event);
1363 bfa_fsm_state_decl(bna_rx, start_wait,
1364 struct bna_rx, enum bna_rx_event);
1365 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1366 struct bna_rx, enum bna_rx_event);
1367 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1368 struct bna_rx, enum bna_rx_event);
1369 bfa_fsm_state_decl(bna_rx, started,
1370 struct bna_rx, enum bna_rx_event);
1371 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1372 struct bna_rx, enum bna_rx_event);
1373 bfa_fsm_state_decl(bna_rx, stop_wait,
1374 struct bna_rx, enum bna_rx_event);
1375 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1376 struct bna_rx, enum bna_rx_event);
1377 bfa_fsm_state_decl(bna_rx, failed,
1378 struct bna_rx, enum bna_rx_event);
1379 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1380 struct bna_rx, enum bna_rx_event);
1382 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1384 call_rx_stop_cbfn(rx);
1387 static void bna_rx_sm_stopped(struct bna_rx *rx,
1388 enum bna_rx_event event)
1392 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1396 call_rx_stop_cbfn(rx);
1404 bfa_sm_fault(event);
1409 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1411 bna_bfi_rx_enet_start(rx);
1415 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1420 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1425 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1426 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1430 bna_rx_enet_stop(rx);
1434 bfa_sm_fault(event);
1439 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1440 enum bna_rx_event event)
1444 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1448 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1452 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1456 bfa_sm_fault(event);
1461 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1463 rx->rx_post_cbfn(rx->bna->bnad, rx);
1464 bna_rxf_start(&rx->rxf);
1468 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1473 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1477 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1478 bna_rxf_fail(&rx->rxf);
1479 call_rx_stall_cbfn(rx);
1480 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1483 case RX_E_RXF_STARTED:
1484 bna_rxf_stop(&rx->rxf);
1487 case RX_E_RXF_STOPPED:
1488 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1489 call_rx_stall_cbfn(rx);
1490 bna_rx_enet_stop(rx);
1494 bfa_sm_fault(event);
1501 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1506 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1511 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1515 bna_rx_enet_stop(rx);
1519 bfa_sm_fault(event);
1524 bna_rx_sm_started_entry(struct bna_rx *rx)
1526 struct bna_rxp *rxp;
1527 struct list_head *qe_rxp;
1528 int is_regular = (rx->type == BNA_RX_T_REGULAR);
1531 list_for_each(qe_rxp, &rx->rxp_q) {
1532 rxp = (struct bna_rxp *)qe_rxp;
1533 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1536 bna_ethport_cb_rx_started(&rx->bna->ethport);
1540 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1544 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1545 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1546 bna_rxf_stop(&rx->rxf);
1550 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1551 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1552 bna_rxf_fail(&rx->rxf);
1553 call_rx_stall_cbfn(rx);
1554 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1558 bfa_sm_fault(event);
1563 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1564 enum bna_rx_event event)
1568 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1572 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1573 bna_rxf_fail(&rx->rxf);
1574 call_rx_stall_cbfn(rx);
1575 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1578 case RX_E_RXF_STARTED:
1579 bfa_fsm_set_state(rx, bna_rx_sm_started);
1583 bfa_sm_fault(event);
1589 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1594 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1598 case RX_E_RXF_STOPPED:
1602 case RX_E_CLEANUP_DONE:
1603 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1607 bfa_sm_fault(event);
1613 bna_rx_sm_failed_entry(struct bna_rx *rx)
1618 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1622 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1626 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1630 case RX_E_RXF_STARTED:
1631 case RX_E_RXF_STOPPED:
1635 case RX_E_CLEANUP_DONE:
1636 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1640 bfa_sm_fault(event);
1645 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1650 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1654 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1658 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1661 case RX_E_CLEANUP_DONE:
1662 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1666 bfa_sm_fault(event);
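/* Build and post the BFI_ENET_H2I_RX_CFG_SET_REQ: one queue-set entry
 * per Rx path describing its RxQ(s), CQ and interrupt block, followed
 * by the per-Rx interrupt moderation settings and the RxQ type, HDS and
 * VLAN-strip configuration.
 */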
1672 bna_bfi_rx_enet_start(struct bna_rx *rx)
1674 struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1675 struct bna_rxp *rxp = NULL;
1676 struct bna_rxq *q0 = NULL, *q1 = NULL;
1677 struct list_head *rxp_qe;
1680 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1681 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1682 cfg_req->mh.num_entries = htons(
1683 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1685 cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1686 cfg_req->num_queue_sets = rx->num_paths;
1687 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1689 i++, rxp_qe = bfa_q_next(rxp_qe)) {
1690 rxp = (struct bna_rxp *)rxp_qe;
1692 GET_RXQS(rxp, q0, q1);
1693 switch (rxp->type) {
1697 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1699 cfg_req->q_cfg[i].qs.rx_buffer_size =
1700 htons((u16)q1->buffer_size);
1703 case BNA_RXP_SINGLE:
1704 /* Large/Single RxQ */
1705 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1707 if (q0->multi_buffer)
1708 /* multi-buffer is enabled by allocating
1709 * a new rx with new set of resources.
1710 * q0->buffer_size should be initialized to fragment size. */
1713 cfg_req->rx_cfg.multi_buffer =
1714 BNA_STATUS_T_ENABLED;
1717 bna_enet_mtu_get(&rx->bna->enet);
1718 cfg_req->q_cfg[i].ql.rx_buffer_size =
1719 htons((u16)q0->buffer_size);
1726 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1729 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1730 rxp->cq.ib.ib_seg_host_addr.lsb;
1731 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1732 rxp->cq.ib.ib_seg_host_addr.msb;
1733 cfg_req->q_cfg[i].ib.intr.msix_index =
1734 htons((u16)rxp->cq.ib.intr_vector);
1737 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1738 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1739 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1740 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1741 cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1742 ? BNA_STATUS_T_ENABLED :
1743 BNA_STATUS_T_DISABLED;
1744 cfg_req->ib_cfg.coalescing_timeout =
1745 htonl((u32)rxp->cq.ib.coalescing_timeo);
1746 cfg_req->ib_cfg.inter_pkt_timeout =
1747 htonl((u32)rxp->cq.ib.interpkt_timeo);
1748 cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1750 switch (rxp->type) {
1752 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1756 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1757 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1758 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1759 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1762 case BNA_RXP_SINGLE:
1763 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1769 cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1771 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1772 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1773 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1777 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1779 struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1781 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1782 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1783 req->mh.num_entries = htons(
1784 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1785 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1787 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1791 bna_rx_enet_stop(struct bna_rx *rx)
1793 struct bna_rxp *rxp;
1794 struct list_head *qe_rxp;
1797 list_for_each(qe_rxp, &rx->rxp_q) {
1798 rxp = (struct bna_rxp *)qe_rxp;
1799 bna_ib_stop(rx->bna, &rxp->cq.ib);
1802 bna_bfi_rx_enet_stop(rx);
1806 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1808 if ((rx_mod->rx_free_count == 0) ||
1809 (rx_mod->rxp_free_count == 0) ||
1810 (rx_mod->rxq_free_count == 0))
1813 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1814 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1815 (rx_mod->rxq_free_count < rx_cfg->num_paths))
1818 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1819 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1826 static struct bna_rxq *
1827 bna_rxq_get(struct bna_rx_mod *rx_mod)
1829 struct bna_rxq *rxq = NULL;
1830 struct list_head *qe = NULL;
1832 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1833 rx_mod->rxq_free_count--;
1834 rxq = (struct bna_rxq *)qe;
1835 bfa_q_qe_init(&rxq->qe);
1841 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1843 bfa_q_qe_init(&rxq->qe);
1844 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1845 rx_mod->rxq_free_count++;
1848 static struct bna_rxp *
1849 bna_rxp_get(struct bna_rx_mod *rx_mod)
1851 struct list_head *qe = NULL;
1852 struct bna_rxp *rxp = NULL;
1854 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1855 rx_mod->rxp_free_count--;
1856 rxp = (struct bna_rxp *)qe;
1857 bfa_q_qe_init(&rxp->qe);
1863 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1865 bfa_q_qe_init(&rxp->qe);
1866 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1867 rx_mod->rxp_free_count++;
1870 static struct bna_rx *
1871 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1873 struct list_head *qe = NULL;
1874 struct bna_rx *rx = NULL;
1876 if (type == BNA_RX_T_REGULAR) {
1877 bfa_q_deq(&rx_mod->rx_free_q, &qe);
1879 bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1881 rx_mod->rx_free_count--;
1882 rx = (struct bna_rx *)qe;
1883 bfa_q_qe_init(&rx->qe);
1884 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
1891 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1893 struct list_head *prev_qe = NULL;
1894 struct list_head *qe;
1896 bfa_q_qe_init(&rx->qe);
1898 list_for_each(qe, &rx_mod->rx_free_q) {
1899 if (((struct bna_rx *)qe)->rid < rx->rid)
1905 if (prev_qe == NULL) {
1906 /* This is the first entry */
1907 bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
1908 } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
1909 /* This is the last entry */
1910 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
1912 /* Somewhere in the middle */
1913 bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
1914 bfa_q_prev(&rx->qe) = prev_qe;
1915 bfa_q_next(prev_qe) = &rx->qe;
1916 bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
1919 rx_mod->rx_free_count++;
1923 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1926 switch (rxp->type) {
1927 case BNA_RXP_SINGLE:
1928 rxp->rxq.single.only = q0;
1929 rxp->rxq.single.reserved = NULL;
1932 rxp->rxq.slr.large = q0;
1933 rxp->rxq.slr.small = q1;
1936 rxp->rxq.hds.data = q0;
1937 rxp->rxq.hds.hdr = q1;
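/* Set up an RxQ's queue page table: record the DMA address of the QPT
 * itself, then fill one QPT entry (and one sw_qpt kernel pointer) per
 * data page backing the queue.
 */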
1945 bna_rxq_qpt_setup(struct bna_rxq *rxq,
1946 struct bna_rxp *rxp,
1949 struct bna_mem_descr *qpt_mem,
1950 struct bna_mem_descr *swqpt_mem,
1951 struct bna_mem_descr *page_mem)
1955 struct bna_dma_addr bna_dma;
1958 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1959 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1960 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1961 rxq->qpt.page_count = page_count;
1962 rxq->qpt.page_size = page_size;
1964 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1965 rxq->rcb->sw_q = page_mem->kva;
1967 kva = page_mem->kva;
1968 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1970 for (i = 0; i < rxq->qpt.page_count; i++) {
1971 rxq->rcb->sw_qpt[i] = kva;
1974 BNA_SET_DMA_ADDR(dma, &bna_dma);
1975 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1977 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1984 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1987 struct bna_mem_descr *qpt_mem,
1988 struct bna_mem_descr *swqpt_mem,
1989 struct bna_mem_descr *page_mem)
1993 struct bna_dma_addr bna_dma;
1996 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1997 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1998 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
1999 rxp->cq.qpt.page_count = page_count;
2000 rxp->cq.qpt.page_size = page_size;
2002 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2003 rxp->cq.ccb->sw_q = page_mem->kva;
2005 kva = page_mem->kva;
2006 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2008 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2009 rxp->cq.ccb->sw_qpt[i] = kva;
2012 BNA_SET_DMA_ADDR(dma, &bna_dma);
2013 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2015 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2022 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2024 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2026 bfa_wc_down(&rx_mod->rx_stop_wc);
2030 bna_rx_mod_cb_rx_stopped_all(void *arg)
2032 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2034 if (rx_mod->stop_cbfn)
2035 rx_mod->stop_cbfn(&rx_mod->bna->enet);
2036 rx_mod->stop_cbfn = NULL;
2040 bna_rx_start(struct bna_rx *rx)
2042 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2043 if (rx->rx_flags & BNA_RX_F_ENABLED)
2044 bfa_fsm_send_event(rx, RX_E_START);
2048 bna_rx_stop(struct bna_rx *rx)
2050 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2051 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2052 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2054 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2055 rx->stop_cbarg = &rx->bna->rx_mod;
2056 bfa_fsm_send_event(rx, RX_E_STOP);
2061 bna_rx_fail(struct bna_rx *rx)
2063 /* Indicate Enet is not enabled, and failed */
2064 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2065 bfa_fsm_send_event(rx, RX_E_FAIL);
2069 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2072 struct list_head *qe;
2074 rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2075 if (type == BNA_RX_T_LOOPBACK)
2076 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2078 list_for_each(qe, &rx_mod->rx_active_q) {
2079 rx = (struct bna_rx *)qe;
2080 if (rx->type == type)
2086 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2089 struct list_head *qe;
2091 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2092 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2094 rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2096 bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2098 list_for_each(qe, &rx_mod->rx_active_q) {
2099 rx = (struct bna_rx *)qe;
2100 if (rx->type == type) {
2101 bfa_wc_up(&rx_mod->rx_stop_wc);
2106 bfa_wc_wait(&rx_mod->rx_stop_wc);
2110 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2113 struct list_head *qe;
2115 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2116 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2118 list_for_each(qe, &rx_mod->rx_active_q) {
2119 rx = (struct bna_rx *)qe;
2124 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2125 struct bna_res_info *res_info)
2128 struct bna_rx *rx_ptr;
2129 struct bna_rxp *rxp_ptr;
2130 struct bna_rxq *rxq_ptr;
2135 rx_mod->rx = (struct bna_rx *)
2136 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2137 rx_mod->rxp = (struct bna_rxp *)
2138 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2139 rx_mod->rxq = (struct bna_rxq *)
2140 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2142 /* Initialize the queues */
2143 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2144 rx_mod->rx_free_count = 0;
2145 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2146 rx_mod->rxq_free_count = 0;
2147 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2148 rx_mod->rxp_free_count = 0;
2149 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2151 /* Build RX queues */
2152 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2153 rx_ptr = &rx_mod->rx[index];
2155 bfa_q_qe_init(&rx_ptr->qe);
2156 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2158 rx_ptr->rid = index;
2159 rx_ptr->stop_cbfn = NULL;
2160 rx_ptr->stop_cbarg = NULL;
2162 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2163 rx_mod->rx_free_count++;
2166 /* build RX-path queue */
2167 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2168 rxp_ptr = &rx_mod->rxp[index];
2169 bfa_q_qe_init(&rxp_ptr->qe);
2170 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2171 rx_mod->rxp_free_count++;
2174 /* build RXQ queue */
2175 for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2176 rxq_ptr = &rx_mod->rxq[index];
2177 bfa_q_qe_init(&rxq_ptr->qe);
2178 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2179 rx_mod->rxq_free_count++;
2184 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2186 struct list_head *qe;
2190 list_for_each(qe, &rx_mod->rx_free_q)
2194 list_for_each(qe, &rx_mod->rxp_free_q)
2198 list_for_each(qe, &rx_mod->rxq_free_q)
2205 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2207 struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2208 struct bna_rxp *rxp = NULL;
2209 struct bna_rxq *q0 = NULL, *q1 = NULL;
2210 struct list_head *rxp_qe;
2213 bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2214 sizeof(struct bfi_enet_rx_cfg_rsp));
2216 rx->hw_id = cfg_rsp->hw_id;
2218 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2220 i++, rxp_qe = bfa_q_next(rxp_qe)) {
2221 rxp = (struct bna_rxp *)rxp_qe;
2222 GET_RXQS(rxp, q0, q1);
2224 /* Setup doorbells */
2225 rxp->cq.ccb->i_dbell->doorbell_addr =
2226 rx->bna->pcidev.pci_bar_kva
2227 + ntohl(cfg_rsp->q_handles[i].i_dbell);
2228 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2230 rx->bna->pcidev.pci_bar_kva
2231 + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2232 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2235 rx->bna->pcidev.pci_bar_kva
2236 + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2237 q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2240 /* Initialize producer/consumer indexes */
2241 (*rxp->cq.ccb->hw_producer_index) = 0;
2242 rxp->cq.ccb->producer_index = 0;
2243 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2245 q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2248 bfa_fsm_send_event(rx, RX_E_STARTED);
2252 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2254 bfa_fsm_send_event(rx, RX_E_STOPPED);
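/* Work out the memory and interrupt resources needed for an Rx of the
 * given configuration: CCBs/RCBs, queue page tables and their pages for
 * the completion, data and (optional) header queues, IB index segments,
 * the RIT, and one MSI-X vector per path.
 */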
2258 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2260 u32 cq_size, hq_size, dq_size;
2261 u32 cpage_count, hpage_count, dpage_count;
2262 struct bna_mem_info *mem_info;
2267 dq_depth = q_cfg->q0_depth;
2268 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2269 cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
2271 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2272 cq_size = ALIGN(cq_size, PAGE_SIZE);
2273 cpage_count = SIZE_TO_PAGES(cq_size);
2275 dq_depth = roundup_pow_of_two(dq_depth);
2276 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2277 dq_size = ALIGN(dq_size, PAGE_SIZE);
2278 dpage_count = SIZE_TO_PAGES(dq_size);
2280 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2281 hq_depth = roundup_pow_of_two(hq_depth);
2282 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2283 hq_size = ALIGN(hq_size, PAGE_SIZE);
2284 hpage_count = SIZE_TO_PAGES(hq_size);
2288 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2289 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2290 mem_info->mem_type = BNA_MEM_T_KVA;
2291 mem_info->len = sizeof(struct bna_ccb);
2292 mem_info->num = q_cfg->num_paths;
2294 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2295 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2296 mem_info->mem_type = BNA_MEM_T_KVA;
2297 mem_info->len = sizeof(struct bna_rcb);
2298 mem_info->num = BNA_GET_RXQS(q_cfg);
2300 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2301 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2302 mem_info->mem_type = BNA_MEM_T_DMA;
2303 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2304 mem_info->num = q_cfg->num_paths;
2306 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2307 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2308 mem_info->mem_type = BNA_MEM_T_KVA;
2309 mem_info->len = cpage_count * sizeof(void *);
2310 mem_info->num = q_cfg->num_paths;
2312 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2313 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2314 mem_info->mem_type = BNA_MEM_T_DMA;
2315 mem_info->len = PAGE_SIZE * cpage_count;
2316 mem_info->num = q_cfg->num_paths;
2318 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2319 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2320 mem_info->mem_type = BNA_MEM_T_DMA;
2321 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2322 mem_info->num = q_cfg->num_paths;
2324 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2325 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2326 mem_info->mem_type = BNA_MEM_T_KVA;
2327 mem_info->len = dpage_count * sizeof(void *);
2328 mem_info->num = q_cfg->num_paths;
2330 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2331 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2332 mem_info->mem_type = BNA_MEM_T_DMA;
2333 mem_info->len = PAGE_SIZE * dpage_count;
2334 mem_info->num = q_cfg->num_paths;
2336 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2337 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2338 mem_info->mem_type = BNA_MEM_T_DMA;
2339 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2340 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2342 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2343 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2344 mem_info->mem_type = BNA_MEM_T_KVA;
2345 mem_info->len = hpage_count * sizeof(void *);
2346 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2348 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2349 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2350 mem_info->mem_type = BNA_MEM_T_DMA;
2351 mem_info->len = PAGE_SIZE * hpage_count;
2352 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2354 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2355 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2356 mem_info->mem_type = BNA_MEM_T_DMA;
2357 mem_info->len = BFI_IBIDX_SIZE;
2358 mem_info->num = q_cfg->num_paths;
2360 res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2361 mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2362 mem_info->mem_type = BNA_MEM_T_KVA;
2363 mem_info->len = BFI_ENET_RSS_RIT_MAX;
2366 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2367 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2368 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
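/* Instantiate an Rx from previously allocated resources: carve the Rx,
 * its paths and RxQs out of the free pools, wire up the driver
 * callbacks, and set up the per-path interrupt blocks and queue page
 * tables.
 */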
2372 bna_rx_create(struct bna *bna, struct bnad *bnad,
2373 struct bna_rx_config *rx_cfg,
2374 const struct bna_rx_event_cbfn *rx_cbfn,
2375 struct bna_res_info *res_info,
2378 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2380 struct bna_rxp *rxp;
2383 struct bna_intr_info *intr_info;
2384 struct bna_mem_descr *hqunmap_mem;
2385 struct bna_mem_descr *dqunmap_mem;
2386 struct bna_mem_descr *ccb_mem;
2387 struct bna_mem_descr *rcb_mem;
2388 struct bna_mem_descr *cqpt_mem;
2389 struct bna_mem_descr *cswqpt_mem;
2390 struct bna_mem_descr *cpage_mem;
2391 struct bna_mem_descr *hqpt_mem;
2392 struct bna_mem_descr *dqpt_mem;
2393 struct bna_mem_descr *hsqpt_mem;
2394 struct bna_mem_descr *dsqpt_mem;
2395 struct bna_mem_descr *hpage_mem;
2396 struct bna_mem_descr *dpage_mem;
2397 u32 dpage_count, hpage_count;
2398 u32 hq_idx, dq_idx, rcb_idx;
2402 if (!bna_rx_res_check(rx_mod, rx_cfg))
2405 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2406 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2407 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2408 dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2409 hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2410 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2411 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2412 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2413 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2414 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2415 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2416 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2417 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2418 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2420 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2423 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2426 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2429 rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2432 INIT_LIST_HEAD(&rx->rxp_q);
2433 rx->stop_cbfn = NULL;
2434 rx->stop_cbarg = NULL;
2437 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2438 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2439 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2440 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2441 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2442 /* Following callbacks are mandatory */
2443 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2444 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2446 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2448 case BNA_RX_T_REGULAR:
2449 if (!(rx->bna->rx_mod.flags &
2450 BNA_RX_MOD_F_ENET_LOOPBACK))
2451 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2453 case BNA_RX_T_LOOPBACK:
2454 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2455 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2460 rx->num_paths = rx_cfg->num_paths;
2461 for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2462 i < rx->num_paths; i++) {
2463 rxp = bna_rxp_get(rx_mod);
2464 list_add_tail(&rxp->qe, &rx->rxp_q);
2465 rxp->type = rx_cfg->rxp_type;
2469 q0 = bna_rxq_get(rx_mod);
2470 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2473 q1 = bna_rxq_get(rx_mod);
2475 if (1 == intr_info->num)
2476 rxp->vector = intr_info->idl[0].vector;
2478 rxp->vector = intr_info->idl[i].vector;
2482 rxp->cq.ib.ib_seg_host_addr.lsb =
2483 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2484 rxp->cq.ib.ib_seg_host_addr.msb =
2485 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2486 rxp->cq.ib.ib_seg_host_addr_kva =
2487 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2488 rxp->cq.ib.intr_type = intr_info->intr_type;
2489 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2490 rxp->cq.ib.intr_vector = rxp->vector;
2492 rxp->cq.ib.intr_vector = BIT(rxp->vector);
2493 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2494 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2495 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2497 bna_rxp_add_rxqs(rxp, q0, q1);
2504 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2505 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2506 rcb_idx++; dq_idx++;
2507 q0->rcb->q_depth = rx_cfg->q0_depth;
2508 q0->q_depth = rx_cfg->q0_depth;
2509 q0->multi_buffer = rx_cfg->q0_multi_buf;
2510 q0->buffer_size = rx_cfg->q0_buf_size;
2511 q0->num_vecs = rx_cfg->q0_num_vecs;
2513 q0->rcb->bnad = bna->bnad;
2515 q0->rx_packets = q0->rx_bytes = 0;
2516 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2518 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2519 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2521 if (rx->rcb_setup_cbfn)
2522 rx->rcb_setup_cbfn(bnad, q0->rcb);
2530 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2531 q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2532 rcb_idx++; hq_idx++;
2533 q1->rcb->q_depth = rx_cfg->q1_depth;
2534 q1->q_depth = rx_cfg->q1_depth;
2535 q1->multi_buffer = BNA_STATUS_T_DISABLED;
2538 q1->rcb->bnad = bna->bnad;
2540 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2541 rx_cfg->hds_config.forced_offset
2542 : rx_cfg->q1_buf_size;
2543 q1->rx_packets = q1->rx_bytes = 0;
2544 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2546 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2547 &hqpt_mem[i], &hsqpt_mem[i],
2550 if (rx->rcb_setup_cbfn)
2551 rx->rcb_setup_cbfn(bnad, q1->rcb);
2556 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2557 cq_depth = rx_cfg->q0_depth +
2558 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2559 0 : rx_cfg->q1_depth);
2560 /* if multi-buffer is enabled, the sum of q0_depth
2561 * and q1_depth need not be a power of 2 */
2563 cq_depth = roundup_pow_of_two(cq_depth);
2564 rxp->cq.ccb->q_depth = cq_depth;
2565 rxp->cq.ccb->cq = &rxp->cq;
2566 rxp->cq.ccb->rcb[0] = q0->rcb;
2567 q0->rcb->ccb = rxp->cq.ccb;
2569 rxp->cq.ccb->rcb[1] = q1->rcb;
2570 q1->rcb->ccb = rxp->cq.ccb;
2572 rxp->cq.ccb->hw_producer_index =
2573 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2574 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2575 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2576 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2577 rxp->cq.ccb->rx_coalescing_timeo =
2578 rxp->cq.ib.coalescing_timeo;
2579 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2580 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2581 rxp->cq.ccb->bnad = bna->bnad;
2582 rxp->cq.ccb->id = i;
2584 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2585 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2587 if (rx->ccb_setup_cbfn)
2588 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2591 rx->hds_cfg = rx_cfg->hds_config;
2593 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2595 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2597 rx_mod->rid_mask |= BIT(rx->rid);
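/*
 * Added note: bna_rx_destroy() tears an Rx back down: it uninitializes the
 * RXF, returns every RxQ and RxP to the Rx module free lists (running the
 * optional rcb/ccb destroy callbacks first), drops the Rx from the active
 * list, clears its rid bit and gives the Rx object back to the free pool.
 */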
2603 bna_rx_destroy(struct bna_rx *rx)
2605 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2606 struct bna_rxq *q0 = NULL;
2607 struct bna_rxq *q1 = NULL;
2608 struct bna_rxp *rxp;
2609 struct list_head *qe;
2611 bna_rxf_uninit(&rx->rxf);
2613 while (!list_empty(&rx->rxp_q)) {
2614 bfa_q_deq(&rx->rxp_q, &rxp);
2615 GET_RXQS(rxp, q0, q1);
2616 if (rx->rcb_destroy_cbfn)
2617 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2621 bna_rxq_put(rx_mod, q0);
2624 if (rx->rcb_destroy_cbfn)
2625 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2629 bna_rxq_put(rx_mod, q1);
2631 rxp->rxq.slr.large = NULL;
2632 rxp->rxq.slr.small = NULL;
2634 if (rx->ccb_destroy_cbfn)
2635 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2638 bna_rxp_put(rx_mod, rxp);
2641 list_for_each(qe, &rx_mod->rx_active_q) {
2642 if (qe == &rx->qe) {
2644 bfa_q_qe_init(&rx->qe);
2649 rx_mod->rid_mask &= ~BIT(rx->rid);
2653 bna_rx_put(rx_mod, rx);
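/*
 * Added note: bna_rx_enable() marks the Rx as enabled and, if the ENET side
 * is already started, kicks the Rx state machine with RX_E_START.  It is a
 * no-op unless the Rx is currently in the stopped state.
 */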
2657 bna_rx_enable(struct bna_rx *rx)
2659 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2662 rx->rx_flags |= BNA_RX_F_ENABLED;
2663 if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2664 bfa_fsm_send_event(rx, RX_E_START);
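/*
 * Added note: bna_rx_disable() shuts an Rx down.  For BNA_SOFT_CLEANUP the
 * hardware must not be touched, so the caller's callback runs immediately;
 * otherwise the stop callback is recorded and RX_E_STOP drives the state
 * machine through a normal hardware stop.
 */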
2668 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2669 void (*cbfn)(void *, struct bna_rx *))
2671 if (type == BNA_SOFT_CLEANUP) {
2672 /* h/w should not be accessed. Treat it as if we are already stopped */
2673 (*cbfn)(rx->bna->bnad, rx);
2675 rx->stop_cbfn = cbfn;
2676 rx->stop_cbarg = rx->bna->bnad;
2678 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2680 bfa_fsm_send_event(rx, RX_E_STOP);
2685 bna_rx_cleanup_complete(struct bna_rx *rx)
2687 bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2691 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2693 struct bna_rxf *rxf = &rx->rxf;
2695 if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2696 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2697 rxf->vlan_strip_pending = true;
2698 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2703 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2705 struct bna_rxf *rxf = &rx->rxf;
2707 if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2708 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2709 rxf->vlan_strip_pending = true;
2710 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
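/*
 * Added note: bna_rx_mode_set() applies a new Rx mode (promiscuous,
 * all-multicast, default).  Promiscuous and default modes are mutually
 * exclusive and may be owned by at most one Rx in the system, so conflicting
 * requests are rejected up front.  If any filter actually changes,
 * RXF_E_CONFIG is raised to reprogram the hardware.
 */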
2715 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2716 enum bna_rxmode bitmask)
2718 struct bna_rxf *rxf = &rx->rxf;
2719 int need_hw_config = 0;
2723 if (is_promisc_enable(new_mode, bitmask)) {
2724 /* If promisc mode is already enabled elsewhere in the system */
2725 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2726 (rx->bna->promisc_rid != rxf->rx->rid))
2729 /* If default mode is already enabled in the system */
2730 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2733 /* Trying to enable promiscuous and default mode together */
2734 if (is_default_enable(new_mode, bitmask))
2738 if (is_default_enable(new_mode, bitmask)) {
2739 /* If default mode is already enabled elsewhere in the system */
2740 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2741 (rx->bna->default_mode_rid != rxf->rx->rid)) {
2745 /* If promiscuous mode is already enabled in the system */
2746 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2750 /* Process the commands */
2752 if (is_promisc_enable(new_mode, bitmask)) {
2753 if (bna_rxf_promisc_enable(rxf))
2755 } else if (is_promisc_disable(new_mode, bitmask)) {
2756 if (bna_rxf_promisc_disable(rxf))
2760 if (is_allmulti_enable(new_mode, bitmask)) {
2761 if (bna_rxf_allmulti_enable(rxf))
2763 } else if (is_allmulti_disable(new_mode, bitmask)) {
2764 if (bna_rxf_allmulti_disable(rxf))
2768 /* Trigger h/w if needed */
2770 if (need_hw_config) {
2771 rxf->cam_fltr_cbfn = NULL;
2772 rxf->cam_fltr_cbarg = rx->bna->bnad;
2773 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2776 return BNA_CB_SUCCESS;
2783 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2785 struct bna_rxf *rxf = &rx->rxf;
2787 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2788 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2789 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2790 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2795 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2797 struct bna_rxp *rxp;
2798 struct list_head *qe;
2800 list_for_each(qe, &rx->rxp_q) {
2801 rxp = (struct bna_rxp *)qe;
2802 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2803 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2808 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2812 for (i = 0; i < BNA_LOAD_T_MAX; i++)
2813 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2814 bna->rx_mod.dim_vector[i][j] = vector[i][j];
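/*
 * Added note: bna_rx_dim_update() implements dynamic interrupt moderation.
 * It classifies the CQ's recent packet count into one of the BNA_LOAD_T_*
 * levels, biases the result by the small/large packet mix, and looks up the
 * new coalescing timeout in rx_mod.dim_vector before programming it into the
 * interrupt block.  The packet counters are cleared on each pass so every
 * call only sees the latest sampling interval.
 * (Illustration only: assuming the BNA_PKT_RATE_* constants match their
 * names, a combined count of about 25K packets selects BNA_LOAD_T_LOW_2.)
 */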
2818 bna_rx_dim_update(struct bna_ccb *ccb)
2820 struct bna *bna = ccb->cq->rx->bna;
2822 u32 pkt_rt, small_rt, large_rt;
2823 u8 coalescing_timeo;
2825 if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2826 (ccb->pkt_rate.large_pkt_cnt == 0))
2829 /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2831 small_rt = ccb->pkt_rate.small_pkt_cnt;
2832 large_rt = ccb->pkt_rate.large_pkt_cnt;
2834 pkt_rt = small_rt + large_rt;
2836 if (pkt_rt < BNA_PKT_RATE_10K)
2837 load = BNA_LOAD_T_LOW_4;
2838 else if (pkt_rt < BNA_PKT_RATE_20K)
2839 load = BNA_LOAD_T_LOW_3;
2840 else if (pkt_rt < BNA_PKT_RATE_30K)
2841 load = BNA_LOAD_T_LOW_2;
2842 else if (pkt_rt < BNA_PKT_RATE_40K)
2843 load = BNA_LOAD_T_LOW_1;
2844 else if (pkt_rt < BNA_PKT_RATE_50K)
2845 load = BNA_LOAD_T_HIGH_1;
2846 else if (pkt_rt < BNA_PKT_RATE_60K)
2847 load = BNA_LOAD_T_HIGH_2;
2848 else if (pkt_rt < BNA_PKT_RATE_80K)
2849 load = BNA_LOAD_T_HIGH_3;
2851 load = BNA_LOAD_T_HIGH_4;
2853 if (small_rt > (large_rt << 1))
2858 ccb->pkt_rate.small_pkt_cnt = 0;
2859 ccb->pkt_rate.large_pkt_cnt = 0;
2861 coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2862 ccb->rx_coalescing_timeo = coalescing_timeo;
2865 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2868 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2881 #define call_tx_stop_cbfn(tx) \
2883 if ((tx)->stop_cbfn) { \
2884 void (*cbfn)(void *, struct bna_tx *); \
2886 cbfn = (tx)->stop_cbfn; \
2887 cbarg = (tx)->stop_cbarg; \
2888 (tx)->stop_cbfn = NULL; \
2889 (tx)->stop_cbarg = NULL; \
2890 cbfn(cbarg, (tx)); \
2894 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2895 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2896 static void bna_tx_enet_stop(struct bna_tx *tx);
2904 TX_E_PRIO_CHANGE = 6,
2905 TX_E_CLEANUP_DONE = 7,
2909 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2910 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2911 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2912 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2913 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2915 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2917 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2919 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2920 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2924 bna_tx_sm_stopped_entry(struct bna_tx *tx)
2926 call_tx_stop_cbfn(tx);
2930 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2934 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2938 call_tx_stop_cbfn(tx);
2945 case TX_E_PRIO_CHANGE:
2948 case TX_E_BW_UPDATE:
2953 bfa_sm_fault(event);
2958 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2960 bna_bfi_tx_enet_start(tx);
2964 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2968 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2969 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2973 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2974 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2978 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
2979 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
2980 BNA_TX_F_BW_UPDATED);
2981 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2983 bfa_fsm_set_state(tx, bna_tx_sm_started);
2986 case TX_E_PRIO_CHANGE:
2987 tx->flags |= BNA_TX_F_PRIO_CHANGED;
2990 case TX_E_BW_UPDATE:
2991 tx->flags |= BNA_TX_F_BW_UPDATED;
2995 bfa_sm_fault(event);
3000 bna_tx_sm_started_entry(struct bna_tx *tx)
3002 struct bna_txq *txq;
3003 struct list_head *qe;
3004 int is_regular = (tx->type == BNA_TX_T_REGULAR);
3006 list_for_each(qe, &tx->txq_q) {
3007 txq = (struct bna_txq *)qe;
3008 txq->tcb->priority = txq->priority;
3010 bna_ib_start(tx->bna, &txq->ib, is_regular);
3012 tx->tx_resume_cbfn(tx->bna->bnad, tx);
3016 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3020 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3021 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3022 bna_tx_enet_stop(tx);
3026 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3027 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3028 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3031 case TX_E_PRIO_CHANGE:
3032 case TX_E_BW_UPDATE:
3033 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3037 bfa_sm_fault(event);
3042 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3047 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3052 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3053 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3058 * We are here due to start_wait -> stop_wait transition on TX_E_STOP
3061 bna_tx_enet_stop(tx);
3064 case TX_E_PRIO_CHANGE:
3065 case TX_E_BW_UPDATE:
3070 bfa_sm_fault(event);
3075 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3080 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3084 case TX_E_PRIO_CHANGE:
3085 case TX_E_BW_UPDATE:
3089 case TX_E_CLEANUP_DONE:
3090 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3094 bfa_sm_fault(event);
3099 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3101 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3102 bna_tx_enet_stop(tx);
3106 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3110 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3114 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3115 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3119 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3122 case TX_E_PRIO_CHANGE:
3123 case TX_E_BW_UPDATE:
3128 bfa_sm_fault(event);
3133 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3135 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3139 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3143 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3147 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3150 case TX_E_PRIO_CHANGE:
3151 case TX_E_BW_UPDATE:
3155 case TX_E_CLEANUP_DONE:
3156 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3160 bfa_sm_fault(event);
3165 bna_tx_sm_failed_entry(struct bna_tx *tx)
3170 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3174 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3178 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3185 case TX_E_CLEANUP_DONE:
3186 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3190 bfa_sm_fault(event);
3195 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3200 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3204 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3208 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3211 case TX_E_CLEANUP_DONE:
3212 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3215 case TX_E_BW_UPDATE:
3220 bfa_sm_fault(event);
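/*
 * Added note: bna_bfi_tx_enet_start() builds a BFI_ENET_H2I_TX_CFG_SET_REQ
 * for the firmware: one queue entry per TxQ carrying its queue page table,
 * priority and interrupt-block settings, followed by the common IB and VLAN
 * configuration.  The request is posted on the bna message queue; the
 * response is handled in bna_bfi_tx_enet_start_rsp().
 */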
3225 bna_bfi_tx_enet_start(struct bna_tx *tx)
3227 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3228 struct bna_txq *txq = NULL;
3229 struct list_head *qe;
3232 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3233 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3234 cfg_req->mh.num_entries = htons(
3235 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3237 cfg_req->num_queues = tx->num_txq;
3238 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3240 i++, qe = bfa_q_next(qe)) {
3241 txq = (struct bna_txq *)qe;
3243 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3244 cfg_req->q_cfg[i].q.priority = txq->priority;
3246 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3247 txq->ib.ib_seg_host_addr.lsb;
3248 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3249 txq->ib.ib_seg_host_addr.msb;
3250 cfg_req->q_cfg[i].ib.intr.msix_index =
3251 htons((u16)txq->ib.intr_vector);
3254 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3255 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3256 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3257 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3258 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3259 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3260 cfg_req->ib_cfg.coalescing_timeout =
3261 htonl((u32)txq->ib.coalescing_timeo);
3262 cfg_req->ib_cfg.inter_pkt_timeout =
3263 htonl((u32)txq->ib.interpkt_timeo);
3264 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3266 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3267 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3268 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3269 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3271 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3272 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3273 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3277 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3279 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3281 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3282 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3283 req->mh.num_entries = htons(
3284 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3285 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3287 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
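/*
 * Added note: bna_tx_enet_stop() quiesces the interrupt block of every TxQ
 * and then asks the firmware to tear down the Tx configuration via
 * bna_bfi_tx_enet_stop().
 */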
3291 bna_tx_enet_stop(struct bna_tx *tx)
3293 struct bna_txq *txq;
3294 struct list_head *qe;
3297 list_for_each(qe, &tx->txq_q) {
3298 txq = (struct bna_txq *)qe;
3299 bna_ib_stop(tx->bna, &txq->ib);
3302 bna_bfi_tx_enet_stop(tx);
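/*
 * Added note: bna_txq_qpt_setup() populates a TxQ's queue page table.  The
 * hardware QPT gets the DMA address of every queue page while the shadow
 * (software) QPT keeps the matching kernel virtual addresses, so the driver
 * and the adapter walk the same ring pages.
 */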
3306 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3307 struct bna_mem_descr *qpt_mem,
3308 struct bna_mem_descr *swqpt_mem,
3309 struct bna_mem_descr *page_mem)
3313 struct bna_dma_addr bna_dma;
3316 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3317 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3318 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3319 txq->qpt.page_count = page_count;
3320 txq->qpt.page_size = page_size;
3322 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3323 txq->tcb->sw_q = page_mem->kva;
3325 kva = page_mem->kva;
3326 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3328 for (i = 0; i < page_count; i++) {
3329 txq->tcb->sw_qpt[i] = kva;
3332 BNA_SET_DMA_ADDR(dma, &bna_dma);
3333 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3335 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3341 static struct bna_tx *
3342 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3344 struct list_head *qe = NULL;
3345 struct bna_tx *tx = NULL;
3347 if (list_empty(&tx_mod->tx_free_q))
3349 if (type == BNA_TX_T_REGULAR) {
3350 bfa_q_deq(&tx_mod->tx_free_q, &qe);
3352 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3354 tx = (struct bna_tx *)qe;
3355 bfa_q_qe_init(&tx->qe);
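/*
 * Added note: bna_tx_free() returns a Tx to the module.  Its TxQs go back to
 * txq_free_q, the Tx is unlinked from the active list and then re-inserted
 * into tx_free_q in ascending rid order.
 */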
3362 bna_tx_free(struct bna_tx *tx)
3364 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3365 struct bna_txq *txq;
3366 struct list_head *prev_qe;
3367 struct list_head *qe;
3369 while (!list_empty(&tx->txq_q)) {
3370 bfa_q_deq(&tx->txq_q, &txq);
3371 bfa_q_qe_init(&txq->qe);
3374 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3377 list_for_each(qe, &tx_mod->tx_active_q) {
3378 if (qe == &tx->qe) {
3380 bfa_q_qe_init(&tx->qe);
3389 list_for_each(qe, &tx_mod->tx_free_q) {
3390 if (((struct bna_tx *)qe)->rid < tx->rid)
3397 if (prev_qe == NULL) {
3398 /* This is the first entry */
3399 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3400 } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3401 /* This is the last entry */
3402 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3404 /* Somewhere in the middle */
3405 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3406 bfa_q_prev(&tx->qe) = prev_qe;
3407 bfa_q_next(prev_qe) = &tx->qe;
3408 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3413 bna_tx_start(struct bna_tx *tx)
3415 tx->flags |= BNA_TX_F_ENET_STARTED;
3416 if (tx->flags & BNA_TX_F_ENABLED)
3417 bfa_fsm_send_event(tx, TX_E_START);
3421 bna_tx_stop(struct bna_tx *tx)
3423 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3424 tx->stop_cbarg = &tx->bna->tx_mod;
3426 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3427 bfa_fsm_send_event(tx, TX_E_STOP);
3431 bna_tx_fail(struct bna_tx *tx)
3433 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3434 bfa_fsm_send_event(tx, TX_E_FAIL);
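/*
 * Added note: bna_bfi_tx_enet_start_rsp() handles the firmware's Tx
 * configuration response: it copies the response out of the message queue,
 * records the hardware ids, converts the returned doorbell offsets into
 * addresses within the PCI BAR, zeroes the producer/consumer indexes and
 * tells the state machine the Tx has started.
 */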
3438 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3440 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3441 struct bna_txq *txq = NULL;
3442 struct list_head *qe;
3445 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3446 sizeof(struct bfi_enet_tx_cfg_rsp));
3448 tx->hw_id = cfg_rsp->hw_id;
3450 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3451 i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3452 txq = (struct bna_txq *)qe;
3454 /* Set up doorbells */
3455 txq->tcb->i_dbell->doorbell_addr =
3456 tx->bna->pcidev.pci_bar_kva
3457 + ntohl(cfg_rsp->q_handles[i].i_dbell);
3459 tx->bna->pcidev.pci_bar_kva
3460 + ntohl(cfg_rsp->q_handles[i].q_dbell);
3461 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3463 /* Initialize producer/consumer indexes */
3464 (*txq->tcb->hw_consumer_index) = 0;
3465 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3468 bfa_fsm_send_event(tx, TX_E_STARTED);
3472 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3474 bfa_fsm_send_event(tx, TX_E_STOPPED);
3478 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3481 struct list_head *qe;
3483 list_for_each(qe, &tx_mod->tx_active_q) {
3484 tx = (struct bna_tx *)qe;
3485 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
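/*
 * Added note: bna_tx_res_req() describes the memory and interrupt resources
 * a Tx of the requested size needs: per-TxQ TCBs, queue pages rounded up to
 * whole PAGE_SIZE units, the hardware and shadow queue page tables, IB index
 * segments and one completion interrupt vector per TxQ.  As a rough
 * illustration (assuming a 64-byte BFI_TXQ_WI_SIZE and 4 KiB pages), a
 * txq_depth of 2048 work items needs 128 KiB of queue memory, i.e.
 * page_count = 32 per TxQ.
 */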
3490 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3494 struct bna_mem_info *mem_info;
3496 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3497 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3498 mem_info->mem_type = BNA_MEM_T_KVA;
3499 mem_info->len = sizeof(struct bna_tcb);
3500 mem_info->num = num_txq;
3502 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3503 q_size = ALIGN(q_size, PAGE_SIZE);
3504 page_count = q_size >> PAGE_SHIFT;
3506 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3507 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3508 mem_info->mem_type = BNA_MEM_T_DMA;
3509 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3510 mem_info->num = num_txq;
3512 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3513 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3514 mem_info->mem_type = BNA_MEM_T_KVA;
3515 mem_info->len = page_count * sizeof(void *);
3516 mem_info->num = num_txq;
3518 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3519 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3520 mem_info->mem_type = BNA_MEM_T_DMA;
3521 mem_info->len = PAGE_SIZE * page_count;
3522 mem_info->num = num_txq;
3524 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3525 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3526 mem_info->mem_type = BNA_MEM_T_DMA;
3527 mem_info->len = BFI_IBIDX_SIZE;
3528 mem_info->num = num_txq;
3530 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3531 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3533 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
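/*
 * Added note: bna_tx_create() assembles a Tx object from pre-allocated
 * resources: it takes a free Tx and the requested number of TxQs from the Tx
 * module, wires up the bnad callbacks, assigns interrupt vectors, TCBs,
 * unmap queues and queue page tables per TxQ, picks per-queue priorities and
 * leaves the Tx in the stopped state.
 */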
3537 bna_tx_create(struct bna *bna, struct bnad *bnad,
3538 struct bna_tx_config *tx_cfg,
3539 const struct bna_tx_event_cbfn *tx_cbfn,
3540 struct bna_res_info *res_info, void *priv)
3542 struct bna_intr_info *intr_info;
3543 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3545 struct bna_txq *txq;
3546 struct list_head *qe;
3550 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3551 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3558 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3563 tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3571 INIT_LIST_HEAD(&tx->txq_q);
3572 for (i = 0; i < tx_cfg->num_txq; i++) {
3573 if (list_empty(&tx_mod->txq_free_q))
3576 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3577 bfa_q_qe_init(&txq->qe);
3578 list_add_tail(&txq->qe, &tx->txq_q);
3588 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3589 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3590 /* Following callbacks are mandatory */
3591 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3592 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3593 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3595 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3597 tx->num_txq = tx_cfg->num_txq;
3600 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3602 case BNA_TX_T_REGULAR:
3603 if (!(tx->bna->tx_mod.flags &
3604 BNA_TX_MOD_F_ENET_LOOPBACK))
3605 tx->flags |= BNA_TX_F_ENET_STARTED;
3607 case BNA_TX_T_LOOPBACK:
3608 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3609 tx->flags |= BNA_TX_F_ENET_STARTED;
3617 list_for_each(qe, &tx->txq_q) {
3618 txq = (struct bna_txq *)qe;
3619 txq->tcb = (struct bna_tcb *)
3620 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3621 txq->tx_packets = 0;
3625 txq->ib.ib_seg_host_addr.lsb =
3626 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3627 txq->ib.ib_seg_host_addr.msb =
3628 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3629 txq->ib.ib_seg_host_addr_kva =
3630 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3631 txq->ib.intr_type = intr_info->intr_type;
3632 txq->ib.intr_vector = (intr_info->num == 1) ?
3633 intr_info->idl[0].vector :
3634 intr_info->idl[i].vector;
3635 if (intr_info->intr_type == BNA_INTR_T_INTX)
3636 txq->ib.intr_vector = BIT(txq->ib.intr_vector);
3637 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3638 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3639 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3643 txq->tcb->q_depth = tx_cfg->txq_depth;
3644 txq->tcb->unmap_q = (void *)
3645 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3646 txq->tcb->hw_consumer_index =
3647 (u32 *)txq->ib.ib_seg_host_addr_kva;
3648 txq->tcb->i_dbell = &txq->ib.door_bell;
3649 txq->tcb->intr_type = txq->ib.intr_type;
3650 txq->tcb->intr_vector = txq->ib.intr_vector;
3651 txq->tcb->txq = txq;
3652 txq->tcb->bnad = bnad;
3655 /* QPT, SWQPT, Pages */
3656 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3657 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3658 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3659 &res_info[BNA_TX_RES_MEM_T_PAGE].
3660 res_u.mem_info.mdl[i]);
3662 /* Callback to bnad for setting up TCB */
3663 if (tx->tcb_setup_cbfn)
3664 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3666 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3667 txq->priority = txq->tcb->id;
3669 txq->priority = tx_mod->default_prio;
3674 tx->txf_vlan_id = 0;
3676 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3678 tx_mod->rid_mask |= BIT(tx->rid);
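/*
 * Added note: bna_tx_destroy() undoes bna_tx_create(): it runs the optional
 * tcb_destroy callback for every TxQ and clears the Tx's rid bit in the Tx
 * module before the object is released.
 */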
3688 bna_tx_destroy(struct bna_tx *tx)
3690 struct bna_txq *txq;
3691 struct list_head *qe;
3693 list_for_each(qe, &tx->txq_q) {
3694 txq = (struct bna_txq *)qe;
3695 if (tx->tcb_destroy_cbfn)
3696 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3699 tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
3704 bna_tx_enable(struct bna_tx *tx)
3706 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3709 tx->flags |= BNA_TX_F_ENABLED;
3711 if (tx->flags & BNA_TX_F_ENET_STARTED)
3712 bfa_fsm_send_event(tx, TX_E_START);
3716 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3717 void (*cbfn)(void *, struct bna_tx *))
3719 if (type == BNA_SOFT_CLEANUP) {
3720 (*cbfn)(tx->bna->bnad, tx);
3724 tx->stop_cbfn = cbfn;
3725 tx->stop_cbarg = tx->bna->bnad;
3727 tx->flags &= ~BNA_TX_F_ENABLED;
3729 bfa_fsm_send_event(tx, TX_E_STOP);
3733 bna_tx_cleanup_complete(struct bna_tx *tx)
3735 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3739 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3741 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3743 bfa_wc_down(&tx_mod->tx_stop_wc);
3747 bna_tx_mod_cb_tx_stopped_all(void *arg)
3749 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3751 if (tx_mod->stop_cbfn)
3752 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3753 tx_mod->stop_cbfn = NULL;
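/*
 * Added note: bna_tx_mod_init() carves the Tx and TxQ arrays out of the
 * memory supplied in res_info, threads every element onto the free lists and
 * sets the default priority and iSCSI-over-CEE state.
 */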
3757 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3758 struct bna_res_info *res_info)
3765 tx_mod->tx = (struct bna_tx *)
3766 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3767 tx_mod->txq = (struct bna_txq *)
3768 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3770 INIT_LIST_HEAD(&tx_mod->tx_free_q);
3771 INIT_LIST_HEAD(&tx_mod->tx_active_q);
3773 INIT_LIST_HEAD(&tx_mod->txq_free_q);
3775 for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3776 tx_mod->tx[i].rid = i;
3777 bfa_q_qe_init(&tx_mod->tx[i].qe);
3778 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3779 bfa_q_qe_init(&tx_mod->txq[i].qe);
3780 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3783 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3784 tx_mod->default_prio = 0;
3785 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3786 tx_mod->iscsi_prio = -1;
3790 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3792 struct list_head *qe;
3796 list_for_each(qe, &tx_mod->tx_free_q)
3800 list_for_each(qe, &tx_mod->txq_free_q)
3807 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3810 struct list_head *qe;
3812 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3813 if (type == BNA_TX_T_LOOPBACK)
3814 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3816 list_for_each(qe, &tx_mod->tx_active_q) {
3817 tx = (struct bna_tx *)qe;
3818 if (tx->type == type)
3824 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3827 struct list_head *qe;
3829 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3830 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3832 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3834 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3836 list_for_each(qe, &tx_mod->tx_active_q) {
3837 tx = (struct bna_tx *)qe;
3838 if (tx->type == type) {
3839 bfa_wc_up(&tx_mod->tx_stop_wc);
3844 bfa_wc_wait(&tx_mod->tx_stop_wc);
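/*
 * Added note: bna_tx_mod_fail() is the ENET failure notification: it clears
 * the started/loopback flags and walks the active list so that each Tx runs
 * its failure handling in the state machine.
 */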
3848 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3851 struct list_head *qe;
3853 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3854 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3856 list_for_each(qe, &tx_mod->tx_active_q) {
3857 tx = (struct bna_tx *)qe;
3863 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3865 struct bna_txq *txq;
3866 struct list_head *qe;
3868 list_for_each(qe, &tx->txq_q) {
3869 txq = (struct bna_txq *)qe;
3870 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);