/* Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/* Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
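#include "bna.h"
#include "bfi.h"

/* IB */

/* Set the IB coalescing timeout and precompute the doorbell-ack value
 * that re-arms the interrupt block with that timeout.
 */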
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

/* RXF */
#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)
#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);
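/* RxF state machine: each bfa_fsm_state_decl() below generates the entry
 * function and event handler for one state of the Rx filter FSM.
 */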
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}
static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}
static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
}
static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
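/* The bna_bfi_*_req() helpers below build a BFI command in the rxf's
 * preallocated bfi_enet_cmd area and post it to the firmware message
 * queue; the firmware response re-enters the FSM as RXF_E_FW_RESP.
 */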
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (ether_addr_equal(mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (ether_addr_equal(mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}

static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}

static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* First delete multicast entries to maintain the count */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~BIT(block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}
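/* Reset multicast CAM state: flush pending deletes, then move active
 * entries back to the pending-add queue so they get re-applied later.
 */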
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}

static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}
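/* Apply at most one pending configuration change per call. Returns 1
 * while a firmware command was posted (cfg_wait must wait for the
 * response), 0 once nothing is left to apply.
 */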
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}

static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}
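/* Build the RSS indirection table from the CQ ids of the Rx paths */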
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}

void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
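/* Initialize an Rx filter: empty CAM queues, seed RSS/VLAN state from
 * the Rx configuration, and start the FSM in the stopped state.
 */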
static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
				    rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = NULL;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
	    bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	ether_addr_copy(mac->addr, addr);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
{
	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = uclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
	}

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	return BNA_CB_UCAST_CAM_FULL;
}
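/* Same scheme as bna_rx_ucast_listset(), but against the multicast CAM */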
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
{
	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_mcast_delall(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
		need_hw_config = 1;
	}

	if (need_hw_config)
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= BIT(group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= BIT(group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		ether_addr_copy(rxf->ucast_active_mac.addr,
				rxf->ucast_pending_mac->addr);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}

static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}

/* RX */
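/* BNA_GET_RXQS() counts one RxQ per path for single-queue Rx paths and
 * two (large + small/header) otherwise; SIZE_TO_PAGES() rounds a byte
 * size up to a page count.
 */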
#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)
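/* Copy a queue page table into the BFI queue descriptor handed to fw:
 * page-table DMA address, first-page address, page count and page size.
 */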
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}
static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* no-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;

	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* no-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
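/* Build and post the BFI Rx configuration: per-path queue set and
 * interrupt block setup, plus the Rx queue type derived from rxp->type.
 */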
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			if (q0->multi_buffer)
				/* multi-buffer is enabled by allocating
				 * a new rx with new set of resources.
				 * q0->buffer_size should be initialized to
				 * fragment size.
				 */
				cfg_req->rx_cfg.multi_buffer =
					BNA_STATUS_T_ENABLED;
			else
				q0->buffer_size =
					bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;

		default:
			BUG_ON(1);
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;

	default:
		BUG_ON(1);
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}
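/* Check that enough Rx objects are free to satisfy this configuration */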
static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	return 1;
}
static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}

static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	if (type == BNA_RX_T_REGULAR)
		bfa_q_deq(&rx_mod->rx_free_q, &qe);
	else
		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);

	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	rx->type = type;

	return rx;
}
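/* Return an Rx object to the free list, keeping the list sorted by rid */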
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}
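/* Attach the large (q0) and optional small/header (q1) RxQs to a path */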
static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}
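/* The two *_qpt_setup() helpers below populate the hardware and software
 * queue page tables from the previously allocated page memory.
 */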
static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
	rxq->rcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}

static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
	rxp->cq.ccb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
}

static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate Enet is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
}

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
			struct bna_res_info *res_info)
{
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		bfa_q_qe_init(&rx_ptr->qe);
		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->bna = NULL;
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}
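/* Firmware response to Rx cfg set: copy out the response, hook up
 * doorbell addresses and hardware queue ids, then reset the indexes.
 */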
void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}

void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}
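/* Fill in the memory and interrupt resources an Rx of this configuration
 * needs; queue depths are rounded up to powers of two and page-aligned.
 */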
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q0_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
	cq_depth = roundup_pow_of_two(dq_depth + hq_depth);

	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	dq_depth = roundup_pow_of_two(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		hq_depth = roundup_pow_of_two(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;

	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * cpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * dpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * hpage_count;
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
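/* Allocate and wire up an Rx object from the pre-created pool using the
 * resources reserved via bna_rx_res_req().
 */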
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	struct bna_mem_descr *hqunmap_mem;
	struct bna_mem_descr *dqunmap_mem;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	u32 dpage_count, hpage_count;
	u32 hq_idx, dq_idx, rcb_idx;
	u32 cq_depth, i;
	u32 page_count;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
	hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
			PAGE_SIZE;
	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
			PAGE_SIZE;
	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
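	/* Per-path setup: claim an RxP and its RxQ(s), pick the interrupt
	 * vector, and program the interrupt block (IB) coalescing values.
	 */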
	for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
			i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = BIT(rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;
		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
		rcb_idx++; dq_idx++;
		q0->rcb->q_depth = rx_cfg->q0_depth;
		q0->q_depth = rx_cfg->q0_depth;
		q0->multi_buffer = rx_cfg->q0_multi_buf;
		q0->buffer_size = rx_cfg->q0_buf_size;
		q0->num_vecs = rx_cfg->q0_num_vecs;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;
			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
			rcb_idx++; hq_idx++;
			q1->rcb->q_depth = rx_cfg->q1_depth;
			q1->q_depth = rx_cfg->q1_depth;
			q1->multi_buffer = BNA_STATUS_T_DISABLED;
			q1->num_vecs = 1;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->q1_buf_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		cq_depth = rx_cfg->q0_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q1_depth);
		/* if multi-buffer is enabled sum of q0_depth
		 * and q1_depth need not be a power of 2
		 */
		cq_depth = roundup_pow_of_two(cq_depth);
		rxp->cq.ccb->q_depth = cq_depth;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= BIT(rx->rid);

	return rx;
}
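
/*
 * bna_rx_destroy - undoes bna_rx_create: uninitializes the RxF, returns
 * every RxQ and RxP to the free lists (invoking the optional rcb/ccb
 * destroy callbacks), unlinks the Rx from the active queue and clears
 * its RID bit.
 */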
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);
		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~BIT(rx->rid);
	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}
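
/*
 * Enable/disable gate the Rx state machine: RX_E_START is sent only
 * once the enet layer has started the Rx path, and a hard disable
 * defers the caller's callback until the FSM reaches stopped.
 */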
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as already stopped */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;
		rx->rx_flags &= ~BNA_RX_F_ENABLED;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}

void
bna_rx_vlan_strip_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_strip_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
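
/*
 * bna_rx_mode_set - applies promiscuous/default/allmulti mode changes.
 * Promiscuous and default modes are mutually exclusive and may be owned
 * by at most one Rx function (RID) in the system, hence the error
 * checks below.
 */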
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask)
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */
	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */
	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */
	if (need_hw_config) {
		rxf->cam_fltr_cbfn = NULL;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}

void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
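
/*
 * Dynamic interrupt moderation: bna_rx_dim_update() converts the CQ's
 * packet-rate sample into a BNA_LOAD_T_* level and a small/large packet
 * bias, then looks the new coalescing timeout up in rx_mod.dim_vector,
 * which bna_rx_dim_reconfig() below (re)loads from a caller-supplied
 * table such as bna_napi_dim_vector[].
 */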
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}

void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */
	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;
	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}

const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};

/* TX */
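
/*
 * call_tx_stop_cbfn - fires the pending Tx stop callback exactly once,
 * clearing stop_cbfn/stop_cbarg before the call so a re-entrant stop
 * cannot invoke it twice.
 */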
#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)

static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START = 1,
	TX_E_STOP = 2,
	TX_E_FAIL = 3,
	TX_E_STARTED = 4,
	TX_E_STOPPED = 5,
	TX_E_PRIO_CHANGE = 6,
	TX_E_CLEANUP_DONE = 7,
	TX_E_BW_UPDATE = 8,
};
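
/*
 * Tx state machine: TX_E_START moves stopped -> start_wait, and
 * TX_E_STARTED then lands in started (or prio_stop_wait if a priority
 * or bandwidth change arrived meanwhile). Stops drain through
 * stop_wait/cleanup_wait, failures through failed/quiesce_wait, and
 * prio/BW updates recycle a started Tx via prio_stop_wait ->
 * prio_cleanup_wait -> start_wait.
 */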
bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);

static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;
	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
					BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;
	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;
	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_STARTED:
		/*
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_FAIL:
		/* No-op */
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}
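
/*
 * bna_bfi_tx_enet_start - builds a BFI_ENET_H2I_TX_CFG_SET_REQ carrying
 * the queue page tables, per-queue priority and IB settings for every
 * TxQ, and posts it to the FW message queue; the response is handled by
 * bna_bfi_tx_enet_start_rsp() further below.
 */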
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
		htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
		htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
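
/*
 * bna_txq_qpt_setup - populates the hardware queue page table (QPT)
 * with the DMA address of each queue page and mirrors the kernel
 * virtual addresses in the software QPT used by the driver fast path.
 */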
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
	struct bna_mem_descr *qpt_mem,
	struct bna_mem_descr *swqpt_mem,
	struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;

		dma += PAGE_SIZE;
	}
}

static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}

static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
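
/*
 * bna_tx_start/stop/fail - module-level hooks into the Tx FSM; stop
 * routes its completion through bna_tx_mod_cb_tx_stopped() so the
 * tx_mod wait counter is decremented once per stopped Tx.
 */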
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}

void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
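
/*
 * bna_tx_res_req - sizes the per-TxQ resources: TCB kva, QPT and
 * software QPT arrays, queue pages (txq_depth work items rounded up to
 * whole pages), IB index segments, and one MSI-X vector per TxQ.
 */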
static void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
		BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
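
/*
 * bna_tx_create - the Tx analogue of bna_rx_create(): claims a bna_tx
 * and its TxQs from the free lists, wires up callbacks, IBs and TCBs,
 * and leaves the new Tx in the stopped state with its RID marked used.
 */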
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
			PAGE_SIZE;

	/*
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = BIT(txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= BIT(tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
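
/*
 * bna_tx_destroy - invokes the optional TCB destroy callback for each
 * TxQ, clears the RID bit and returns the Tx and its queues via
 * bna_tx_free().
 */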
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
	bna_tx_free(tx);
}

void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;
	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;
	tx->flags &= ~BNA_TX_F_ENABLED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}

static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}

void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);
	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}

void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}

void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}

void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}

void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}

void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}