2 * Copyright (C) 2005 - 2011 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
21 /* Must be a power of 2 or else MODULO will BUG_ON */
22 static int be_get_temp_freq = 64;
24 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
26 return wrb->payload.embedded_payload;
29 static void be_mcc_notify(struct be_adapter *adapter)
31 struct be_queue_info *mccq = &adapter->mcc_obj.q;
34 if (be_error(adapter))
37 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
38 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
41 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
44 /* To check if valid bit is set, check the entire word as we don't know
45 * the endianness of the data (old entry is host endian while a new entry is always little endian) */
47 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
49 if (compl->flags != 0) {
50 compl->flags = le32_to_cpu(compl->flags);
51 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
58 /* Need to reset the entire word that houses the valid bit */
59 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
64 static int be_mcc_compl_process(struct be_adapter *adapter,
65 struct be_mcc_compl *compl)
67 u16 compl_status, extd_status;
69 /* Just swap the status to host endian; mcc tag is opaquely copied from mcc_wrb */
71 be_dws_le_to_cpu(compl, 4);
73 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
74 CQE_STATUS_COMPL_MASK;
76 if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
77 (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
78 (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
79 adapter->flash_status = compl_status;
80 complete(&adapter->flash_compl);
83 if (compl_status == MCC_STATUS_SUCCESS) {
84 if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
85 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
86 (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
87 be_parse_stats(adapter);
88 adapter->stats_cmd_sent = false;
91 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
92 struct be_mcc_wrb *mcc_wrb =
93 queue_index_node(&adapter->mcc_obj.q,
95 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
96 embedded_payload(mcc_wrb);
97 adapter->drv_stats.be_on_die_temperature =
98 resp->on_die_temperature;
101 if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
102 be_get_temp_freq = 0;
104 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
105 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
108 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
109 dev_warn(&adapter->pdev->dev, "This domain (VM) is not "
110 "permitted to execute this cmd (opcode %d)\n",
113 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
114 CQE_STATUS_EXTD_MASK;
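		/* Example (illustration only, assuming the usual 16-bit split of
		 * compl->status): status = 0x003d0001 would yield
		 * compl_status = 0x0001 and extd_status = 0x003d.
		 */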
115 dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
116 "status %d, extd-status %d\n",
117 compl->tag0, compl_status, extd_status);
124 /* Link state evt is a string of bytes; no need for endian swapping */
125 static void be_async_link_state_process(struct be_adapter *adapter,
126 struct be_async_event_link_state *evt)
128 /* When link status changes, link speed must be re-queried from FW */
129 adapter->link_speed = -1;
131 /* For the initial link status do not rely on the ASYNC event as
132 * it may not be received in some cases.
134 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
135 be_link_status_update(adapter, evt->port_link_status);
138 /* Grp5 CoS Priority evt */
139 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
140 struct be_async_event_grp5_cos_priority *evt)
143 adapter->vlan_prio_bmap = evt->available_priority_bmap;
144 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
145 adapter->recommended_prio =
146 evt->reco_default_priority << VLAN_PRIO_SHIFT;
150 /* Grp5 QOS Speed evt */
151 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
152 struct be_async_event_grp5_qos_link_speed *evt)
154 if (evt->physical_port == adapter->port_num) {
155 /* qos_link_speed is in units of 10 Mbps */
156 adapter->link_speed = evt->qos_link_speed * 10;
161 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
162 struct be_async_event_grp5_pvid_state *evt)
165 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
170 static void be_async_grp5_evt_process(struct be_adapter *adapter,
171 u32 trailer, struct be_mcc_compl *evt)
175 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
176 ASYNC_TRAILER_EVENT_TYPE_MASK;
178 switch (event_type) {
179 case ASYNC_EVENT_COS_PRIORITY:
180 be_async_grp5_cos_priority_process(adapter,
181 (struct be_async_event_grp5_cos_priority *)evt);
183 case ASYNC_EVENT_QOS_SPEED:
184 be_async_grp5_qos_speed_process(adapter,
185 (struct be_async_event_grp5_qos_link_speed *)evt);
187 case ASYNC_EVENT_PVID_STATE:
188 be_async_grp5_pvid_state_process(adapter,
189 (struct be_async_event_grp5_pvid_state *)evt);
192 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
197 static inline bool is_link_state_evt(u32 trailer)
199 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
200 ASYNC_TRAILER_EVENT_CODE_MASK) ==
201 ASYNC_EVENT_CODE_LINK_STATE;
204 static inline bool is_grp5_evt(u32 trailer)
206 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
207 ASYNC_TRAILER_EVENT_CODE_MASK) ==
208 ASYNC_EVENT_CODE_GRP_5);
211 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
213 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
214 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
216 if (be_mcc_compl_is_new(compl)) {
217 queue_tail_inc(mcc_cq);
223 void be_async_mcc_enable(struct be_adapter *adapter)
225 spin_lock_bh(&adapter->mcc_cq_lock);
227 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
228 adapter->mcc_obj.rearm_cq = true;
230 spin_unlock_bh(&adapter->mcc_cq_lock);
233 void be_async_mcc_disable(struct be_adapter *adapter)
235 adapter->mcc_obj.rearm_cq = false;
238 int be_process_mcc(struct be_adapter *adapter)
240 struct be_mcc_compl *compl;
241 int num = 0, status = 0;
242 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
244 spin_lock_bh(&adapter->mcc_cq_lock);
245 while ((compl = be_mcc_compl_get(adapter))) {
246 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
247 /* Interpret flags as an async trailer */
248 if (is_link_state_evt(compl->flags))
249 be_async_link_state_process(adapter,
250 (struct be_async_event_link_state *) compl);
251 else if (is_grp5_evt(compl->flags))
252 be_async_grp5_evt_process(adapter,
253 compl->flags, compl);
254 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
255 status = be_mcc_compl_process(adapter, compl);
256 atomic_dec(&mcc_obj->q.used);
258 be_mcc_compl_use(compl);
263 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
265 spin_unlock_bh(&adapter->mcc_cq_lock);
269 /* Wait till no more pending mcc requests are present */
270 static int be_mcc_wait_compl(struct be_adapter *adapter)
272 #define mcc_timeout 120000 /* 12s timeout */
274 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
276 for (i = 0; i < mcc_timeout; i++) {
277 if (be_error(adapter))
280 status = be_process_mcc(adapter);
282 if (atomic_read(&mcc_obj->q.used) == 0)
286 if (i == mcc_timeout) {
287 dev_err(&adapter->pdev->dev, "FW not responding\n");
288 adapter->fw_timeout = true;
294 /* Notify MCC requests and wait for completion */
295 static int be_mcc_notify_wait(struct be_adapter *adapter)
297 be_mcc_notify(adapter);
298 return be_mcc_wait_compl(adapter);
301 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
307 if (be_error(adapter))
310 ready = ioread32(db);
311 if (ready == 0xffffffff)
314 ready &= MPU_MAILBOX_DB_RDY_MASK;
319 dev_err(&adapter->pdev->dev, "FW not responding\n");
320 adapter->fw_timeout = true;
321 be_detect_dump_ue(adapter);
333 * Inserts the mailbox address into the doorbell in two steps
334 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
336 static int be_mbox_notify_wait(struct be_adapter *adapter)
340 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
341 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
342 struct be_mcc_mailbox *mbox = mbox_mem->va;
343 struct be_mcc_compl *compl = &mbox->compl;
345 /* wait for ready to be set */
346 status = be_mbox_db_ready_wait(adapter, db);
350 val |= MPU_MAILBOX_DB_HI_MASK;
351 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
352 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
355 /* wait for ready to be set */
356 status = be_mbox_db_ready_wait(adapter, db);
361 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
362 val |= (u32)(mbox_mem->dma >> 4) << 2;
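	/* Worked example (illustration only, not from the original source):
	 * for mbox_mem->dma = 0x0000001234567890ULL,
	 *   HI write: (upper_32_bits(dma) >> 2) << 2 = 0x00000010, OR'ed with
	 *             MPU_MAILBOX_DB_HI_MASK
	 *   LO write: ((u32)(dma >> 4)) << 2         = 0x8d159e24
	 * i.e. doorbell bits 31:2 carry address bits 63:34, then bits 33:4;
	 * address bits 3:0 are assumed zero (the mailbox is 16-byte aligned).
	 */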
365 status = be_mbox_db_ready_wait(adapter, db);
369 /* A cq entry has been made now */
370 if (be_mcc_compl_is_new(compl)) {
371 status = be_mcc_compl_process(adapter, &mbox->compl);
372 be_mcc_compl_use(compl);
376 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
382 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
386 if (lancer_chip(adapter))
387 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
389 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
391 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
392 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
398 int be_cmd_POST(struct be_adapter *adapter)
401 int status, timeout = 0;
402 struct device *dev = &adapter->pdev->dev;
405 status = be_POST_stage_get(adapter, &stage);
407 dev_err(dev, "POST error; stage=0x%x\n", stage);
409 } else if (stage != POST_STAGE_ARMFW_RDY) {
410 if (msleep_interruptible(2000)) {
411 dev_err(dev, "Waiting for POST aborted\n");
418 } while (timeout < 60);
420 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
425 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
427 return &wrb->payload.sgl[0];
431 /* Don't touch the hdr after it's prepared */
432 /* mem will be NULL for embedded commands */
433 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
434 u8 subsystem, u8 opcode, int cmd_len,
435 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
439 req_hdr->opcode = opcode;
440 req_hdr->subsystem = subsystem;
441 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
442 req_hdr->version = 0;
445 wrb->tag1 = subsystem;
446 wrb->payload_length = cmd_len;
448 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
449 MCC_WRB_SGE_CNT_SHIFT;
450 sge = nonembedded_sgl(wrb);
451 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
452 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
453 sge->len = cpu_to_le32(mem->size);
455 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
456 be_dws_cpu_to_le(wrb, 8);
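/* Usage sketch (illustration only, values assumed): a 4K-aligned ring with
 * mem->size = 16384 spans PAGES_4K_SPANNED() = 4 pages; be_cmd_page_addrs_prepare()
 * below then fills pages[0..3], presumably one lo/hi DMA-address pair per
 * spanned 4K page.
 */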
459 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
460 struct be_dma_mem *mem)
462 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
463 u64 dma = (u64)mem->dma;
465 for (i = 0; i < buf_pages; i++) {
466 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
467 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
472 /* Converts interrupt delay in microseconds to multiplier value */
473 static u32 eq_delay_to_mult(u32 usec_delay)
475 #define MAX_INTR_RATE 651042
476 const u32 round = 10;
482 u32 interrupt_rate = 1000000 / usec_delay;
483 /* Max delay, corresponding to the lowest interrupt rate */
484 if (interrupt_rate == 0)
487 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
488 multiplier /= interrupt_rate;
489 /* Round the multiplier to the closest value. */
490 multiplier = (multiplier + round/2) / round;
491 multiplier = min(multiplier, (u32)1023);
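	/* Worked example (illustration only): usec_delay = 96 gives
	 * interrupt_rate = 1000000 / 96 = 10416,
	 * multiplier     = (651042 - 10416) * 10 / 10416 = 615,
	 * rounded        = (615 + 5) / 10 = 62,
	 * which is below the 1023 cap, so the computed multiplier is 62.
	 */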
497 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
499 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
500 struct be_mcc_wrb *wrb
501 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
502 memset(wrb, 0, sizeof(*wrb));
506 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
508 struct be_queue_info *mccq = &adapter->mcc_obj.q;
509 struct be_mcc_wrb *wrb;
511 if (atomic_read(&mccq->used) >= mccq->len) {
512 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
516 wrb = queue_head_node(mccq);
517 queue_head_inc(mccq);
518 atomic_inc(&mccq->used);
519 memset(wrb, 0, sizeof(*wrb));
523 /* Tell fw we're about to start firing cmds by writing a
524 * special pattern across the wrb hdr; uses mbox
526 int be_cmd_fw_init(struct be_adapter *adapter)
531 if (mutex_lock_interruptible(&adapter->mbox_lock))
534 wrb = (u8 *)wrb_from_mbox(adapter);
544 status = be_mbox_notify_wait(adapter);
546 mutex_unlock(&adapter->mbox_lock);
550 /* Tell fw we're done with firing cmds by writing a
551 * special pattern across the wrb hdr; uses mbox
553 int be_cmd_fw_clean(struct be_adapter *adapter)
558 if (mutex_lock_interruptible(&adapter->mbox_lock))
561 wrb = (u8 *)wrb_from_mbox(adapter);
571 status = be_mbox_notify_wait(adapter);
573 mutex_unlock(&adapter->mbox_lock);
576 int be_cmd_eq_create(struct be_adapter *adapter,
577 struct be_queue_info *eq, int eq_delay)
579 struct be_mcc_wrb *wrb;
580 struct be_cmd_req_eq_create *req;
581 struct be_dma_mem *q_mem = &eq->dma_mem;
584 if (mutex_lock_interruptible(&adapter->mbox_lock))
587 wrb = wrb_from_mbox(adapter);
588 req = embedded_payload(wrb);
590 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
591 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
593 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
595 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
597 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
598 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
599 __ilog2_u32(eq->len/256));
600 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
601 eq_delay_to_mult(eq_delay));
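	/* Example (illustration only): an EQ with eq->len = 1024 entries
	 * encodes count as __ilog2_u32(1024 / 256) = 2; delaymult comes from
	 * eq_delay_to_mult() above (see the worked example there).
	 */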
602 be_dws_cpu_to_le(req->context, sizeof(req->context));
604 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
606 status = be_mbox_notify_wait(adapter);
608 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
609 eq->id = le16_to_cpu(resp->eq_id);
613 mutex_unlock(&adapter->mbox_lock);
618 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
619 u8 type, bool permanent, u32 if_handle, u32 pmac_id)
621 struct be_mcc_wrb *wrb;
622 struct be_cmd_req_mac_query *req;
625 spin_lock_bh(&adapter->mcc_lock);
627 wrb = wrb_from_mccq(adapter);
632 req = embedded_payload(wrb);
634 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
635 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
640 req->if_id = cpu_to_le16((u16) if_handle);
641 req->pmac_id = cpu_to_le32(pmac_id);
645 status = be_mcc_notify_wait(adapter);
647 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
648 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
652 spin_unlock_bh(&adapter->mcc_lock);
656 /* Uses synchronous MCCQ */
657 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
658 u32 if_id, u32 *pmac_id, u32 domain)
660 struct be_mcc_wrb *wrb;
661 struct be_cmd_req_pmac_add *req;
664 spin_lock_bh(&adapter->mcc_lock);
666 wrb = wrb_from_mccq(adapter);
671 req = embedded_payload(wrb);
673 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
674 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
676 req->hdr.domain = domain;
677 req->if_id = cpu_to_le32(if_id);
678 memcpy(req->mac_address, mac_addr, ETH_ALEN);
680 status = be_mcc_notify_wait(adapter);
682 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
683 *pmac_id = le32_to_cpu(resp->pmac_id);
687 spin_unlock_bh(&adapter->mcc_lock);
689 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
695 /* Uses synchronous MCCQ */
696 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
698 struct be_mcc_wrb *wrb;
699 struct be_cmd_req_pmac_del *req;
705 spin_lock_bh(&adapter->mcc_lock);
707 wrb = wrb_from_mccq(adapter);
712 req = embedded_payload(wrb);
714 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
715 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
717 req->hdr.domain = dom;
718 req->if_id = cpu_to_le32(if_id);
719 req->pmac_id = cpu_to_le32(pmac_id);
721 status = be_mcc_notify_wait(adapter);
724 spin_unlock_bh(&adapter->mcc_lock);
729 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
730 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
732 struct be_mcc_wrb *wrb;
733 struct be_cmd_req_cq_create *req;
734 struct be_dma_mem *q_mem = &cq->dma_mem;
738 if (mutex_lock_interruptible(&adapter->mbox_lock))
741 wrb = wrb_from_mbox(adapter);
742 req = embedded_payload(wrb);
743 ctxt = &req->context;
745 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
746 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
748 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
749 if (lancer_chip(adapter)) {
750 req->hdr.version = 2;
751 req->page_size = 1; /* 1 for 4K */
752 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
754 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
755 __ilog2_u32(cq->len/256));
756 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
757 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
759 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
762 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
764 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
766 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
767 __ilog2_u32(cq->len/256));
768 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
769 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
770 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
773 be_dws_cpu_to_le(ctxt, sizeof(req->context));
775 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
777 status = be_mbox_notify_wait(adapter);
779 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
780 cq->id = le16_to_cpu(resp->cq_id);
784 mutex_unlock(&adapter->mbox_lock);
789 static u32 be_encoded_q_len(int q_len)
791 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
792 if (len_encoded == 16)
797 int be_cmd_mccq_ext_create(struct be_adapter *adapter,
798 struct be_queue_info *mccq,
799 struct be_queue_info *cq)
801 struct be_mcc_wrb *wrb;
802 struct be_cmd_req_mcc_ext_create *req;
803 struct be_dma_mem *q_mem = &mccq->dma_mem;
807 if (mutex_lock_interruptible(&adapter->mbox_lock))
810 wrb = wrb_from_mbox(adapter);
811 req = embedded_payload(wrb);
812 ctxt = &req->context;
814 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
815 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
817 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
818 if (lancer_chip(adapter)) {
819 req->hdr.version = 1;
820 req->cq_id = cpu_to_le16(cq->id);
822 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
823 be_encoded_q_len(mccq->len));
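		/* Example (illustration only): a 128-entry MCCQ encodes its
		 * ring_size as be_encoded_q_len(128) = fls(128) = 8.
		 */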
824 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
825 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
827 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
831 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
832 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
833 be_encoded_q_len(mccq->len));
834 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
837 /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
838 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
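	/* 0x22 = BIT(1) | BIT(5); presumably these bit positions correspond
	 * to the link-state and group-5 async event codes named above
	 * (assumption, not verified against the event-code definitions).
	 */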
839 be_dws_cpu_to_le(ctxt, sizeof(req->context));
841 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
843 status = be_mbox_notify_wait(adapter);
845 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
846 mccq->id = le16_to_cpu(resp->id);
847 mccq->created = true;
849 mutex_unlock(&adapter->mbox_lock);
854 int be_cmd_mccq_org_create(struct be_adapter *adapter,
855 struct be_queue_info *mccq,
856 struct be_queue_info *cq)
858 struct be_mcc_wrb *wrb;
859 struct be_cmd_req_mcc_create *req;
860 struct be_dma_mem *q_mem = &mccq->dma_mem;
864 if (mutex_lock_interruptible(&adapter->mbox_lock))
867 wrb = wrb_from_mbox(adapter);
868 req = embedded_payload(wrb);
869 ctxt = &req->context;
871 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
872 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
874 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
876 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
877 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
878 be_encoded_q_len(mccq->len));
879 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
881 be_dws_cpu_to_le(ctxt, sizeof(req->context));
883 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
885 status = be_mbox_notify_wait(adapter);
887 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
888 mccq->id = le16_to_cpu(resp->id);
889 mccq->created = true;
892 mutex_unlock(&adapter->mbox_lock);
896 int be_cmd_mccq_create(struct be_adapter *adapter,
897 struct be_queue_info *mccq,
898 struct be_queue_info *cq)
902 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
903 if (status && !lancer_chip(adapter)) {
904 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
905 "or newer to avoid conflicting priorities between NIC "
907 status = be_cmd_mccq_org_create(adapter, mccq, cq);
912 int be_cmd_txq_create(struct be_adapter *adapter,
913 struct be_queue_info *txq,
914 struct be_queue_info *cq)
916 struct be_mcc_wrb *wrb;
917 struct be_cmd_req_eth_tx_create *req;
918 struct be_dma_mem *q_mem = &txq->dma_mem;
922 spin_lock_bh(&adapter->mcc_lock);
924 wrb = wrb_from_mccq(adapter);
930 req = embedded_payload(wrb);
931 ctxt = &req->context;
933 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
934 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
936 if (lancer_chip(adapter)) {
937 req->hdr.version = 1;
938 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
942 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
943 req->ulp_num = BE_ULP1_NUM;
944 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
946 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
947 be_encoded_q_len(txq->len));
948 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
949 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
951 be_dws_cpu_to_le(ctxt, sizeof(req->context));
953 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
955 status = be_mcc_notify_wait(adapter);
957 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
958 txq->id = le16_to_cpu(resp->cid);
963 spin_unlock_bh(&adapter->mcc_lock);
969 int be_cmd_rxq_create(struct be_adapter *adapter,
970 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
971 u32 if_id, u32 rss, u8 *rss_id)
973 struct be_mcc_wrb *wrb;
974 struct be_cmd_req_eth_rx_create *req;
975 struct be_dma_mem *q_mem = &rxq->dma_mem;
978 spin_lock_bh(&adapter->mcc_lock);
980 wrb = wrb_from_mccq(adapter);
985 req = embedded_payload(wrb);
987 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
988 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
990 req->cq_id = cpu_to_le16(cq_id);
991 req->frag_size = fls(frag_size) - 1;
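	/* Example (illustration only): assuming frag_size is a power of two,
	 * e.g. frag_size = 2048, the encoded value is fls(2048) - 1 = 11,
	 * i.e. log2 of the fragment size.
	 */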
993 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
994 req->interface_id = cpu_to_le32(if_id);
995 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
996 req->rss_queue = cpu_to_le32(rss);
998 status = be_mcc_notify_wait(adapter);
1000 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1001 rxq->id = le16_to_cpu(resp->id);
1002 rxq->created = true;
1003 *rss_id = resp->rss_id;
1007 spin_unlock_bh(&adapter->mcc_lock);
1011 /* Generic destroyer function for all types of queues
1014 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1017 struct be_mcc_wrb *wrb;
1018 struct be_cmd_req_q_destroy *req;
1019 u8 subsys = 0, opcode = 0;
1022 if (mutex_lock_interruptible(&adapter->mbox_lock))
1025 wrb = wrb_from_mbox(adapter);
1026 req = embedded_payload(wrb);
1028 switch (queue_type) {
1030 subsys = CMD_SUBSYSTEM_COMMON;
1031 opcode = OPCODE_COMMON_EQ_DESTROY;
1034 subsys = CMD_SUBSYSTEM_COMMON;
1035 opcode = OPCODE_COMMON_CQ_DESTROY;
1038 subsys = CMD_SUBSYSTEM_ETH;
1039 opcode = OPCODE_ETH_TX_DESTROY;
1042 subsys = CMD_SUBSYSTEM_ETH;
1043 opcode = OPCODE_ETH_RX_DESTROY;
1046 subsys = CMD_SUBSYSTEM_COMMON;
1047 opcode = OPCODE_COMMON_MCC_DESTROY;
1053 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1055 req->id = cpu_to_le16(q->id);
1057 status = be_mbox_notify_wait(adapter);
1061 mutex_unlock(&adapter->mbox_lock);
1066 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1068 struct be_mcc_wrb *wrb;
1069 struct be_cmd_req_q_destroy *req;
1072 spin_lock_bh(&adapter->mcc_lock);
1074 wrb = wrb_from_mccq(adapter);
1079 req = embedded_payload(wrb);
1081 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1082 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1083 req->id = cpu_to_le16(q->id);
1085 status = be_mcc_notify_wait(adapter);
1090 spin_unlock_bh(&adapter->mcc_lock);
1094 /* Create an rx filtering policy configuration on an i/f
1097 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1098 u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
1100 struct be_mcc_wrb *wrb;
1101 struct be_cmd_req_if_create *req;
1104 spin_lock_bh(&adapter->mcc_lock);
1106 wrb = wrb_from_mccq(adapter);
1111 req = embedded_payload(wrb);
1113 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1114 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1115 req->hdr.domain = domain;
1116 req->capability_flags = cpu_to_le32(cap_flags);
1117 req->enable_flags = cpu_to_le32(en_flags);
1119 memcpy(req->mac_addr, mac, ETH_ALEN);
1121 req->pmac_invalid = true;
1123 status = be_mcc_notify_wait(adapter);
1125 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1126 *if_handle = le32_to_cpu(resp->interface_id);
1128 *pmac_id = le32_to_cpu(resp->pmac_id);
1132 spin_unlock_bh(&adapter->mcc_lock);
1137 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1139 struct be_mcc_wrb *wrb;
1140 struct be_cmd_req_if_destroy *req;
1143 if (interface_id == -1)
1146 spin_lock_bh(&adapter->mcc_lock);
1148 wrb = wrb_from_mccq(adapter);
1153 req = embedded_payload(wrb);
1155 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1156 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1157 req->hdr.domain = domain;
1158 req->interface_id = cpu_to_le32(interface_id);
1160 status = be_mcc_notify_wait(adapter);
1162 spin_unlock_bh(&adapter->mcc_lock);
1166 /* Get stats is a non-embedded command: the request is not embedded inside
1167 * the WRB but is a separate dma memory block
1168 * Uses asynchronous MCC
1170 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1172 struct be_mcc_wrb *wrb;
1173 struct be_cmd_req_hdr *hdr;
1176 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1177 be_cmd_get_die_temperature(adapter);
1179 spin_lock_bh(&adapter->mcc_lock);
1181 wrb = wrb_from_mccq(adapter);
1186 hdr = nonemb_cmd->va;
1188 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1189 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1191 if (adapter->generation == BE_GEN3)
1194 be_mcc_notify(adapter);
1195 adapter->stats_cmd_sent = true;
1198 spin_unlock_bh(&adapter->mcc_lock);
1203 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1204 struct be_dma_mem *nonemb_cmd)
1207 struct be_mcc_wrb *wrb;
1208 struct lancer_cmd_req_pport_stats *req;
1211 spin_lock_bh(&adapter->mcc_lock);
1213 wrb = wrb_from_mccq(adapter);
1218 req = nonemb_cmd->va;
1220 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1221 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1224 req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
1225 req->cmd_params.params.reset_stats = 0;
1227 be_mcc_notify(adapter);
1228 adapter->stats_cmd_sent = true;
1231 spin_unlock_bh(&adapter->mcc_lock);
1235 /* Uses synchronous mcc */
1236 int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1237 u16 *link_speed, u8 *link_status, u32 dom)
1239 struct be_mcc_wrb *wrb;
1240 struct be_cmd_req_link_status *req;
1243 spin_lock_bh(&adapter->mcc_lock);
1246 *link_status = LINK_DOWN;
1248 wrb = wrb_from_mccq(adapter);
1253 req = embedded_payload(wrb);
1255 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1256 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1258 if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
1259 req->hdr.version = 1;
1261 req->hdr.domain = dom;
1263 status = be_mcc_notify_wait(adapter);
1265 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1266 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
1268 *link_speed = le16_to_cpu(resp->link_speed);
1270 *mac_speed = resp->mac_speed;
1273 *link_status = resp->logical_link_status;
1277 spin_unlock_bh(&adapter->mcc_lock);
1281 /* Uses asynchronous mcc */
1282 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1284 struct be_mcc_wrb *wrb;
1285 struct be_cmd_req_get_cntl_addnl_attribs *req;
1289 spin_lock_bh(&adapter->mcc_lock);
1291 mccq_index = adapter->mcc_obj.q.head;
1293 wrb = wrb_from_mccq(adapter);
1298 req = embedded_payload(wrb);
1300 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1301 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1304 wrb->tag1 = mccq_index;
1306 be_mcc_notify(adapter);
1309 spin_unlock_bh(&adapter->mcc_lock);
1313 /* Uses synchronous mcc */
1314 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1316 struct be_mcc_wrb *wrb;
1317 struct be_cmd_req_get_fat *req;
1320 spin_lock_bh(&adapter->mcc_lock);
1322 wrb = wrb_from_mccq(adapter);
1327 req = embedded_payload(wrb);
1329 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1330 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1331 req->fat_operation = cpu_to_le32(QUERY_FAT);
1332 status = be_mcc_notify_wait(adapter);
1334 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1335 if (log_size && resp->log_size)
1336 *log_size = le32_to_cpu(resp->log_size) -
1340 spin_unlock_bh(&adapter->mcc_lock);
1344 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1346 struct be_dma_mem get_fat_cmd;
1347 struct be_mcc_wrb *wrb;
1348 struct be_cmd_req_get_fat *req;
1349 u32 offset = 0, total_size, buf_size,
1350 log_offset = sizeof(u32), payload_len;
1356 total_size = buf_len;
1358 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1359 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1362 if (!get_fat_cmd.va) {
1364 dev_err(&adapter->pdev->dev,
1365 "Memory allocation failure while retrieving FAT data\n");
1369 spin_lock_bh(&adapter->mcc_lock);
1371 while (total_size) {
1372 buf_size = min(total_size, (u32)60*1024);
1373 total_size -= buf_size;
1375 wrb = wrb_from_mccq(adapter);
1380 req = get_fat_cmd.va;
1382 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1383 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1384 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1387 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1388 req->read_log_offset = cpu_to_le32(log_offset);
1389 req->read_log_length = cpu_to_le32(buf_size);
1390 req->data_buffer_size = cpu_to_le32(buf_size);
1392 status = be_mcc_notify_wait(adapter);
1394 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1395 memcpy(buf + offset,
1397 le32_to_cpu(resp->read_log_length));
1399 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1403 log_offset += buf_size;
1406 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1409 spin_unlock_bh(&adapter->mcc_lock);
1412 /* Uses synchronous mcc */
1413 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1416 struct be_mcc_wrb *wrb;
1417 struct be_cmd_req_get_fw_version *req;
1420 spin_lock_bh(&adapter->mcc_lock);
1422 wrb = wrb_from_mccq(adapter);
1428 req = embedded_payload(wrb);
1430 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1431 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1432 status = be_mcc_notify_wait(adapter);
1434 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1435 strcpy(fw_ver, resp->firmware_version_string);
1437 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1440 spin_unlock_bh(&adapter->mcc_lock);
1444 /* Set the EQ delay interval of an EQ to the specified value
1447 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1449 struct be_mcc_wrb *wrb;
1450 struct be_cmd_req_modify_eq_delay *req;
1453 spin_lock_bh(&adapter->mcc_lock);
1455 wrb = wrb_from_mccq(adapter);
1460 req = embedded_payload(wrb);
1462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1463 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1465 req->num_eq = cpu_to_le32(1);
1466 req->delay[0].eq_id = cpu_to_le32(eq_id);
1467 req->delay[0].phase = 0;
1468 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1470 be_mcc_notify(adapter);
1473 spin_unlock_bh(&adapter->mcc_lock);
1477 /* Uses synchronous mcc */
1478 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1479 u32 num, bool untagged, bool promiscuous)
1481 struct be_mcc_wrb *wrb;
1482 struct be_cmd_req_vlan_config *req;
1485 spin_lock_bh(&adapter->mcc_lock);
1487 wrb = wrb_from_mccq(adapter);
1492 req = embedded_payload(wrb);
1494 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1495 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1497 req->interface_id = if_id;
1498 req->promiscuous = promiscuous;
1499 req->untagged = untagged;
1500 req->num_vlan = num;
1502 memcpy(req->normal_vlan, vtag_array,
1503 req->num_vlan * sizeof(vtag_array[0]));
1506 status = be_mcc_notify_wait(adapter);
1509 spin_unlock_bh(&adapter->mcc_lock);
1513 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1515 struct be_mcc_wrb *wrb;
1516 struct be_dma_mem *mem = &adapter->rx_filter;
1517 struct be_cmd_req_rx_filter *req = mem->va;
1520 spin_lock_bh(&adapter->mcc_lock);
1522 wrb = wrb_from_mccq(adapter);
1527 memset(req, 0, sizeof(*req));
1528 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1529 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1532 req->if_id = cpu_to_le32(adapter->if_handle);
1533 if (flags & IFF_PROMISC) {
1534 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1535 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1537 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1538 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1539 } else if (flags & IFF_ALLMULTI) {
1540 req->if_flags_mask = req->if_flags =
1541 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1543 struct netdev_hw_addr *ha;
1546 req->if_flags_mask = req->if_flags =
1547 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1549 /* Reset mcast promisc mode if it was already set, by setting the
1550 * mask bit and not setting the flags field
1552 req->if_flags_mask |=
1553 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1555 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1556 netdev_for_each_mc_addr(ha, adapter->netdev)
1557 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1560 status = be_mcc_notify_wait(adapter);
1562 spin_unlock_bh(&adapter->mcc_lock);
1566 /* Uses synchronous mcc */
1567 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1569 struct be_mcc_wrb *wrb;
1570 struct be_cmd_req_set_flow_control *req;
1573 spin_lock_bh(&adapter->mcc_lock);
1575 wrb = wrb_from_mccq(adapter);
1580 req = embedded_payload(wrb);
1582 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1583 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1585 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1586 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1588 status = be_mcc_notify_wait(adapter);
1591 spin_unlock_bh(&adapter->mcc_lock);
1596 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1598 struct be_mcc_wrb *wrb;
1599 struct be_cmd_req_get_flow_control *req;
1602 spin_lock_bh(&adapter->mcc_lock);
1604 wrb = wrb_from_mccq(adapter);
1609 req = embedded_payload(wrb);
1611 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1612 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1614 status = be_mcc_notify_wait(adapter);
1616 struct be_cmd_resp_get_flow_control *resp =
1617 embedded_payload(wrb);
1618 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1619 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1623 spin_unlock_bh(&adapter->mcc_lock);
1628 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1629 u32 *mode, u32 *caps)
1631 struct be_mcc_wrb *wrb;
1632 struct be_cmd_req_query_fw_cfg *req;
1635 if (mutex_lock_interruptible(&adapter->mbox_lock))
1638 wrb = wrb_from_mbox(adapter);
1639 req = embedded_payload(wrb);
1641 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1642 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1644 status = be_mbox_notify_wait(adapter);
1646 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1647 *port_num = le32_to_cpu(resp->phys_port);
1648 *mode = le32_to_cpu(resp->function_mode);
1649 *caps = le32_to_cpu(resp->function_caps);
1652 mutex_unlock(&adapter->mbox_lock);
1657 int be_cmd_reset_function(struct be_adapter *adapter)
1659 struct be_mcc_wrb *wrb;
1660 struct be_cmd_req_hdr *req;
1663 if (mutex_lock_interruptible(&adapter->mbox_lock))
1666 wrb = wrb_from_mbox(adapter);
1667 req = embedded_payload(wrb);
1669 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1670 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1672 status = be_mbox_notify_wait(adapter);
1674 mutex_unlock(&adapter->mbox_lock);
1678 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1680 struct be_mcc_wrb *wrb;
1681 struct be_cmd_req_rss_config *req;
1682 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1683 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1684 0x3ea83c02, 0x4a110304};
1687 if (mutex_lock_interruptible(&adapter->mbox_lock))
1690 wrb = wrb_from_mbox(adapter);
1691 req = embedded_payload(wrb);
1693 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1694 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1696 req->if_id = cpu_to_le32(adapter->if_handle);
1697 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1698 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
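	/* Example (illustration only): an RSS indirection table of
	 * table_size = 128 entries encodes as fls(128) - 1 = 7,
	 * i.e. log2 of the table size.
	 */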
1699 memcpy(req->cpu_table, rsstable, table_size);
1700 memcpy(req->hash, myhash, sizeof(myhash));
1701 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1703 status = be_mbox_notify_wait(adapter);
1705 mutex_unlock(&adapter->mbox_lock);
1710 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1711 u8 bcn, u8 sts, u8 state)
1713 struct be_mcc_wrb *wrb;
1714 struct be_cmd_req_enable_disable_beacon *req;
1717 spin_lock_bh(&adapter->mcc_lock);
1719 wrb = wrb_from_mccq(adapter);
1724 req = embedded_payload(wrb);
1726 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1727 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1729 req->port_num = port_num;
1730 req->beacon_state = state;
1731 req->beacon_duration = bcn;
1732 req->status_duration = sts;
1734 status = be_mcc_notify_wait(adapter);
1737 spin_unlock_bh(&adapter->mcc_lock);
1742 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1744 struct be_mcc_wrb *wrb;
1745 struct be_cmd_req_get_beacon_state *req;
1748 spin_lock_bh(&adapter->mcc_lock);
1750 wrb = wrb_from_mccq(adapter);
1755 req = embedded_payload(wrb);
1757 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1758 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
1760 req->port_num = port_num;
1762 status = be_mcc_notify_wait(adapter);
1764 struct be_cmd_resp_get_beacon_state *resp =
1765 embedded_payload(wrb);
1766 *state = resp->beacon_state;
1770 spin_unlock_bh(&adapter->mcc_lock);
1774 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1775 u32 data_size, u32 data_offset, const char *obj_name,
1776 u32 *data_written, u8 *addn_status)
1778 struct be_mcc_wrb *wrb;
1779 struct lancer_cmd_req_write_object *req;
1780 struct lancer_cmd_resp_write_object *resp;
1784 spin_lock_bh(&adapter->mcc_lock);
1785 adapter->flash_status = 0;
1787 wrb = wrb_from_mccq(adapter);
1793 req = embedded_payload(wrb);
1795 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1796 OPCODE_COMMON_WRITE_OBJECT,
1797 sizeof(struct lancer_cmd_req_write_object), wrb,
1800 ctxt = &req->context;
1801 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1802 write_length, ctxt, data_size);
1805 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1808 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1811 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1812 req->write_offset = cpu_to_le32(data_offset);
1813 strcpy(req->object_name, obj_name);
1814 req->descriptor_count = cpu_to_le32(1);
1815 req->buf_len = cpu_to_le32(data_size);
1816 req->addr_low = cpu_to_le32((cmd->dma +
1817 sizeof(struct lancer_cmd_req_write_object))
1819 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1820 sizeof(struct lancer_cmd_req_write_object)));
1822 be_mcc_notify(adapter);
1823 spin_unlock_bh(&adapter->mcc_lock);
1825 if (!wait_for_completion_timeout(&adapter->flash_compl,
1826 msecs_to_jiffies(12000)))
1829 status = adapter->flash_status;
1831 resp = embedded_payload(wrb);
1833 *data_written = le32_to_cpu(resp->actual_write_len);
1835 *addn_status = resp->additional_status;
1836 status = resp->status;
1842 spin_unlock_bh(&adapter->mcc_lock);
1846 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1847 u32 data_size, u32 data_offset, const char *obj_name,
1848 u32 *data_read, u32 *eof, u8 *addn_status)
1850 struct be_mcc_wrb *wrb;
1851 struct lancer_cmd_req_read_object *req;
1852 struct lancer_cmd_resp_read_object *resp;
1855 spin_lock_bh(&adapter->mcc_lock);
1857 wrb = wrb_from_mccq(adapter);
1863 req = embedded_payload(wrb);
1865 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1866 OPCODE_COMMON_READ_OBJECT,
1867 sizeof(struct lancer_cmd_req_read_object), wrb,
1870 req->desired_read_len = cpu_to_le32(data_size);
1871 req->read_offset = cpu_to_le32(data_offset);
1872 strcpy(req->object_name, obj_name);
1873 req->descriptor_count = cpu_to_le32(1);
1874 req->buf_len = cpu_to_le32(data_size);
1875 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
1876 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
1878 status = be_mcc_notify_wait(adapter);
1880 resp = embedded_payload(wrb);
1882 *data_read = le32_to_cpu(resp->actual_read_len);
1883 *eof = le32_to_cpu(resp->eof);
1885 *addn_status = resp->additional_status;
1889 spin_unlock_bh(&adapter->mcc_lock);
1893 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1894 u32 flash_type, u32 flash_opcode, u32 buf_size)
1896 struct be_mcc_wrb *wrb;
1897 struct be_cmd_write_flashrom *req;
1900 spin_lock_bh(&adapter->mcc_lock);
1901 adapter->flash_status = 0;
1903 wrb = wrb_from_mccq(adapter);
1910 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1911 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
1913 req->params.op_type = cpu_to_le32(flash_type);
1914 req->params.op_code = cpu_to_le32(flash_opcode);
1915 req->params.data_buf_size = cpu_to_le32(buf_size);
1917 be_mcc_notify(adapter);
1918 spin_unlock_bh(&adapter->mcc_lock);
1920 if (!wait_for_completion_timeout(&adapter->flash_compl,
1921 msecs_to_jiffies(40000)))
1924 status = adapter->flash_status;
1929 spin_unlock_bh(&adapter->mcc_lock);
1933 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1936 struct be_mcc_wrb *wrb;
1937 struct be_cmd_write_flashrom *req;
1940 spin_lock_bh(&adapter->mcc_lock);
1942 wrb = wrb_from_mccq(adapter);
1947 req = embedded_payload(wrb);
1949 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1950 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
1952 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1953 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1954 req->params.offset = cpu_to_le32(offset);
1955 req->params.data_buf_size = cpu_to_le32(0x4);
1957 status = be_mcc_notify_wait(adapter);
1959 memcpy(flashed_crc, req->params.data_buf, 4);
1962 spin_unlock_bh(&adapter->mcc_lock);
1966 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1967 struct be_dma_mem *nonemb_cmd)
1969 struct be_mcc_wrb *wrb;
1970 struct be_cmd_req_acpi_wol_magic_config *req;
1973 spin_lock_bh(&adapter->mcc_lock);
1975 wrb = wrb_from_mccq(adapter);
1980 req = nonemb_cmd->va;
1982 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1983 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
1985 memcpy(req->magic_mac, mac, ETH_ALEN);
1987 status = be_mcc_notify_wait(adapter);
1990 spin_unlock_bh(&adapter->mcc_lock);
1994 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1995 u8 loopback_type, u8 enable)
1997 struct be_mcc_wrb *wrb;
1998 struct be_cmd_req_set_lmode *req;
2001 spin_lock_bh(&adapter->mcc_lock);
2003 wrb = wrb_from_mccq(adapter);
2009 req = embedded_payload(wrb);
2011 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2012 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2015 req->src_port = port_num;
2016 req->dest_port = port_num;
2017 req->loopback_type = loopback_type;
2018 req->loopback_state = enable;
2020 status = be_mcc_notify_wait(adapter);
2022 spin_unlock_bh(&adapter->mcc_lock);
2026 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2027 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2029 struct be_mcc_wrb *wrb;
2030 struct be_cmd_req_loopback_test *req;
2033 spin_lock_bh(&adapter->mcc_lock);
2035 wrb = wrb_from_mccq(adapter);
2041 req = embedded_payload(wrb);
2043 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2044 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2045 req->hdr.timeout = cpu_to_le32(4);
2047 req->pattern = cpu_to_le64(pattern);
2048 req->src_port = cpu_to_le32(port_num);
2049 req->dest_port = cpu_to_le32(port_num);
2050 req->pkt_size = cpu_to_le32(pkt_size);
2051 req->num_pkts = cpu_to_le32(num_pkts);
2052 req->loopback_type = cpu_to_le32(loopback_type);
2054 status = be_mcc_notify_wait(adapter);
2056 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2057 status = le32_to_cpu(resp->status);
2061 spin_unlock_bh(&adapter->mcc_lock);
2065 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2066 u32 byte_cnt, struct be_dma_mem *cmd)
2068 struct be_mcc_wrb *wrb;
2069 struct be_cmd_req_ddrdma_test *req;
2073 spin_lock_bh(&adapter->mcc_lock);
2075 wrb = wrb_from_mccq(adapter);
2081 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2082 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2084 req->pattern = cpu_to_le64(pattern);
2085 req->byte_count = cpu_to_le32(byte_cnt);
2086 for (i = 0; i < byte_cnt; i++) {
2087 req->snd_buff[i] = (u8)(pattern >> (j*8));
2093 status = be_mcc_notify_wait(adapter);
2096 struct be_cmd_resp_ddrdma_test *resp;
2098 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2105 spin_unlock_bh(&adapter->mcc_lock);
2109 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2110 struct be_dma_mem *nonemb_cmd)
2112 struct be_mcc_wrb *wrb;
2113 struct be_cmd_req_seeprom_read *req;
2117 spin_lock_bh(&adapter->mcc_lock);
2119 wrb = wrb_from_mccq(adapter);
2124 req = nonemb_cmd->va;
2125 sge = nonembedded_sgl(wrb);
2127 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2128 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2131 status = be_mcc_notify_wait(adapter);
2134 spin_unlock_bh(&adapter->mcc_lock);
2138 int be_cmd_get_phy_info(struct be_adapter *adapter,
2139 struct be_phy_info *phy_info)
2141 struct be_mcc_wrb *wrb;
2142 struct be_cmd_req_get_phy_info *req;
2143 struct be_dma_mem cmd;
2146 spin_lock_bh(&adapter->mcc_lock);
2148 wrb = wrb_from_mccq(adapter);
2153 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2154 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2157 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2164 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2165 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2168 status = be_mcc_notify_wait(adapter);
2170 struct be_phy_info *resp_phy_info =
2171 cmd.va + sizeof(struct be_cmd_req_hdr);
2172 phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
2173 phy_info->interface_type =
2174 le16_to_cpu(resp_phy_info->interface_type);
2176 pci_free_consistent(adapter->pdev, cmd.size,
2179 spin_unlock_bh(&adapter->mcc_lock);
2183 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2185 struct be_mcc_wrb *wrb;
2186 struct be_cmd_req_set_qos *req;
2189 spin_lock_bh(&adapter->mcc_lock);
2191 wrb = wrb_from_mccq(adapter);
2197 req = embedded_payload(wrb);
2199 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2200 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2202 req->hdr.domain = domain;
2203 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2204 req->max_bps_nic = cpu_to_le32(bps);
2206 status = be_mcc_notify_wait(adapter);
2209 spin_unlock_bh(&adapter->mcc_lock);
2213 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2215 struct be_mcc_wrb *wrb;
2216 struct be_cmd_req_cntl_attribs *req;
2217 struct be_cmd_resp_cntl_attribs *resp;
2219 int payload_len = max(sizeof(*req), sizeof(*resp));
2220 struct mgmt_controller_attrib *attribs;
2221 struct be_dma_mem attribs_cmd;
2223 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2224 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2225 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2227 if (!attribs_cmd.va) {
2228 dev_err(&adapter->pdev->dev,
2229 "Memory allocation failure\n");
2233 if (mutex_lock_interruptible(&adapter->mbox_lock))
2236 wrb = wrb_from_mbox(adapter);
2241 req = attribs_cmd.va;
2243 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2244 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2247 status = be_mbox_notify_wait(adapter);
2249 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2250 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2254 mutex_unlock(&adapter->mbox_lock);
2255 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2261 int be_cmd_req_native_mode(struct be_adapter *adapter)
2263 struct be_mcc_wrb *wrb;
2264 struct be_cmd_req_set_func_cap *req;
2267 if (mutex_lock_interruptible(&adapter->mbox_lock))
2270 wrb = wrb_from_mbox(adapter);
2276 req = embedded_payload(wrb);
2278 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2279 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2281 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2282 CAPABILITY_BE3_NATIVE_ERX_API);
2283 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2285 status = be_mbox_notify_wait(adapter);
2287 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2288 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2289 CAPABILITY_BE3_NATIVE_ERX_API;
2292 mutex_unlock(&adapter->mbox_lock);
2296 /* Uses synchronous MCCQ */
2297 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
2298 bool *pmac_id_active, u32 *pmac_id, u8 *mac)
2300 struct be_mcc_wrb *wrb;
2301 struct be_cmd_req_get_mac_list *req;
2304 struct be_dma_mem get_mac_list_cmd;
2307 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2308 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2309 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2310 get_mac_list_cmd.size,
2311 &get_mac_list_cmd.dma);
2313 if (!get_mac_list_cmd.va) {
2314 dev_err(&adapter->pdev->dev,
2315 "Memory allocation failure during GET_MAC_LIST\n");
2319 spin_lock_bh(&adapter->mcc_lock);
2321 wrb = wrb_from_mccq(adapter);
2327 req = get_mac_list_cmd.va;
2329 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2330 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2331 wrb, &get_mac_list_cmd);
2333 req->hdr.domain = domain;
2334 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2335 req->perm_override = 1;
2337 status = be_mcc_notify_wait(adapter);
2339 struct be_cmd_resp_get_mac_list *resp =
2340 get_mac_list_cmd.va;
2341 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2342 /* Mac list returned could contain one or more active mac_ids
2343 * or one or more pseudo permanent mac addresses. If an active
2344 * mac_id is present, return the first active mac_id found
2346 for (i = 0; i < mac_count; i++) {
2347 struct get_list_macaddr *mac_entry;
2351 mac_entry = &resp->macaddr_list[i];
2352 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2353 /* mac_id is a 32 bit value and mac_addr size
2356 if (mac_addr_size == sizeof(u32)) {
2357 *pmac_id_active = true;
2358 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2359 *pmac_id = le32_to_cpu(mac_id);
2363 /* If no active mac_id found, return first pseudo mac addr */
2364 *pmac_id_active = false;
2365 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2370 spin_unlock_bh(&adapter->mcc_lock);
2371 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2372 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2376 /* Uses synchronous MCCQ */
2377 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2378 u8 mac_count, u32 domain)
2380 struct be_mcc_wrb *wrb;
2381 struct be_cmd_req_set_mac_list *req;
2383 struct be_dma_mem cmd;
2385 memset(&cmd, 0, sizeof(struct be_dma_mem));
2386 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2387 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2388 &cmd.dma, GFP_KERNEL);
2390 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2394 spin_lock_bh(&adapter->mcc_lock);
2396 wrb = wrb_from_mccq(adapter);
2403 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2404 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2407 req->hdr.domain = domain;
2408 req->mac_count = mac_count;
2410 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2412 status = be_mcc_notify_wait(adapter);
2415 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2417 spin_unlock_bh(&adapter->mcc_lock);