/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

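/*
 * Readers that cannot hold cnic_lock use the RCU side of the same table
 * instead.  A minimal sketch of that pattern (illustrative only; see
 * cnic_send_nlmsg() below for a real user):
 *
 *	rcu_read_lock();
 *	ulp_ops = rcu_dereference(cnic_ulp_tbl[type]);
 *	if (ulp_ops)
 *		rc = ulp_ops->iscsi_nl_send_msg(handle, msg_type, buf, len);
 *	rcu_read_unlock();
 *
 * cnic_ulp_tbl_prot() is the writer-side variant: lockdep checks that
 * cnic_lock is actually held instead of requiring an RCU read section.
 */
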
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;

	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);

	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

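/*
 * SK_F_OFFLD_SCHED acts as a hand-off bit between these prep helpers and
 * the offload completion path: the spin-wait above returns only once this
 * CPU owns the bit, so close/abort processing cannot race with an offload
 * completion in flight.  Illustrative caller-side sketch (not driver code):
 *
 *	if (cnic_abort_prep(csk))
 *		... issue the RESET request; this path now owns the bit ...
 */
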
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	else if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

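/*
 * Illustrative round trip (not driver code), assuming a table created with
 * start_id = 0x80 and a power-of-two size of 4, covering ids 0x80..0x83:
 *
 *	u32 cid = cnic_alloc_new_id(&cp->cid_tbl);
 *	if (cid == -1)
 *		return -ENOMEM;		(bitmap exhausted)
 *	...
 *	cnic_free_id(&cp->cid_tbl, cid);
 *
 * id_tbl->next makes the scan round-robin, so a just-freed id is not
 * immediately handed out again; the "& (max - 1)" wrap above assumes the
 * table size is a power of two.
 */
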
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

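/*
 * Worked example for the two helpers above: a page mapped at DMA address
 * 0x123456000 (illustrative) becomes two 32-bit words in the table.
 * cnic_setup_page_tbl() stores the high word first:
 *
 *	entry[0] = 0x00000001;	upper 32 bits
 *	entry[1] = 0x23456000;	lower 32 bits
 *
 * cnic_setup_page_tbl_le() stores the same words in the opposite order.
 * Both emit cpu_to_le32() words; only the word order differs.
 */
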
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

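/*
 * Example of the index convention above: on bnx2x each KCQ page holds
 * MAX_KCQE_CNT usable completion entries plus one link element, so the
 * software index must skip the last slot of every page.  With a 4K page
 * and MAX_KCQE_CNT == 127 (illustrative), cnic_bnx2x_next_idx(126)
 * returns 128: slot 127 is the "next page" pointer that cnic_alloc_kcq()
 * chains below.  The bnx2 variants are pass-through because those queues
 * use a separate page table instead of in-band link entries.
 */
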
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
err_udev:
	kfree(udev);
	return -ENOMEM;
}

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

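/*
 * Worked example: with max_kwq_idx == 255 (illustrative) and a
 * producer/consumer pair of kwq_prod_idx = 260, kwq_con_idx = 250,
 * (260 - 250) & 255 = 10 entries are in flight, leaving 255 - 10 = 245
 * free slots.  The masked subtraction stays correct when the u16
 * producer index wraps around 65535.
 */
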
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
				u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

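/*
 * Typical calling sequence (see cnic_bnx2x_iscsi_update() below for a
 * real instance): stage the ramrod payload in the per-cid kwqe data area,
 * then pass its DMA address down via l5_data:
 *
 *	union l5cm_specific_data l5_data;
 *	void *data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
 *
 *	if (!data)
 *		return -ENOMEM;
 *	memcpy(data, kwqe, sizeof(struct kwqe));
 *	ret = cnic_submit_kwqe_16(dev, cmd, cid, type, &l5_data);
 */
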
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

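/*
 * Worked example: with starting_cid = 0x80 and cids_per_blk = 64
 * (illustrative values), cid 0xC5 gives blk = (0xC5 - 0x80) / 64 = 1 and
 * off = (0xC5 - 0x80) % 64 = 5, i.e. the sixth context within the second
 * DMA block, at byte offset 5 * BNX2X_CONTEXT_MEM_SIZE plus any alignment
 * padding computed above.
 */
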
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = CNIC_PORT(cp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    cp->port_mode == CHIP_2_PORT_MODE) {

		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		/* Advance to the next offload3 WQE when its PTE array is
		 * exhausted.
		 */
		if (j == ARRAY_SIZE(req3->qp_first_pte)) {
			if (n >= n_max)
				break;

			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);

	return 0;
}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

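/*
 * KWQE layout consumed by cnic_bnx2x_connect() above: an IPv4 connect
 * arrives as { req1, req3 } (*work == 2), while IPv6 inserts an
 * l4_kwq_connect_req2 carrying the upper address words in between:
 * { req1, req2, req3 } (*work == 3).  kwqe3 is therefore always
 * wqes[*work - 1].
 */
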
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}

2289 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2293 u32 cid = -1, l5_cid;
2294 struct cnic_local *cp = dev->cnic_priv;
2295 struct fcoe_kwqe_conn_offload1 *req1;
2296 struct fcoe_kwqe_conn_offload2 *req2;
2297 struct fcoe_kwqe_conn_offload3 *req3;
2298 struct fcoe_kwqe_conn_offload4 *req4;
2299 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2300 struct cnic_context *ctx;
2301 struct fcoe_context *fctx;
2302 struct regpair ctx_addr;
2303 union l5cm_specific_data l5_data;
2304 struct fcoe_kcqe kcqe;
2305 struct kcqe *cqes[1];
2311 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2312 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2313 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2314 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2318 l5_cid = req1->fcoe_conn_id;
2319 if (l5_cid >= dev->max_fcoe_conn)
2320 goto err_reply;
2322 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2324 ctx = &cp->ctx_tbl[l5_cid];
2325 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2328 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2335 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2336 if (fctx) {
2337 u32 hw_cid = BNX2X_HW_CID(cp, cid);
2338 u32 val;
2340 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2341 FCOE_CONNECTION_TYPE);
2342 fctx->xstorm_ag_context.cdu_reserved = val;
2343 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2344 FCOE_CONNECTION_TYPE);
2345 fctx->ustorm_ag_context.cdu_usage = val;
2346 }
2347 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2348 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2351 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2355 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2356 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2357 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2358 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2359 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2361 cid = BNX2X_HW_CID(cp, cid);
2362 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2363 FCOE_CONNECTION_TYPE, &l5_data);
2365 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2371 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2373 memset(&kcqe, 0, sizeof(kcqe));
2374 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2375 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2376 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2378 cqes[0] = (struct kcqe *) &kcqe;
2379 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2383 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2385 struct fcoe_kwqe_conn_enable_disable *req;
2386 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2387 union l5cm_specific_data l5_data;
2390 struct cnic_local *cp = dev->cnic_priv;
2392 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2393 cid = req->context_id;
2394 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2396 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2397 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2400 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2404 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2405 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2406 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2407 FCOE_CONNECTION_TYPE, &l5_data);
2411 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2413 struct fcoe_kwqe_conn_enable_disable *req;
2414 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2415 union l5cm_specific_data l5_data;
2418 struct cnic_local *cp = dev->cnic_priv;
2420 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2421 cid = req->context_id;
2422 l5_cid = req->conn_id;
2423 if (l5_cid >= dev->max_fcoe_conn)
2424 return -EINVAL;
2426 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2428 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2429 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2432 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2436 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2437 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2438 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2439 FCOE_CONNECTION_TYPE, &l5_data);
2443 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2445 struct fcoe_kwqe_conn_destroy *req;
2446 union l5cm_specific_data l5_data;
2449 struct cnic_local *cp = dev->cnic_priv;
2450 struct cnic_context *ctx;
2451 struct fcoe_kcqe kcqe;
2452 struct kcqe *cqes[1];
2454 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2455 cid = req->context_id;
2456 l5_cid = req->conn_id;
2457 if (l5_cid >= dev->max_fcoe_conn)
2458 return -EINVAL;
2460 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2462 ctx = &cp->ctx_tbl[l5_cid];
2464 init_waitqueue_head(&ctx->waitq);
2467 memset(&kcqe, 0, sizeof(kcqe));
2468 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2469 memset(&l5_data, 0, sizeof(l5_data));
2470 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2471 FCOE_CONNECTION_TYPE, &l5_data);
2472 if (ret == 0) {
2473 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2474 if (ctx->wait_cond)
2475 kcqe.completion_status = 0;
2476 }
2478 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2479 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2481 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2482 kcqe.fcoe_conn_id = req->conn_id;
2483 kcqe.fcoe_conn_context_id = cid;
2485 cqes[0] = (struct kcqe *) &kcqe;
2486 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
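/* Wait for contexts at or above start_cid that are marked
 * CTX_FL_DELETE_WAIT to be reclaimed by the delete_task worker
 * before continuing with teardown.
 */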
2490 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2492 struct cnic_local *cp = dev->cnic_priv;
2495 for (i = start_cid; i < cp->max_cid_space; i++) {
2496 struct cnic_context *ctx = &cp->ctx_tbl[i];
2499 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2502 for (j = 0; j < 5; j++) {
2503 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2508 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2509 netdev_warn(dev->netdev, "CID %x not deleted\n",
2514 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2516 struct fcoe_kwqe_destroy *req;
2517 union l5cm_specific_data l5_data;
2518 struct cnic_local *cp = dev->cnic_priv;
2522 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2524 req = (struct fcoe_kwqe_destroy *) kwqe;
2525 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2527 memset(&l5_data, 0, sizeof(l5_data));
2528 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2529 FCOE_CONNECTION_TYPE, &l5_data);
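/* A KWQE failed to submit, possibly due to a bnx2x parity error.
 * Synthesize an error KCQE in the matching FCoE, iSCSI, or L4 reply
 * format so the upper-layer driver can clean up without waiting for
 * a hardware completion that will never arrive.
 */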
2533 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2535 struct cnic_local *cp = dev->cnic_priv;
2537 struct kcqe *cqes[1];
2539 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2540 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2544 cid = kwqe->kwqe_info0;
2545 memset(&kcqe, 0, sizeof(kcqe));
2547 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2550 ulp_type = CNIC_ULP_FCOE;
2551 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2552 struct fcoe_kwqe_conn_enable_disable *req;
2554 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2555 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2556 cid = req->context_id;
2557 l5_cid = req->conn_id;
2558 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2559 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2563 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2564 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2565 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2566 kcqe.kcqe_info2 = cid;
2567 kcqe.kcqe_info0 = l5_cid;
2569 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2570 ulp_type = CNIC_ULP_ISCSI;
2571 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2572 cid = kwqe->kwqe_info1;
2574 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2575 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2576 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2577 kcqe.kcqe_info2 = cid;
2578 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2580 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2581 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2583 ulp_type = CNIC_ULP_L4;
2584 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2585 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2586 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2587 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2588 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2589 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2593 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2594 KCQE_FLAGS_LAYER_MASK_L4;
2595 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2597 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2602 cqes[0] = &kcqe;
2603 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2606 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2607 struct kwqe *wqes[], u32 num_wqes)
2613 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2614 return -EAGAIN; /* bnx2x is down */
2616 for (i = 0; i < num_wqes; ) {
2617 kwqe = wqes[i];
2618 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2619 work = 1;
2621 switch (opcode) {
2622 case ISCSI_KWQE_OPCODE_INIT1:
2623 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2625 case ISCSI_KWQE_OPCODE_INIT2:
2626 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2628 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2629 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2630 num_wqes - i, &work);
2632 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2633 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2635 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2636 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2638 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2639 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2642 case L4_KWQE_OPCODE_VALUE_CLOSE:
2643 ret = cnic_bnx2x_close(dev, kwqe);
2645 case L4_KWQE_OPCODE_VALUE_RESET:
2646 ret = cnic_bnx2x_reset(dev, kwqe);
2648 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2649 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2651 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2652 ret = cnic_bnx2x_update_pg(dev, kwqe);
2654 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2659 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2664 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2667 /* Possibly bnx2x parity error, send completion
2668 * to ulp drivers with error code to speed up
2669 * cleanup and reset recovery.
2671 if (ret == -EIO || ret == -EAGAIN)
2672 cnic_bnx2x_kwqe_err(dev, kwqe);
2679 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2680 struct kwqe *wqes[], u32 num_wqes)
2682 struct cnic_local *cp = dev->cnic_priv;
2687 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2688 return -EAGAIN; /* bnx2x is down */
2690 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
2693 for (i = 0; i < num_wqes; ) {
2694 kwqe = wqes[i];
2695 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2696 work = 1;
2698 switch (opcode) {
2699 case FCOE_KWQE_OPCODE_INIT1:
2700 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2701 num_wqes - i, &work);
2703 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2704 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2705 num_wqes - i, &work);
2707 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2708 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2710 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2711 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2713 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2714 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2716 case FCOE_KWQE_OPCODE_DESTROY:
2717 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2719 case FCOE_KWQE_OPCODE_STAT:
2720 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2724 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2729 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2732 /* Possibly bnx2x parity error, send completion
2733 * to ulp drivers with error code to speed up
2734 * cleanup and reset recovery.
2736 if (ret == -EIO || ret == -EAGAIN)
2737 cnic_bnx2x_kwqe_err(dev, kwqe);
2744 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2750 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2751 return -EAGAIN; /* bnx2x is down */
2756 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2757 switch (layer_code) {
2758 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2759 case KWQE_FLAGS_LAYER_MASK_L4:
2760 case KWQE_FLAGS_LAYER_MASK_L2:
2761 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2764 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2765 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2771 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2773 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2774 return KCQE_FLAGS_LAYER_MASK_L4;
2776 return opflag & KCQE_FLAGS_LAYER_MASK;
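/* Dispatch completed KCQEs to the upper-layer drivers. Consecutive
 * KCQEs that belong to the same protocol layer are batched into one
 * indicate_kcqes() call, and ramrod completions are counted so the
 * SPQ credits can be returned to the bnx2x driver afterwards.
 */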
2779 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2781 struct cnic_local *cp = dev->cnic_priv;
2787 struct cnic_ulp_ops *ulp_ops;
2789 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2790 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2792 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2795 while (j < num_cqes) {
2796 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2798 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2801 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2806 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2807 ulp_type = CNIC_ULP_RDMA;
2808 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2809 ulp_type = CNIC_ULP_ISCSI;
2810 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2811 ulp_type = CNIC_ULP_FCOE;
2812 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2813 ulp_type = CNIC_ULP_L4;
2814 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2817 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2823 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2824 if (likely(ulp_ops)) {
2825 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2826 cp->completed_kcq + i, j);
2835 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
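/* Gather new KCQEs from the queue into cp->completed_kcq[]. Entries
 * flagged KCQE_FLAGS_NEXT continue a multi-KCQE group, so the
 * software producer index is only advanced past complete groups.
 */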
2838 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2840 struct cnic_local *cp = dev->cnic_priv;
2841 u16 i, ri, hw_prod, last;
2842 struct kcqe *kcqe;
2843 int kcqe_cnt = 0, last_cnt = 0;
2845 i = ri = last = info->sw_prod_idx;
2847 hw_prod = *info->hw_prod_idx_ptr;
2848 hw_prod = info->hw_idx(hw_prod);
2850 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2851 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2852 cp->completed_kcq[kcqe_cnt++] = kcqe;
2853 i = info->next_idx(i);
2854 ri = i & MAX_KCQ_IDX;
2855 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2856 last_cnt = kcqe_cnt;
2857 last = i;
2861 info->sw_prod_idx = last;
2865 static int cnic_l2_completion(struct cnic_local *cp)
2867 u16 hw_cons, sw_cons;
2868 struct cnic_uio_dev *udev = cp->udev;
2869 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2870 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
2874 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2877 hw_cons = *cp->rx_cons_ptr;
2878 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2881 sw_cons = cp->rx_cons;
2882 while (sw_cons != hw_cons) {
2885 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2886 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2887 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2888 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2889 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2890 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2891 cmd == RAMROD_CMD_ID_ETH_HALT)
2894 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
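/* Check the uio-mapped L2 rings for new completions, pick up ramrod
 * completions while an L2 client setup or halt is pending, and
 * notify the userspace driver through the uio event interface.
 */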
2899 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2901 u16 rx_cons, tx_cons;
2904 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2907 rx_cons = *cp->rx_cons_ptr;
2908 tx_cons = *cp->tx_cons_ptr;
2909 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2910 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2911 comp = cnic_l2_completion(cp);
2913 cp->tx_cons = tx_cons;
2914 cp->rx_cons = rx_cons;
2917 uio_event_notify(&cp->udev->cnic_uinfo);
2920 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2923 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2925 struct cnic_local *cp = dev->cnic_priv;
2926 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2929 /* status block index must be read before reading other fields */
2930 rmb();
2931 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2933 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2935 service_kcqes(dev, kcqe_cnt);
2937 /* Tell compiler that status_blk fields can change. */
2938 barrier();
2939 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2940 /* status block index must be read first */
2941 rmb();
2942 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2945 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2947 cnic_chk_pkt_rings(cp);
2952 static int cnic_service_bnx2(void *data, void *status_blk)
2954 struct cnic_dev *dev = data;
2956 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2957 struct status_block *sblk = status_blk;
2959 return sblk->status_idx;
2962 return cnic_service_bnx2_queues(dev);
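/* MSI-X tasklet: service the KCQ, then re-arm the interrupt by
 * acking the last status block index through PCICFG_INT_ACK_CMD.
 */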
2965 static void cnic_service_bnx2_msix(unsigned long data)
2967 struct cnic_dev *dev = (struct cnic_dev *) data;
2968 struct cnic_local *cp = dev->cnic_priv;
2970 cp->last_status_idx = cnic_service_bnx2_queues(dev);
2972 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2973 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2976 static void cnic_doirq(struct cnic_dev *dev)
2978 struct cnic_local *cp = dev->cnic_priv;
2980 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2981 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2983 prefetch(cp->status_blk.gen);
2984 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2986 tasklet_schedule(&cp->cnic_irq_task);
2990 static irqreturn_t cnic_irq(int irq, void *dev_instance)
2992 struct cnic_dev *dev = dev_instance;
2993 struct cnic_local *cp = dev->cnic_priv;
3003 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3004 u16 index, u8 op, u8 update)
3006 struct cnic_local *cp = dev->cnic_priv;
3007 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
3008 COMMAND_REG_INT_ACK);
3009 struct igu_ack_register igu_ack;
3011 igu_ack.status_block_index = index;
3012 igu_ack.sb_id_and_flags =
3013 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3014 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3015 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3016 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3018 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3021 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3022 u16 index, u8 op, u8 update)
3024 struct igu_regular cmd_data;
3025 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3027 cmd_data.sb_id_and_flags =
3028 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
3029 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3030 (update << IGU_REGULAR_BUPDATE_SHIFT) |
3031 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
3034 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3037 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3039 struct cnic_local *cp = dev->cnic_priv;
3041 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3042 IGU_INT_DISABLE, 0);
3045 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3047 struct cnic_local *cp = dev->cnic_priv;
3049 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3050 IGU_INT_DISABLE, 0);
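/* Service one bnx2x KCQ until the status block index stops changing,
 * so that completions arriving mid-service are not missed.
 */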
3053 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3055 u32 last_status = *info->status_idx_ptr;
3058 /* status block index must be read before reading the KCQ */
3059 rmb();
3060 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3062 service_kcqes(dev, kcqe_cnt);
3064 /* Tell compiler that sblk fields can change. */
3065 barrier();
3067 last_status = *info->status_idx_ptr;
3068 /* status block index must be read before reading the KCQ */
3069 rmb();
3070 }
3071 return last_status;
3074 static void cnic_service_bnx2x_bh(unsigned long data)
3076 struct cnic_dev *dev = (struct cnic_dev *) data;
3077 struct cnic_local *cp = dev->cnic_priv;
3078 u32 status_idx, new_status_idx;
3080 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3084 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3086 CNIC_WR16(dev, cp->kcq1.io_addr,
3087 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3089 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
3090 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
3091 status_idx, IGU_INT_ENABLE, 1);
3095 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3097 if (new_status_idx != status_idx)
3100 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3103 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3104 status_idx, IGU_INT_ENABLE, 1);
3110 static int cnic_service_bnx2x(void *data, void *status_blk)
3112 struct cnic_dev *dev = data;
3113 struct cnic_local *cp = dev->cnic_priv;
3115 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3116 cnic_doirq(dev);
3118 cnic_chk_pkt_rings(cp);
3123 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3125 struct cnic_ulp_ops *ulp_ops;
3127 if (if_type == CNIC_ULP_ISCSI)
3128 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3130 mutex_lock(&cnic_lock);
3131 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3132 lockdep_is_held(&cnic_lock));
3134 mutex_unlock(&cnic_lock);
3137 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3138 mutex_unlock(&cnic_lock);
3140 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3141 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3143 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3146 static void cnic_ulp_stop(struct cnic_dev *dev)
3148 struct cnic_local *cp = dev->cnic_priv;
3151 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3152 cnic_ulp_stop_one(cp, if_type);
3155 static void cnic_ulp_start(struct cnic_dev *dev)
3157 struct cnic_local *cp = dev->cnic_priv;
3160 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3161 struct cnic_ulp_ops *ulp_ops;
3163 mutex_lock(&cnic_lock);
3164 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3165 lockdep_is_held(&cnic_lock));
3166 if (!ulp_ops || !ulp_ops->cnic_start) {
3167 mutex_unlock(&cnic_lock);
3170 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3171 mutex_unlock(&cnic_lock);
3173 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3174 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3176 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3180 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3182 struct cnic_local *cp = dev->cnic_priv;
3183 struct cnic_ulp_ops *ulp_ops;
3186 mutex_lock(&cnic_lock);
3187 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
3188 if (ulp_ops && ulp_ops->cnic_get_stats)
3189 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3192 mutex_unlock(&cnic_lock);
3196 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3198 struct cnic_dev *dev = data;
3199 int ulp_type = CNIC_ULP_ISCSI;
3201 switch (info->cmd) {
3202 case CNIC_CTL_STOP_CMD:
3210 case CNIC_CTL_START_CMD:
3213 if (!cnic_start_hw(dev))
3214 cnic_ulp_start(dev);
3218 case CNIC_CTL_STOP_ISCSI_CMD: {
3219 struct cnic_local *cp = dev->cnic_priv;
3220 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3221 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3224 case CNIC_CTL_COMPLETION_CMD: {
3225 struct cnic_ctl_completion *comp = &info->data.comp;
3226 u32 cid = BNX2X_SW_CID(comp->cid);
3228 struct cnic_local *cp = dev->cnic_priv;
3230 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3233 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3234 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3236 if (unlikely(comp->error)) {
3237 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3238 netdev_err(dev->netdev,
3239 "CID %x CFC delete comp error %x\n",
3244 wake_up(&ctx->waitq);
3248 case CNIC_CTL_FCOE_STATS_GET_CMD:
3249 ulp_type = CNIC_ULP_FCOE;
3251 case CNIC_CTL_ISCSI_STATS_GET_CMD:
3253 cnic_copy_ulp_stats(dev, ulp_type);
3263 static void cnic_ulp_init(struct cnic_dev *dev)
3266 struct cnic_local *cp = dev->cnic_priv;
3268 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3269 struct cnic_ulp_ops *ulp_ops;
3271 mutex_lock(&cnic_lock);
3272 ulp_ops = cnic_ulp_tbl_prot(i);
3273 if (!ulp_ops || !ulp_ops->cnic_init) {
3274 mutex_unlock(&cnic_lock);
3278 mutex_unlock(&cnic_lock);
3280 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3281 ulp_ops->cnic_init(dev);
3287 static void cnic_ulp_exit(struct cnic_dev *dev)
3290 struct cnic_local *cp = dev->cnic_priv;
3292 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3293 struct cnic_ulp_ops *ulp_ops;
3295 mutex_lock(&cnic_lock);
3296 ulp_ops = cnic_ulp_tbl_prot(i);
3297 if (!ulp_ops || !ulp_ops->cnic_exit) {
3298 mutex_unlock(&cnic_lock);
3302 mutex_unlock(&cnic_lock);
3304 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3305 ulp_ops->cnic_exit(dev);
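/* Build an OFFLOAD_PG KWQE describing the L2 path to the peer:
 * destination and source MAC addresses, an optional VLAN tag, and
 * the Ethernet header size the chip prepends to TCP segments.
 */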
3311 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3313 struct cnic_dev *dev = csk->dev;
3314 struct l4_kwq_offload_pg *l4kwqe;
3315 struct kwqe *wqes[1];
3317 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3318 memset(l4kwqe, 0, sizeof(*l4kwqe));
3319 wqes[0] = (struct kwqe *) l4kwqe;
3321 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3323 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3324 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3326 l4kwqe->da0 = csk->ha[0];
3327 l4kwqe->da1 = csk->ha[1];
3328 l4kwqe->da2 = csk->ha[2];
3329 l4kwqe->da3 = csk->ha[3];
3330 l4kwqe->da4 = csk->ha[4];
3331 l4kwqe->da5 = csk->ha[5];
3333 l4kwqe->sa0 = dev->mac_addr[0];
3334 l4kwqe->sa1 = dev->mac_addr[1];
3335 l4kwqe->sa2 = dev->mac_addr[2];
3336 l4kwqe->sa3 = dev->mac_addr[3];
3337 l4kwqe->sa4 = dev->mac_addr[4];
3338 l4kwqe->sa5 = dev->mac_addr[5];
3340 l4kwqe->etype = ETH_P_IP;
3341 l4kwqe->ipid_start = DEF_IPID_START;
3342 l4kwqe->host_opaque = csk->l5_cid;
3345 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3346 l4kwqe->vlan_tag = csk->vlan_id;
3347 l4kwqe->l2hdr_nbytes += 4;
3350 return dev->submit_kwqes(dev, wqes, 1);
3353 static int cnic_cm_update_pg(struct cnic_sock *csk)
3355 struct cnic_dev *dev = csk->dev;
3356 struct l4_kwq_update_pg *l4kwqe;
3357 struct kwqe *wqes[1];
3359 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3360 memset(l4kwqe, 0, sizeof(*l4kwqe));
3361 wqes[0] = (struct kwqe *) l4kwqe;
3363 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3365 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3366 l4kwqe->pg_cid = csk->pg_cid;
3368 l4kwqe->da0 = csk->ha[0];
3369 l4kwqe->da1 = csk->ha[1];
3370 l4kwqe->da2 = csk->ha[2];
3371 l4kwqe->da3 = csk->ha[3];
3372 l4kwqe->da4 = csk->ha[4];
3373 l4kwqe->da5 = csk->ha[5];
3375 l4kwqe->pg_host_opaque = csk->l5_cid;
3376 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3378 return dev->submit_kwqes(dev, wqes, 1);
3381 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3383 struct cnic_dev *dev = csk->dev;
3384 struct l4_kwq_upload *l4kwqe;
3385 struct kwqe *wqes[1];
3387 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3388 memset(l4kwqe, 0, sizeof(*l4kwqe));
3389 wqes[0] = (struct kwqe *) l4kwqe;
3391 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3393 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3394 l4kwqe->cid = csk->pg_cid;
3396 return dev->submit_kwqes(dev, wqes, 1);
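/* Build the TCP connect request. IPv4 uses two linked KWQEs
 * (CONNECT1 + CONNECT3); IPv6 inserts CONNECT2 between them to carry
 * the upper 96 bits of the addresses. The MSS is derived from the
 * path MTU minus the IP and TCP header sizes.
 */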
3399 static int cnic_cm_conn_req(struct cnic_sock *csk)
3401 struct cnic_dev *dev = csk->dev;
3402 struct l4_kwq_connect_req1 *l4kwqe1;
3403 struct l4_kwq_connect_req2 *l4kwqe2;
3404 struct l4_kwq_connect_req3 *l4kwqe3;
3405 struct kwqe *wqes[3];
3409 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3410 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3411 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3412 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3413 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3414 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3416 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3418 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3419 l4kwqe3->ka_timeout = csk->ka_timeout;
3420 l4kwqe3->ka_interval = csk->ka_interval;
3421 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3422 l4kwqe3->tos = csk->tos;
3423 l4kwqe3->ttl = csk->ttl;
3424 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3425 l4kwqe3->pmtu = csk->mtu;
3426 l4kwqe3->rcv_buf = csk->rcv_buf;
3427 l4kwqe3->snd_buf = csk->snd_buf;
3428 l4kwqe3->seed = csk->seed;
3430 wqes[0] = (struct kwqe *) l4kwqe1;
3431 if (test_bit(SK_F_IPV6, &csk->flags)) {
3432 wqes[1] = (struct kwqe *) l4kwqe2;
3433 wqes[2] = (struct kwqe *) l4kwqe3;
3436 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3437 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3439 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3440 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3441 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3442 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3443 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3444 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3445 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3446 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3447 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3448 sizeof(struct tcphdr);
3450 wqes[1] = (struct kwqe *) l4kwqe3;
3451 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3452 sizeof(struct tcphdr);
3455 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3457 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3458 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3459 l4kwqe1->cid = csk->cid;
3460 l4kwqe1->pg_cid = csk->pg_cid;
3461 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3462 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3463 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3464 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3465 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3466 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3467 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3468 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3469 if (csk->tcp_flags & SK_TCP_NAGLE)
3470 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3471 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3472 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3473 if (csk->tcp_flags & SK_TCP_SACK)
3474 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3475 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3476 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3478 l4kwqe1->tcp_flags = tcp_flags;
3480 return dev->submit_kwqes(dev, wqes, num_wqes);
3483 static int cnic_cm_close_req(struct cnic_sock *csk)
3485 struct cnic_dev *dev = csk->dev;
3486 struct l4_kwq_close_req *l4kwqe;
3487 struct kwqe *wqes[1];
3489 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3490 memset(l4kwqe, 0, sizeof(*l4kwqe));
3491 wqes[0] = (struct kwqe *) l4kwqe;
3493 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3494 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3495 l4kwqe->cid = csk->cid;
3497 return dev->submit_kwqes(dev, wqes, 1);
3500 static int cnic_cm_abort_req(struct cnic_sock *csk)
3502 struct cnic_dev *dev = csk->dev;
3503 struct l4_kwq_reset_req *l4kwqe;
3504 struct kwqe *wqes[1];
3506 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3507 memset(l4kwqe, 0, sizeof(*l4kwqe));
3508 wqes[0] = (struct kwqe *) l4kwqe;
3510 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3511 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3512 l4kwqe->cid = csk->cid;
3514 return dev->submit_kwqes(dev, wqes, 1);
3517 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3518 u32 l5_cid, struct cnic_sock **csk, void *context)
3520 struct cnic_local *cp = dev->cnic_priv;
3521 struct cnic_sock *csk1;
3523 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3524 return -EINVAL;
3526 if (cp->ctx_tbl) {
3527 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3529 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3533 csk1 = &cp->csk_tbl[l5_cid];
3534 if (atomic_read(&csk1->ref_count))
3537 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3542 csk1->l5_cid = l5_cid;
3543 csk1->ulp_type = ulp_type;
3544 csk1->context = context;
3546 csk1->ka_timeout = DEF_KA_TIMEOUT;
3547 csk1->ka_interval = DEF_KA_INTERVAL;
3548 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3549 csk1->tos = DEF_TOS;
3550 csk1->ttl = DEF_TTL;
3551 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3552 csk1->rcv_buf = DEF_RCV_BUF;
3553 csk1->snd_buf = DEF_SND_BUF;
3554 csk1->seed = DEF_SEED;
3560 static void cnic_cm_cleanup(struct cnic_sock *csk)
3562 if (csk->src_port) {
3563 struct cnic_dev *dev = csk->dev;
3564 struct cnic_local *cp = dev->cnic_priv;
3566 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3571 static void cnic_close_conn(struct cnic_sock *csk)
3573 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3574 cnic_cm_upload_pg(csk);
3575 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3577 cnic_cm_cleanup(csk);
3580 static int cnic_cm_destroy(struct cnic_sock *csk)
3582 if (!cnic_in_use(csk))
3586 clear_bit(SK_F_INUSE, &csk->flags);
3587 smp_mb__after_clear_bit();
3588 while (atomic_read(&csk->ref_count) != 1)
3590 cnic_cm_cleanup(csk);
3597 static inline u16 cnic_get_vlan(struct net_device *dev,
3598 struct net_device **vlan_dev)
3600 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3601 *vlan_dev = vlan_dev_real_dev(dev);
3602 return vlan_dev_vlan_id(dev);
3608 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3609 struct dst_entry **dst)
3611 #if defined(CONFIG_INET)
3614 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3621 return -ENETUNREACH;
3625 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3626 struct dst_entry **dst)
3628 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3631 memset(&fl6, 0, sizeof(fl6));
3632 fl6.daddr = dst_addr->sin6_addr;
3633 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3634 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3636 *dst = ip6_route_output(&init_net, NULL, &fl6);
3637 if ((*dst)->error) {
3638 dst_release(*dst);
3639 *dst = NULL;
3640 return -ENETUNREACH;
3645 return -ENETUNREACH;
3648 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3651 struct cnic_dev *dev = NULL;
3652 struct dst_entry *dst;
3653 struct net_device *netdev = NULL;
3654 int err = -ENETUNREACH;
3656 if (dst_addr->sin_family == AF_INET)
3657 err = cnic_get_v4_route(dst_addr, &dst);
3658 else if (dst_addr->sin_family == AF_INET6) {
3659 struct sockaddr_in6 *dst_addr6 =
3660 (struct sockaddr_in6 *) dst_addr;
3662 err = cnic_get_v6_route(dst_addr6, &dst);
3672 cnic_get_vlan(dst->dev, &netdev);
3674 dev = cnic_from_netdev(netdev);
3683 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3685 struct cnic_dev *dev = csk->dev;
3686 struct cnic_local *cp = dev->cnic_priv;
3688 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3691 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3693 struct cnic_dev *dev = csk->dev;
3694 struct cnic_local *cp = dev->cnic_priv;
3696 struct dst_entry *dst = NULL;
3697 struct net_device *realdev;
3701 if (saddr->local.v6.sin6_family == AF_INET6 &&
3702 saddr->remote.v6.sin6_family == AF_INET6)
3704 else if (saddr->local.v4.sin_family == AF_INET &&
3705 saddr->remote.v4.sin_family == AF_INET)
3710 clear_bit(SK_F_IPV6, &csk->flags);
3713 set_bit(SK_F_IPV6, &csk->flags);
3714 cnic_get_v6_route(&saddr->remote.v6, &dst);
3716 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3717 sizeof(struct in6_addr));
3718 csk->dst_port = saddr->remote.v6.sin6_port;
3719 local_port = saddr->local.v6.sin6_port;
3722 cnic_get_v4_route(&saddr->remote.v4, &dst);
3724 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3725 csk->dst_port = saddr->remote.v4.sin_port;
3726 local_port = saddr->local.v4.sin_port;
3730 csk->mtu = dev->netdev->mtu;
3731 if (dst && dst->dev) {
3732 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3733 if (realdev == dev->netdev) {
3734 csk->vlan_id = vlan;
3735 csk->mtu = dst_mtu(dst);
3739 port_id = be16_to_cpu(local_port);
3740 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3741 port_id < CNIC_LOCAL_PORT_MAX) {
3742 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3748 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3749 if (port_id == -1) {
3753 local_port = cpu_to_be16(port_id);
3755 csk->src_port = local_port;
3762 static void cnic_init_csk_state(struct cnic_sock *csk)
3765 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3766 clear_bit(SK_F_CLOSING, &csk->flags);
3769 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3771 struct cnic_local *cp = csk->dev->cnic_priv;
3774 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3775 return -EOPNOTSUPP;
3777 if (!cnic_in_use(csk))
3780 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3783 cnic_init_csk_state(csk);
3785 err = cnic_get_route(csk, saddr);
3789 err = cnic_resolve_addr(csk, saddr);
3794 clear_bit(SK_F_CONNECT_START, &csk->flags);
3798 static int cnic_cm_abort(struct cnic_sock *csk)
3800 struct cnic_local *cp = csk->dev->cnic_priv;
3801 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3803 if (!cnic_in_use(csk))
3806 if (cnic_abort_prep(csk))
3807 return cnic_cm_abort_req(csk);
3809 /* Getting here means that we haven't started connect, or
3810 * connect was not successful.
3813 cp->close_conn(csk, opcode);
3814 if (csk->state != opcode)
3820 static int cnic_cm_close(struct cnic_sock *csk)
3822 if (!cnic_in_use(csk))
3825 if (cnic_close_prep(csk)) {
3826 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3827 return cnic_cm_close_req(csk);
3834 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3837 struct cnic_ulp_ops *ulp_ops;
3838 int ulp_type = csk->ulp_type;
3841 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3843 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3844 ulp_ops->cm_connect_complete(csk);
3845 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3846 ulp_ops->cm_close_complete(csk);
3847 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3848 ulp_ops->cm_remote_abort(csk);
3849 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3850 ulp_ops->cm_abort_complete(csk);
3851 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3852 ulp_ops->cm_remote_close(csk);
3857 static int cnic_cm_set_pg(struct cnic_sock *csk)
3859 if (cnic_offld_prep(csk)) {
3860 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3861 cnic_cm_update_pg(csk);
3863 cnic_cm_offload_pg(csk);
3868 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3870 struct cnic_local *cp = dev->cnic_priv;
3871 u32 l5_cid = kcqe->pg_host_opaque;
3872 u8 opcode = kcqe->op_code;
3873 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3876 if (!cnic_in_use(csk))
3879 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3880 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3883 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3884 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3885 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3886 cnic_cm_upcall(cp, csk,
3887 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3891 csk->pg_cid = kcqe->pg_cid;
3892 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3893 cnic_cm_conn_req(csk);
3899 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3901 struct cnic_local *cp = dev->cnic_priv;
3902 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3903 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3904 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3906 ctx->timestamp = jiffies;
3908 wake_up(&ctx->waitq);
3911 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3913 struct cnic_local *cp = dev->cnic_priv;
3914 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3915 u8 opcode = l4kcqe->op_code;
3917 struct cnic_sock *csk;
3919 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3920 cnic_process_fcoe_term_conn(dev, kcqe);
3923 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3924 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3925 cnic_cm_process_offld_pg(dev, l4kcqe);
3929 l5_cid = l4kcqe->conn_id;
3931 l5_cid = l4kcqe->cid;
3932 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3935 csk = &cp->csk_tbl[l5_cid];
3936 csk_hold(csk);
3938 if (!cnic_in_use(csk)) {
3939 csk_put(csk);
3940 return;
3941 }
3943 switch (opcode) {
3944 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3945 if (l4kcqe->status != 0) {
3946 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3947 cnic_cm_upcall(cp, csk,
3948 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3951 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3952 if (l4kcqe->status == 0)
3953 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3954 else if (l4kcqe->status ==
3955 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
3956 set_bit(SK_F_HW_ERR, &csk->flags);
3958 smp_mb__before_clear_bit();
3959 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3960 cnic_cm_upcall(cp, csk, opcode);
3963 case L5CM_RAMROD_CMD_ID_CLOSE:
3964 if (l4kcqe->status != 0) {
3965 netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
3966 "status 0x%x\n", l4kcqe->status);
3967 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3972 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3973 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3974 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3975 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3976 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3977 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
3978 set_bit(SK_F_HW_ERR, &csk->flags);
3980 cp->close_conn(csk, opcode);
3983 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3984 /* after we already sent CLOSE_REQ */
3985 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
3986 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
3987 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3988 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
3990 cnic_cm_upcall(cp, csk, opcode);
3996 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3998 struct cnic_dev *dev = data;
4001 for (i = 0; i < num; i++)
4002 cnic_cm_process_kcqe(dev, kcqe[i]);
4005 static struct cnic_ulp_ops cm_ulp_ops = {
4006 .indicate_kcqes = cnic_cm_indicate_kcqe,
4009 static void cnic_cm_free_mem(struct cnic_dev *dev)
4011 struct cnic_local *cp = dev->cnic_priv;
4015 cnic_free_id_tbl(&cp->csk_port_tbl);
4018 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4020 struct cnic_local *cp = dev->cnic_priv;
4023 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
4024 GFP_KERNEL);
4025 if (!cp->csk_tbl)
4026 return -ENOMEM;
4028 port_id = random32();
4029 port_id %= CNIC_LOCAL_PORT_RANGE;
4030 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4031 CNIC_LOCAL_PORT_MIN, port_id)) {
4032 cnic_cm_free_mem(dev);
4038 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4040 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4041 /* Unsolicited RESET_COMP or RESET_RECEIVED */
4042 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4043 csk->state = opcode;
4046 /* 1. If event opcode matches the expected event in csk->state
4047 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4048 * event.
4049 * 3. If the expected event is 0, meaning the connection was
4050 * never established, we accept the opcode from cm_abort.
4052 if (opcode == csk->state || csk->state == 0 ||
4053 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4054 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4055 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4056 if (csk->state == 0)
4057 csk->state = opcode;
4064 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4066 struct cnic_dev *dev = csk->dev;
4067 struct cnic_local *cp = dev->cnic_priv;
4069 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4070 cnic_cm_upcall(cp, csk, opcode);
4074 clear_bit(SK_F_CONNECT_START, &csk->flags);
4075 cnic_close_conn(csk);
4076 csk->state = opcode;
4077 cnic_cm_upcall(cp, csk, opcode);
4080 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4084 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4089 cnic_ctx_wr(dev, 45, 0, seed);
4093 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4095 struct cnic_dev *dev = csk->dev;
4096 struct cnic_local *cp = dev->cnic_priv;
4097 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4098 union l5cm_specific_data l5_data;
4100 int close_complete = 0;
4103 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4104 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4105 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4106 if (cnic_ready_to_close(csk, opcode)) {
4107 if (test_bit(SK_F_HW_ERR, &csk->flags))
4109 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4110 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4115 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4116 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4118 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4122 if (cmd) {
4123 memset(&l5_data, 0, sizeof(l5_data));
4125 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4126 &l5_data);
4127 } else if (close_complete) {
4128 ctx->timestamp = jiffies;
4129 cnic_close_conn(csk);
4130 cnic_cm_upcall(cp, csk, csk->state);
4134 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4136 struct cnic_local *cp = dev->cnic_priv;
4141 if (!netif_running(dev->netdev))
4142 return;
4144 cnic_bnx2x_delete_wait(dev, 0);
4146 cancel_delayed_work(&cp->delete_task);
4147 flush_workqueue(cnic_wq);
4149 if (atomic_read(&cp->iscsi_conn) != 0)
4150 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4151 atomic_read(&cp->iscsi_conn));
4154 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4156 struct cnic_local *cp = dev->cnic_priv;
4157 u32 pfid = cp->pfid;
4158 u32 port = CNIC_PORT(cp);
4160 cnic_init_bnx2x_mac(dev);
4161 cnic_bnx2x_set_tcp_timestamp(dev, 1);
4163 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4164 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4166 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4167 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4168 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4169 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4172 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4173 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4174 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4175 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4176 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4177 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4178 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4179 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4181 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4186 static void cnic_delete_task(struct work_struct *work)
4188 struct cnic_local *cp;
4189 struct cnic_dev *dev;
4191 int need_resched = 0;
4193 cp = container_of(work, struct cnic_local, delete_task.work);
4196 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4197 struct drv_ctl_info info;
4199 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4201 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4202 cp->ethdev->drv_ctl(dev->netdev, &info);
4205 for (i = 0; i < cp->max_cid_space; i++) {
4206 struct cnic_context *ctx = &cp->ctx_tbl[i];
4209 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4210 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4213 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4218 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4221 err = cnic_bnx2x_destroy_ramrod(dev, i);
4223 cnic_free_bnx2x_conn_resc(dev, i);
4225 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4226 atomic_dec(&cp->iscsi_conn);
4228 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4233 queue_delayed_work(cnic_wq, &cp->delete_task,
4234 msecs_to_jiffies(10));
4238 static int cnic_cm_open(struct cnic_dev *dev)
4240 struct cnic_local *cp = dev->cnic_priv;
4243 err = cnic_cm_alloc_mem(dev);
4247 err = cp->start_cm(dev);
4252 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4254 dev->cm_create = cnic_cm_create;
4255 dev->cm_destroy = cnic_cm_destroy;
4256 dev->cm_connect = cnic_cm_connect;
4257 dev->cm_abort = cnic_cm_abort;
4258 dev->cm_close = cnic_cm_close;
4259 dev->cm_select_dev = cnic_cm_select_dev;
4261 cp->ulp_handle[CNIC_ULP_L4] = dev;
4262 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4266 cnic_cm_free_mem(dev);
4270 static int cnic_cm_shutdown(struct cnic_dev *dev)
4272 struct cnic_local *cp = dev->cnic_priv;
4278 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4279 struct cnic_sock *csk = &cp->csk_tbl[i];
4281 clear_bit(SK_F_INUSE, &csk->flags);
4282 cnic_cm_cleanup(csk);
4284 cnic_cm_free_mem(dev);
4289 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4294 cid_addr = GET_CID_ADDR(cid);
4296 for (i = 0; i < CTX_SIZE; i += 4)
4297 cnic_ctx_wr(dev, cid_addr, i, 0);
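/* On the 5709, context memory is paged in host memory: program the
 * page table with each block's DMA address and poll until the chip
 * clears the WRITE_REQ bit for every entry.
 */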
4300 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4302 struct cnic_local *cp = dev->cnic_priv;
4304 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4306 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4309 for (i = 0; i < cp->ctx_blks; i++) {
4311 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4314 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4316 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4317 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4318 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4319 (u64) cp->ctx_arr[i].mapping >> 32);
4320 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4321 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4322 for (j = 0; j < 10; j++) {
4324 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4325 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4329 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4337 static void cnic_free_irq(struct cnic_dev *dev)
4339 struct cnic_local *cp = dev->cnic_priv;
4340 struct cnic_eth_dev *ethdev = cp->ethdev;
4342 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4343 cp->disable_int_sync(dev);
4344 tasklet_kill(&cp->cnic_irq_task);
4345 free_irq(ethdev->irq_arr[0].vector, dev);
4349 static int cnic_request_irq(struct cnic_dev *dev)
4351 struct cnic_local *cp = dev->cnic_priv;
4352 struct cnic_eth_dev *ethdev = cp->ethdev;
4355 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4357 tasklet_disable(&cp->cnic_irq_task);
4362 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4364 struct cnic_local *cp = dev->cnic_priv;
4365 struct cnic_eth_dev *ethdev = cp->ethdev;
4367 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4369 int sblk_num = cp->status_blk_num;
4370 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4371 BNX2_HC_SB_CONFIG_1;
4373 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4375 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4376 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4377 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4379 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4380 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4381 (unsigned long) dev);
4382 err = cnic_request_irq(dev);
4386 while (cp->status_blk.bnx2->status_completion_producer_index &&
4387 i < 10) {
4388 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4389 1 << (11 + sblk_num));
4394 if (cp->status_blk.bnx2->status_completion_producer_index) {
4400 struct status_block *sblk = cp->status_blk.gen;
4401 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4404 while (sblk->status_completion_producer_index && i < 10) {
4405 CNIC_WR(dev, BNX2_HC_COMMAND,
4406 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4411 if (sblk->status_completion_producer_index)
4418 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4422 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4424 struct cnic_local *cp = dev->cnic_priv;
4425 struct cnic_eth_dev *ethdev = cp->ethdev;
4427 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4430 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4431 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4434 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4436 struct cnic_local *cp = dev->cnic_priv;
4437 struct cnic_eth_dev *ethdev = cp->ethdev;
4439 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4442 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4443 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4444 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4445 synchronize_irq(ethdev->irq_arr[0].vector);
4448 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4450 struct cnic_local *cp = dev->cnic_priv;
4451 struct cnic_eth_dev *ethdev = cp->ethdev;
4452 struct cnic_uio_dev *udev = cp->udev;
4453 u32 cid_addr, tx_cid, sb_id;
4454 u32 val, offset0, offset1, offset2, offset3;
4457 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4458 struct status_block *s_blk = cp->status_blk.gen;
4460 sb_id = cp->status_blk_num;
4462 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4463 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4464 struct status_block_msix *sblk = cp->status_blk.bnx2;
4466 tx_cid = TX_TSS_CID + sb_id - 1;
4467 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4469 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4471 cp->tx_cons = *cp->tx_cons_ptr;
4473 cid_addr = GET_CID_ADDR(tx_cid);
4474 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4475 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4477 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4478 cnic_ctx_wr(dev, cid_addr2, i, 0);
4480 offset0 = BNX2_L2CTX_TYPE_XI;
4481 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4482 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4483 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4485 cnic_init_context(dev, tx_cid);
4486 cnic_init_context(dev, tx_cid + 1);
4488 offset0 = BNX2_L2CTX_TYPE;
4489 offset1 = BNX2_L2CTX_CMD_TYPE;
4490 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4491 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4493 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4494 cnic_ctx_wr(dev, cid_addr, offset0, val);
4496 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4497 cnic_ctx_wr(dev, cid_addr, offset1, val);
4499 txbd = udev->l2_ring;
4501 buf_map = udev->l2_buf_map;
4502 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4503 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4504 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4506 val = (u64) ring_map >> 32;
4507 cnic_ctx_wr(dev, cid_addr, offset2, val);
4508 txbd->tx_bd_haddr_hi = val;
4510 val = (u64) ring_map & 0xffffffff;
4511 cnic_ctx_wr(dev, cid_addr, offset3, val);
4512 txbd->tx_bd_haddr_lo = val;
4515 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4517 struct cnic_local *cp = dev->cnic_priv;
4518 struct cnic_eth_dev *ethdev = cp->ethdev;
4519 struct cnic_uio_dev *udev = cp->udev;
4520 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4523 struct status_block *s_blk = cp->status_blk.gen;
4524 dma_addr_t ring_map = udev->l2_ring_map;
4526 sb_id = cp->status_blk_num;
4527 cnic_init_context(dev, 2);
4528 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4529 coal_reg = BNX2_HC_COMMAND;
4530 coal_val = CNIC_RD(dev, coal_reg);
4531 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4532 struct status_block_msix *sblk = cp->status_blk.bnx2;
4534 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4535 coal_reg = BNX2_HC_COALESCE_NOW;
4536 coal_val = 1 << (11 + sb_id);
4539 while (*cp->rx_cons_ptr == 0 && i < 10) {
4540 CNIC_WR(dev, coal_reg, coal_val);
4545 cp->rx_cons = *cp->rx_cons_ptr;
4547 cid_addr = GET_CID_ADDR(2);
4548 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4549 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4550 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4553 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4555 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4556 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4558 rxbd = udev->l2_ring + BCM_PAGE_SIZE;
4559 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4561 int n = (i % cp->l2_rx_ring_size) + 1;
4563 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4564 rxbd->rx_bd_len = cp->l2_single_buf_size;
4565 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4566 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4567 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4569 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4570 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4571 rxbd->rx_bd_haddr_hi = val;
4573 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4574 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4575 rxbd->rx_bd_haddr_lo = val;
4577 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4578 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
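/* Flush the L2 receive ring by submitting an L2 FLUSH KWQE on the
 * ring's CID before the ring memory is reused or freed.
 */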
4581 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4583 struct kwqe *wqes[1], l2kwqe;
4585 memset(&l2kwqe, 0, sizeof(l2kwqe));
4586 wqes[0] = &l2kwqe;
4587 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4588 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4589 KWQE_OPCODE_SHIFT) | 2;
4590 dev->submit_kwqes(dev, wqes, 1);
4593 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4595 struct cnic_local *cp = dev->cnic_priv;
4598 val = cp->func << 2;
4600 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4602 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4603 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4604 dev->mac_addr[0] = (u8) (val >> 8);
4605 dev->mac_addr[1] = (u8) val;
4607 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4609 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4610 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4611 dev->mac_addr[2] = (u8) (val >> 24);
4612 dev->mac_addr[3] = (u8) (val >> 16);
4613 dev->mac_addr[4] = (u8) (val >> 8);
4614 dev->mac_addr[5] = (u8) val;
4616 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4618 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4619 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4620 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4622 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4623 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4624 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
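/* Bring up the bnx2 kernel queues: size the MQ kernel-bypass block,
 * program the KWQ and KCQ contexts (type, queue size, page table
 * address), point the producer/consumer index pointers at the right
 * status block, then ring the CP and COM doorbells.
 */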
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;
		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
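
/* Write the (suitably aligned) DMA addresses of the per-connection
 * context memory blocks into the chip's context table.
 */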
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}
static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}
static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}
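
/* Program the host-coalescing timeout for the iSCSI EQ consumer index in
 * the status block and enable status-block updates for that index.
 */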
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
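
/* Build the iSCSI L2 TX ring: each transmit uses three BDs (a start BD,
 * a parse BD left zeroed, and a regular BD) pre-pointed into the uio
 * buffer, then fill in the TX half of the CLIENT_SETUP ramrod data.
 */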
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		data->general.statistics_zero_flg = 1;
		data->general.statistics_en_flg = 1;
		data->general.statistics_counter_id = cli;
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}
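
/* Build the iSCSI L2 RX BD ring and the RX completion queue chain, and
 * fill in the general and RX portions of the CLIENT_SETUP ramrod data,
 * including outer and silent VLAN removal.
 */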
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General client parameters */
	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;
	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->general.func_id = cp->pfid;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}
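
/* Point the kernel completion queues at their producer/consumer
 * locations: kcq1 carries iSCSI events on all chips; kcq2 carries FCoE
 * events and exists only on E2 and later chips.
 */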
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}
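
/* Bring up the CNIC side of a bnx2x device: work out the PF id and port
 * mode, allocate the iSCSI (and, on E2+, FCoE) CID tables, program the
 * iSCSI event queue parameters into the storm memories, write the
 * context table, and hook up the IRQ.
 */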
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret;
	u32 pfid;

	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
	cp->port_mode = CHIP_PORT_MODE_NONE;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		u32 val;

		pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
		cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
				 ME_REG_ABS_PF_NUM_SHIFT);
		func = CNIC_FUNC(cp);

		val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val) {
			cp->port_mode = CHIP_4_PORT_MODE;
			cp->pfid = func >> 1;
		} else {
			cp->port_mode = CHIP_2_PORT_MODE;
			cp->pfid = func & 0x6;
		}
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
					cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}
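
/* Initialize the uio-visible L2 rings.  For bnx2 this just programs the
 * TX and RX ring contexts; for bnx2x it also posts the RX producers,
 * sends a CLIENT_SETUP ramrod, and waits for it to complete before
 * enabling the ring.
 */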
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i, *cid_ptr;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;
		cid_ptr = udev->l2_buf + 12;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
		*cid_ptr = cid;
	}
}
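
/* Undo cnic_init_rings().  For bnx2x this halts the L2 client with a
 * HALT ramrod, waits for completion, and then releases the connection
 * with a CFC_DEL ramrod before clearing the RX ring page.
 */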
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	void *rx_ring;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
	memset(rx_ring, 0, BCM_PAGE_SIZE);
}
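
/* Attach this CNIC instance to the underlying bnx2/bnx2x driver so that
 * it starts delivering interrupts and events to us.
 */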
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);

	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}
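
/* Stop the hardware, first giving the userspace uio client up to 1.5
 * seconds to close so that the ring shutdown completes cleanly.
 */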
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}
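
/* Allocate a cnic_dev together with its cnic_local private area in one
 * block and fill in the generic defaults and method pointers.
 */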
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}
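
/* Probe helper for bnx2 devices.  Uses symbol_get() so that the bnx2
 * module is not a hard dependency; early 5709/5709S revisions are
 * skipped.
 */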
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}
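
/* Use the ethtool driver name to decide whether this net_device belongs
 * to bnx2 or bnx2x, and if so create a CNIC device and add it to the
 * global device list.
 */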
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops || !ulp_ops->indicate_netevent)
			continue;

		ctx = cp->ulp_handle[if_type];

		ulp_ops->indicate_netevent(ctx, event, vlan_id);
	}
	rcu_read_unlock();
}
/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
							 void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_TAG_PRESENT;
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}
static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};
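
/* Module unload / init-failure cleanup: stop and free every CNIC device
 * and release any remaining uio structures.
 */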
static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);