2 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/list.h>
34 #include <linux/workqueue.h>
35 #include <linux/skbuff.h>
36 #include <linux/timer.h>
37 #include <linux/notifier.h>
38 #include <linux/inetdevice.h>
40 #include <linux/tcp.h>
41 #include <linux/if_vlan.h>
43 #include <net/neighbour.h>
44 #include <net/netevent.h>
45 #include <net/route.h>
47 #include <net/ip6_route.h>
48 #include <net/addrconf.h>
50 #include <rdma/ib_addr.h>
52 #include <libcxgb_cm.h>
56 static char *states[] = {
73 module_param(nocong, int, 0644);
74 MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
76 static int enable_ecn;
77 module_param(enable_ecn, int, 0644);
78 MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
80 static int dack_mode = 1;
81 module_param(dack_mode, int, 0644);
82 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
84 uint c4iw_max_read_depth = 32;
85 module_param(c4iw_max_read_depth, uint, 0644);
86 MODULE_PARM_DESC(c4iw_max_read_depth,
87 "Per-connection max ORD/IRD (default=32)");
89 static int enable_tcp_timestamps;
90 module_param(enable_tcp_timestamps, int, 0644);
91 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
93 static int enable_tcp_sack;
94 module_param(enable_tcp_sack, int, 0644);
95 MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
97 static int enable_tcp_window_scaling = 1;
98 module_param(enable_tcp_window_scaling, int, 0644);
99 MODULE_PARM_DESC(enable_tcp_window_scaling,
100 "Enable tcp window scaling (default=1)");
103 module_param(c4iw_debug, int, 0644);
104 MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
106 static int peer2peer = 1;
107 module_param(peer2peer, int, 0644);
108 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
110 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
111 module_param(p2p_type, int, 0644);
112 MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
113 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
115 static int ep_timeout_secs = 60;
116 module_param(ep_timeout_secs, int, 0644);
117 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
118 "in seconds (default=60)");
120 static int mpa_rev = 2;
121 module_param(mpa_rev, int, 0644);
122 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
123 "1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
124 " compliant (default=2)");
126 static int markers_enabled;
127 module_param(markers_enabled, int, 0644);
128 MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
130 static int crc_enabled = 1;
131 module_param(crc_enabled, int, 0644);
132 MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
134 static int rcv_win = 256 * 1024;
135 module_param(rcv_win, int, 0644);
136 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
138 static int snd_win = 128 * 1024;
139 module_param(snd_win, int, 0644);
140 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
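/*
 * Usage note (illustrative, not part of the original source): all of the
 * parameters above are plain module parameters, so they can be set at load
 * time, e.g.
 *
 *     modprobe iw_cxgb4 mpa_rev=2 rcv_win=262144 snd_win=131072
 *
 * and, since they are registered with 0644 permissions, inspected or tuned
 * at runtime under /sys/module/<module>/parameters/. The module name
 * "iw_cxgb4" is an assumption based on the upstream driver layout.
 */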
142 static struct workqueue_struct *workq;
144 static struct sk_buff_head rxq;
146 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
147 static void ep_timeout(unsigned long arg);
148 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
149 static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
151 static LIST_HEAD(timeout_list);
152 static spinlock_t timeout_lock;
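/*
 * Note (inferred; the producers/consumers are elided from this excerpt):
 * endpoints whose timers fire are queued on timeout_list under timeout_lock
 * and reaped later from workqueue context, because the timer callback runs
 * in softirq context where the per-ep mutex cannot be taken.
 */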
154 static void deref_cm_id(struct c4iw_ep_common *epc)
156 epc->cm_id->rem_ref(epc->cm_id);
158 set_bit(CM_ID_DEREFED, &epc->history);
161 static void ref_cm_id(struct c4iw_ep_common *epc)
163 set_bit(CM_ID_REFED, &epc->history);
164 epc->cm_id->add_ref(epc->cm_id);
167 static void deref_qp(struct c4iw_ep *ep)
169 c4iw_qp_rem_ref(&ep->com.qp->ibqp);
170 clear_bit(QP_REFERENCED, &ep->com.flags);
171 set_bit(QP_DEREFED, &ep->com.history);
174 static void ref_qp(struct c4iw_ep *ep)
176 set_bit(QP_REFERENCED, &ep->com.flags);
177 set_bit(QP_REFED, &ep->com.history);
178 c4iw_qp_add_ref(&ep->com.qp->ibqp);
181 static void start_ep_timer(struct c4iw_ep *ep)
183 PDBG("%s ep %p\n", __func__, ep);
184 if (timer_pending(&ep->timer)) {
185 pr_err("%s timer already started! ep %p\n",
189 clear_bit(TIMEOUT, &ep->com.flags);
190 c4iw_get_ep(&ep->com);
191 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
192 ep->timer.data = (unsigned long)ep;
193 ep->timer.function = ep_timeout;
194 add_timer(&ep->timer);
197 static int stop_ep_timer(struct c4iw_ep *ep)
199 PDBG("%s ep %p stopping\n", __func__, ep);
200 del_timer_sync(&ep->timer);
201 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
202 c4iw_put_ep(&ep->com);
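/*
 * Reference-counting sketch (inferred from the code above): start_ep_timer()
 * takes a reference via c4iw_get_ep() before arming the timer, and whichever
 * of stop_ep_timer() or the timer handler wins the test_and_set_bit(TIMEOUT)
 * race drops that reference exactly once, even if the timer fires while
 * another thread is tearing the endpoint down.
 */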
208 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
209 struct l2t_entry *l2e)
213 if (c4iw_fatal_error(rdev)) {
215 PDBG("%s - device in error state - dropping\n", __func__);
218 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
221 else if (error == NET_XMIT_DROP)
223 return error < 0 ? error : 0;
226 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
230 if (c4iw_fatal_error(rdev)) {
232 PDBG("%s - device in error state - dropping\n", __func__);
235 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
238 return error < 0 ? error : 0;
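/*
 * Both send helpers above normalize their return value: cxgb4_l2t_send()
 * and cxgb4_ofld_send() may return positive NET_XMIT_* congestion codes
 * (e.g. NET_XMIT_DROP), which are treated as success here; only negative
 * errnos are propagated to the caller.
 */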
241 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
243 struct cpl_tid_release *req;
245 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
248 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
249 INIT_TP_WR(req, hwtid);
250 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
251 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
252 c4iw_ofld_send(rdev, skb);
256 static void set_emss(struct c4iw_ep *ep, u16 opt)
258 ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
259 ((AF_INET == ep->com.remote_addr.ss_family) ?
260 sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
261 sizeof(struct tcphdr);
263 if (TCPOPT_TSTAMP_G(opt))
264 ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
268 PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
269 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
270 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
274 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
276 enum c4iw_ep_state state;
278 mutex_lock(&epc->mutex);
280 mutex_unlock(&epc->mutex);
284 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
289 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
291 mutex_lock(&epc->mutex);
292 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
293 __state_set(epc, new);
294 mutex_unlock(&epc->mutex);
298 static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
304 len = roundup(sizeof(union cpl_wr_size), 16);
305 for (i = 0; i < size; i++) {
306 skb = alloc_skb(len, GFP_KERNEL);
309 skb_queue_tail(ep_skb_list, skb);
313 skb_queue_purge(ep_skb_list);
317 static void *alloc_ep(int size, gfp_t gfp)
319 struct c4iw_ep_common *epc;
321 epc = kzalloc(size, gfp);
323 kref_init(&epc->kref);
324 mutex_init(&epc->mutex);
325 c4iw_init_wr_wait(&epc->wr_wait);
327 PDBG("%s alloc ep %p\n", __func__, epc);
331 static void remove_ep_tid(struct c4iw_ep *ep)
335 spin_lock_irqsave(&ep->com.dev->lock, flags);
336 _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
337 spin_unlock_irqrestore(&ep->com.dev->lock, flags);
340 static void insert_ep_tid(struct c4iw_ep *ep)
344 spin_lock_irqsave(&ep->com.dev->lock, flags);
345 _insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
346 spin_unlock_irqrestore(&ep->com.dev->lock, flags);
350 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
352 static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
357 spin_lock_irqsave(&dev->lock, flags);
358 ep = idr_find(&dev->hwtid_idr, tid);
360 c4iw_get_ep(&ep->com);
361 spin_unlock_irqrestore(&dev->lock, flags);
366 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
368 static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
371 struct c4iw_listen_ep *ep;
374 spin_lock_irqsave(&dev->lock, flags);
375 ep = idr_find(&dev->stid_idr, stid);
377 c4iw_get_ep(&ep->com);
378 spin_unlock_irqrestore(&dev->lock, flags);
382 void _c4iw_free_ep(struct kref *kref)
386 ep = container_of(kref, struct c4iw_ep, com.kref);
387 PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
388 if (test_bit(QP_REFERENCED, &ep->com.flags))
390 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
391 if (ep->com.remote_addr.ss_family == AF_INET6) {
392 struct sockaddr_in6 *sin6 =
393 (struct sockaddr_in6 *)
397 ep->com.dev->rdev.lldi.ports[0],
398 (const u32 *)&sin6->sin6_addr.s6_addr,
401 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
402 dst_release(ep->dst);
403 cxgb4_l2t_release(ep->l2t);
405 kfree_skb(ep->mpa_skb);
407 if (!skb_queue_empty(&ep->com.ep_skb_list))
408 skb_queue_purge(&ep->com.ep_skb_list);
412 static void release_ep_resources(struct c4iw_ep *ep)
414 set_bit(RELEASE_RESOURCES, &ep->com.flags);
417 * If we have a hwtid, then remove it from the idr table
418 * so lookups will no longer find this endpoint. Otherwise
419 * we have a race where one thread finds the ep ptr just
420 * before the other thread is freeing the ep memory.
424 c4iw_put_ep(&ep->com);
427 static int status2errno(int status)
432 case CPL_ERR_CONN_RESET:
434 case CPL_ERR_ARP_MISS:
435 return -EHOSTUNREACH;
436 case CPL_ERR_CONN_TIMEDOUT:
438 case CPL_ERR_TCAM_FULL:
440 case CPL_ERR_CONN_EXIST:
448 * Try to reuse skbs that have already been allocated...
450 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
452 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
455 skb_reset_transport_header(skb);
457 skb = alloc_skb(len, gfp);
459 t4_set_arp_err_handler(skb, NULL, NULL);
463 static struct net_device *get_real_dev(struct net_device *egress_dev)
465 return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
468 static void arp_failure_discard(void *handle, struct sk_buff *skb)
470 pr_err(MOD "ARP failure\n");
474 static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
476 pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
481 FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
482 FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
485 static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
489 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
490 release_ep_resources(ep);
494 static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
498 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
499 c4iw_put_ep(&ep->parent_ep->com);
500 release_ep_resources(ep);
505 * Fake up a special CPL opcode and call sched() so process_work() will call
506 * _put_ep_safe() in a safe context to free the ep resources. This is needed
507 * because ARP error handlers are called in an ATOMIC context, and
508 * _c4iw_free_ep() needs to block.
510 static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
513 struct cpl_act_establish *rpl = cplhdr(skb);
515 /* Set our special ARP_FAILURE opcode */
516 rpl->ot.opcode = cpl;
519 * Save ep in the skb->cb area, after where sched() will save the dev
522 *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
523 sched(ep->com.dev, skb);
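/*
 * skb->cb layout used by the fake-CPL path (inferred from the offsets in
 * this file): sched() stores the device pointer at the start of skb->cb,
 * and the ep pointer is stashed at skb->cb + 2 * sizeof(void *), where
 * _put_ep_safe()/_put_pass_ep_safe() read it back. The fake opcodes are
 * defined as NUM_CPL_CMDS + n, so they can never collide with a real
 * hardware CPL opcode when process_work() demultiplexes them.
 */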
526 /* Handle an ARP failure for an accept */
527 static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
529 struct c4iw_ep *ep = handle;
531 pr_err(MOD "ARP failure during accept - tid %u - dropping connection\n",
534 __state_set(&ep->com, DEAD);
535 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
539 * Handle an ARP failure for an active open.
541 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
543 struct c4iw_ep *ep = handle;
545 printk(KERN_ERR MOD "ARP failure during connect\n");
546 connect_reply_upcall(ep, -EHOSTUNREACH);
547 __state_set(&ep->com, DEAD);
548 if (ep->com.remote_addr.ss_family == AF_INET6) {
549 struct sockaddr_in6 *sin6 =
550 (struct sockaddr_in6 *)&ep->com.local_addr;
551 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
552 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
554 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
555 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
556 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
560 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
563 static void abort_arp_failure(void *handle, struct sk_buff *skb)
566 struct c4iw_ep *ep = handle;
567 struct c4iw_rdev *rdev = &ep->com.dev->rdev;
568 struct cpl_abort_req *req = cplhdr(skb);
570 PDBG("%s rdev %p\n", __func__, rdev);
571 req->cmd = CPL_ABORT_NO_RST;
572 ret = c4iw_ofld_send(rdev, skb);
574 __state_set(&ep->com, DEAD);
575 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
579 static int send_flowc(struct c4iw_ep *ep)
581 struct fw_flowc_wr *flowc;
582 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
584 u16 vlan = ep->l2t->vlan;
590 if (vlan == CPL_L2T_VLAN_NONE)
595 flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
597 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
598 FW_FLOWC_WR_NPARAMS_V(nparams));
599 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
600 16)) | FW_WR_FLOWID_V(ep->hwtid));
602 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
603 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
604 (ep->com.dev->rdev.lldi.pf));
605 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
606 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
607 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
608 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
609 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
610 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
611 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
612 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
613 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
614 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
615 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
616 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
617 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
618 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
622 pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
623 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
624 flowc->mnemval[8].val = cpu_to_be32(pri);
626 /* Pad WR to 16 byte boundary */
627 flowc->mnemval[8].mnemonic = 0;
628 flowc->mnemval[8].val = 0;
630 for (i = 0; i < 9; i++) {
631 flowc->mnemval[i].r4[0] = 0;
632 flowc->mnemval[i].r4[1] = 0;
633 flowc->mnemval[i].r4[2] = 0;
636 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
637 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
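/*
 * Layout recap (restating the code above): the FLOWC work request carries
 * eight fixed mnemonics (PFNVFN, CH, PORT, IQID, SNDNXT, RCVNXT, SNDBUF,
 * MSS) plus a ninth slot that is either FW_FLOWC_MNEM_SCHEDCLASS, when the
 * L2T entry carries a VLAN priority, or zero padding that keeps the WR
 * 16-byte aligned.
 */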
640 static int send_halfclose(struct c4iw_ep *ep)
642 struct cpl_close_con_req *req;
643 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
644 int wrlen = roundup(sizeof *req, 16);
646 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
650 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
651 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
652 req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
653 memset(req, 0, wrlen);
654 INIT_TP_WR(req, ep->hwtid);
655 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
657 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
660 static int send_abort(struct c4iw_ep *ep)
662 struct cpl_abort_req *req;
663 int wrlen = roundup(sizeof *req, 16);
664 struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
666 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
667 if (WARN_ON(!req_skb))
670 set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
671 t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
672 req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
673 memset(req, 0, wrlen);
674 INIT_TP_WR(req, ep->hwtid);
675 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
676 req->cmd = CPL_ABORT_SEND_RST;
677 return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
680 static int send_connect(struct c4iw_ep *ep)
682 struct cpl_act_open_req *req = NULL;
683 struct cpl_t5_act_open_req *t5req = NULL;
684 struct cpl_t6_act_open_req *t6req = NULL;
685 struct cpl_act_open_req6 *req6 = NULL;
686 struct cpl_t5_act_open_req6 *t5req6 = NULL;
687 struct cpl_t6_act_open_req6 *t6req6 = NULL;
691 unsigned int mtu_idx;
693 int win, sizev4, sizev6, wrlen;
694 struct sockaddr_in *la = (struct sockaddr_in *)
696 struct sockaddr_in *ra = (struct sockaddr_in *)
697 &ep->com.remote_addr;
698 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
700 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
701 &ep->com.remote_addr;
703 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
704 u32 isn = (prandom_u32() & ~7UL) - 1;
706 switch (CHELSIO_CHIP_VERSION(adapter_type)) {
708 sizev4 = sizeof(struct cpl_act_open_req);
709 sizev6 = sizeof(struct cpl_act_open_req6);
712 sizev4 = sizeof(struct cpl_t5_act_open_req);
713 sizev6 = sizeof(struct cpl_t5_act_open_req6);
716 sizev4 = sizeof(struct cpl_t6_act_open_req);
717 sizev6 = sizeof(struct cpl_t6_act_open_req6);
720 pr_err("T%d Chip is not supported\n",
721 CHELSIO_CHIP_VERSION(adapter_type));
725 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
726 roundup(sizev4, 16) :
729 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
731 skb = get_skb(NULL, wrlen, GFP_KERNEL);
733 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
737 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
739 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
740 enable_tcp_timestamps,
741 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
742 wscale = compute_wscale(rcv_win);
745 * Specify the largest window that will fit in opt0. The
746 * remainder will be specified in the rx_data_ack.
748 win = ep->rcv_win >> 10;
749 if (win > RCV_BUFSIZ_M)
752 opt0 = (nocong ? NO_CONG_F : 0) |
755 WND_SCALE_V(wscale) |
757 L2T_IDX_V(ep->l2t->idx) |
758 TX_CHAN_V(ep->tx_chan) |
759 SMAC_SEL_V(ep->smac_idx) |
760 DSCP_V(ep->tos >> 2) |
761 ULP_MODE_V(ULP_MODE_TCPDDP) |
763 opt2 = RX_CHANNEL_V(0) |
764 CCTRL_ECN_V(enable_ecn) |
765 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
766 if (enable_tcp_timestamps)
767 opt2 |= TSTAMPS_EN_F;
770 if (wscale && enable_tcp_window_scaling)
771 opt2 |= WND_SCALE_EN_F;
772 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
776 opt2 |= T5_OPT_2_VALID_F;
777 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
781 if (ep->com.remote_addr.ss_family == AF_INET6)
782 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
783 (const u32 *)&la6->sin6_addr.s6_addr, 1);
785 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
787 if (ep->com.remote_addr.ss_family == AF_INET) {
788 switch (CHELSIO_CHIP_VERSION(adapter_type)) {
790 req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
794 t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
796 INIT_TP_WR(t5req, 0);
797 req = (struct cpl_act_open_req *)t5req;
800 t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
802 INIT_TP_WR(t6req, 0);
803 req = (struct cpl_act_open_req *)t6req;
804 t5req = (struct cpl_t5_act_open_req *)t6req;
807 pr_err("T%d Chip is not supported\n",
808 CHELSIO_CHIP_VERSION(adapter_type));
813 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
814 ((ep->rss_qid<<14) | ep->atid)));
815 req->local_port = la->sin_port;
816 req->peer_port = ra->sin_port;
817 req->local_ip = la->sin_addr.s_addr;
818 req->peer_ip = ra->sin_addr.s_addr;
819 req->opt0 = cpu_to_be64(opt0);
821 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
822 req->params = cpu_to_be32(cxgb4_select_ntuple(
823 ep->com.dev->rdev.lldi.ports[0],
825 req->opt2 = cpu_to_be32(opt2);
827 t5req->params = cpu_to_be64(FILTER_TUPLE_V(
829 ep->com.dev->rdev.lldi.ports[0],
831 t5req->rsvd = cpu_to_be32(isn);
832 PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
833 t5req->opt2 = cpu_to_be32(opt2);
836 switch (CHELSIO_CHIP_VERSION(adapter_type)) {
838 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
842 t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
844 INIT_TP_WR(t5req6, 0);
845 req6 = (struct cpl_act_open_req6 *)t5req6;
848 t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
850 INIT_TP_WR(t6req6, 0);
851 req6 = (struct cpl_act_open_req6 *)t6req6;
852 t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
855 pr_err("T%d Chip is not supported\n",
856 CHELSIO_CHIP_VERSION(adapter_type));
861 OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
862 ((ep->rss_qid<<14)|ep->atid)));
863 req6->local_port = la6->sin6_port;
864 req6->peer_port = ra6->sin6_port;
865 req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
866 req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
867 req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
868 req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
869 req6->opt0 = cpu_to_be64(opt0);
871 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
872 req6->params = cpu_to_be32(cxgb4_select_ntuple(
873 ep->com.dev->rdev.lldi.ports[0],
875 req6->opt2 = cpu_to_be32(opt2);
877 t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
879 ep->com.dev->rdev.lldi.ports[0],
881 t5req6->rsvd = cpu_to_be32(isn);
882 PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
883 t5req6->opt2 = cpu_to_be32(opt2);
887 set_bit(ACT_OPEN_REQ, &ep->com.history);
888 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
890 if (ret && ep->com.remote_addr.ss_family == AF_INET6)
891 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
892 (const u32 *)&la6->sin6_addr.s6_addr, 1);
896 static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
899 int mpalen, wrlen, ret;
900 struct fw_ofld_tx_data_wr *req;
901 struct mpa_message *mpa;
902 struct mpa_v2_conn_params mpa_v2_params;
904 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
906 BUG_ON(skb_cloned(skb));
908 mpalen = sizeof(*mpa) + ep->plen;
909 if (mpa_rev_to_use == 2)
910 mpalen += sizeof(struct mpa_v2_conn_params);
911 wrlen = roundup(mpalen + sizeof *req, 16);
912 skb = get_skb(skb, wrlen, GFP_KERNEL);
914 connect_reply_upcall(ep, -ENOMEM);
917 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
919 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
920 memset(req, 0, wrlen);
921 req->op_to_immdlen = cpu_to_be32(
922 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
924 FW_WR_IMMDLEN_V(mpalen));
925 req->flowid_len16 = cpu_to_be32(
926 FW_WR_FLOWID_V(ep->hwtid) |
927 FW_WR_LEN16_V(wrlen >> 4));
928 req->plen = cpu_to_be32(mpalen);
929 req->tunnel_to_proxy = cpu_to_be32(
930 FW_OFLD_TX_DATA_WR_FLUSH_F |
931 FW_OFLD_TX_DATA_WR_SHOVE_F);
933 mpa = (struct mpa_message *)(req + 1);
934 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
938 mpa->flags |= MPA_CRC;
939 if (markers_enabled) {
940 mpa->flags |= MPA_MARKERS;
941 ep->mpa_attr.recv_marker_enabled = 1;
943 ep->mpa_attr.recv_marker_enabled = 0;
945 if (mpa_rev_to_use == 2)
946 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
948 mpa->private_data_size = htons(ep->plen);
949 mpa->revision = mpa_rev_to_use;
950 if (mpa_rev_to_use == 1) {
951 ep->tried_with_mpa_v1 = 1;
952 ep->retry_with_mpa_v1 = 0;
955 if (mpa_rev_to_use == 2) {
956 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
957 sizeof(struct mpa_v2_conn_params));
958 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
960 mpa_v2_params.ird = htons((u16)ep->ird);
961 mpa_v2_params.ord = htons((u16)ep->ord);
964 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
965 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
967 htons(MPA_V2_RDMA_WRITE_RTR);
968 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
970 htons(MPA_V2_RDMA_READ_RTR);
972 memcpy(mpa->private_data, &mpa_v2_params,
973 sizeof(struct mpa_v2_conn_params));
976 memcpy(mpa->private_data +
977 sizeof(struct mpa_v2_conn_params),
978 ep->mpa_pkt + sizeof(*mpa), ep->plen);
981 memcpy(mpa->private_data,
982 ep->mpa_pkt + sizeof(*mpa), ep->plen);
985 * Reference the mpa skb. This ensures the data area
986 * will remain in memory until the hw acks the tx.
987 * Function fw4_ack() will deref it.
990 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
993 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
997 __state_set(&ep->com, MPA_REQ_SENT);
998 ep->mpa_attr.initiator = 1;
999 ep->snd_seq += mpalen;
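/*
 * Wire-format recap (sketch): for mpa_rev_to_use == 2 the request is laid
 * out as
 *
 *     struct mpa_message | struct mpa_v2_conn_params | ULP private data
 *
 * and private_data_size covers both the v2 params and the ULP bytes, which
 * is why the copies above offset the private data by
 * sizeof(struct mpa_v2_conn_params). send_mpa_reject()/send_mpa_reply()
 * below use the same layout.
 */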
1003 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1006 struct fw_ofld_tx_data_wr *req;
1007 struct mpa_message *mpa;
1008 struct sk_buff *skb;
1009 struct mpa_v2_conn_params mpa_v2_params;
1011 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
1013 mpalen = sizeof(*mpa) + plen;
1014 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
1015 mpalen += sizeof(struct mpa_v2_conn_params);
1016 wrlen = roundup(mpalen + sizeof *req, 16);
1018 skb = get_skb(NULL, wrlen, GFP_KERNEL);
1020 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
1023 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1025 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
1026 memset(req, 0, wrlen);
1027 req->op_to_immdlen = cpu_to_be32(
1028 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
1030 FW_WR_IMMDLEN_V(mpalen));
1031 req->flowid_len16 = cpu_to_be32(
1032 FW_WR_FLOWID_V(ep->hwtid) |
1033 FW_WR_LEN16_V(wrlen >> 4));
1034 req->plen = cpu_to_be32(mpalen);
1035 req->tunnel_to_proxy = cpu_to_be32(
1036 FW_OFLD_TX_DATA_WR_FLUSH_F |
1037 FW_OFLD_TX_DATA_WR_SHOVE_F);
1039 mpa = (struct mpa_message *)(req + 1);
1040 memset(mpa, 0, sizeof(*mpa));
1041 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1042 mpa->flags = MPA_REJECT;
1043 mpa->revision = ep->mpa_attr.version;
1044 mpa->private_data_size = htons(plen);
1046 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1047 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1048 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1049 sizeof(struct mpa_v2_conn_params));
1050 mpa_v2_params.ird = htons(((u16)ep->ird) |
1051 (peer2peer ? MPA_V2_PEER2PEER_MODEL :
1053 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1055 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
1056 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
1057 FW_RI_INIT_P2PTYPE_READ_REQ ?
1058 MPA_V2_RDMA_READ_RTR : 0) : 0));
1059 memcpy(mpa->private_data, &mpa_v2_params,
1060 sizeof(struct mpa_v2_conn_params));
1063 memcpy(mpa->private_data +
1064 sizeof(struct mpa_v2_conn_params), pdata, plen);
1067 memcpy(mpa->private_data, pdata, plen);
1070 * Reference the mpa skb again. This ensures the data area
1071 * will remain in memory until the hw acks the tx.
1072 * Function fw4_ack() will deref it.
1075 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1076 t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
1077 BUG_ON(ep->mpa_skb);
1079 ep->snd_seq += mpalen;
1080 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1083 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1086 struct fw_ofld_tx_data_wr *req;
1087 struct mpa_message *mpa;
1088 struct sk_buff *skb;
1089 struct mpa_v2_conn_params mpa_v2_params;
1091 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
1093 mpalen = sizeof(*mpa) + plen;
1094 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
1095 mpalen += sizeof(struct mpa_v2_conn_params);
1096 wrlen = roundup(mpalen + sizeof *req, 16);
1098 skb = get_skb(NULL, wrlen, GFP_KERNEL);
1100 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
1103 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1105 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
1106 memset(req, 0, wrlen);
1107 req->op_to_immdlen = cpu_to_be32(
1108 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
1110 FW_WR_IMMDLEN_V(mpalen));
1111 req->flowid_len16 = cpu_to_be32(
1112 FW_WR_FLOWID_V(ep->hwtid) |
1113 FW_WR_LEN16_V(wrlen >> 4));
1114 req->plen = cpu_to_be32(mpalen);
1115 req->tunnel_to_proxy = cpu_to_be32(
1116 FW_OFLD_TX_DATA_WR_FLUSH_F |
1117 FW_OFLD_TX_DATA_WR_SHOVE_F);
1119 mpa = (struct mpa_message *)(req + 1);
1120 memset(mpa, 0, sizeof(*mpa));
1121 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1123 if (ep->mpa_attr.crc_enabled)
1124 mpa->flags |= MPA_CRC;
1125 if (ep->mpa_attr.recv_marker_enabled)
1126 mpa->flags |= MPA_MARKERS;
1127 mpa->revision = ep->mpa_attr.version;
1128 mpa->private_data_size = htons(plen);
1130 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1131 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1132 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1133 sizeof(struct mpa_v2_conn_params));
1134 mpa_v2_params.ird = htons((u16)ep->ird);
1135 mpa_v2_params.ord = htons((u16)ep->ord);
1136 if (peer2peer && (ep->mpa_attr.p2p_type !=
1137 FW_RI_INIT_P2PTYPE_DISABLED)) {
1138 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1140 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
1141 mpa_v2_params.ord |=
1142 htons(MPA_V2_RDMA_WRITE_RTR);
1143 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
1144 mpa_v2_params.ord |=
1145 htons(MPA_V2_RDMA_READ_RTR);
1148 memcpy(mpa->private_data, &mpa_v2_params,
1149 sizeof(struct mpa_v2_conn_params));
1152 memcpy(mpa->private_data +
1153 sizeof(struct mpa_v2_conn_params), pdata, plen);
1156 memcpy(mpa->private_data, pdata, plen);
1159 * Reference the mpa skb. This ensures the data area
1160 * will remain in memory until the hw acks the tx.
1161 * Function fw4_ack() will deref it.
1164 t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
1166 __state_set(&ep->com, MPA_REP_SENT);
1167 ep->snd_seq += mpalen;
1168 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1171 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1174 struct cpl_act_establish *req = cplhdr(skb);
1175 unsigned int tid = GET_TID(req);
1176 unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
1177 struct tid_info *t = dev->rdev.lldi.tids;
1180 ep = lookup_atid(t, atid);
1182 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
1183 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
1185 mutex_lock(&ep->com.mutex);
1186 dst_confirm(ep->dst);
1188 /* setup the hwtid for this connection */
1190 cxgb4_insert_tid(t, ep, tid);
1193 ep->snd_seq = be32_to_cpu(req->snd_isn);
1194 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1196 set_emss(ep, ntohs(req->tcp_opt));
1198 /* dealloc the atid */
1199 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
1200 cxgb4_free_atid(t, atid);
1201 set_bit(ACT_ESTAB, &ep->com.history);
1203 /* start MPA negotiation */
1204 ret = send_flowc(ep);
1207 if (ep->retry_with_mpa_v1)
1208 ret = send_mpa_req(ep, skb, 1);
1210 ret = send_mpa_req(ep, skb, mpa_rev);
1213 mutex_unlock(&ep->com.mutex);
1216 mutex_unlock(&ep->com.mutex);
1217 connect_reply_upcall(ep, -ENOMEM);
1218 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1222 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1224 struct iw_cm_event event;
1226 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1227 memset(&event, 0, sizeof(event));
1228 event.event = IW_CM_EVENT_CLOSE;
1229 event.status = status;
1230 if (ep->com.cm_id) {
1231 PDBG("close complete delivered ep %p cm_id %p tid %u\n",
1232 ep, ep->com.cm_id, ep->hwtid);
1233 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1234 deref_cm_id(&ep->com);
1235 set_bit(CLOSE_UPCALL, &ep->com.history);
1239 static void peer_close_upcall(struct c4iw_ep *ep)
1241 struct iw_cm_event event;
1243 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1244 memset(&event, 0, sizeof(event));
1245 event.event = IW_CM_EVENT_DISCONNECT;
1246 if (ep->com.cm_id) {
1247 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
1248 ep, ep->com.cm_id, ep->hwtid);
1249 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1250 set_bit(DISCONN_UPCALL, &ep->com.history);
1254 static void peer_abort_upcall(struct c4iw_ep *ep)
1256 struct iw_cm_event event;
1258 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1259 memset(&event, 0, sizeof(event));
1260 event.event = IW_CM_EVENT_CLOSE;
1261 event.status = -ECONNRESET;
1262 if (ep->com.cm_id) {
1263 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
1264 ep->com.cm_id, ep->hwtid);
1265 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1266 deref_cm_id(&ep->com);
1267 set_bit(ABORT_UPCALL, &ep->com.history);
1271 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1273 struct iw_cm_event event;
1275 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
1276 memset(&event, 0, sizeof(event));
1277 event.event = IW_CM_EVENT_CONNECT_REPLY;
1278 event.status = status;
1279 memcpy(&event.local_addr, &ep->com.local_addr,
1280 sizeof(ep->com.local_addr));
1281 memcpy(&event.remote_addr, &ep->com.remote_addr,
1282 sizeof(ep->com.remote_addr));
1284 if ((status == 0) || (status == -ECONNREFUSED)) {
1285 if (!ep->tried_with_mpa_v1) {
1286 /* this means MPA_v2 is used */
1287 event.ord = ep->ird;
1288 event.ird = ep->ord;
1289 event.private_data_len = ep->plen -
1290 sizeof(struct mpa_v2_conn_params);
1291 event.private_data = ep->mpa_pkt +
1292 sizeof(struct mpa_message) +
1293 sizeof(struct mpa_v2_conn_params);
1295 /* this means MPA_v1 is used */
1296 event.ord = cur_max_read_depth(ep->com.dev);
1297 event.ird = cur_max_read_depth(ep->com.dev);
1298 event.private_data_len = ep->plen;
1299 event.private_data = ep->mpa_pkt +
1300 sizeof(struct mpa_message);
1304 PDBG("%s ep %p tid %u status %d\n", __func__, ep,
1306 set_bit(CONN_RPL_UPCALL, &ep->com.history);
1307 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1310 deref_cm_id(&ep->com);
1313 static int connect_request_upcall(struct c4iw_ep *ep)
1315 struct iw_cm_event event;
1318 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1319 memset(&event, 0, sizeof(event));
1320 event.event = IW_CM_EVENT_CONNECT_REQUEST;
1321 memcpy(&event.local_addr, &ep->com.local_addr,
1322 sizeof(ep->com.local_addr));
1323 memcpy(&event.remote_addr, &ep->com.remote_addr,
1324 sizeof(ep->com.remote_addr));
1325 event.provider_data = ep;
1326 if (!ep->tried_with_mpa_v1) {
1327 /* this means MPA_v2 is used */
1328 event.ord = ep->ord;
1329 event.ird = ep->ird;
1330 event.private_data_len = ep->plen -
1331 sizeof(struct mpa_v2_conn_params);
1332 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1333 sizeof(struct mpa_v2_conn_params);
1335 /* this means MPA_v1 is used. Send max supported */
1336 event.ord = cur_max_read_depth(ep->com.dev);
1337 event.ird = cur_max_read_depth(ep->com.dev);
1338 event.private_data_len = ep->plen;
1339 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1341 c4iw_get_ep(&ep->com);
1342 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1345 c4iw_put_ep(&ep->com);
1346 set_bit(CONNREQ_UPCALL, &ep->com.history);
1347 c4iw_put_ep(&ep->parent_ep->com);
1351 static void established_upcall(struct c4iw_ep *ep)
1353 struct iw_cm_event event;
1355 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1356 memset(&event, 0, sizeof(event));
1357 event.event = IW_CM_EVENT_ESTABLISHED;
1358 event.ird = ep->ord;
1359 event.ord = ep->ird;
1360 if (ep->com.cm_id) {
1361 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1362 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1363 set_bit(ESTAB_UPCALL, &ep->com.history);
1367 static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1369 struct cpl_rx_data_ack *req;
1370 struct sk_buff *skb;
1371 int wrlen = roundup(sizeof *req, 16);
1373 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1374 skb = get_skb(NULL, wrlen, GFP_KERNEL);
1376 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
1381 * If we couldn't specify the entire rcv window at connection setup
1382 * due to the limit in the number of bits in the RCV_BUFSIZ field,
1383 * then add the overage in to the credits returned.
1385 if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
1386 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
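/*
 * Worked example (illustrative, assuming RCV_BUFSIZ_M == 0x3ff, i.e. a
 * 10-bit field counting 1KB units): a 2MB rcv_win can only advertise
 * 1023KB in opt0 at connection setup, so the remaining 1025KB is handed
 * back here as extra credit on the first RX_DATA_ACK.
 */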
1388 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
1389 memset(req, 0, wrlen);
1390 INIT_TP_WR(req, ep->hwtid);
1391 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
1393 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
1395 RX_DACK_MODE_V(dack_mode));
1396 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
1397 c4iw_ofld_send(&ep->com.dev->rdev, skb);
1401 #define RELAXED_IRD_NEGOTIATION 1
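/*
 * When RELAXED_IRD_NEGOTIATION is set, a responder ORD larger than our IRD
 * does not fail the connection; instead the local IRD is grown to match,
 * bounded by the adapter's max_ordird_qp (see the resp_ird/resp_ord checks
 * in process_mpa_reply() below). Setting this to 0 would restore strict
 * matching. (Summary; behavior inferred from the code below.)
 */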
1404 * process_mpa_reply - process streaming mode MPA reply
1408 * 0 upon success indicating a connect request was delivered to the ULP
1409 * or the mpa request is incomplete but valid so far.
1411 * 1 if a failure requires the caller to close the connection.
1413 * 2 if a failure requires the caller to abort the connection.
1415 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1417 struct mpa_message *mpa;
1418 struct mpa_v2_conn_params *mpa_v2_params;
1420 u16 resp_ird, resp_ord;
1421 u8 rtr_mismatch = 0, insuff_ird = 0;
1422 struct c4iw_qp_attributes attrs;
1423 enum c4iw_qp_attr_mask mask;
1427 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1430 * If we get more than the supported amount of private data
1431 * then we must fail this connection.
1433 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1435 goto err_stop_timer;
1439 * copy the new data into our accumulation buffer.
1441 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1443 ep->mpa_pkt_len += skb->len;
1446 * If we don't even have the mpa message, then bail.
1448 if (ep->mpa_pkt_len < sizeof(*mpa))
1450 mpa = (struct mpa_message *) ep->mpa_pkt;
1452 /* Validate MPA header. */
1453 if (mpa->revision > mpa_rev) {
1454 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1455 " Received = %d\n", __func__, mpa_rev, mpa->revision);
1457 goto err_stop_timer;
1459 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1461 goto err_stop_timer;
1464 plen = ntohs(mpa->private_data_size);
1467 * Fail if there's too much private data.
1469 if (plen > MPA_MAX_PRIVATE_DATA) {
1471 goto err_stop_timer;
1475 * If plen does not account for pkt size
1477 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1479 goto err_stop_timer;
1482 ep->plen = (u8) plen;
1485 * If we don't have all the pdata yet, then bail.
1486 * We'll continue processing when more data arrives.
1488 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1491 if (mpa->flags & MPA_REJECT) {
1492 err = -ECONNREFUSED;
1493 goto err_stop_timer;
1497 * Stop mpa timer. If it expired, then
1498 * we ignore the MPA reply. process_timeout()
1499 * will abort the connection.
1501 if (stop_ep_timer(ep))
1505 * If we get here we have accumulated the entire mpa
1506 * start reply message including private data. And
1507 * the MPA header is valid.
1509 __state_set(&ep->com, FPDU_MODE);
1510 ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) || crc_enabled) ? 1 : 0;
1511 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1512 ep->mpa_attr.version = mpa->revision;
1513 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1515 if (mpa->revision == 2) {
1516 ep->mpa_attr.enhanced_rdma_conn =
1517 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1518 if (ep->mpa_attr.enhanced_rdma_conn) {
1519 mpa_v2_params = (struct mpa_v2_conn_params *)
1520 (ep->mpa_pkt + sizeof(*mpa));
1521 resp_ird = ntohs(mpa_v2_params->ird) &
1522 MPA_V2_IRD_ORD_MASK;
1523 resp_ord = ntohs(mpa_v2_params->ord) &
1524 MPA_V2_IRD_ORD_MASK;
1525 PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
1526 __func__, resp_ird, resp_ord, ep->ird, ep->ord);
1529 * This is a double-check. Ideally, below checks are
1530 * not required since ird/ord stuff has been taken
1531 * care of in c4iw_accept_cr
1533 if (ep->ird < resp_ord) {
1534 if (RELAXED_IRD_NEGOTIATION && resp_ord <=
1535 ep->com.dev->rdev.lldi.max_ordird_qp)
1539 } else if (ep->ird > resp_ord) {
1542 if (ep->ord > resp_ird) {
1543 if (RELAXED_IRD_NEGOTIATION)
1554 if (ntohs(mpa_v2_params->ird) &
1555 MPA_V2_PEER2PEER_MODEL) {
1556 if (ntohs(mpa_v2_params->ord) &
1557 MPA_V2_RDMA_WRITE_RTR)
1558 ep->mpa_attr.p2p_type =
1559 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1560 else if (ntohs(mpa_v2_params->ord) &
1561 MPA_V2_RDMA_READ_RTR)
1562 ep->mpa_attr.p2p_type =
1563 FW_RI_INIT_P2PTYPE_READ_REQ;
1566 } else if (mpa->revision == 1)
1568 ep->mpa_attr.p2p_type = p2p_type;
1570 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1571 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
1572 "%d\n", __func__, ep->mpa_attr.crc_enabled,
1573 ep->mpa_attr.recv_marker_enabled,
1574 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1575 ep->mpa_attr.p2p_type, p2p_type);
1578 * If responder's RTR does not match with that of initiator, assign
1579 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
1580 * generated when moving QP to RTS state.
1581 * A TERM message will be sent after QP has moved to RTS state
1583 if ((ep->mpa_attr.version == 2) && peer2peer &&
1584 (ep->mpa_attr.p2p_type != p2p_type)) {
1585 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1589 attrs.mpa_attr = ep->mpa_attr;
1590 attrs.max_ird = ep->ird;
1591 attrs.max_ord = ep->ord;
1592 attrs.llp_stream_handle = ep;
1593 attrs.next_state = C4IW_QP_STATE_RTS;
1595 mask = C4IW_QP_ATTR_NEXT_STATE |
1596 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
1597 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
1599 /* bind QP and TID with INIT_WR */
1600 err = c4iw_modify_qp(ep->com.qp->rhp,
1601 ep->com.qp, mask, &attrs, 1);
1606 * If responder's RTR requirement did not match with what initiator
1607 * supports, generate TERM message
1610 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
1611 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1612 attrs.ecode = MPA_NOMATCH_RTR;
1613 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1614 attrs.send_term = 1;
1615 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1616 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1623 * Generate TERM if initiator IRD is not sufficient for responder
1624 * provided ORD. Currently, we do the same behaviour even when
1625 * responder provided IRD is also not sufficient as regards to
1629 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
1631 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1632 attrs.ecode = MPA_INSUFF_IRD;
1633 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1634 attrs.send_term = 1;
1635 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1636 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1647 connect_reply_upcall(ep, err);
1652 * process_mpa_request - process streaming mode MPA request
1656 * 0 upon success indicating a connect request was delivered to the ULP
1657 * or the mpa request is incomplete but valid so far.
1659 * 1 if a failure requires the caller to close the connection.
1661 * 2 if a failure requires the caller to abort the connection.
1663 static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1665 struct mpa_message *mpa;
1666 struct mpa_v2_conn_params *mpa_v2_params;
1669 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1672 * If we get more than the supported amount of private data
1673 * then we must fail this connection.
1675 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
1676 goto err_stop_timer;
1678 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1681 * Copy the new data into our accumulation buffer.
1683 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1685 ep->mpa_pkt_len += skb->len;
1688 * If we don't even have the mpa message, then bail.
1689 * We'll continue processing when more data arrives.
1691 if (ep->mpa_pkt_len < sizeof(*mpa))
1694 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1695 mpa = (struct mpa_message *) ep->mpa_pkt;
1698 * Validate MPA Header.
1700 if (mpa->revision > mpa_rev) {
1701 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1702 " Received = %d\n", __func__, mpa_rev, mpa->revision);
1703 goto err_stop_timer;
1706 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
1707 goto err_stop_timer;
1709 plen = ntohs(mpa->private_data_size);
1712 * Fail if there's too much private data.
1714 if (plen > MPA_MAX_PRIVATE_DATA)
1715 goto err_stop_timer;
1718 * If plen does not account for pkt size
1720 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
1721 goto err_stop_timer;
1722 ep->plen = (u8) plen;
1725 * If we don't have all the pdata yet, then bail.
1727 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1731 * If we get here we have accumulated the entire mpa
1732 * start reply message including private data.
1734 ep->mpa_attr.initiator = 0;
1735 ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) || crc_enabled) ? 1 : 0;
1736 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1737 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1738 ep->mpa_attr.version = mpa->revision;
1739 if (mpa->revision == 1)
1740 ep->tried_with_mpa_v1 = 1;
1741 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1743 if (mpa->revision == 2) {
1744 ep->mpa_attr.enhanced_rdma_conn =
1745 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1746 if (ep->mpa_attr.enhanced_rdma_conn) {
1747 mpa_v2_params = (struct mpa_v2_conn_params *)
1748 (ep->mpa_pkt + sizeof(*mpa));
1749 ep->ird = ntohs(mpa_v2_params->ird) &
1750 MPA_V2_IRD_ORD_MASK;
1751 ep->ird = min_t(u32, ep->ird,
1752 cur_max_read_depth(ep->com.dev));
1753 ep->ord = ntohs(mpa_v2_params->ord) &
1754 MPA_V2_IRD_ORD_MASK;
1755 ep->ord = min_t(u32, ep->ord,
1756 cur_max_read_depth(ep->com.dev));
1757 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
1759 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
1761 if (ntohs(mpa_v2_params->ord) &
1762 MPA_V2_RDMA_WRITE_RTR)
1763 ep->mpa_attr.p2p_type =
1764 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1765 else if (ntohs(mpa_v2_params->ord) &
1766 MPA_V2_RDMA_READ_RTR)
1767 ep->mpa_attr.p2p_type =
1768 FW_RI_INIT_P2PTYPE_READ_REQ;
1771 } else if (mpa->revision == 1)
1773 ep->mpa_attr.p2p_type = p2p_type;
1775 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1776 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1777 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1778 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1779 ep->mpa_attr.p2p_type);
1781 __state_set(&ep->com, MPA_REQ_RCVD);
1784 mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
1785 if (ep->parent_ep->com.state != DEAD) {
1786 if (connect_request_upcall(ep))
1787 goto err_unlock_parent;
1789 goto err_unlock_parent;
1791 mutex_unlock(&ep->parent_ep->com.mutex);
1795 mutex_unlock(&ep->parent_ep->com.mutex);
1798 (void)stop_ep_timer(ep);
1803 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1806 struct cpl_rx_data *hdr = cplhdr(skb);
1807 unsigned int dlen = ntohs(hdr->len);
1808 unsigned int tid = GET_TID(hdr);
1809 __u8 status = hdr->status;
1812 ep = get_ep_from_tid(dev, tid);
1815 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1816 skb_pull(skb, sizeof(*hdr));
1817 skb_trim(skb, dlen);
1818 mutex_lock(&ep->com.mutex);
1820 /* update RX credits */
1821 update_rx_credits(ep, dlen);
1823 switch (ep->com.state) {
1825 ep->rcv_seq += dlen;
1826 disconnect = process_mpa_reply(ep, skb);
1829 ep->rcv_seq += dlen;
1830 disconnect = process_mpa_request(ep, skb);
1833 struct c4iw_qp_attributes attrs;
1834 BUG_ON(!ep->com.qp);
1836 pr_err("%s Unexpected streaming data." \
1837 " qpid %u ep %p state %d tid %u status %d\n",
1838 __func__, ep->com.qp->wq.sq.qid, ep,
1839 ep->com.state, ep->hwtid, status);
1840 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1841 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1842 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1849 mutex_unlock(&ep->com.mutex);
1851 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
1852 c4iw_put_ep(&ep->com);
1856 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1859 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1861 unsigned int tid = GET_TID(rpl);
1863 ep = get_ep_from_tid(dev, tid);
1865 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
1868 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1869 mutex_lock(&ep->com.mutex);
1870 switch (ep->com.state) {
1872 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1873 __state_set(&ep->com, DEAD);
1877 printk(KERN_ERR "%s ep %p state %d\n",
1878 __func__, ep, ep->com.state);
1881 mutex_unlock(&ep->com.mutex);
1884 release_ep_resources(ep);
1885 c4iw_put_ep(&ep->com);
1889 static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1891 struct sk_buff *skb;
1892 struct fw_ofld_connection_wr *req;
1893 unsigned int mtu_idx;
1895 struct sockaddr_in *sin;
1898 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1899 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
1900 memset(req, 0, sizeof(*req));
1901 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
1902 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
1903 req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1904 ep->com.dev->rdev.lldi.ports[0],
1906 sin = (struct sockaddr_in *)&ep->com.local_addr;
1907 req->le.lport = sin->sin_port;
1908 req->le.u.ipv4.lip = sin->sin_addr.s_addr;
1909 sin = (struct sockaddr_in *)&ep->com.remote_addr;
1910 req->le.pport = sin->sin_port;
1911 req->le.u.ipv4.pip = sin->sin_addr.s_addr;
1912 req->tcb.t_state_to_astid =
1913 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
1914 FW_OFLD_CONNECTION_WR_ASTID_V(atid));
1915 req->tcb.cplrxdataack_cplpassacceptrpl =
1916 htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
1917 req->tcb.tx_max = (__force __be32) jiffies;
1918 req->tcb.rcv_adv = htons(1);
1919 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
1920 enable_tcp_timestamps,
1921 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
1922 wscale = compute_wscale(rcv_win);
1925 * Specify the largest window that will fit in opt0. The
1926 * remainder will be specified in the rx_data_ack.
1928 win = ep->rcv_win >> 10;
1929 if (win > RCV_BUFSIZ_M)
1932 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
1933 (nocong ? NO_CONG_F : 0) |
1936 WND_SCALE_V(wscale) |
1937 MSS_IDX_V(mtu_idx) |
1938 L2T_IDX_V(ep->l2t->idx) |
1939 TX_CHAN_V(ep->tx_chan) |
1940 SMAC_SEL_V(ep->smac_idx) |
1941 DSCP_V(ep->tos >> 2) |
1942 ULP_MODE_V(ULP_MODE_TCPDDP) |
1944 req->tcb.opt2 = (__force __be32) (PACE_V(1) |
1945 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
1947 CCTRL_ECN_V(enable_ecn) |
1948 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
1949 if (enable_tcp_timestamps)
1950 req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
1951 if (enable_tcp_sack)
1952 req->tcb.opt2 |= (__force __be32)SACK_EN_F;
1953 if (wscale && enable_tcp_window_scaling)
1954 req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
1955 req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
1956 req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
1957 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1958 set_bit(ACT_OFLD_CONN, &ep->com.history);
1959 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
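/*
 * Why this helper exists (sketch, inferred from act_open_rpl() below): when
 * an active open fails with CPL_ERR_TCAM_FULL and the low-level driver
 * advertises enable_fw_ofld_conn, the open is retried as a
 * FW_OFLD_CONNECTION_WR so the firmware can establish the connection
 * without a conventional TCAM entry.
 */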
1963 * Some of the error codes above implicitly indicate that there is no TID
1964 * allocated with the result of an ACT_OPEN. We use this predicate to make
1967 static inline int act_open_has_tid(int status)
1969 return (status != CPL_ERR_TCAM_PARITY &&
1970 status != CPL_ERR_TCAM_MISS &&
1971 status != CPL_ERR_TCAM_FULL &&
1972 status != CPL_ERR_CONN_EXIST_SYNRECV &&
1973 status != CPL_ERR_CONN_EXIST);
1976 static char *neg_adv_str(unsigned int status)
1979 case CPL_ERR_RTX_NEG_ADVICE:
1980 return "Retransmit timeout";
1981 case CPL_ERR_PERSIST_NEG_ADVICE:
1982 return "Persist timeout";
1983 case CPL_ERR_KEEPALV_NEG_ADVICE:
1984 return "Keepalive timeout";
1990 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
1992 ep->snd_win = snd_win;
1993 ep->rcv_win = rcv_win;
1994 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
1997 #define ACT_OPEN_RETRY_COUNT 2
1999 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
2000 struct dst_entry *dst, struct c4iw_dev *cdev,
2001 bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
2003 struct neighbour *n;
2005 struct net_device *pdev;
2007 n = dst_neigh_lookup(dst, peer_ip);
2013 if (n->dev->flags & IFF_LOOPBACK) {
2015 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
2016 else if (IS_ENABLED(CONFIG_IPV6))
2017 for_each_netdev(&init_net, pdev) {
2018 if (ipv6_chk_addr(&init_net,
2019 (struct in6_addr *)peer_ip,
2030 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2031 n, pdev, rt_tos2priority(tos));
2034 ep->mtu = pdev->mtu;
2035 ep->tx_chan = cxgb4_port_chan(pdev);
2036 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
2037 cxgb4_port_viid(pdev));
2038 step = cdev->rdev.lldi.ntxq /
2039 cdev->rdev.lldi.nchan;
2040 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2041 step = cdev->rdev.lldi.nrxq /
2042 cdev->rdev.lldi.nchan;
2043 ep->ctrlq_idx = cxgb4_port_idx(pdev);
2044 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2045 cxgb4_port_idx(pdev) * step];
2046 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2049 pdev = get_real_dev(n->dev);
2050 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2054 ep->mtu = dst_mtu(dst);
2055 ep->tx_chan = cxgb4_port_chan(pdev);
2056 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
2057 cxgb4_port_viid(pdev));
2058 step = cdev->rdev.lldi.ntxq /
2059 cdev->rdev.lldi.nchan;
2060 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2061 ep->ctrlq_idx = cxgb4_port_idx(pdev);
2062 step = cdev->rdev.lldi.nrxq /
2063 cdev->rdev.lldi.nchan;
2064 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2065 cxgb4_port_idx(pdev) * step];
2066 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2069 ep->retry_with_mpa_v1 = 0;
2070 ep->tried_with_mpa_v1 = 0;
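/*
 * Both branches above derive the same per-port resources (tx_chan,
 * smac_idx, txq_idx, ctrlq_idx, rss_qid, TCP windows); they differ only in
 * how the egress netdev and MTU are found: ip_dev_find()/ipv6_chk_addr()
 * and pdev->mtu for loopback peers, get_real_dev() on the neighbour's
 * device and dst_mtu(dst) otherwise.
 */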
2082 static int c4iw_reconnect(struct c4iw_ep *ep)
2086 struct sockaddr_in *laddr = (struct sockaddr_in *)
2087 &ep->com.cm_id->m_local_addr;
2088 struct sockaddr_in *raddr = (struct sockaddr_in *)
2089 &ep->com.cm_id->m_remote_addr;
2090 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
2091 &ep->com.cm_id->m_local_addr;
2092 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
2093 &ep->com.cm_id->m_remote_addr;
2097 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
2098 init_timer(&ep->timer);
2099 c4iw_init_wr_wait(&ep->com.wr_wait);
2101 /* When the MPA revision differs between nodes, the node with MPA_rev=2
2102 * retries the connection with MPA_rev 1 for the same EP through
2103 * c4iw_reconnect(), where the same EP is assigned a new tid for
2104 * further connection establishment. Because the same EP pointer is
2105 * reused, some skbs were consumed by the previous c4iw_connect(),
2106 * which would leave the EP with too few skbs for the reconnect and
2107 * eventually trigger a BUG_ON() on an empty ep_skb_list in
2108 * peer_abort(). Replenish the skbs that were already consumed.
2110 size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
2111 if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
2117 * Allocate an active TID to initiate a TCP connection.
2119 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
2120 if (ep->atid == -1) {
2121 pr_err("%s - cannot alloc atid.\n", __func__);
2125 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
2128 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
2129 ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
2130 laddr->sin_addr.s_addr,
2131 raddr->sin_addr.s_addr,
2133 raddr->sin_port, ep->com.cm_id->tos);
2135 ra = (__u8 *)&raddr->sin_addr;
2137 ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
2139 laddr6->sin6_addr.s6_addr,
2140 raddr6->sin6_addr.s6_addr,
2142 raddr6->sin6_port, 0,
2143 raddr6->sin6_scope_id);
2145 ra = (__u8 *)&raddr6->sin6_addr;
2148 pr_err("%s - cannot find route.\n", __func__);
2149 err = -EHOSTUNREACH;
2152 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
2153 ep->com.dev->rdev.lldi.adapter_type,
2154 ep->com.cm_id->tos);
2156 pr_err("%s - cannot alloc l2e.\n", __func__);
2160 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2161 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2164 state_set(&ep->com, CONNECTING);
2165 ep->tos = ep->com.cm_id->tos;
2167 /* send connect request to rnic */
2168 err = send_connect(ep);
2172 cxgb4_l2t_release(ep->l2t);
2174 dst_release(ep->dst);
2176 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
2177 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2180 * Remember to send a notification to the upper layer.
2181 * We are in here, so the upper layer is not aware that this is a
2182 * reconnect attempt; it is still waiting for the response to its
2183 * first connect request.
2185 connect_reply_upcall(ep, -ECONNRESET);
2187 c4iw_put_ep(&ep->com);
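/*
 * Process CPL_ACT_OPEN_RPL, the hardware's reply to our active open
 * request: retry where the status allows it, otherwise report the
 * failure upstream and tear the endpoint down.
 */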
2192 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2195 struct cpl_act_open_rpl *rpl = cplhdr(skb);
2196 unsigned int atid = TID_TID_G(AOPEN_ATID_G(
2197 ntohl(rpl->atid_status)));
2198 struct tid_info *t = dev->rdev.lldi.tids;
2199 int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
2200 struct sockaddr_in *la;
2201 struct sockaddr_in *ra;
2202 struct sockaddr_in6 *la6;
2203 struct sockaddr_in6 *ra6;
2206 ep = lookup_atid(t, atid);
2207 la = (struct sockaddr_in *)&ep->com.local_addr;
2208 ra = (struct sockaddr_in *)&ep->com.remote_addr;
2209 la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
2210 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
2212 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
2213 status, status2errno(status));
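/*
 * "Negative advice" statuses flag transient peer trouble (retransmit,
 * persist or keepalive advice) rather than a hard failure. A minimal
 * sketch of the cxgb_is_neg_adv() check, assuming the usual t4_msg.h
 * status codes:
 *
 *	return status == CPL_ERR_RTX_NEG_ADVICE ||
 *	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
 *	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
 */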
2215 if (cxgb_is_neg_adv(status)) {
2216 PDBG("%s Connection problems for atid %u status %u (%s)\n",
2217 __func__, atid, status, neg_adv_str(status));
2218 ep->stats.connect_neg_adv++;
2219 mutex_lock(&dev->rdev.stats.lock);
2220 dev->rdev.stats.neg_adv++;
2221 mutex_unlock(&dev->rdev.stats.lock);
2225 set_bit(ACT_OPEN_RPL, &ep->com.history);
2228 * Log interesting failures.
2231 case CPL_ERR_CONN_RESET:
2232 case CPL_ERR_CONN_TIMEDOUT:
2234 case CPL_ERR_TCAM_FULL:
2235 mutex_lock(&dev->rdev.stats.lock);
2236 dev->rdev.stats.tcam_full++;
2237 mutex_unlock(&dev->rdev.stats.lock);
2238 if (ep->com.local_addr.ss_family == AF_INET &&
2239 dev->rdev.lldi.enable_fw_ofld_conn) {
2240 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
2241 ntohl(rpl->atid_status))));
2247 case CPL_ERR_CONN_EXIST:
2248 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2249 set_bit(ACT_RETRY_INUSE, &ep->com.history);
2250 if (ep->com.remote_addr.ss_family == AF_INET6) {
2251 struct sockaddr_in6 *sin6 =
2252 (struct sockaddr_in6 *)
2253 &ep->com.local_addr;
2255 ep->com.dev->rdev.lldi.ports[0],
2257 &sin6->sin6_addr.s6_addr, 1);
2259 remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
2261 cxgb4_free_atid(t, atid);
2262 dst_release(ep->dst);
2263 cxgb4_l2t_release(ep->l2t);
2269 if (ep->com.local_addr.ss_family == AF_INET) {
2270 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
2271 atid, status, status2errno(status),
2272 &la->sin_addr.s_addr, ntohs(la->sin_port),
2273 &ra->sin_addr.s_addr, ntohs(ra->sin_port));
2275 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
2276 atid, status, status2errno(status),
2277 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
2278 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
2284 connect_reply_upcall(ep, status2errno(status));
2285 state_set(&ep->com, DEAD);
2287 if (ep->com.remote_addr.ss_family == AF_INET6) {
2288 struct sockaddr_in6 *sin6 =
2289 (struct sockaddr_in6 *)&ep->com.local_addr;
2290 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
2291 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2293 if (status && act_open_has_tid(status))
2294 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
2296 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
2297 cxgb4_free_atid(t, atid);
2298 dst_release(ep->dst);
2299 cxgb4_l2t_release(ep->l2t);
2300 c4iw_put_ep(&ep->com);
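/*
 * Process CPL_PASS_OPEN_RPL: completion of a listening server open;
 * wake the thread waiting in create_server4()/create_server6().
 */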
2305 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2307 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
2308 unsigned int stid = GET_TID(rpl);
2309 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2312 PDBG("%s stid %d lookup failure!\n", __func__, stid);
2315 PDBG("%s ep %p status %d error %d\n", __func__, ep,
2316 rpl->status, status2errno(rpl->status));
2317 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
2318 c4iw_put_ep(&ep->com);
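/*
 * Process CPL_CLOSE_LISTSRV_RPL: completion of a listening server
 * close; wake the thread waiting in c4iw_destroy_listen().
 */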
2323 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2325 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
2326 unsigned int stid = GET_TID(rpl);
2327 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2329 PDBG("%s ep %p\n", __func__, ep);
2330 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
2331 c4iw_put_ep(&ep->com);
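/*
 * Build and send the CPL_PASS_ACCEPT_RPL that accepts an incoming
 * connection request, encoding the negotiated MTU index, window
 * scale, timestamp/SACK and ECN settings into opt0/opt2.
 */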
2335 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2336 struct cpl_pass_accept_req *req)
2338 struct cpl_pass_accept_rpl *rpl;
2339 unsigned int mtu_idx;
2343 struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
2345 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
2347 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2348 BUG_ON(skb_cloned(skb));
2352 if (!is_t4(adapter_type)) {
2353 skb_trim(skb, roundup(sizeof(*rpl5), 16));
2355 INIT_TP_WR(rpl5, ep->hwtid);
2357 skb_trim(skb, sizeof(*rpl));
2358 INIT_TP_WR(rpl, ep->hwtid);
2360 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2363 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2364 enable_tcp_timestamps && req->tcpopt.tstamp,
2365 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
2366 wscale = compute_wscale(rcv_win);
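/*
 * A minimal sketch of what compute_wscale() is expected to do: pick
 * the smallest shift (capped at 14, the TCP maximum) that lets the
 * receive window fit in the 16-bit TCP window field:
 *
 *	u32 wscale = 0;
 *
 *	while (wscale < 14 && (65535 << wscale) < win)
 *		wscale++;
 *	return wscale;
 */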
2369 * Specify the largest window that will fit in opt0. The
2370 * remainder will be specified in the rx_data_ack.
2372 win = ep->rcv_win >> 10;
2373 if (win > RCV_BUFSIZ_M)
2375 opt0 = (nocong ? NO_CONG_F : 0) |
2378 WND_SCALE_V(wscale) |
2379 MSS_IDX_V(mtu_idx) |
2380 L2T_IDX_V(ep->l2t->idx) |
2381 TX_CHAN_V(ep->tx_chan) |
2382 SMAC_SEL_V(ep->smac_idx) |
2383 DSCP_V(ep->tos >> 2) |
2384 ULP_MODE_V(ULP_MODE_TCPDDP) |
2386 opt2 = RX_CHANNEL_V(0) |
2387 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
2389 if (enable_tcp_timestamps && req->tcpopt.tstamp)
2390 opt2 |= TSTAMPS_EN_F;
2391 if (enable_tcp_sack && req->tcpopt.sack)
2393 if (wscale && enable_tcp_window_scaling)
2394 opt2 |= WND_SCALE_EN_F;
2396 const struct tcphdr *tcph;
2397 u32 hlen = ntohl(req->hdr_len);
2399 if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
2400 tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
2403 tcph = (const void *)(req + 1) +
2404 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
2405 if (tcph->ece && tcph->cwr)
2406 opt2 |= CCTRL_ECN_V(1);
2408 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
2409 u32 isn = (prandom_u32() & ~7UL) - 1;
2410 opt2 |= T5_OPT_2_VALID_F;
2411 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
2414 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2417 rpl5->iss = cpu_to_be32(isn);
2418 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
2421 rpl->opt0 = cpu_to_be64(opt0);
2422 rpl->opt2 = cpu_to_be32(opt2);
2423 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
2424 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
2426 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
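/*
 * Reject an incoming connection request by simply releasing the
 * hardware TID back to the adapter.
 */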
2429 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
2431 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
2432 BUG_ON(skb_cloned(skb));
2433 skb_trim(skb, sizeof(struct cpl_tid_release));
2434 release_tid(&dev->rdev, hwtid, skb);
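/*
 * Process CPL_PASS_ACCEPT_REQ: a SYN matched one of our listening
 * endpoints. Resolve a route back to the peer, allocate and import a
 * child endpoint, and accept (or reject) the connection request.
 */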
2438 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2440 struct c4iw_ep *child_ep = NULL, *parent_ep;
2441 struct cpl_pass_accept_req *req = cplhdr(skb);
2442 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
2443 struct tid_info *t = dev->rdev.lldi.tids;
2444 unsigned int hwtid = GET_TID(req);
2445 struct dst_entry *dst;
2446 __u8 local_ip[16], peer_ip[16];
2447 __be16 local_port, peer_port;
2448 struct sockaddr_in6 *sin6;
2450 u16 peer_mss = ntohs(req->tcpopt.mss);
2452 unsigned short hdrs;
2453 u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
2455 parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
2457 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
2461 if (state_read(&parent_ep->com) != LISTEN) {
2462 PDBG("%s - listening ep not in LISTEN\n", __func__);
2466 cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
2467 &iptype, local_ip, peer_ip, &local_port, &peer_port);
2469 /* Find output route */
2471 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n",
2472 __func__, parent_ep, hwtid,
2473 local_ip, peer_ip, ntohs(local_port),
2474 ntohs(peer_port), peer_mss);
2475 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
2476 *(__be32 *)local_ip, *(__be32 *)peer_ip,
2477 local_port, peer_port, tos);
2479 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n",
2480 __func__, parent_ep, hwtid,
2481 local_ip, peer_ip, ntohs(local_port),
2482 ntohs(peer_port), peer_mss);
2483 dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
2484 local_ip, peer_ip, local_port, peer_port,
2485 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
2486 ((struct sockaddr_in6 *)
2487 &parent_ep->com.local_addr)->sin6_scope_id);
2490 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
2495 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
2497 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
2503 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
2504 parent_ep->com.dev->rdev.lldi.adapter_type, tos);
2506 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
2513 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
2514 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2515 if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2516 child_ep->mtu = peer_mss + hdrs;
2518 skb_queue_head_init(&child_ep->com.ep_skb_list);
2519 if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
2522 state_set(&child_ep->com, CONNECTING);
2523 child_ep->com.dev = dev;
2524 child_ep->com.cm_id = NULL;
2527 struct sockaddr_in *sin = (struct sockaddr_in *)
2528 &child_ep->com.local_addr;
2530 sin->sin_family = PF_INET;
2531 sin->sin_port = local_port;
2532 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2534 sin = (struct sockaddr_in *)&child_ep->com.local_addr;
2535 sin->sin_family = PF_INET;
2536 sin->sin_port = ((struct sockaddr_in *)
2537 &parent_ep->com.local_addr)->sin_port;
2538 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2540 sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
2541 sin->sin_family = PF_INET;
2542 sin->sin_port = peer_port;
2543 sin->sin_addr.s_addr = *(__be32 *)peer_ip;
2545 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2546 sin6->sin6_family = PF_INET6;
2547 sin6->sin6_port = local_port;
2548 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2550 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2551 sin6->sin6_family = PF_INET6;
2552 sin6->sin6_port = ((struct sockaddr_in6 *)
2553 &parent_ep->com.local_addr)->sin6_port;
2554 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2556 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
2557 sin6->sin6_family = PF_INET6;
2558 sin6->sin6_port = peer_port;
2559 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2562 c4iw_get_ep(&parent_ep->com);
2563 child_ep->parent_ep = parent_ep;
2564 child_ep->tos = tos;
2565 child_ep->dst = dst;
2566 child_ep->hwtid = hwtid;
2568 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
2569 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
2571 init_timer(&child_ep->timer);
2572 cxgb4_insert_tid(t, child_ep, hwtid);
2573 insert_ep_tid(child_ep);
2574 if (accept_cr(child_ep, skb, req)) {
2575 c4iw_put_ep(&parent_ep->com);
2576 release_ep_resources(child_ep);
2578 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
2581 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2582 cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
2583 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2587 c4iw_put_ep(&child_ep->com);
2589 reject_cr(dev, hwtid, skb);
2591 c4iw_put_ep(&parent_ep->com);
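/*
 * Process CPL_PASS_ESTABLISH: the passive-side TCP handshake is
 * complete, so record the sequence numbers and send the flowc.
 */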
2596 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2599 struct cpl_pass_establish *req = cplhdr(skb);
2600 unsigned int tid = GET_TID(req);
2603 ep = get_ep_from_tid(dev, tid);
2604 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2605 ep->snd_seq = be32_to_cpu(req->snd_isn);
2606 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2608 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
2609 ntohs(req->tcp_opt));
2611 set_emss(ep, ntohs(req->tcp_opt));
2613 dst_confirm(ep->dst);
2614 mutex_lock(&ep->com.mutex);
2615 ep->com.state = MPA_REQ_WAIT;
2617 set_bit(PASS_ESTAB, &ep->com.history);
2618 ret = send_flowc(ep);
2619 mutex_unlock(&ep->com.mutex);
2621 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2622 c4iw_put_ep(&ep->com);
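/*
 * Process CPL_PEER_CLOSE: the peer sent a FIN. Advance the endpoint
 * state machine (CLOSING/MORIBUND/DEAD) according to where the
 * connection is in its lifetime.
 */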
2627 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2629 struct cpl_peer_close *hdr = cplhdr(skb);
2631 struct c4iw_qp_attributes attrs;
2634 unsigned int tid = GET_TID(hdr);
2637 ep = get_ep_from_tid(dev, tid);
2641 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2642 dst_confirm(ep->dst);
2644 set_bit(PEER_CLOSE, &ep->com.history);
2645 mutex_lock(&ep->com.mutex);
2646 switch (ep->com.state) {
2648 __state_set(&ep->com, CLOSING);
2651 __state_set(&ep->com, CLOSING);
2652 connect_reply_upcall(ep, -ECONNRESET);
2657 * We're gonna mark this puppy DEAD, but keep
2658 * the reference on it until the ULP accepts or
2659 * rejects the CR. Also wake up anyone waiting
2660 * in rdma connection migration (see c4iw_accept_cr()).
2662 __state_set(&ep->com, CLOSING);
2663 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
2664 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2667 __state_set(&ep->com, CLOSING);
2668 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
2669 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2673 __state_set(&ep->com, CLOSING);
2674 attrs.next_state = C4IW_QP_STATE_CLOSING;
2675 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2676 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2677 if (ret != -ECONNRESET) {
2678 peer_close_upcall(ep);
2686 __state_set(&ep->com, MORIBUND);
2690 (void)stop_ep_timer(ep);
2691 if (ep->com.cm_id && ep->com.qp) {
2692 attrs.next_state = C4IW_QP_STATE_IDLE;
2693 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2694 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2696 close_complete_upcall(ep, 0);
2697 __state_set(&ep->com, DEAD);
2707 mutex_unlock(&ep->com.mutex);
2709 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2711 release_ep_resources(ep);
2712 c4iw_put_ep(&ep->com);
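/*
 * Process CPL_ABORT_REQ_RSS: the connection was aborted. Reply with
 * CPL_ABORT_RPL and release the endpoint's resources, unless we are
 * going to retry the connection with MPA v1.
 */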
2716 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2718 struct cpl_abort_req_rss *req = cplhdr(skb);
2720 struct cpl_abort_rpl *rpl;
2721 struct sk_buff *rpl_skb;
2722 struct c4iw_qp_attributes attrs;
2725 unsigned int tid = GET_TID(req);
2727 ep = get_ep_from_tid(dev, tid);
2731 if (cxgb_is_neg_adv(req->status)) {
2732 PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
2733 __func__, ep->hwtid, req->status,
2734 neg_adv_str(req->status));
2735 ep->stats.abort_neg_adv++;
2736 mutex_lock(&dev->rdev.stats.lock);
2737 dev->rdev.stats.neg_adv++;
2738 mutex_unlock(&dev->rdev.stats.lock);
2741 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2743 set_bit(PEER_ABORT, &ep->com.history);
2746 * Wake up any threads in rdma_init() or rdma_fini().
2747 * However, this is not needed if the com state is still MPA_REQ_SENT.
2750 if (ep->com.state != MPA_REQ_SENT)
2751 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2753 mutex_lock(&ep->com.mutex);
2754 switch (ep->com.state) {
2756 c4iw_put_ep(&ep->parent_ep->com);
2759 (void)stop_ep_timer(ep);
2762 (void)stop_ep_timer(ep);
2763 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
2764 connect_reply_upcall(ep, -ECONNRESET);
2767 * we deliberately don't send a notification upwards, because we
2768 * want to retry with MPA v1 without the upper layers even knowing it.
2771 * Do the housekeeping needed to re-initiate the connection.
2774 PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
2776 ep->retry_with_mpa_v1 = 1;
2788 if (ep->com.cm_id && ep->com.qp) {
2789 attrs.next_state = C4IW_QP_STATE_ERROR;
2790 ret = c4iw_modify_qp(ep->com.qp->rhp,
2791 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2795 "%s - qp <- error failed!\n",
2798 peer_abort_upcall(ep);
2803 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
2804 mutex_unlock(&ep->com.mutex);
2810 dst_confirm(ep->dst);
2811 if (ep->com.state != ABORTING) {
2812 __state_set(&ep->com, DEAD);
2813 /* we don't release if we want to retry with mpa_v1 */
2814 if (!ep->retry_with_mpa_v1)
2817 mutex_unlock(&ep->com.mutex);
2819 rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
2820 if (WARN_ON(!rpl_skb)) {
2824 set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, ep->txq_idx);
2825 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
2826 INIT_TP_WR(rpl, ep->hwtid);
2827 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
2828 rpl->cmd = CPL_ABORT_NO_RST;
2829 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2832 release_ep_resources(ep);
2833 else if (ep->retry_with_mpa_v1) {
2834 if (ep->com.remote_addr.ss_family == AF_INET6) {
2835 struct sockaddr_in6 *sin6 =
2836 (struct sockaddr_in6 *)
2837 &ep->com.local_addr;
2839 ep->com.dev->rdev.lldi.ports[0],
2840 (const u32 *)&sin6->sin6_addr.s6_addr,
2843 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
2844 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
2845 dst_release(ep->dst);
2846 cxgb4_l2t_release(ep->l2t);
2851 c4iw_put_ep(&ep->com);
2852 /* Drop the reference taken in peer_abort_intr() */
2853 c4iw_put_ep(&ep->com);
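/*
 * Process CPL_CLOSE_CON_RPL: the hardware acknowledged our close;
 * advance CLOSING -> MORIBUND -> DEAD and release the endpoint.
 */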
2857 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2860 struct c4iw_qp_attributes attrs;
2861 struct cpl_close_con_rpl *rpl = cplhdr(skb);
2863 unsigned int tid = GET_TID(rpl);
2865 ep = get_ep_from_tid(dev, tid);
2869 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2872 /* The cm_id may be null if we failed to connect */
2873 mutex_lock(&ep->com.mutex);
2874 set_bit(CLOSE_CON_RPL, &ep->com.history);
2875 switch (ep->com.state) {
2877 __state_set(&ep->com, MORIBUND);
2880 (void)stop_ep_timer(ep);
2881 if ((ep->com.cm_id) && (ep->com.qp)) {
2882 attrs.next_state = C4IW_QP_STATE_IDLE;
2883 c4iw_modify_qp(ep->com.qp->rhp,
2885 C4IW_QP_ATTR_NEXT_STATE,
2888 close_complete_upcall(ep, 0);
2889 __state_set(&ep->com, DEAD);
2899 mutex_unlock(&ep->com.mutex);
2901 release_ep_resources(ep);
2902 c4iw_put_ep(&ep->com);
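/* Process CPL_RDMA_TERMINATE: move the associated QP into TERMINATE. */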
2906 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
2908 struct cpl_rdma_terminate *rpl = cplhdr(skb);
2909 unsigned int tid = GET_TID(rpl);
2911 struct c4iw_qp_attributes attrs;
2913 ep = get_ep_from_tid(dev, tid);
2916 if (ep && ep->com.qp) {
2917 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2918 ep->com.qp->wq.sq.qid);
2919 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2920 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2921 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2923 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
2924 c4iw_put_ep(&ep->com);
2930 * Upcall from the adapter indicating data has been transmitted.
2931 * For us it's just the single MPA request or reply. We can now free
2932 * the skb holding the MPA message.
2934 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2937 struct cpl_fw4_ack *hdr = cplhdr(skb);
2938 u8 credits = hdr->credits;
2939 unsigned int tid = GET_TID(hdr);
2942 ep = get_ep_from_tid(dev, tid);
2945 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
2947 PDBG("%s 0 credit ack ep %p tid %u state %u\n",
2948 __func__, ep, ep->hwtid, state_read(&ep->com));
2952 dst_confirm(ep->dst);
2954 PDBG("%s last streaming msg ack ep %p tid %u state %u "
2955 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
2956 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
2957 mutex_lock(&ep->com.mutex);
2958 kfree_skb(ep->mpa_skb);
2960 if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
2962 mutex_unlock(&ep->com.mutex);
2965 c4iw_put_ep(&ep->com);
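/*
 * iw_cm reject verb: send an MPA reject carrying any private data,
 * then disconnect and drop the endpoint reference.
 */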
2969 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2972 struct c4iw_ep *ep = to_ep(cm_id);
2974 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2976 mutex_lock(&ep->com.mutex);
2977 if (ep->com.state != MPA_REQ_RCVD) {
2978 mutex_unlock(&ep->com.mutex);
2979 c4iw_put_ep(&ep->com);
2982 set_bit(ULP_REJECT, &ep->com.history);
2986 abort = send_mpa_reject(ep, pdata, pdata_len);
2987 mutex_unlock(&ep->com.mutex);
2990 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2991 c4iw_put_ep(&ep->com);
2995 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2998 struct c4iw_qp_attributes attrs;
2999 enum c4iw_qp_attr_mask mask;
3000 struct c4iw_ep *ep = to_ep(cm_id);
3001 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
3002 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
3005 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
3007 mutex_lock(&ep->com.mutex);
3008 if (ep->com.state != MPA_REQ_RCVD) {
3015 set_bit(ULP_ACCEPT, &ep->com.history);
3016 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
3017 (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
3022 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
3023 if (conn_param->ord > ep->ird) {
3024 if (RELAXED_IRD_NEGOTIATION) {
3025 conn_param->ord = ep->ird;
3027 ep->ird = conn_param->ird;
3028 ep->ord = conn_param->ord;
3029 send_mpa_reject(ep, conn_param->private_data,
3030 conn_param->private_data_len);
3035 if (conn_param->ird < ep->ord) {
3036 if (RELAXED_IRD_NEGOTIATION &&
3037 ep->ord <= h->rdev.lldi.max_ordird_qp) {
3038 conn_param->ird = ep->ord;
3045 ep->ird = conn_param->ird;
3046 ep->ord = conn_param->ord;
3048 if (ep->mpa_attr.version == 1) {
3049 if (peer2peer && ep->ird == 0)
3053 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
3054 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
3058 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
3060 ep->com.cm_id = cm_id;
3061 ref_cm_id(&ep->com);
3065 /* bind QP to EP and move to RTS */
3066 attrs.mpa_attr = ep->mpa_attr;
3067 attrs.max_ird = ep->ird;
3068 attrs.max_ord = ep->ord;
3069 attrs.llp_stream_handle = ep;
3070 attrs.next_state = C4IW_QP_STATE_RTS;
3072 /* bind QP and TID with INIT_WR */
3073 mask = C4IW_QP_ATTR_NEXT_STATE |
3074 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
3075 C4IW_QP_ATTR_MPA_ATTR |
3076 C4IW_QP_ATTR_MAX_IRD |
3077 C4IW_QP_ATTR_MAX_ORD;
3079 err = c4iw_modify_qp(ep->com.qp->rhp,
3080 ep->com.qp, mask, &attrs, 1);
3082 goto err_deref_cm_id;
3084 set_bit(STOP_MPA_TIMER, &ep->com.flags);
3085 err = send_mpa_reply(ep, conn_param->private_data,
3086 conn_param->private_data_len);
3088 goto err_deref_cm_id;
3090 __state_set(&ep->com, FPDU_MODE);
3091 established_upcall(ep);
3092 mutex_unlock(&ep->com.mutex);
3093 c4iw_put_ep(&ep->com);
3096 deref_cm_id(&ep->com);
3100 mutex_unlock(&ep->com.mutex);
3102 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3103 c4iw_put_ep(&ep->com);
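/*
 * For loopback connects to INADDR_ANY, use a primary IPv4 address of
 * port 0 as both the local and the remote address.
 */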
3107 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3109 struct in_device *ind;
3111 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3112 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
3114 ind = in_dev_get(dev->rdev.lldi.ports[0]);
3116 return -EADDRNOTAVAIL;
3117 for_primary_ifa(ind) {
3118 laddr->sin_addr.s_addr = ifa->ifa_address;
3119 raddr->sin_addr.s_addr = ifa->ifa_address;
3125 return found ? 0 : -EADDRNOTAVAIL;
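/*
 * Find a link-local IPv6 address on the device that does not carry
 * any of the banned flags.
 */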
3128 static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
3129 unsigned char banned_flags)
3131 struct inet6_dev *idev;
3132 int err = -EADDRNOTAVAIL;
3135 idev = __in6_dev_get(dev);
3137 struct inet6_ifaddr *ifp;
3139 read_lock_bh(&idev->lock);
3140 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3141 if (ifp->scope == IFA_LINK &&
3142 !(ifp->flags & banned_flags)) {
3143 memcpy(addr, &ifp->addr, 16);
3148 read_unlock_bh(&idev->lock);
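/*
 * For loopback connects to the IPv6 any-address, use port 0's
 * link-local address as both the local and the remote address.
 */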
3154 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3156 struct in6_addr uninitialized_var(addr);
3157 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3158 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
3160 if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
3161 memcpy(la6->sin6_addr.s6_addr, &addr, 16);
3162 memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
3165 return -EADDRNOTAVAIL;
3168 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3170 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3173 struct sockaddr_in *laddr;
3174 struct sockaddr_in *raddr;
3175 struct sockaddr_in6 *laddr6;
3176 struct sockaddr_in6 *raddr6;
3180 if ((conn_param->ord > cur_max_read_depth(dev)) ||
3181 (conn_param->ird > cur_max_read_depth(dev))) {
3185 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3187 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
3192 skb_queue_head_init(&ep->com.ep_skb_list);
3193 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
3198 init_timer(&ep->timer);
3199 ep->plen = conn_param->private_data_len;
3201 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
3202 conn_param->private_data, ep->plen);
3203 ep->ird = conn_param->ird;
3204 ep->ord = conn_param->ord;
3206 if (peer2peer && ep->ord == 0)
3209 ep->com.cm_id = cm_id;
3210 ref_cm_id(&ep->com);
3212 ep->com.qp = get_qhp(dev, conn_param->qpn);
3214 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
3219 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
3223 * Allocate an active TID to initiate a TCP connection.
3225 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
3226 if (ep->atid == -1) {
3227 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
3231 insert_handle(dev, &dev->atid_idr, ep, ep->atid);
3233 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3234 sizeof(ep->com.local_addr));
3235 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
3236 sizeof(ep->com.remote_addr));
3238 laddr = (struct sockaddr_in *)&ep->com.local_addr;
3239 raddr = (struct sockaddr_in *)&ep->com.remote_addr;
3240 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3241 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
3243 if (cm_id->m_remote_addr.ss_family == AF_INET) {
3245 ra = (__u8 *)&raddr->sin_addr;
3248 * Handle loopback requests to INADDR_ANY.
3250 if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
3251 err = pick_local_ipaddrs(dev, cm_id);
3257 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
3258 __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
3259 ra, ntohs(raddr->sin_port));
3260 ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3261 laddr->sin_addr.s_addr,
3262 raddr->sin_addr.s_addr,
3264 raddr->sin_port, cm_id->tos);
3267 ra = (__u8 *)&raddr6->sin6_addr;
3270 * Handle loopback requests to INADDR_ANY.
3272 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
3273 err = pick_local_ip6addrs(dev, cm_id);
3279 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
3280 __func__, laddr6->sin6_addr.s6_addr,
3281 ntohs(laddr6->sin6_port),
3282 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
3283 ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
3284 laddr6->sin6_addr.s6_addr,
3285 raddr6->sin6_addr.s6_addr,
3287 raddr6->sin6_port, 0,
3288 raddr6->sin6_scope_id);
3291 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
3292 err = -EHOSTUNREACH;
3296 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
3297 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
3299 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
3303 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
3304 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
3307 state_set(&ep->com, CONNECTING);
3308 ep->tos = cm_id->tos;
3310 /* send connect request to rnic */
3311 err = send_connect(ep);
3315 cxgb4_l2t_release(ep->l2t);
3317 dst_release(ep->dst);
3319 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
3320 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
3322 skb_queue_purge(&ep->com.ep_skb_list);
3323 deref_cm_id(&ep->com);
3325 c4iw_put_ep(&ep->com);
3330 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3333 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
3334 &ep->com.local_addr;
3336 if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
3337 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
3338 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3342 c4iw_init_wr_wait(&ep->com.wr_wait);
3343 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3344 ep->stid, &sin6->sin6_addr,
3346 ep->com.dev->rdev.lldi.rxq_ids[0]);
3348 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3352 err = net_xmit_errno(err);
3354 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3355 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3356 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
3358 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
3363 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3366 struct sockaddr_in *sin = (struct sockaddr_in *)
3367 &ep->com.local_addr;
3369 if (dev->rdev.lldi.enable_fw_ofld_conn) {
3371 err = cxgb4_create_server_filter(
3372 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3373 sin->sin_addr.s_addr, sin->sin_port, 0,
3374 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3375 if (err == -EBUSY) {
3376 if (c4iw_fatal_error(&ep->com.dev->rdev)) {
3380 set_current_state(TASK_UNINTERRUPTIBLE);
3381 schedule_timeout(usecs_to_jiffies(100));
3383 } while (err == -EBUSY);
3385 c4iw_init_wr_wait(&ep->com.wr_wait);
3386 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3387 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3388 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3390 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3394 err = net_xmit_errno(err);
3397 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
3399 &sin->sin_addr, ntohs(sin->sin_port));
3403 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3406 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3407 struct c4iw_listen_ep *ep;
3411 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3413 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
3417 skb_queue_head_init(&ep->com.ep_skb_list);
3418 PDBG("%s ep %p\n", __func__, ep);
3419 ep->com.cm_id = cm_id;
3420 ref_cm_id(&ep->com);
3422 ep->backlog = backlog;
3423 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3424 sizeof(ep->com.local_addr));
3427 * Allocate a server TID.
3429 if (dev->rdev.lldi.enable_fw_ofld_conn &&
3430 ep->com.local_addr.ss_family == AF_INET)
3431 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
3432 cm_id->m_local_addr.ss_family, ep);
3434 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
3435 cm_id->m_local_addr.ss_family, ep);
3437 if (ep->stid == -1) {
3438 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
3442 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
3444 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3445 sizeof(ep->com.local_addr));
3447 state_set(&ep->com, LISTEN);
3448 if (ep->com.local_addr.ss_family == AF_INET)
3449 err = create_server4(dev, ep);
3451 err = create_server6(dev, ep);
3453 cm_id->provider_data = ep;
3457 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3458 ep->com.local_addr.ss_family);
3460 deref_cm_id(&ep->com);
3461 c4iw_put_ep(&ep->com);
3467 int c4iw_destroy_listen(struct iw_cm_id *cm_id)
3470 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3472 PDBG("%s ep %p\n", __func__, ep);
3475 state_set(&ep->com, DEAD);
3476 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3477 ep->com.local_addr.ss_family == AF_INET) {
3478 err = cxgb4_remove_server_filter(
3479 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3480 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3482 struct sockaddr_in6 *sin6;
3483 c4iw_init_wr_wait(&ep->com.wr_wait);
3484 err = cxgb4_remove_server(
3485 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3486 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3489 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
3491 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3492 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3493 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3495 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
3496 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3497 ep->com.local_addr.ss_family);
3499 deref_cm_id(&ep->com);
3500 c4iw_put_ep(&ep->com);
3504 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3509 struct c4iw_rdev *rdev;
3511 mutex_lock(&ep->com.mutex);
3513 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
3514 states[ep->com.state], abrupt);
3517 * Ref the ep here in case we have fatal errors causing the
3518 * ep to be released and freed.
3520 c4iw_get_ep(&ep->com);
3522 rdev = &ep->com.dev->rdev;
3523 if (c4iw_fatal_error(rdev)) {
3525 close_complete_upcall(ep, -EIO);
3526 ep->com.state = DEAD;
3528 switch (ep->com.state) {
3537 ep->com.state = ABORTING;
3539 ep->com.state = CLOSING;
3542 * if we close before we see the fw4_ack() then we fix
3543 * up the timer state since we're reusing it.
3546 test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
3547 clear_bit(STOP_MPA_TIMER, &ep->com.flags);
3552 set_bit(CLOSE_SENT, &ep->com.flags);
3555 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3558 (void)stop_ep_timer(ep);
3559 ep->com.state = ABORTING;
3561 ep->com.state = MORIBUND;
3567 PDBG("%s ignoring disconnect ep %p state %u\n",
3568 __func__, ep, ep->com.state);
3577 set_bit(EP_DISC_ABORT, &ep->com.history);
3578 close_complete_upcall(ep, -ECONNRESET);
3579 ret = send_abort(ep);
3581 set_bit(EP_DISC_CLOSE, &ep->com.history);
3582 ret = send_halfclose(ep);
3585 set_bit(EP_DISC_FAIL, &ep->com.history);
3588 close_complete_upcall(ep, -EIO);
3591 struct c4iw_qp_attributes attrs;
3593 attrs.next_state = C4IW_QP_STATE_ERROR;
3594 ret = c4iw_modify_qp(ep->com.qp->rhp,
3596 C4IW_QP_ATTR_NEXT_STATE,
3600 "%s - qp <- error failed!\n",
3606 mutex_unlock(&ep->com.mutex);
3607 c4iw_put_ep(&ep->com);
3609 release_ep_resources(ep);
3613 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3614 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3617 int atid = be32_to_cpu(req->tid);
3619 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3620 (__force u32) req->tid);
3624 switch (req->retval) {
3626 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3627 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3628 send_fw_act_open_req(ep, atid);
3632 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3633 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3634 send_fw_act_open_req(ep, atid);
3639 pr_info("%s unexpected ofld conn wr retval %d\n",
3640 __func__, req->retval);
3643 pr_err("active ofld_connect_wr failure %d atid %d\n",
3645 mutex_lock(&dev->rdev.stats.lock);
3646 dev->rdev.stats.act_ofld_conn_fails++;
3647 mutex_unlock(&dev->rdev.stats.lock);
3648 connect_reply_upcall(ep, status2errno(req->retval));
3649 state_set(&ep->com, DEAD);
3650 if (ep->com.remote_addr.ss_family == AF_INET6) {
3651 struct sockaddr_in6 *sin6 =
3652 (struct sockaddr_in6 *)&ep->com.local_addr;
3653 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3654 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3656 remove_handle(dev, &dev->atid_idr, atid);
3657 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3658 dst_release(ep->dst);
3659 cxgb4_l2t_release(ep->l2t);
3660 c4iw_put_ep(&ep->com);
3663 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3664 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3666 struct sk_buff *rpl_skb;
3667 struct cpl_pass_accept_req *cpl;
3670 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
3673 PDBG("%s passive open failure %d\n", __func__, req->retval);
3674 mutex_lock(&dev->rdev.stats.lock);
3675 dev->rdev.stats.pas_ofld_conn_fails++;
3676 mutex_unlock(&dev->rdev.stats.lock);
3679 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3680 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
3681 (__force u32) htonl(
3682 (__force u32) req->tid)));
3683 ret = pass_accept_req(dev, rpl_skb);
3690 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3692 struct cpl_fw6_msg *rpl = cplhdr(skb);
3693 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3695 switch (rpl->type) {
3697 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3699 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3700 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3701 switch (req->t_state) {
3703 active_ofld_conn_reply(dev, skb, req);
3706 passive_ofld_conn_reply(dev, skb, req);
3709 pr_err("%s unexpected ofld conn wr state %d\n",
3710 __func__, req->t_state);
3718 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
3721 __be16 hdr_len, vlantag, len;
3723 int tcp_hdr_len, ip_hdr_len;
3725 struct cpl_rx_pkt *cpl = cplhdr(skb);
3726 struct cpl_pass_accept_req *req;
3727 struct tcp_options_received tmp_opt;
3728 struct c4iw_dev *dev;
3729 enum chip_type type;
3731 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3732 /* Store values from cpl_rx_pkt in temporary locations. */
3733 vlantag = cpl->vlan;
3735 l2info = cpl->l2info;
3736 hdr_len = cpl->hdr_len;
3739 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3742 * We need to parse the TCP options from the SYN packet
3743 * to generate the cpl_pass_accept_req.
3745 memset(&tmp_opt, 0, sizeof(tmp_opt));
3746 tcp_clear_options(&tmp_opt);
3747 tcp_parse_options(skb, &tmp_opt, 0, NULL);
3749 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
3750 memset(req, 0, sizeof(*req));
3751 req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
3752 SYN_MAC_IDX_V(RX_MACIDX_G(
3753 be32_to_cpu(l2info))) |
3755 type = dev->rdev.lldi.adapter_type;
3756 tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
3757 ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
3759 cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
3760 if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
3761 eth_hdr_len = is_t4(type) ?
3762 RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
3763 RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
3764 req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
3765 IP_HDR_LEN_V(ip_hdr_len) |
3766 ETH_HDR_LEN_V(eth_hdr_len));
3767 } else { /* T6 and later */
3768 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
3769 req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
3770 T6_IP_HDR_LEN_V(ip_hdr_len) |
3771 T6_ETH_HDR_LEN_V(eth_hdr_len));
3773 req->vlan = vlantag;
3775 req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
3776 PASS_OPEN_TOS_V(tos));
3777 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3778 if (tmp_opt.wscale_ok)
3779 req->tcpopt.wsf = tmp_opt.snd_wscale;
3780 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3781 if (tmp_opt.sack_ok)
3782 req->tcpopt.sack = 1;
3783 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3787 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3788 __be32 laddr, __be16 lport,
3789 __be32 raddr, __be16 rport,
3790 u32 rcv_isn, u32 filter, u16 window,
3791 u32 rss_qid, u8 port_id)
3793 struct sk_buff *req_skb;
3794 struct fw_ofld_connection_wr *req;
3795 struct cpl_pass_accept_req *cpl = cplhdr(skb);
3798 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
3799 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
3800 memset(req, 0, sizeof(*req));
3801 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
3802 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
3803 req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
3804 req->le.filter = (__force __be32) filter;
3805 req->le.lport = lport;
3806 req->le.pport = rport;
3807 req->le.u.ipv4.lip = laddr;
3808 req->le.u.ipv4.pip = raddr;
3809 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
3810 req->tcb.rcv_adv = htons(window);
3811 req->tcb.t_state_to_astid =
3812 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
3813 FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
3814 FW_OFLD_CONNECTION_WR_ASTID_V(
3815 PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
3818 * We store the qid in opt2; the firmware will use it to send us
3819 * the wr response.
3821 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
3824 * Initialize the MSS index in the TCB to 0xF so that, when the
3825 * driver sends the cpl_pass_accept_rpl, the TCB picks up the
3826 * correct value. If it were left at 0, TP would ignore any
3827 * MSS index value > 0.
3829 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
3830 req->cookie = (uintptr_t)skb;
3832 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
3833 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
3835 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
3843 * Handler for CPL_RX_PKT messages. These arrive when a filter,
3844 * rather than a server TID, is used to redirect a SYN packet:
3845 * packets that hit the filter are steered to the offload queue,
3846 * and the driver then tries to establish the connection using a
3847 * firmware work request.
3849 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3852 unsigned int filter;
3853 struct ethhdr *eh = NULL;
3854 struct vlan_ethhdr *vlan_eh = NULL;
3856 struct tcphdr *tcph;
3857 struct rss_header *rss = (void *)skb->data;
3858 struct cpl_rx_pkt *cpl = (void *)skb->data;
3859 struct cpl_pass_accept_req *req = (void *)(rss + 1);
3860 struct l2t_entry *e;
3861 struct dst_entry *dst;
3862 struct c4iw_ep *lep = NULL;
3864 struct port_info *pi;
3865 struct net_device *pdev;
3866 u16 rss_qid, eth_hdr_len;
3869 struct neighbour *neigh;
3871 /* Drop all non-SYN packets */
3872 if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
3876 * Drop all packets which did not hit the filter.
3877 * Unlikely to happen.
3879 if (!(rss->filter_hit && rss->filter_tid))
3883 * Calculate the server tid from the filter hit index in cpl_rx_pkt.
3885 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
3887 lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
3889 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
3893 switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
3895 eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3898 eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3901 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3904 pr_err("T%d Chip is not supported\n",
3905 CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
3909 if (eth_hdr_len == ETH_HLEN) {
3910 eh = (struct ethhdr *)(req + 1);
3911 iph = (struct iphdr *)(eh + 1);
3913 vlan_eh = (struct vlan_ethhdr *)(req + 1);
3914 iph = (struct iphdr *)(vlan_eh + 1);
3915 skb->vlan_tci = ntohs(cpl->vlan);
3918 if (iph->version != 0x4)
3921 tcph = (struct tcphdr *)(iph + 1);
3922 skb_set_network_header(skb, (void *)iph - (void *)rss);
3923 skb_set_transport_header(skb, (void *)tcph - (void *)rss);
3926 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
3927 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
3928 ntohs(tcph->source), iph->tos);
3930 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3931 iph->daddr, iph->saddr, tcph->dest,
3932 tcph->source, iph->tos);
3934 pr_err("%s - failed to find dst entry!\n",
3938 neigh = dst_neigh_lookup_skb(dst, skb);
3941 pr_err("%s - failed to allocate neigh!\n",
3946 if (neigh->dev->flags & IFF_LOOPBACK) {
3947 pdev = ip_dev_find(&init_net, iph->daddr);
3948 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3950 pi = (struct port_info *)netdev_priv(pdev);
3951 tx_chan = cxgb4_port_chan(pdev);
3954 pdev = get_real_dev(neigh->dev);
3955 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3957 pi = (struct port_info *)netdev_priv(pdev);
3958 tx_chan = cxgb4_port_chan(pdev);
3960 neigh_release(neigh);
3962 pr_err("%s - failed to allocate l2t entry!\n",
3967 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
3968 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
3969 window = (__force u16) htons((__force u16)tcph->window);
3971 /* Calculate the filter portion for the LE region. */
3972 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3973 dev->rdev.lldi.ports[0],
3977 * Synthesize the cpl_pass_accept_req. We have everything except the
3978 * TID. Once the firmware replies with the TID, we update the TID field
3979 * in the cpl and pass it through the regular cpl_pass_accept_req path.
3981 build_cpl_pass_accept_req(skb, stid, iph->tos);
3982 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
3983 tcph->source, ntohl(tcph->seq), filter, window,
3984 rss_qid, pi->port_id);
3985 cxgb4_l2t_release(e);
3990 c4iw_put_ep(&lep->com);
3995 * These are the real handlers that are called from a work queue.
3998 static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
3999 [CPL_ACT_ESTABLISH] = act_establish,
4000 [CPL_ACT_OPEN_RPL] = act_open_rpl,
4001 [CPL_RX_DATA] = rx_data,
4002 [CPL_ABORT_RPL_RSS] = abort_rpl,
4003 [CPL_ABORT_RPL] = abort_rpl,
4004 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
4005 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
4006 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
4007 [CPL_PASS_ESTABLISH] = pass_establish,
4008 [CPL_PEER_CLOSE] = peer_close,
4009 [CPL_ABORT_REQ_RSS] = peer_abort,
4010 [CPL_CLOSE_CON_RPL] = close_con_rpl,
4011 [CPL_RDMA_TERMINATE] = terminate,
4012 [CPL_FW4_ACK] = fw4_ack,
4013 [CPL_FW6_MSG] = deferred_fw6_msg,
4014 [CPL_RX_PKT] = rx_pkt,
4015 [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
4016 [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
4019 static void process_timeout(struct c4iw_ep *ep)
4021 struct c4iw_qp_attributes attrs;
4024 mutex_lock(&ep->com.mutex);
4025 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
4027 set_bit(TIMEDOUT, &ep->com.history);
4028 switch (ep->com.state) {
4030 connect_reply_upcall(ep, -ETIMEDOUT);
4039 if (ep->com.cm_id && ep->com.qp) {
4040 attrs.next_state = C4IW_QP_STATE_ERROR;
4041 c4iw_modify_qp(ep->com.qp->rhp,
4042 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4045 close_complete_upcall(ep, -ETIMEDOUT);
4051 * These states are expected if the ep timed out at the same
4052 * time as another thread was calling stop_ep_timer().
4053 * So we silently do nothing for these states.
4058 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
4059 __func__, ep, ep->hwtid, ep->com.state);
4062 mutex_unlock(&ep->com.mutex);
4064 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
4065 c4iw_put_ep(&ep->com);
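/*
 * Drain the global timeout_list. The spinlock is dropped around each
 * process_timeout() call because that path takes ep->com.mutex and
 * can sleep.
 */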
4068 static void process_timedout_eps(void)
4072 spin_lock_irq(&timeout_lock);
4073 while (!list_empty(&timeout_list)) {
4074 struct list_head *tmp;
4076 tmp = timeout_list.next;
4080 spin_unlock_irq(&timeout_lock);
4081 ep = list_entry(tmp, struct c4iw_ep, entry);
4082 process_timeout(ep);
4083 spin_lock_irq(&timeout_lock);
4085 spin_unlock_irq(&timeout_lock);
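/*
 * Work-queue handler: reap timed-out endpoints, dispatch each queued
 * CPL message to its handler, then run the timeout pass once more.
 */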
4088 static void process_work(struct work_struct *work)
4090 struct sk_buff *skb = NULL;
4091 struct c4iw_dev *dev;
4092 struct cpl_act_establish *rpl;
4093 unsigned int opcode;
4096 process_timedout_eps();
4097 while ((skb = skb_dequeue(&rxq))) {
4099 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
4100 opcode = rpl->ot.opcode;
4102 BUG_ON(!work_handlers[opcode]);
4103 ret = work_handlers[opcode](dev, skb);
4106 process_timedout_eps();
4110 static DECLARE_WORK(skb_work, process_work);
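/*
 * Timer callback: put the endpoint on the global timeout_list (unless
 * it is already queued) and kick the work queue to process it.
 */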
4112 static void ep_timeout(unsigned long arg)
4114 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
4117 spin_lock(&timeout_lock);
4118 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
4120 * Only insert if it is not already on the list.
4122 if (!ep->entry.next) {
4123 list_add_tail(&ep->entry, &timeout_list);
4127 spin_unlock(&timeout_lock);
4129 queue_work(workq, &skb_work);
4133 * All the CM events are handled on a work queue to provide a safe context.
4135 static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
4139 * Save dev in the skb->cb area.
4141 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
4144 * Queue the skb and schedule the worker thread.
4146 skb_queue_tail(&rxq, skb);
4147 queue_work(workq, &skb_work);
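/* Process CPL_SET_TCB_RPL: just complain about an unexpected status. */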
4151 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
4153 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
4155 if (rpl->status != CPL_ERR_NONE) {
4156 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
4157 "for tid %u\n", rpl->status, GET_TID(rpl));
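/*
 * Process CPL_FW6_MSG in the interrupt path: WR completions wake
 * their waiters directly, while FW6_TYPE_OFLD_CONNECTION_WR_RPL
 * messages are handled later by deferred_fw6_msg() on the work queue.
 */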
4163 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
4165 struct cpl_fw6_msg *rpl = cplhdr(skb);
4166 struct c4iw_wr_wait *wr_waitp;
4169 PDBG("%s type %u\n", __func__, rpl->type);
4171 switch (rpl->type) {
4172 case FW6_TYPE_WR_RPL:
4173 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
4174 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
4175 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
4177 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
4181 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
4185 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
4193 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
4195 struct cpl_abort_req_rss *req = cplhdr(skb);
4197 unsigned int tid = GET_TID(req);
4199 ep = get_ep_from_tid(dev, tid);
4200 /* This EP will be dereferenced in peer_abort() */
4202 printk(KERN_WARNING MOD
4203 "Abort on non-existent endpoint, tid %d\n", tid);
4207 if (cxgb_is_neg_adv(req->status)) {
4208 PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
4209 __func__, ep->hwtid, req->status,
4210 neg_adv_str(req->status));
4213 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
4216 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
4223 * Most upcalls from the T4 Core go to sched() to
4224 * schedule the processing on a work queue.
4226 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
4227 [CPL_ACT_ESTABLISH] = sched,
4228 [CPL_ACT_OPEN_RPL] = sched,
4229 [CPL_RX_DATA] = sched,
4230 [CPL_ABORT_RPL_RSS] = sched,
4231 [CPL_ABORT_RPL] = sched,
4232 [CPL_PASS_OPEN_RPL] = sched,
4233 [CPL_CLOSE_LISTSRV_RPL] = sched,
4234 [CPL_PASS_ACCEPT_REQ] = sched,
4235 [CPL_PASS_ESTABLISH] = sched,
4236 [CPL_PEER_CLOSE] = sched,
4237 [CPL_CLOSE_CON_RPL] = sched,
4238 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
4239 [CPL_RDMA_TERMINATE] = sched,
4240 [CPL_FW4_ACK] = sched,
4241 [CPL_SET_TCB_RPL] = set_tcb_rpl,
4242 [CPL_FW6_MSG] = fw6_msg,
4243 [CPL_RX_PKT] = sched
4246 int __init c4iw_cm_init(void)
4248 spin_lock_init(&timeout_lock);
4249 skb_queue_head_init(&rxq);
4251 workq = create_singlethread_workqueue("iw_cxgb4");
4258 void c4iw_cm_term(void)
4260 WARN_ON(!list_empty(&timeout_list));
4261 flush_workqueue(workq);
4262 destroy_workqueue(workq);