libcxgb,iw_cxgb4,cxgbit: add cxgb_find_route6()
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
        if (ret == CPL_ERR_NONE)
                wr_waitp->ret = 0;
        else
                wr_waitp->ret = -EIO;

        if (wr_waitp->ret)
                pr_err("%s: err:%u\n", func, ret);

        complete(&wr_waitp->completion);
}

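/*
 * Block until the firmware signals completion of a work request on
 * this wait point, or until @timeout seconds elapse.  cxgbit_wake_up()
 * records the CPL status in wr_waitp->ret and completes the waiter.
 * Returns 0 on success or a negative errno (-EIO, -ETIMEDOUT).
 */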
static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
                      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
                      const char *func)
{
        int ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
        if (!ret) {
                pr_info("%s - Device %s not responding tid %u\n",
                        func, pci_name(cdev->lldi.pdev), tid);
                wr_waitp->ret = -ETIMEDOUT;
        }
out:
        if (wr_waitp->ret)
                pr_info("%s: FW reply %d tid %u\n",
                        pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
        return wr_waitp->ret;
}

/* Returns whether a CPL status conveys negative advice. */
static int cxgbit_is_neg_adv(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
                status == CPL_ERR_PERSIST_NEG_ADVICE ||
                status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

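/*
 * Per-device hash table mapping a listening endpoint (cxgbit_np) to
 * the server TID (stid) allocated for it on that adapter.  The table
 * is keyed on the cnp pointer itself, folded into NP_INFO_HASH_SIZE
 * buckets.
 */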
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
        return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
                   unsigned int stid)
{
        struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p) {
                int bucket = cxgbit_np_hashfn(cnp);

                p->cnp = cnp;
                p->stid = stid;
                spin_lock(&cdev->np_lock);
                p->next = cdev->np_hash_tab[bucket];
                cdev->np_hash_tab[bucket] = p;
                spin_unlock(&cdev->np_lock);
        }

        return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p;

        spin_lock(&cdev->np_lock);
        for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

        spin_lock(&cdev->np_lock);
        for (p = *prev; p; prev = &p->next, p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        *prev = p->next;
                        kfree(p);
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
        struct cxgbit_np *cnp;

        cnp = container_of(kref, struct cxgbit_np, kref);
        kfree(cnp);
}

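/*
 * Start an IPv6 hardware listening server: claim a CLIP (compressed
 * local IP) table entry for the local address unless it is the
 * wildcard, issue cxgb4_create_server6() and wait for the firmware's
 * CPL_PASS_OPEN_RPL.  The CLIP reference is dropped again on failure,
 * except on timeout when the reply may still be outstanding.
 */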
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                     &cnp->com.local_addr;
        int addr_type;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

        addr_type = ipv6_addr_type((const struct in6_addr *)
                                   &sin6->sin6_addr);
        if (addr_type != IPV6_ADDR_ANY) {
                ret = cxgb4_clip_get(cdev->lldi.ports[0],
                                     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
                if (ret) {
                        pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
                               sin6->sin6_addr.s6_addr, ret);
                        return -ENOMEM;
                }
        }

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server6(cdev->lldi.ports[0],
                                   stid, &sin6->sin6_addr,
                                   sin6->sin6_port,
                                   cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

                pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
                       ret, stid, sin6->sin6_addr.s6_addr,
                       ntohs(sin6->sin6_port));
        }

        return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)
                                   &cnp->com.local_addr;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server(cdev->lldi.ports[0],
                                  stid, sin->sin_addr.s_addr,
                                  sin->sin_port, 0,
                                  cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev,
                                            &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret)
                pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
                       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
        return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
        struct cxgbit_device *cdev;
        u8 i;

        list_for_each_entry(cdev, &cdev_list_head, list) {
                struct cxgb4_lld_info *lldi = &cdev->lldi;

                for (i = 0; i < lldi->nports; i++) {
                        if (lldi->ports[i] == ndev) {
                                if (port_id)
                                        *port_id = i;
                                return cdev;
                        }
                }
        }

        return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
        if (ndev->priv_flags & IFF_BONDING) {
                pr_err("Bond devices are not supported. Interface:%s\n",
                       ndev->name);
                return NULL;
        }

        if (is_vlan_dev(ndev))
                return vlan_dev_real_dev(ndev);

        return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
        struct net_device *ndev;

        ndev = __ip_dev_find(&init_net, saddr, false);
        if (!ndev)
                return NULL;

        return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
        struct net_device *ndev = NULL;
        bool found = false;

        if (IS_ENABLED(CONFIG_IPV6)) {
                for_each_netdev_rcu(&init_net, ndev)
                        if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
                                found = true;
                                break;
                        }
        }
        if (!found)
                return NULL;
        return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        struct net_device *ndev = NULL;
        struct cxgbit_device *cdev = NULL;

        rcu_read_lock();
        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
        }
        if (!ndev)
                goto out;

        cdev = cxgbit_find_device(ndev, NULL);
out:
        rcu_read_unlock();
        return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        int addr_type;

        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
                        return true;
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                addr_type = ipv6_addr_type((const struct in6_addr *)
                                &sin6->sin6_addr);
                if (addr_type == IPV6_ADDR_ANY)
                        return true;
        }
        return false;
}

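/*
 * Set up a listening endpoint on a single adapter: allocate a server
 * TID, record it in the np hash table, then create the IPv4 or IPv6
 * hardware server.  On failure the stid is freed and the hash entry
 * removed; on -ETIMEDOUT the stid is not freed, presumably because the
 * firmware reply may still arrive.
 */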
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        int ss_family = cnp->com.local_addr.ss_family;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
        if (stid < 0)
                return -EINVAL;

        if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                return -EINVAL;
        }

        if (ss_family == AF_INET)
                ret = cxgbit_create_server4(cdev, stid, cnp);
        else
                ret = cxgbit_create_server6(cdev, stid, cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_free_stid(cdev->lldi.tids, stid,
                                        ss_family);
                cxgbit_np_hash_del(cdev, cnp);
                return ret;
        }
        return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret = -1;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_np_cdev(cnp);
        if (!cdev)
                goto out;

        if (cxgbit_np_hash_find(cdev, cnp) >= 0)
                goto out;

        if (__cxgbit_setup_cdev_np(cdev, cnp))
                goto out;

        cnp->com.cdev = cdev;
        ret = 0;
out:
        mutex_unlock(&cdev_list_lock);
        return ret;
}

static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;
        u32 count = 0;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
                        mutex_unlock(&cdev_list_lock);
                        return -1;
                }
        }

        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_setup_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
                if (ret != 0)
                        continue;
                count++;
        }
        mutex_unlock(&cdev_list_lock);

        return count ? 0 : -1;
}

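/*
 * Entry point from the iSCSI target core for bringing up a network
 * portal.  A wildcard address (INADDR_ANY/IPV6_ADDR_ANY) is
 * instantiated on every registered adapter; a specific address only on
 * the adapter that owns it.
 */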
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
        struct cxgbit_np *cnp;
        int ret;

        if ((ksockaddr->ss_family != AF_INET) &&
            (ksockaddr->ss_family != AF_INET6))
                return -EINVAL;

        cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
        if (!cnp)
                return -ENOMEM;

        init_waitqueue_head(&cnp->accept_wait);
        init_completion(&cnp->com.wr_wait.completion);
        init_completion(&cnp->accept_comp);
        INIT_LIST_HEAD(&cnp->np_accept_list);
        spin_lock_init(&cnp->np_accept_lock);
        kref_init(&cnp->kref);
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct sockaddr_storage));
        memcpy(&cnp->com.local_addr, &np->np_sockaddr,
               sizeof(cnp->com.local_addr));

        cnp->np = np;
        cnp->com.cdev = NULL;

        if (cxgbit_inaddr_any(cnp))
                ret = cxgbit_setup_all_np(cnp);
        else
                ret = cxgbit_setup_cdev_np(cnp);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return -EINVAL;
        }

        np->np_context = cnp;
        cnp->com.state = CSK_STATE_LISTEN;
        return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
                     struct cxgbit_sock *csk)
{
        conn->login_family = np->np_sockaddr.ss_family;
        conn->login_sockaddr = csk->com.remote_addr;
        conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk;
        int ret = 0;

accept_wait:
        ret = wait_for_completion_interruptible(&cnp->accept_comp);
        if (ret)
                return -ENODEV;

        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
                /**
                 * No point in stalling here when np_thread
                 * is in state RESET/SHUTDOWN/EXIT - bail
                 **/
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);

        spin_lock_bh(&cnp->np_accept_lock);
        if (list_empty(&cnp->np_accept_list)) {
                spin_unlock_bh(&cnp->np_accept_lock);
                goto accept_wait;
        }

        csk = list_first_entry(&cnp->np_accept_list,
                               struct cxgbit_sock,
                               accept_node);

        list_del_init(&csk->accept_node);
        spin_unlock_bh(&cnp->np_accept_lock);
        conn->context = csk;
        csk->conn = conn;

        cxgbit_set_conn_info(np, conn, csk);
        return 0;
}

static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        bool ipv6 = false;

        stid = cxgbit_np_hash_del(cdev, cnp);
        if (stid < 0)
                return -EINVAL;
        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        if (cnp->np->np_sockaddr.ss_family == AF_INET6)
                ipv6 = true;

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);
        ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
                                  cdev->lldi.rxq_ids[0], ipv6);

        if (ret > 0)
                ret = net_xmit_errno(ret);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return ret;
        }

        ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                    0, 10, __func__);
        if (ret == -ETIMEDOUT)
                return ret;

        if (ipv6 && cnp->com.cdev) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr,
                                   1);
        }

        cxgb4_free_stid(cdev->lldi.tids, stid,
                        cnp->com.local_addr.ss_family);
        return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_free_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
        }
        mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        bool found = false;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cdev == cnp->com.cdev) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto out;

        __cxgbit_free_cdev_np(cdev, cnp);
out:
        mutex_unlock(&cdev_list_lock);
}

void cxgbit_free_np(struct iscsi_np *np)
{
        struct cxgbit_np *cnp = np->np_context;

        cnp->com.state = CSK_STATE_DEAD;
        if (cnp->com.cdev)
                cxgbit_free_cdev_np(cnp);
        else
                cxgbit_free_all_np(cnp);

        np->np_context = NULL;
        cxgbit_put_cnp(cnp);
}

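/*
 * Initiate a TCP half-close (FIN) on the offloaded connection by
 * queueing a CPL_CLOSE_CON_REQ on the socket's transmit queue.
 */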
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        struct cpl_close_con_req *req;
        unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        req = (struct cpl_close_con_req *)__skb_put(skb, len);
        memset(req, 0, len);

        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
                                                    csk->tid));
        req->rsvd = 0;

        cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
        __skb_queue_tail(&csk->txq, skb);
        cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
        pr_debug("%s cxgbit_device %p\n", __func__, handle);
        kfree_skb(skb);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbit_device *cdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        pr_debug("%s cdev %p\n", __func__, cdev);
        req->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
        struct cpl_abort_req *req;
        unsigned int len = roundup(sizeof(*req), 16);
        struct sk_buff *skb;

        pr_debug("%s: csk %p tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        skb = __skb_dequeue(&csk->skbq);
        req = (struct cpl_abort_req *)__skb_put(skb, len);
        memset(req, 0, len);

        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
                                                    csk->tid));
        req->cmd = CPL_ABORT_SEND_RST;
        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
        struct cxgbit_sock *csk = conn->context;
        bool release = false;

        pr_debug("%s: state %d\n",
                 __func__, csk->com.state);

        spin_lock_bh(&csk->lock);
        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
                        csk->com.state = CSK_STATE_CLOSING;
                        cxgbit_send_halfclose(csk);
                } else {
                        csk->com.state = CSK_STATE_ABORTING;
                        cxgbit_send_abort_req(csk);
                }
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                cxgbit_send_halfclose(csk);
                break;
        case CSK_STATE_DEAD:
                release = true;
                break;
        default:
                pr_err("%s: csk %p; state %d\n",
                       __func__, csk, csk->com.state);
        }
        spin_unlock_bh(&csk->lock);

        if (release)
                cxgbit_put_csk(csk);
}

static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
        csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
                        ((csk->com.remote_addr.ss_family == AF_INET) ?
                        sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                        sizeof(struct tcphdr);
        csk->mss = csk->emss;
        if (TCPOPT_TSTAMP_G(opt))
                csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (csk->emss < 128)
                csk->emss = 128;
        if (csk->emss & 7)
                pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
                        TCPOPT_MSS_G(opt), csk->mss, csk->emss);
        pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
                 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;

        __skb_queue_purge(&csk->txq);
        __skb_queue_purge(&csk->rxq);
        __skb_queue_purge(&csk->backlogq);
        __skb_queue_purge(&csk->ppodq);
        __skb_queue_purge(&csk->skbq);

        while ((skb = cxgbit_sock_dequeue_wr(csk)))
                kfree_skb(skb);

        __kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
        struct cxgbit_sock *csk;
        struct cxgbit_device *cdev;

        csk = container_of(kref, struct cxgbit_sock, kref);

        pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

        if (csk->com.local_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                             &csk->com.local_addr;
                cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
                                   (const u32 *)
                                   &sin6->sin6_addr.s6_addr, 1);
        }

        cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid);
        dst_release(csk->dst);
        cxgb4_l2t_release(csk->l2t);

        cdev = csk->com.cdev;
        spin_lock_bh(&cdev->cskq.lock);
        list_del(&csk->list);
        spin_unlock_bh(&cdev->cskq.lock);

        cxgbit_free_skb(csk);
        cxgbit_put_cdev(cdev);

        kfree(csk);
}

static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
        unsigned int linkspeed;
        u8 scale;

        linkspeed = pi->link_cfg.speed;
        scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
        csk->rcv_win = CXGBIT_10G_RCV_WIN;
        if (scale)
                csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
        csk->snd_win = CXGBIT_10G_SND_WIN;
        if (scale)
                csk->snd_win *= scale;

        pr_debug("%s snd_win %d rcv_win %d\n",
                 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
        return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
        if (!pri_mask)
                return 0;

        return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
        int ret;
        u8 caps;

        struct dcb_app iscsi_dcb_app = {
                .protocol = local_port
        };

        ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

        if (ret)
                return 0;

        if (caps & DCB_CAP_DCBX_VER_IEEE) {
                iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;

                ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);

        } else if (caps & DCB_CAP_DCBX_VER_CEE) {
                iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

                ret = dcb_getapp(ndev, &iscsi_dcb_app);
        }

        pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

        return cxgbit_select_priority(ret);
}
#endif

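/*
 * Resolve the neighbour/L2T state for a new connection and derive its
 * offload parameters: MTU, TX channel, SMAC index, TX/RSS queue
 * indices and TCP windows.  Loopback and physical ports take separate
 * paths because their queue selection differs.
 */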
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
                    u16 local_port, struct dst_entry *dst,
                    struct cxgbit_device *cdev)
{
        struct neighbour *n;
        int ret, step;
        struct net_device *ndev;
        u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
        u8 priority = 0;
#endif

        n = dst_neigh_lookup(dst, peer_ip);
        if (!n)
                return -ENODEV;

        rcu_read_lock();
        ret = -ENOMEM;
        if (n->dev->flags & IFF_LOOPBACK) {
                if (iptype == 4)
                        ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
                else if (IS_ENABLED(CONFIG_IPV6))
                        ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
                else
                        ndev = NULL;

                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
                                         n, ndev, 0);
                if (!csk->l2t)
                        goto out;
                csk->mtu = ndev->mtu;
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nchan;
                csk->txq_idx = cxgb4_port_idx(ndev) * step;
                step = cdev->lldi.nrxq /
                        cdev->lldi.nchan;
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                csk->rss_qid = cdev->lldi.rxq_ids[
                                cxgb4_port_idx(ndev) * step];
                csk->port_id = cxgb4_port_idx(ndev);
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        } else {
                ndev = cxgbit_get_real_dev(n->dev);
                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

#ifdef CONFIG_CHELSIO_T4_DCB
                if (cxgbit_get_iscsi_dcb_state(ndev))
                        priority = cxgbit_get_iscsi_dcb_priority(ndev,
                                                                 local_port);

                csk->dcb_priority = priority;

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
                if (!csk->l2t)
                        goto out;
                port_id = cxgb4_port_idx(ndev);
                csk->mtu = dst_mtu(dst);
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nports;
                csk->txq_idx = (port_id * step) +
                                (cdev->selectq[port_id][0]++ % step);
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                step = cdev->lldi.nrxq /
                        cdev->lldi.nports;
                rxq_idx = (port_id * step) +
                                (cdev->selectq[port_id][1]++ % step);
                csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
                csk->port_id = port_id;
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        }
        ret = 0;
out:
        rcu_read_unlock();
        neigh_release(n);
        return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
        struct cpl_tid_release *req;
        unsigned int len = roundup(sizeof(*req), 16);
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        req = (struct cpl_tid_release *)__skb_put(skb, len);
        memset(req, 0, len);

        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
                   CPL_TID_RELEASE, tid));
        set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
        cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
                struct l2t_entry *l2e)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

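/*
 * Pick the best entry in the adapter's MTU table, accounting for
 * IP/TCP header (and timestamp option) overhead so the TCP payload
 * stays 8-byte aligned.
 */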
static void
cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
                unsigned int *idx, int use_ts, int ipv6)
{
        unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
                                   sizeof(struct iphdr)) +
                                   sizeof(struct tcphdr) +
                                   (use_ts ? round_up(TCPOLEN_TIMESTAMP,
                                    4) : 0);
        unsigned short data_size = mtu - hdr_size;

        cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        if (csk->com.state != CSK_STATE_ESTABLISHED) {
                __kfree_skb(skb);
                return;
        }

        cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host -> adapter.
 * Return RX credits to the hardware through an RX_DATA_ACK CPL message.
 * Returns 0 on success, -1 if the skb could not be allocated.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;
        unsigned int len = roundup(sizeof(*req), 16);

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -1;

        req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
        memset(req, 0, len);

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                                    csk->tid));
        req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
                                       RX_CREDITS_V(csk->rx_credits));

        csk->rx_credits = 0;

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock_bh(&csk->lock);
                return 0;
        }

        cxgbit_send_rx_credits(csk, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

#define FLOWC_WR_NPARAMS_MIN    9
#define FLOWC_WR_NPARAMS_MAX    11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len, flowclen;
        u8 i;

        flowclen = offsetof(struct fw_flowc_wr,
                            mnemval[FLOWC_WR_NPARAMS_MAX]);

        len = max_t(u32, sizeof(struct cpl_abort_req),
                    sizeof(struct cpl_abort_rpl));

        len = max(len, flowclen);
        len = roundup(len, 16);

        for (i = 0; i < 3; i++) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (!skb)
                        goto out;
                __skb_queue_tail(&csk->skbq, skb);
        }

        skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
        if (!skb)
                goto out;

        memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
        csk->lro_hskb = skb;

        return 0;
out:
        __skb_queue_purge(&csk->skbq);
        return -ENOMEM;
}

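/* Smallest TCP window scale (0..14) that covers the given window. */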
static u32 cxgbit_compute_wscale(u32 win)
{
        u32 wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}

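/*
 * Complete the passive open: build a CPL_PASS_ACCEPT_RPL encoding the
 * MSS index, window scale, receive buffer size and iSCSI ULP mode into
 * opt0/opt2 for the hardware TCB, and hand it to the L2T send path.
 */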
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
        struct sk_buff *skb;
        const struct tcphdr *tcph;
        struct cpl_t5_pass_accept_rpl *rpl5;
        unsigned int len = roundup(sizeof(*rpl5), 16);
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2, hlen;
        u32 wscale;
        u32 win;

        pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
                cxgbit_put_csk(csk);
                return;
        }

        rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len);
        memset(rpl5, 0, len);

        INIT_TP_WR(rpl5, csk->tid);
        OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                     csk->tid));
        cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
                        req->tcpopt.tstamp,
                        (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
        wscale = cxgbit_compute_wscale(csk->rcv_win);
        /*
         * Specify the largest window that will fit in opt0. The
         * remainder will be specified in the rx_data_ack.
         */
        win = csk->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
        opt0 =  TCAM_BYPASS_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(csk->l2t->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                DSCP_V(csk->tos >> 2) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(win);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

        if (req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN_F;
        if (req->tcpopt.sack)
                opt2 |= SACK_EN_F;
        if (wscale)
                opt2 |= WND_SCALE_EN_F;

        hlen = ntohl(req->hdr_len);
        tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
                IP_HDR_LEN_G(hlen);

        if (tcph->ece && tcph->cwr)
                opt2 |= CCTRL_ECN_V(1);

        opt2 |= RX_COALESCE_V(3);
        opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

        opt2 |= T5_ISS_F;
        rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

        opt2 |= T5_OPT_2_VALID_F;

        rpl5->opt0 = cpu_to_be64(opt0);
        rpl5->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
        t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
        cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

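/*
 * Handle an incoming CPL_PASS_ACCEPT_REQ (a SYN for one of our
 * listening servers): look up the listening endpoint by stid, find a
 * route back to the peer, allocate and initialize the offload socket
 * (csk), insert it into the TID table and reply via
 * cxgbit_pass_accept_rpl().
 */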
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = NULL;
        struct cxgbit_np *cnp;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        u16 peer_mss = ntohs(req->tcpopt.mss);
        unsigned short hdrs;

        struct dst_entry *dst;
        __u8 local_ip[16], peer_ip[16];
        __be16 local_port, peer_port;
        int ret;
        int iptype;

        pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
                 __func__, cdev, stid, tid);

        cnp = lookup_stid(t, stid);
        if (!cnp) {
                pr_err("%s connect request on invalid stid %d\n",
                       __func__, stid);
                goto rel_skb;
        }

        if (cnp->com.state != CSK_STATE_LISTEN) {
                pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
                       __func__);
                goto reject;
        }

        csk = lookup_tid(t, tid);
        if (csk) {
                pr_err("%s csk not null tid %u\n",
                       __func__, tid);
                goto rel_skb;
        }

        cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
                        peer_ip, &local_port, &peer_port);

        /* Find output route */
        if (iptype == 4) {
                pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
                         "lport %d rport %d peer_mss %d\n",
                         __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
                                      *(__be32 *)local_ip,
                                      *(__be32 *)peer_ip,
                                      local_port, peer_port,
                                      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
                         "lport %d rport %d peer_mss %d\n",
                         __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
                                       local_ip, peer_ip,
                                       local_port, peer_port,
                                       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                       ((struct sockaddr_in6 *)
                                        &cnp->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }

        csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
        if (!csk) {
                dst_release(dst);
                goto rel_skb;
        }

        ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
                                  dst, cdev);
        if (ret) {
                pr_err("%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(csk);
                goto reject;
        }

        kref_init(&csk->kref);
        init_completion(&csk->com.wr_wait.completion);

        INIT_LIST_HEAD(&csk->accept_node);

        hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
                sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
        if (peer_mss && csk->mtu > (peer_mss + hdrs))
                csk->mtu = peer_mss + hdrs;

        csk->com.state = CSK_STATE_CONNECTING;
        csk->com.cdev = cdev;
        csk->cnp = cnp;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        csk->dst = dst;
        csk->tid = tid;
        csk->wr_cred = cdev->lldi.wr_cred -
                        DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
        csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;

        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
                                          &csk->com.local_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = local_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;

                sin = (struct sockaddr_in *)&csk->com.remote_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                            &csk->com.local_addr;

                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
                cxgb4_clip_get(cdev->lldi.ports[0],
                               (const u32 *)&sin6->sin6_addr.s6_addr,
                               1);

                sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }

        skb_queue_head_init(&csk->rxq);
        skb_queue_head_init(&csk->txq);
        skb_queue_head_init(&csk->ppodq);
        skb_queue_head_init(&csk->backlogq);
        skb_queue_head_init(&csk->skbq);
        cxgbit_sock_reset_wr_list(csk);
        spin_lock_init(&csk->lock);
        init_waitqueue_head(&csk->waitq);
        init_waitqueue_head(&csk->ack_waitq);
        csk->lock_owner = false;

        if (cxgbit_alloc_csk_skb(csk)) {
                dst_release(dst);
                kfree(csk);
                goto rel_skb;
        }

        cxgbit_get_cdev(cdev);

        spin_lock(&cdev->cskq.lock);
        list_add_tail(&csk->list, &cdev->cskq.list);
        spin_unlock(&cdev->cskq.lock);

        cxgb4_insert_tid(t, csk, tid);
        cxgbit_pass_accept_rpl(csk, req);
        goto rel_skb;

reject:
        cxgbit_release_tid(cdev, tid);
rel_skb:
        __kfree_skb(skb);
}

static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
                           u32 *flowclenp)
{
        u32 nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;

        if (csk->snd_wscale)
                nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the flowc request.
         * Pass back the nparams and actual flowc length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;
        return flowclen16;
}

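/*
 * Send the FW_FLOWC_WR that must precede transmit data on an offloaded
 * connection.  It describes the flow to the firmware: channel, queues,
 * sequence numbers, windows, MSS, maximum TX data length and,
 * optionally, window scale and DCB priority.  Returns the number of
 * 16-byte credits consumed.
 */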
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct fw_flowc_wr *flowc;
        u32 nparams, flowclen16, flowclen;
        struct sk_buff *skb;
        u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

        flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

        skb = __skb_dequeue(&csk->skbq);
        flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
        memset(flowc, 0, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
                                          FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
                                            (csk->com.cdev->lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(csk->emss);

        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
                flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
        else
                flowc->mnemval[8].val = cpu_to_be32(16384);

        index = 9;

        if (csk->snd_wscale) {
                flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
                flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
                index++;
        }

#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == VLAN_NONE) {
                pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
                flowc->mnemval[index].val = cpu_to_be32(0);
        } else
                flowc->mnemval[index].val = cpu_to_be32(
                                (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

        pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
                 " rcv_seq = %u; snd_win = %u; emss = %u\n",
                 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
                 csk->rcv_nxt, csk->snd_win, csk->emss);
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, skb);
        return flowclen16;
}

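/*
 * Program the connection's ULP submode via CPL_SET_TCB_FIELD so the
 * hardware generates and verifies the negotiated iSCSI header and/or
 * data digests, then wait for the firmware reply.
 */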
1474 int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
1475 {
1476         struct sk_buff *skb;
1477         struct cpl_set_tcb_field *req;
1478         u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
1479         u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
1480         unsigned int len = roundup(sizeof(*req), 16);
1481         int ret;
1482
1483         skb = alloc_skb(len, GFP_KERNEL);
1484         if (!skb)
1485                 return -ENOMEM;
1486
1487         /*  set up ulp submode */
1488         req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
1489         memset(req, 0, len);
1490
1491         INIT_TP_WR(req, csk->tid);
1492         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1493         req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1494         req->word_cookie = htons(0);
1495         req->mask = cpu_to_be64(0x3 << 4);
1496         req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1497                                 (dcrc ? ULP_CRC_DATA : 0)) << 4);
1498         set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1499
1500         cxgbit_get_csk(csk);
1501         cxgbit_init_wr_wait(&csk->com.wr_wait);
1502
1503         cxgbit_ofld_send(csk->com.cdev, skb);
1504
1505         ret = cxgbit_wait_for_reply(csk->com.cdev,
1506                                     &csk->com.wr_wait,
1507                                     csk->tid, 5, __func__);
1508         if (ret)
1509                 return -1;
1510
1511         return 0;
1512 }
1513
1514 int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
1515 {
1516         struct sk_buff *skb;
1517         struct cpl_set_tcb_field *req;
1518         unsigned int len = roundup(sizeof(*req), 16);
1519         int ret;
1520
1521         skb = alloc_skb(len, GFP_KERNEL);
1522         if (!skb)
1523                 return -ENOMEM;
1524
1525         req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
1526         memset(req, 0, len);
1527
1528         INIT_TP_WR(req, csk->tid);
1529         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1530         req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1531         req->word_cookie = htons(0);
1532         req->mask = cpu_to_be64(0x3 << 8);
1533         req->val = cpu_to_be64(pg_idx << 8);
1534         set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1535
1536         cxgbit_get_csk(csk);
1537         cxgbit_init_wr_wait(&csk->com.wr_wait);
1538
1539         cxgbit_ofld_send(csk->com.cdev, skb);
1540
1541         ret = cxgbit_wait_for_reply(csk->com.cdev,
1542                                     &csk->com.wr_wait,
1543                                     csk->tid, 5, __func__);
1544         if (ret)
1545                 return -1;
1546
1547         return 0;
1548 }
1549
1550 static void
1551 cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1552 {
1553         struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1554         struct tid_info *t = cdev->lldi.tids;
1555         unsigned int stid = GET_TID(rpl);
1556         struct cxgbit_np *cnp = lookup_stid(t, stid);
1557
1558         pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1559                  __func__, cnp, stid, rpl->status);
1560
1561         if (!cnp) {
1562                 pr_info("%s stid %d lookup failure\n", __func__, stid);
1563                 return;
1564         }
1565
1566         cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1567         cxgbit_put_cnp(cnp);
1568 }
1569
static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
                 __func__, cnp, stid, rpl->status);

        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
rel_skb:
        __kfree_skb(skb);
}

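/*
 * CPL_PASS_ESTABLISH completes the passive open: record the send and
 * receive initial sequence numbers, stash any receive-window excess over
 * the hardware buffer size as rx_credits, mark the connection
 * ESTABLISHED, and queue it on the listening endpoint's accept list.
 */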
static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_establish *req = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        struct cxgbit_sock *csk;
        struct cxgbit_np *cnp;
        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
        u32 snd_isn = be32_to_cpu(req->snd_isn);
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        cnp = csk->cnp;

        pr_debug("%s: csk %p; tid %u; cnp %p\n",
                 __func__, csk, tid, cnp);

        csk->write_seq = snd_isn;
        csk->snd_una = snd_isn;
        csk->snd_nxt = snd_isn;

        csk->rcv_nxt = rcv_isn;

        if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
                csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
        cxgbit_set_emss(csk, tcp_opt);
        dst_confirm(csk->dst);
        csk->com.state = CSK_STATE_ESTABLISHED;
        spin_lock_bh(&cnp->np_accept_lock);
        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
        spin_unlock_bh(&cnp->np_accept_lock);
        complete(&cnp->accept_comp);
rel_skb:
        __kfree_skb(skb);
}

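/* Hand a received skb to the connection's RX thread and wake it. */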
static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_skcb_flags(skb) = 0;
        spin_lock_bh(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        spin_unlock_bh(&csk->rxq.lock);
        wake_up(&csk->waitq);
}

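/*
 * The peer sent a FIN.  From ESTABLISHED the skb is handed to the RX
 * thread so the target stack can observe the half-close; the remaining
 * states just advance the close state machine
 * (CLOSING -> MORIBUND -> DEAD).
 */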
static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_CLOSING;
                cxgbit_queue_rx_skb(csk, skb);
                return;
        case CSK_STATE_CLOSING:
                /* simultaneous close */
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_peer_close in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

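/* Hardware acked our CPL_CLOSE_CON_REQ; advance the close state machine. */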
static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
        case CSK_STATE_DEAD:
                break;
        default:
                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

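/*
 * The peer reset the connection (CPL_ABORT_REQ_RSS).  Unless the status
 * is only negative advice, mark the connection dead and answer with a
 * CPL_ABORT_RPL.  The reply uses an skb dequeued from csk->skbq, which
 * is presumably pre-allocated at connection setup so that no allocation
 * can fail on this path.
 */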
static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *hdr = cplhdr(skb);
        unsigned int tid = GET_TID(hdr);
        struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        bool release = false;
        bool wakeup_thread = false;
        unsigned int len = roundup(sizeof(*rpl), 16);

        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, tid, csk->com.state);

        if (cxgbit_is_neg_adv(hdr->status)) {
                pr_err("%s: got negative advice %d on tid %u\n",
                       __func__, hdr->status, tid);
                goto rel_skb;
        }

        switch (csk->com.state) {
        case CSK_STATE_CONNECTING:
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                release = true;
                break;
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_DEAD;
                wakeup_thread = true;
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_DEAD;
                if (!csk->conn)
                        release = true;
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_abort_req_rss in bad state %d\n",
                        __func__, csk->com.state);
                csk->com.state = CSK_STATE_DEAD;
        }

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        rpl_skb = __skb_dequeue(&csk->skbq);
        set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, csk->txq_idx);

        rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
        memset(rpl, 0, len);

        INIT_TP_WR(rpl, csk->tid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        rpl->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(csk->com.cdev, rpl_skb);

        if (wakeup_thread) {
                cxgbit_queue_rx_skb(csk, skb);
                return;
        }

        if (release)
                cxgbit_put_csk(csk);
rel_skb:
        __kfree_skb(skb);
}

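/* Hardware acked our CPL_ABORT_REQ; the connection is gone. */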
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ABORTING:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        default:
                pr_info("%s: cpl_abort_rpl_rss in state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

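/*
 * Sanity-check the TX flow-control accounting.  The driver reuses
 * skb->csum on pending work-request skbs to record how many 16-byte
 * credits each WR consumed, so the credits still held by the hardware
 * plus the credits currently available must always equal wr_max_cred.
 */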
static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
        const struct sk_buff *skb = csk->wr_pending_head;
        u32 credit = 0;

        if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
                pr_err("csk 0x%p, tid %u, credit %u > %u\n",
                       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
                return true;
        }

        while (skb) {
                credit += skb->csum;
                skb = cxgbit_skcb_tx_wr_next(skb);
        }

        if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
                       csk, csk->tid, csk->wr_cred,
                       credit, csk->wr_max_cred);

                return true;
        }

        return false;
}

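/*
 * CPL_FW4_ACK returns TX credits.  Free the work-request skbs that have
 * completed, advance snd_una when the ack carries a valid sequence
 * number, and restart transmission if data is still queued.
 */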
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
        u32 credits = rpl->credits;
        u32 snd_una = ntohl(rpl->snd_una);

        csk->wr_cred += credits;
        if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

        while (credits) {
                struct sk_buff *p = cxgbit_sock_peek_wr(csk);

                if (unlikely(!p)) {
                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
                               csk, csk->tid, credits,
                               csk->wr_cred, csk->wr_una_cred);
                        break;
                }

                if (unlikely(credits < p->csum)) {
                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
                                csk, csk->tid,
                                credits, csk->wr_cred, csk->wr_una_cred,
                                p->csum);
                        p->csum -= credits;
                        break;
                }

                cxgbit_sock_dequeue_wr(csk);
                credits -= p->csum;
                kfree_skb(p);
        }

        if (unlikely(cxgbit_credit_err(csk))) {
                cxgbit_queue_rx_skb(csk, skb);
                return;
        }

        if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
                if (unlikely(before(snd_una, csk->snd_una))) {
                        pr_warn("csk 0x%p,%u, snd_una %u/%u.\n",
                                csk, csk->tid, snd_una,
                                csk->snd_una);
                        goto rel_skb;
                }

                if (csk->snd_una != snd_una) {
                        csk->snd_una = snd_una;
                        dst_confirm(csk->dst);
                        wake_up(&csk->ack_waitq);
                }
        }

        if (skb_queue_len(&csk->txq))
                cxgbit_push_tx_frames(csk);

rel_skb:
        __kfree_skb(skb);
}

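/*
 * Completion for the CPL_SET_TCB_FIELD requests issued by the two setup
 * helpers above: wake the waiter and drop the reference taken before the
 * request was sent.
 */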
static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
        cxgbit_put_csk(csk);
rel_skb:
        __kfree_skb(skb);
}

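/* Steer ingress iSCSI payload (CPL_RX_DATA) to the connection's RX queue. */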
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_rx_data *cpl = cplhdr(skb);
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        cxgbit_queue_rx_skb(csk, skb);
        return;
rel_skb:
        __kfree_skb(skb);
}

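/*
 * Run a connection-scoped CPL handler under csk->lock.  If the socket is
 * currently owned by another context (lock_owner), the skb is deferred
 * to csk->backlogq for the lock owner to process later.  The handler
 * itself is stashed in the skb control block by cxgbit_rx_cpl() below.
 */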
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        spin_lock(&csk->lock);
        if (csk->lock_owner) {
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock(&csk->lock);
                return;
        }

        cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
        spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_get_csk(csk);
        __cxgbit_process_rx_cpl(csk, skb);
        cxgbit_put_csk(csk);
}

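/*
 * Demultiplex connection-scoped CPL messages: record the matching
 * handler in the skb control block, look up the connection by tid and
 * process the message (CPL_FW4_ACK is handled without taking an extra
 * connection reference).
 */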
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_tx_data *cpl = cplhdr(skb);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;
        unsigned int tid = GET_TID(cpl);
        u8 opcode = cxgbit_skcb_rx_opcode(skb);
        bool ref = true;

        switch (opcode) {
        case CPL_FW4_ACK:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
                ref = false;
                break;
        case CPL_PEER_CLOSE:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
                break;
        case CPL_CLOSE_CON_RPL:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
                break;
        case CPL_ABORT_REQ_RSS:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
                break;
        case CPL_ABORT_RPL_RSS:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
                break;
        default:
                goto rel_skb;
        }

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        if (ref)
                cxgbit_process_rx_cpl(csk, skb);
        else
                __cxgbit_process_rx_cpl(csk, skb);

        return;
rel_skb:
        __kfree_skb(skb);
}

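/*
 * CPL dispatch table, indexed by CPL opcode.  Presumably consulted by
 * the driver's ULD RX path when the cxgb4 lower-level driver hands up an
 * ingress message.
 */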
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_PASS_OPEN_RPL]     = cxgbit_pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
        [CPL_PASS_ACCEPT_REQ]   = cxgbit_pass_accept_req,
        [CPL_PASS_ESTABLISH]    = cxgbit_pass_establish,
        [CPL_SET_TCB_RPL]       = cxgbit_set_tcb_rpl,
        [CPL_RX_DATA]           = cxgbit_rx_data,
        [CPL_FW4_ACK]           = cxgbit_rx_cpl,
        [CPL_PEER_CLOSE]        = cxgbit_rx_cpl,
        [CPL_CLOSE_CON_RPL]     = cxgbit_rx_cpl,
        [CPL_ABORT_REQ_RSS]     = cxgbit_rx_cpl,
        [CPL_ABORT_RPL_RSS]     = cxgbit_rx_cpl,
};