rxrpc: Preallocate peers, conns and calls for incoming service requests
net/rxrpc/call_object.c
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;
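
/* (Tunable: this default is exposed through the rxrpc sysctl table elsewhere
 * in this tree.)
 */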

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit  ",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_CLIENT_FINAL_ACK]           = "ClFnlACK",
        [RXRPC_CALL_SERVER_PREALLOC]            = "SvPrealc",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
        [RXRPC_CALL_SUCCEEDED]                  = "Complete",
        [RXRPC_CALL_SERVER_BUSY]                = "SvBusy  ",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_LOCAL_ERROR]                = "LocError",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
};
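
/* The state and completion names above are padded to eight characters,
 * presumably to keep the columns of the /proc/net/rxrpc_calls listing
 * aligned.
 */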

const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
        [rxrpc_call_new_client]         = "NWc",
        [rxrpc_call_new_service]        = "NWs",
        [rxrpc_call_queued]             = "QUE",
        [rxrpc_call_queued_ref]         = "QUR",
        [rxrpc_call_seen]               = "SEE",
        [rxrpc_call_got]                = "GOT",
        [rxrpc_call_got_skb]            = "Gsk",
        [rxrpc_call_got_userid]         = "Gus",
        [rxrpc_call_put]                = "PUT",
        [rxrpc_call_put_skb]            = "Psk",
        [rxrpc_call_put_userid]         = "Pus",
        [rxrpc_call_put_noqueue]        = "PNQ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant call by user ID
 * - called in process context with IRQs enabled
 * - the returned call has its usage count incremented; the caller must put it
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call, rxrpc_call_got);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}
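
/* Illustrative only: a typical caller pattern, balancing the ref taken by
 * the lookup above:
 *
 *	call = rxrpc_find_call_by_user_ID(rx, id);
 *	if (call) {
 *		... use the call ...
 *		rxrpc_put_call(call, rxrpc_call_put);
 *	}
 */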

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
        struct rxrpc_call *call;

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->acks_winsz = 16;
        call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
                                    gfp);
        if (!call->acks_window) {
                kmem_cache_free(rxrpc_call_jar, call);
                return NULL;
        }

        setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
                    (unsigned long) call);
        setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
                    (unsigned long) call);
        setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
                    (unsigned long) call);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        skb_queue_head_init(&call->rx_queue);
        skb_queue_head_init(&call->rx_oos_queue);
        skb_queue_head_init(&call->knlrecv_queue);
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);

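        /* Poison the tree node: 0xed marks a never-inserted node (0xdd and
         * 0xcd are used on release and cleanup below), so stray rbtree use
         * shows up clearly in debugging.
         */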
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        call->rx_data_expect = 1;
        call->rx_data_eaten = 0;
        call->rx_first_oos = 0;
        call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
        call->creation_jif = jiffies;
        return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                                                  struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp)
{
        struct rxrpc_call *call;

        _enter("");

        ASSERT(rx->local != NULL);

        call = rxrpc_alloc_call(gfp);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->rx_data_post = 1;
        call->service_id = srx->srx_service;
        rcu_assign_pointer(call->socket, rx);

        _leave(" = %p", call);
        return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
                                   struct rxrpc_conn_parameters *cp,
                                   struct sockaddr_rxrpc *srx,
                                   gfp_t gfp)
{
        int ret;

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(call, cp, srx, gfp);
        if (ret < 0)
                return ret;

        spin_lock(&call->conn->params.peer->lock);
        hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
        spin_unlock(&call->conn->params.peer->lock);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        return 0;
}
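
/* Note: by the time rxrpc_begin_client_call() runs, the call has already
 * been published in the socket's calls tree, so on failure the error path
 * in rxrpc_new_client_call() below must unpublish it again.
 */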

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         unsigned long user_call_ID,
                                         gfp_t gfp)
{
        struct rxrpc_call *call, *xcall;
        struct rb_node *parent, **pp;
        const void *here = __builtin_return_address(0);
        int ret;

        _enter("%p,%lx", rx, user_call_ID);

        call = rxrpc_alloc_client_call(rx, srx, gfp);
        if (IS_ERR(call)) {
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        trace_rxrpc_call(call, 0, atomic_read(&call->usage), here,
                         (const void *)user_call_ID);

        /* Publish the call, even though it is incompletely set up as yet */
        call->user_call_ID = user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto found_user_ID_now_present;
        }

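        /* The calls tree holds its own ref on the call for as long as the
         * user ID is published (dropped below as rxrpc_call_put_userid).
         */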
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        write_unlock(&rx->call_lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        ret = rxrpc_begin_client_call(call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

error:
        write_lock(&rx->call_lock);
        rb_erase(&call->sock_node, &rx->calls);
        write_unlock(&rx->call_lock);
        rxrpc_put_call(call, rxrpc_call_put_userid);

        write_lock_bh(&rxrpc_call_lock);
        list_del_init(&call->link);
        write_unlock_bh(&rxrpc_call_lock);

error_out:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
        set_bit(RXRPC_CALL_RELEASED, &call->flags);
        rxrpc_put_call(call, rxrpc_call_put);
        _leave(" = %d", ret);
        return ERR_PTR(ret);

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
found_user_ID_now_present:
        write_unlock(&rx->call_lock);
        ret = -EEXIST;
        goto error_out;
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
                                       struct rxrpc_connection *conn,
                                       struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_call *call, *candidate;
        const void *here = __builtin_return_address(0);
        u32 call_id, chan;

        _enter(",%d", conn->debug_id);

        ASSERT(rx != NULL);

        candidate = rxrpc_alloc_call(GFP_NOIO);
        if (!candidate)
                return ERR_PTR(-EBUSY);

        trace_rxrpc_call(candidate, rxrpc_call_new_service,
                         atomic_read(&candidate->usage), here, NULL);

        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        candidate->conn         = conn;
        candidate->peer         = conn->params.peer;
        candidate->cid          = sp->hdr.cid;
        candidate->call_id      = sp->hdr.callNumber;
        candidate->security_ix  = sp->hdr.securityIndex;
        candidate->rx_data_post = 0;
        candidate->state        = RXRPC_CALL_SERVER_ACCEPTING;
        candidate->flags        |= (1 << RXRPC_CALL_IS_SERVICE);
        if (conn->security_ix > 0)
                candidate->state = RXRPC_CALL_SERVER_SECURING;
        rcu_assign_pointer(candidate->socket, rx);

        spin_lock(&conn->channel_lock);

        /* set the channel for this call */
        call = rcu_dereference_protected(conn->channels[chan].call,
                                         lockdep_is_held(&conn->channel_lock));

        _debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
        if (call && call->call_id == sp->hdr.callNumber) {
                /* already set; must've been a duplicate packet */
                _debug("extant call [%d]", call->state);
                ASSERTCMP(call->conn, ==, conn);

                read_lock(&call->state_lock);
                switch (call->state) {
                case RXRPC_CALL_LOCALLY_ABORTED:
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                rxrpc_queue_call(call);
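                        /* fall through */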
                case RXRPC_CALL_REMOTELY_ABORTED:
                        read_unlock(&call->state_lock);
                        goto aborted_call;
                default:
                        rxrpc_get_call(call, rxrpc_call_got);
                        read_unlock(&call->state_lock);
                        goto extant_call;
                }
        }

        if (call) {
                /* it seems the channel is still in use from the previous call
                 * - ditch the old binding if its call is now complete */
                _debug("CALL: %u { %s }",
                       call->debug_id, rxrpc_call_states[call->state]);

                if (call->state == RXRPC_CALL_COMPLETE) {
                        __rxrpc_disconnect_call(conn, call);
                } else {
                        spin_unlock(&conn->channel_lock);
                        kmem_cache_free(rxrpc_call_jar, candidate);
                        _leave(" = -EBUSY");
                        return ERR_PTR(-EBUSY);
                }
        }

        /* check the call number isn't duplicate */
        _debug("check dup");
        call_id = sp->hdr.callNumber;

        /* We just ignore calls prior to the current call ID.  Terminated calls
         * are handled via the connection.
         */
        if (call_id <= conn->channels[chan].call_counter)
                goto old_call; /* TODO: Just drop packet */

        /* Temporary: Mirror the backlog prealloc ref (TODO: use prealloc) */
        rxrpc_get_call(candidate, rxrpc_call_got);

        /* make the call available */
        _debug("new call");
        call = candidate;
        candidate = NULL;
        conn->channels[chan].call_counter = call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);
        rxrpc_get_connection(conn);
        rxrpc_get_peer(call->peer);
        spin_unlock(&conn->channel_lock);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        call->service_id = conn->params.service_id;

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        _leave(" = %p {%d} [new]", call, call->debug_id);
        return call;

extant_call:
        spin_unlock(&conn->channel_lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
        return call;

aborted_call:
        spin_unlock(&conn->channel_lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNABORTED");
        return ERR_PTR(-ECONNABORTED);

old_call:
        spin_unlock(&conn->channel_lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNRESET [old]");
        return ERR_PTR(-ECONNRESET);
}
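
/* Summary of the error returns coded above:
 *   -EBUSY        - no memory for a candidate call, or the channel is still
 *                   occupied by an incomplete call
 *   -ECONNABORTED - the extant call on the channel has been aborted
 *   -ECONNRESET   - the packet bears an out-of-date call ID
 */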

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = __atomic_add_unless(&call->usage, 1, 0);

        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_read(&call->usage);

        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}
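
/* In both variants above, the ref intended for the work item is dropped
 * again (as rxrpc_call_put_noqueue) if the work was already queued, so a
 * queued work item owns exactly one ref on the call.
 */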

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);

        if (call) {
                int n = atomic_read(&call->usage);

                trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
        }
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);

        trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);

        trace_rxrpc_call(call, rxrpc_call_got_skb, n, here, skb);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        _enter("{%d,%d,%d,%d}",
               call->debug_id, atomic_read(&call->usage),
               atomic_read(&call->ackr_not_idle),
               call->rx_first_oos);

        rxrpc_see_call(call);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        /* Dissociate the call from the socket; the socket's refs on the call
         * (the user ID ref and, for service calls, the backlog ref) are
         * dropped further down.
         */
        _debug("RELEASE CALL %p (%d)", call, call->debug_id);

        if (call->peer) {
                spin_lock(&call->peer->lock);
                hlist_del_init(&call->error_link);
                spin_unlock(&call->peer->lock);
        }

        write_lock_bh(&rx->call_lock);
        if (!list_empty(&call->accept_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
                list_del_init(&call->accept_link);
                sk_acceptq_removed(&rx->sk);
        } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
                rxrpc_put_call(call, rxrpc_call_put_userid);
        }
        write_unlock_bh(&rx->call_lock);

        /* free up the channel for reuse */
        if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK) {
                clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
                rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
                rxrpc_call_completed(call);
        } else {
                write_lock_bh(&call->state_lock);

                if (call->state < RXRPC_CALL_COMPLETE) {
                        _debug("+++ ABORTING STATE %d +++\n", call->state);
                        __rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
                        clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
                        rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
                }

                write_unlock_bh(&call->state_lock);
        }

        if (call->conn)
                rxrpc_disconnect_call(call);

        /* clean up the Rx queue */
        if (!skb_queue_empty(&call->rx_queue) ||
            !skb_queue_empty(&call->rx_oos_queue)) {
                struct rxrpc_skb_priv *sp;
                struct sk_buff *skb;

                _debug("purge Rx queues");

                spin_lock_bh(&call->lock);
                while ((skb = skb_dequeue(&call->rx_queue)) ||
                       (skb = skb_dequeue(&call->rx_oos_queue))) {
                        spin_unlock_bh(&call->lock);

                        sp = rxrpc_skb(skb);
                        _debug("- zap %s %%%u #%u",
                               rxrpc_pkts[sp->hdr.type],
                               sp->hdr.serial, sp->hdr.seq);
                        rxrpc_free_skb(skb);
                        spin_lock_bh(&call->lock);
                }
                spin_unlock_bh(&call->lock);
        }
        rxrpc_purge_queue(&call->knlrecv_queue);

        del_timer_sync(&call->resend_timer);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->lifetimer);

        /* We have to release the prealloc backlog ref */
        if (rxrpc_is_service_call(call))
                rxrpc_put_call(call, rxrpc_call_put);
        _leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p", rx);

        /* Note: rxrpc_release_call() takes rx->call_lock as a writer and
         * unlinks the call from the accept queues and the calls tree, so we
         * must not hold the lock across it or use iterators that depend on
         * the current node surviving the release.
         */

        /* kill the not-yet-accepted incoming calls */
        while (!list_empty(&rx->secureq)) {
                call = list_entry(rx->secureq.next, struct rxrpc_call,
                                  accept_link);
                rxrpc_release_call(rx, call);
        }

        while (!list_empty(&rx->acceptq)) {
                call = list_entry(rx->acceptq.next, struct rxrpc_call,
                                  accept_link);
                rxrpc_release_call(rx, call);
        }

        /* mark all the calls as no longer wanting incoming packets */
        while ((p = rb_first(&rx->calls))) {
                call = rb_entry(p, struct rxrpc_call, sock_node);
                rxrpc_release_call(rx, call);
        }

        _leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n;

        ASSERT(call != NULL);

        n = atomic_dec_return(&call->usage);
        trace_rxrpc_call(call, op, n, here, NULL);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
                rxrpc_cleanup_call(call);
        }
}

/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);
        int n;

        n = atomic_dec_return(&call->usage);
        trace_rxrpc_call(call, rxrpc_call_put_skb, n, here, skb);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
                rxrpc_cleanup_call(call);
        }
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

        rxrpc_purge_queue(&call->rx_queue);
        rxrpc_purge_queue(&call->knlrecv_queue);
        rxrpc_put_peer(call->peer);
        kmem_cache_free(rxrpc_call_jar, call);
}
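
/* The final free is deferred through call_rcu() (see rxrpc_cleanup_call()
 * below), presumably so that RCU readers of conn->channels[].call never see
 * the call structure freed under them.
 */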

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        _net("DESTROY CALL %d", call->debug_id);

        write_lock_bh(&rxrpc_call_lock);
        list_del_init(&call->link);
        write_unlock_bh(&rxrpc_call_lock);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->lifetimer);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->resend_timer);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERT(!work_pending(&call->processor));
        ASSERTCMP(call->conn, ==, NULL);

        if (call->acks_window) {
                _debug("kill Tx window %d",
                       CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz));
                smp_mb();
                while (CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz) > 0) {
                        struct rxrpc_skb_priv *sp;
                        unsigned long _skb;

                        _skb = call->acks_window[call->acks_tail] & ~1;
                        sp = rxrpc_skb((struct sk_buff *)_skb);
                        _debug("+++ clear Tx %u", sp->hdr.seq);
                        rxrpc_free_skb((struct sk_buff *)_skb);
                        call->acks_tail =
                                (call->acks_tail + 1) & (call->acks_winsz - 1);
                }

                kfree(call->acks_window);
        }

        rxrpc_free_skb(call->tx_pending);

        rxrpc_purge_queue(&call->rx_queue);
        ASSERT(skb_queue_empty(&call->rx_oos_queue));
        rxrpc_purge_queue(&call->knlrecv_queue);
        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone.
 */
void __exit rxrpc_destroy_all_calls(void)
{
        struct rxrpc_call *call;

        _enter("");

        if (list_empty(&rxrpc_calls))
                return;

        write_lock_bh(&rxrpc_call_lock);

        while (!list_empty(&rxrpc_calls)) {
                call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
                _debug("Zapping call %p", call);

                rxrpc_see_call(call);
                list_del_init(&call->link);

                pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
                       call, atomic_read(&call->usage),
                       atomic_read(&call->ackr_not_idle),
                       rxrpc_call_states[call->state],
                       call->flags, call->events);
                if (!skb_queue_empty(&call->rx_queue))
                        pr_err("Rx queue occupied\n");
                if (!skb_queue_empty(&call->rx_oos_queue))
                        pr_err("OOS queue occupied\n");

                write_unlock_bh(&rxrpc_call_lock);
                cond_resched();
                write_lock_bh(&rxrpc_call_lock);
        }

        write_unlock_bh(&rxrpc_call_lock);
        _leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        rxrpc_see_call(call);
        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
        rxrpc_queue_call(call);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        rxrpc_see_call(call);
        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
                rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        rxrpc_see_call(call);
        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
                rxrpc_queue_call(call);
}