/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that ip_vs_app module handles the reverse direction (incoming requests
 * and outgoing responses).
 *
 *		IP_MASQ_APP application masquerading module
 *
 * Author:	Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 */
21 #define KMSG_COMPONENT "IPVS"
22 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/skbuff.h>
29 #include <linux/netfilter.h>
30 #include <linux/slab.h>
31 #include <net/net_namespace.h>
32 #include <net/protocol.h>
34 #include <linux/stat.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/mutex.h>
39 #include <net/ip_vs.h>
/* Entry points exported to application helper modules (e.g. ip_vs_ftp). */
EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

/* Serializes all updates to the per-netns application lists. */
static DEFINE_MUTEX(__ip_vs_app_mutex);
48 * Get an ip_vs_app object
50 static inline int ip_vs_app_get(struct ip_vs_app *app)
52 return try_module_get(app->module);
56 static inline void ip_vs_app_put(struct ip_vs_app *app)
58 module_put(app->module);
63 * Allocate/initialize app incarnation and register it in proto apps.
66 ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
69 struct ip_vs_protocol *pp;
70 struct ip_vs_app *inc;
73 if (!(pp = ip_vs_proto_get(proto)))
74 return -EPROTONOSUPPORT;
76 if (!pp->unregister_app)
79 inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
82 INIT_LIST_HEAD(&inc->p_list);
83 INIT_LIST_HEAD(&inc->incs_list);
85 inc->port = htons(port);
86 atomic_set(&inc->usecnt, 0);
90 ip_vs_create_timeout_table(app->timeouts,
92 if (!inc->timeout_table) {
98 ret = pp->register_app(net, inc);
102 list_add(&inc->a_list, &app->incs_list);
103 IP_VS_DBG(9, "%s App %s:%u registered\n",
104 pp->name, inc->name, ntohs(inc->port));
109 kfree(inc->timeout_table);
116 * Release app incarnation
119 ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
121 struct ip_vs_protocol *pp;
123 if (!(pp = ip_vs_proto_get(inc->protocol)))
126 if (pp->unregister_app)
127 pp->unregister_app(net, inc);
129 IP_VS_DBG(9, "%s App %s:%u unregistered\n",
130 pp->name, inc->name, ntohs(inc->port));
132 list_del(&inc->a_list);
134 kfree(inc->timeout_table);
140 * Get reference to app inc (only called from softirq)
143 int ip_vs_app_inc_get(struct ip_vs_app *inc)
147 atomic_inc(&inc->usecnt);
148 if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
149 atomic_dec(&inc->usecnt);
155 * Put the app inc (only called from timer or net softirq)
157 void ip_vs_app_inc_put(struct ip_vs_app *inc)
159 ip_vs_app_put(inc->app);
160 atomic_dec(&inc->usecnt);
165 * Register an application incarnation in protocol applications
168 register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
173 mutex_lock(&__ip_vs_app_mutex);
175 result = ip_vs_app_inc_new(net, app, proto, port);
177 mutex_unlock(&__ip_vs_app_mutex);
183 /* Register application for netns */
184 struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app)
186 struct netns_ipvs *ipvs = net_ipvs(net);
191 return ERR_PTR(-ENOENT);
193 mutex_lock(&__ip_vs_app_mutex);
195 list_for_each_entry(a, &ipvs->app_list, a_list) {
196 if (!strcmp(app->name, a->name)) {
201 a = kmemdup(app, sizeof(*app), GFP_KERNEL);
206 INIT_LIST_HEAD(&a->incs_list);
207 list_add(&a->a_list, &ipvs->app_list);
208 /* increase the module use count */
209 ip_vs_use_count_inc();
212 mutex_unlock(&__ip_vs_app_mutex);
214 return err ? ERR_PTR(err) : a;
219 * ip_vs_app unregistration routine
220 * We are sure there are no app incarnations attached to services
222 void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
224 struct netns_ipvs *ipvs = net_ipvs(net);
225 struct ip_vs_app *a, *anxt, *inc, *nxt;
230 mutex_lock(&__ip_vs_app_mutex);
232 list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
233 if (app && strcmp(app->name, a->name))
235 list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
236 ip_vs_app_inc_release(net, inc);
239 list_del(&a->a_list);
242 /* decrease the module use count */
243 ip_vs_use_count_dec();
246 mutex_unlock(&__ip_vs_app_mutex);
251 * Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
253 int ip_vs_bind_app(struct ip_vs_conn *cp,
254 struct ip_vs_protocol *pp)
256 return pp->app_conn_bind(cp);
261 * Unbind cp from application incarnation (called by cp destructor)
263 void ip_vs_unbind_app(struct ip_vs_conn *cp)
265 struct ip_vs_app *inc = cp->app;
270 if (inc->unbind_conn)
271 inc->unbind_conn(inc, cp);
273 inc->done_conn(inc, cp);
274 ip_vs_app_inc_put(inc);
280 * Fixes th->seq based on ip_vs_seq info.
282 static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
284 __u32 seq = ntohl(th->seq);
287 * Adjust seq with delta-offset for all packets after
288 * the most recent resized pkt seq and with previous_delta offset
289 * for all packets before most recent resized pkt seq.
291 if (vseq->delta || vseq->previous_delta) {
292 if(after(seq, vseq->init_seq)) {
293 th->seq = htonl(seq + vseq->delta);
294 IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
295 __func__, vseq->delta);
297 th->seq = htonl(seq + vseq->previous_delta);
298 IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n",
299 __func__, vseq->previous_delta);
306 * Fixes th->ack_seq based on ip_vs_seq info.
309 vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
311 __u32 ack_seq = ntohl(th->ack_seq);
314 * Adjust ack_seq with delta-offset for
315 * the packets AFTER most recent resized pkt has caused a shift
316 * for packets before most recent resized pkt, use previous_delta
318 if (vseq->delta || vseq->previous_delta) {
319 /* since ack_seq is the number of octet that is expected
320 to receive next, so compare it with init_seq+delta */
321 if(after(ack_seq, vseq->init_seq+vseq->delta)) {
322 th->ack_seq = htonl(ack_seq - vseq->delta);
323 IP_VS_DBG(9, "%s(): subtracted delta "
324 "(%d) from ack_seq\n", __func__, vseq->delta);
327 th->ack_seq = htonl(ack_seq - vseq->previous_delta);
328 IP_VS_DBG(9, "%s(): subtracted "
329 "previous_delta (%d) from ack_seq\n",
330 __func__, vseq->previous_delta);
337 * Updates ip_vs_seq if pkt has been resized
338 * Assumes already checked proto==IPPROTO_TCP and diff!=0.
340 static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
341 unsigned int flag, __u32 seq, int diff)
343 /* spinlock is to keep updating cp->flags atomic */
344 spin_lock(&cp->lock);
345 if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
346 vseq->previous_delta = vseq->delta;
348 vseq->init_seq = seq;
351 spin_unlock(&cp->lock);
354 static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
355 struct ip_vs_app *app)
358 const unsigned int tcp_offset = ip_hdrlen(skb);
362 if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
365 th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
368 * Remember seq number in case this pkt gets resized
370 seq = ntohl(th->seq);
373 * Fix seq stuff if flagged as so.
375 if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
376 vs_fix_seq(&cp->out_seq, th);
377 if (cp->flags & IP_VS_CONN_F_IN_SEQ)
378 vs_fix_ack_seq(&cp->in_seq, th);
381 * Call private output hook function
383 if (app->pkt_out == NULL)
386 if (!app->pkt_out(app, cp, skb, &diff))
390 * Update ip_vs seq stuff if len has changed.
393 vs_seq_update(cp, &cp->out_seq,
394 IP_VS_CONN_F_OUT_SEQ, seq, diff);
400 * Output pkt hook. Will call bound ip_vs_app specific function
401 * called by ipvs packet handler, assumes previously checked cp!=NULL
402 * returns false if it can't handle packet (oom)
404 int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
406 struct ip_vs_app *app;
409 * check if application module is bound to
412 if ((app = cp->app) == NULL)
415 /* TCP is complicated */
416 if (cp->protocol == IPPROTO_TCP)
417 return app_tcp_pkt_out(cp, skb, app);
420 * Call private output hook function
422 if (app->pkt_out == NULL)
425 return app->pkt_out(app, cp, skb, NULL);
429 static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
430 struct ip_vs_app *app)
433 const unsigned int tcp_offset = ip_hdrlen(skb);
437 if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
440 th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
443 * Remember seq number in case this pkt gets resized
445 seq = ntohl(th->seq);
448 * Fix seq stuff if flagged as so.
450 if (cp->flags & IP_VS_CONN_F_IN_SEQ)
451 vs_fix_seq(&cp->in_seq, th);
452 if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
453 vs_fix_ack_seq(&cp->out_seq, th);
456 * Call private input hook function
458 if (app->pkt_in == NULL)
461 if (!app->pkt_in(app, cp, skb, &diff))
465 * Update ip_vs seq stuff if len has changed.
468 vs_seq_update(cp, &cp->in_seq,
469 IP_VS_CONN_F_IN_SEQ, seq, diff);
475 * Input pkt hook. Will call bound ip_vs_app specific function
476 * called by ipvs packet handler, assumes previously checked cp!=NULL.
477 * returns false if can't handle packet (oom).
479 int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
481 struct ip_vs_app *app;
484 * check if application module is bound to
487 if ((app = cp->app) == NULL)
490 /* TCP is complicated */
491 if (cp->protocol == IPPROTO_TCP)
492 return app_tcp_pkt_in(cp, skb, app);
495 * Call private input hook function
497 if (app->pkt_in == NULL)
500 return app->pkt_in(app, cp, skb, NULL);
504 #ifdef CONFIG_PROC_FS
506 * /proc/net/ip_vs_app entry function
509 static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
511 struct ip_vs_app *app, *inc;
513 list_for_each_entry(app, &ipvs->app_list, a_list) {
514 list_for_each_entry(inc, &app->incs_list, a_list) {
523 static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
525 struct net *net = seq_file_net(seq);
526 struct netns_ipvs *ipvs = net_ipvs(net);
528 mutex_lock(&__ip_vs_app_mutex);
530 return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
533 static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
535 struct ip_vs_app *inc, *app;
537 struct net *net = seq_file_net(seq);
538 struct netns_ipvs *ipvs = net_ipvs(net);
541 if (v == SEQ_START_TOKEN)
542 return ip_vs_app_idx(ipvs, 0);
547 if ((e = inc->a_list.next) != &app->incs_list)
548 return list_entry(e, struct ip_vs_app, a_list);
550 /* go on to next application */
551 for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
552 app = list_entry(e, struct ip_vs_app, a_list);
553 list_for_each_entry(inc, &app->incs_list, a_list) {
560 static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
562 mutex_unlock(&__ip_vs_app_mutex);
565 static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
567 if (v == SEQ_START_TOKEN)
568 seq_puts(seq, "prot port usecnt name\n");
570 const struct ip_vs_app *inc = v;
572 seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
573 ip_vs_proto_name(inc->protocol),
575 atomic_read(&inc->usecnt),
581 static const struct seq_operations ip_vs_app_seq_ops = {
582 .start = ip_vs_app_seq_start,
583 .next = ip_vs_app_seq_next,
584 .stop = ip_vs_app_seq_stop,
585 .show = ip_vs_app_seq_show,
588 static int ip_vs_app_open(struct inode *inode, struct file *file)
590 return seq_open_net(inode, file, &ip_vs_app_seq_ops,
591 sizeof(struct seq_net_private));
594 static const struct file_operations ip_vs_app_fops = {
595 .owner = THIS_MODULE,
596 .open = ip_vs_app_open,
599 .release = seq_release_net,
603 int __net_init ip_vs_app_net_init(struct net *net)
605 struct netns_ipvs *ipvs = net_ipvs(net);
607 INIT_LIST_HEAD(&ipvs->app_list);
608 proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops);
612 void __net_exit ip_vs_app_net_cleanup(struct net *net)
614 unregister_ip_vs_app(net, NULL /* all */);
615 remove_proc_entry("ip_vs_app", net->proc_net);