Kernels 3.17 and newer have a work queue to evict old fragments, while
older kernel versions use an LRU in the fast path; see upstream commit
b13d3cbfb8e8 ("inet: frag: move eviction of queues to work queue").
This commit fixes the version checking so that rather than enabling the
code for either of these approaches using version checks, it is
triggered based on the presence of the work queue in "struct inet_frags".
Signed-off-by: Joe Stringer <joe@ovn.org>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
[OVS_DEFINE([HAVE_INET_FRAGS_CONST])])
OVS_GREP_IFELSE([$KSRC/include/net/inet_frag.h], [last_in],
[OVS_DEFINE([HAVE_INET_FRAGS_LAST_IN])])
+ OVS_FIND_FIELD_IFELSE([$KSRC/include/net/inet_frag.h], [inet_frags],
+ [frags_work])
OVS_GREP_IFELSE([$KSRC/include/net/dst_metadata.h], [metadata_dst])
static inline bool rpl_inet_frag_evicting(struct inet_frag_queue *q)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
- return (q_flags(q) & INET_FRAG_FIRST_IN) && q->fragments != NULL;
-#else
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
struct ovs_inet_frag_queue *ofq = (struct ovs_inet_frag_queue *)q;
return !hlist_unhashed(&ofq->list_evictor);
+#else
+ return (q_flags(q) & INET_FRAG_FIRST_IN) && q->fragments != NULL;
#endif
}
#define inet_frag_evicting rpl_inet_frag_evicting
return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
static bool inet_frag_may_rebuild(struct inet_frags *f)
{
return time_after(jiffies,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
INIT_WORK(&f->frags_work, inet_frag_worker);
#endif
INIT_HLIST_HEAD(&hb->chain);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
- rwlock_init(&f->lock);
- f->secret_timer.expires = jiffies + f->secret_interval;
-#else
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
seqlock_init(&f->rnd_seqlock);
f->last_rebuild_jiffies = 0;
f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
NULL);
if (!f->frags_cachep)
return -ENOMEM;
+#else
+ rwlock_init(&f->lock);
+ f->secret_timer.expires = jiffies + f->secret_interval;
void inet_frags_fini(struct inet_frags *f)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
cancel_work_sync(&f->frags_work);
kmem_cache_destroy(f->frags_cachep);
#endif
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
-{
- read_lock_bh(&f->lock);
- inet_frag_evictor(nf, f, true);
- read_unlock_bh(&f->lock);
-}
-#else
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
unsigned int seq;
percpu_counter_sum(&nf->mem))
goto evict_again;
}
+#else
+void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+{
+ read_lock_bh(&f->lock);
+ inet_frag_evictor(nf, f, true);
+ read_unlock_bh(&f->lock);
+}
#endif
static struct inet_frag_bucket *
if (f->destructor)
f->destructor(q);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
- kfree(q);
-#else
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
kmem_cache_free(f->frags_cachep, q);
+#else
+ kfree(q);
#endif
sub_frag_mem_limit(nf, sum);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
+ int i;
+
+ for (i = 0; i < INETFRAGS_HASHSZ ; i++)
+ inet_evict_bucket(f, &f->hash[i]);
+
+ return 0;
+#else
struct inet_frag_queue *q;
int work, evicted = 0;
-#else
- int i;
-
- for (i = 0; i < INETFRAGS_HASHSZ ; i++)
- inet_evict_bucket(f, &f->hash[i]);
-
- return 0;
struct inet_frag_queue *q;
if (frag_mem_limit(nf) > nf->high_thresh) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
inet_frag_schedule_worker(f);
#endif
return NULL;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
- q = kzalloc(f->qsize, GFP_ATOMIC);
-#else
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
+#else
+ q = kzalloc(f->qsize, GFP_ATOMIC);
#endif
if (!q)
return NULL;
struct inet_frag_queue *q;
int depth = 0;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
- if (frag_mem_limit(nf) > nf->high_thresh)
- inet_frag_evictor(nf, f, false);
-#else
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
if (frag_mem_limit(nf) > nf->low_thresh)
inet_frag_schedule_worker(f);
+#else
+ if (frag_mem_limit(nf) > nf->high_thresh)
+ inet_frag_evictor(nf, f, false);
#endif
hash &= (INETFRAGS_HASHSZ - 1);
if (depth <= INETFRAGS_MAXDEPTH)
return inet_frag_create(nf, f, key);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
if (inet_frag_may_rebuild(f)) {
if (!f->rebuild)
f->rebuild = true;
ip4_frags.qsize = sizeof(struct ipq);
ip4_frags.match = ip4_frag_match;
ip4_frags.frag_expire = ip_expire;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
ip4_frags.frags_cache_name = ip_frag_cache_name;
#endif
if (inet_frags_init(&ip4_frags)) {
nf_frags.qsize = sizeof(struct frag_queue);
nf_frags.match = rpl_ip6_frag_match;
nf_frags.frag_expire = nf_ct_frag6_expire;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
nf_frags.frags_cache_name = nf_frags_cache_name;
#endif
ret = inet_frags_init(&nf_frags);