From ffc117c13919c94b66f0b00841ea3343c8dd47c2 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Tue, 3 Dec 2013 20:41:15 -0800 Subject: [PATCH] datapath: Backport skb_zerocopy() functions. These functions will be factored out and exported upstream. On kernels 3.5 and newer the backport will provide zero copy support but older kernels will work as before (due to lack of skb->head_frag). Signed-off-by: Jesse Gross --- datapath/linux/compat/include/linux/skbuff.h | 6 ++ datapath/linux/compat/skbuff-openvswitch.c | 95 ++++++++++++++++++++ 2 files changed, 101 insertions(+) diff --git a/datapath/linux/compat/include/linux/skbuff.h b/datapath/linux/compat/include/linux/skbuff.h index 4f2260040..ced572e1a 100644 --- a/datapath/linux/compat/include/linux/skbuff.h +++ b/datapath/linux/compat/include/linux/skbuff.h @@ -228,4 +228,10 @@ static inline __u32 skb_get_rxhash(struct sk_buff *skb) } #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) +unsigned int skb_zerocopy_headlen(const struct sk_buff *from); +void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, + int hlen); +#endif + #endif diff --git a/datapath/linux/compat/skbuff-openvswitch.c b/datapath/linux/compat/skbuff-openvswitch.c index 3baa09eeb..ddd7bc85a 100644 --- a/datapath/linux/compat/skbuff-openvswitch.c +++ b/datapath/linux/compat/skbuff-openvswitch.c @@ -14,3 +14,98 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb) } #endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) + +static inline bool head_frag(const struct sk_buff *skb) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + return skb->head_frag; +#else + return false; +#endif +} + + /** + * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() + * @from: source buffer + * + * Calculates the amount of linear headroom needed in the 'to' skb passed + * into skb_zerocopy(). 
+ */
+unsigned int
+skb_zerocopy_headlen(const struct sk_buff *from)
+{
+ unsigned int hlen = 0;
+
+ if (!head_frag(from) ||
+ skb_headlen(from) < L1_CACHE_BYTES ||
+ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+ hlen = skb_headlen(from);
+
+ if (skb_has_frag_list(from))
+ hlen = from->len;
+
+ return hlen;
+}
+
+/**
+ * skb_zerocopy - Zero copy skb to skb
+ * @to: destination buffer
+ * @from: source buffer
+ * @len: number of bytes to copy from source buffer
+ * @hlen: size of linear headroom in destination buffer
+ *
+ * Copies up to @len bytes from @from to @to by creating references
+ * to the frags in the source buffer.
+ *
+ * The @hlen as calculated by skb_zerocopy_headlen() specifies the
+ * headroom in the @to buffer.
+ */
+void
+skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+{
+ int i, j = 0;
+ int plen = 0; /* length of skb->head fragment */
+ struct page *page;
+ unsigned int offset;
+
+ BUG_ON(!head_frag(from) && !hlen);
+
+ /* don't bother with small payloads */
+ if (len <= skb_tailroom(to)) {
+ skb_copy_bits(from, 0, skb_put(to, len), len);
+ return;
+ }
+
+ if (hlen) {
+ skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+ len -= hlen;
+ } else {
+ plen = min_t(int, skb_headlen(from), len);
+ if (plen) {
+ page = virt_to_head_page(from->head);
+ offset = from->data - (unsigned char *)page_address(page);
+ __skb_fill_page_desc(to, 0, page, offset, plen);
+ get_page(page);
+ j = 1;
+ len -= plen;
+ }
+ }
+
+ to->truesize += len + plen;
+ to->len += len + plen;
+ to->data_len += len + plen;
+
+ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+ if (!len)
+ break;
+ skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
+ skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
+ len -= skb_shinfo(to)->frags[j].size;
+ skb_frag_ref(to, j);
+ j++;
+ }
+ skb_shinfo(to)->nr_frags = j;
+}
+#endif
-- 
2.20.1