Reported by Travis.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
skb_reset_inner_headers(skb);
if (skb_is_gso(skb)) {
+ if (skb_is_encapsulated(skb)) {
+ err = -ENOSYS;
+ goto error;
+ }
+
if (gre_csum)
OVS_GSO_CB(skb)->fix_segment = gre_csum_fix;
else
return addend;
}
#else
+
static inline struct sk_buff *rpl_gre_handle_offloads(struct sk_buff *skb,
bool gre_csum)
{
- if ((ovs_skb_get_inner_protocol(skb) || skb->encapsulation) &&
- skb_is_gso(skb)) {
+ if (skb_is_gso(skb) && skb_is_encapsulated(skb)) {
kfree_skb(skb);
return ERR_PTR(-ENOSYS);
}
#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800)
+bool skb_is_encapsulated(struct sk_buff *skb);
+
#endif /* __NET_IP_TUNNELS_H */
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, __be32 vni)
{
- if ((ovs_skb_get_inner_protocol(skb) || skb->encapsulation) &&
- skb_is_gso(skb)) {
+ if (skb_is_gso(skb) && skb_is_encapsulated(skb)) {
kfree_skb(skb);
return -ENOSYS;
}
}
#endif
+
+bool skb_is_encapsulated(struct sk_buff *skb)
+{
+ /* Checking for the inner protocol should be sufficient on newer kernels,
+ * but older kernels only set the encapsulation bit.
+ */
+ /* XXX: set the inner protocol for all tunnels in OVS. */
+ return ovs_skb_get_inner_protocol(skb) || skb_encapsulation(skb);
+}
static int handle_offloads(struct sk_buff *skb)
{
if (skb_is_gso(skb)) {
+ if (skb_is_encapsulated(skb))
+ return -ENOSYS;
+
OVS_GSO_CB(skb)->fix_segment = vxlan_gso;
} else {
if (skb->ip_summed != CHECKSUM_PARTIAL)
static int handle_offloads(struct sk_buff *skb)
{
- if (skb_is_gso(skb))
+ if (skb_is_gso(skb)) {
+ if (skb_is_encapsulated(skb))
+ return -ENOSYS;
OVS_GSO_CB(skb)->fix_segment = geneve_fix_segment;
- else if (skb->ip_summed != CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->ip_summed = CHECKSUM_NONE;
+ }
return 0;
}
#else
static int handle_offloads(struct sk_buff *skb)
{
- if (skb->encapsulation && skb_is_gso(skb)) {
- kfree_skb(skb);
- return -ENOSYS;
- }
-
if (skb_is_gso(skb)) {
- int err = skb_unclone(skb, GFP_ATOMIC);
+ int err;
+
+ if (skb_is_encapsulated(skb))
+ return -ENOSYS;
+
+ err = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(err))
return err;
static int handle_offloads(struct sk_buff *skb)
{
- if (skb_is_gso(skb))
+ if (skb_is_gso(skb)) {
+ if (skb_is_encapsulated(skb))
+ return -ENOSYS;
+
OVS_GSO_CB(skb)->fix_segment = lisp_fix_segment;
- else if (skb->ip_summed != CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->ip_summed = CHECKSUM_NONE;
+ }
return 0;
}
#else
static int handle_offloads(struct sk_buff *skb)
{
- if ((ovs_skb_get_inner_protocol(skb) || skb->encapsulation) &&
- skb_is_gso(skb)) {
- kfree_skb(skb);
- return -ENOSYS;
- }
-
if (skb_is_gso(skb)) {
- int err = skb_unclone(skb, GFP_ATOMIC);
+ int err;
+
+ if (skb_is_encapsulated(skb))
+ return -ENOSYS;
+
+ err = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(err))
return err;