Todd Deshane deshantm@gmail.com
Tom Everman teverman@google.com
Tsvi Slonim tsvi@toroki.com
+Tuan Nguyen tuan.nguyen@veriksystems.com
Tyler Coumbes coumbes@gmail.com
Valient Gough vgough@pobox.com
Vivien Bernet-Rollande vbr@soprive.net
static int nr_bridges;
#ifdef HAVE_RHEL_OVS_HOOK
-int netdev_rx_handler_register(struct net_device *dev,
- openvswitch_handle_frame_hook_t *hook,
- void *rx_handler_data)
+int rpl_netdev_rx_handler_register(struct net_device *dev,
+ openvswitch_handle_frame_hook_t *hook,
+ void *rx_handler_data)
{
nr_bridges++;
rcu_assign_pointer(dev->ax25_ptr, rx_handler_data);
rcu_assign_pointer(openvswitch_handle_frame_hook, hook);
return 0;
}
+EXPORT_SYMBOL_GPL(rpl_netdev_rx_handler_register);
#else
-int netdev_rx_handler_register(struct net_device *dev,
- struct sk_buff *(*hook)(struct net_bridge_port *p,
- struct sk_buff *skb),
- void *rx_handler_data)
+int rpl_netdev_rx_handler_register(struct net_device *dev,
+ struct sk_buff *(*hook)(struct net_bridge_port *p,
+ struct sk_buff *skb),
+ void *rx_handler_data)
{
nr_bridges++;
if (dev->br_port)
br_handle_frame_hook = hook;
return 0;
}
+EXPORT_SYMBOL_GPL(rpl_netdev_rx_handler_register);
#endif
-void netdev_rx_handler_unregister(struct net_device *dev)
+void rpl_netdev_rx_handler_unregister(struct net_device *dev)
{
nr_bridges--;
#ifdef HAVE_RHEL_OVS_HOOK
	rcu_assign_pointer(openvswitch_handle_frame_hook, NULL);
#else
	br_handle_frame_hook = NULL;
#endif
}
+EXPORT_SYMBOL_GPL(rpl_netdev_rx_handler_unregister);
+
#endif
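
The netdevice.c hunks above show the pattern this patch applies throughout: each backported helper is renamed with an rpl_ prefix and exported with EXPORT_SYMBOL_GPL(), while a #define in the matching wrapper header (see the header hunks further down) maps the upstream name onto the replacement. Call sites compile unchanged, and the compat code no longer collides with kernels that export the real symbol; the exports presumably keep the helpers reachable when the compat objects and their consumers are built as separate modules. A minimal userspace sketch of the same remap, using a hypothetical frob_register() rather than anything from the patch:

#include <stdio.h>

/* The wrapper header's job: map the upstream name onto the backport. */
#define frob_register rpl_frob_register

/* The compat implementation, compiled under the prefixed name. */
static int rpl_frob_register(const char *name)
{
	printf("registered %s via rpl_frob_register()\n", name);
	return 0;
}

int main(void)
{
	/* The call site still spells the upstream name. */
	return frob_register("ovs0");
}
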
*nexthdrp = nexthdr;
return start;
}
+EXPORT_SYMBOL_GPL(rpl_ipv6_skip_exthdr);
#endif /* Kernel version < 3.3 */
#ifndef HAVE_IP6_FH_F_SKIP_RH
*offset = start;
return nexthdr;
}
+EXPORT_SYMBOL_GPL(rpl_ipv6_find_hdr);
#endif
* capacity in the base structure. Also note that no effort is made
* to efficiently pack objects across page boundaries.
*/
-struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+struct flex_array *rpl_flex_array_alloc(int element_size, unsigned int total,
gfp_t flags)
{
struct flex_array *ret;
FLEX_ARRAY_BASE_BYTES_LEFT);
return ret;
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_alloc);
static int fa_element_to_part_nr(struct flex_array *fa,
unsigned int element_nr)
* This is to be used in cases where the base 'struct flex_array'
* has been statically allocated and should not be freed.
*/
-void flex_array_free_parts(struct flex_array *fa)
+void rpl_flex_array_free_parts(struct flex_array *fa)
{
int part_nr;
for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
kfree(fa->parts[part_nr]);
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_free_parts);
-void flex_array_free(struct flex_array *fa)
+void rpl_flex_array_free(struct flex_array *fa)
{
flex_array_free_parts(fa);
kfree(fa);
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_free);
static unsigned int index_inside_part(struct flex_array *fa,
unsigned int element_nr,
*
* Locking must be provided by the caller.
*/
-int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+int rpl_flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags)
{
int part_nr = 0;
memcpy(dst, src, fa->element_size);
return 0;
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_put);
/**
* flex_array_clear - clear element in array at @element_nr
*
* Locking must be provided by the caller.
*/
-int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
+int rpl_flex_array_clear(struct flex_array *fa, unsigned int element_nr)
{
int part_nr = 0;
struct flex_array_part *part;
memset(dst, FLEX_ARRAY_FREE, fa->element_size);
return 0;
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_clear);
/**
* flex_array_prealloc - guarantee that array space exists
*
* Locking must be provided by the caller.
*/
-int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+int rpl_flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int nr_elements, gfp_t flags)
{
int start_part;
}
return 0;
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_prealloc);
/**
* flex_array_get - pull data back out of the array
*
* Locking must be provided by the caller.
*/
-void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
+void *rpl_flex_array_get(struct flex_array *fa, unsigned int element_nr)
{
int part_nr = 0;
struct flex_array_part *part;
}
return &part->elements[index_inside_part(fa, element_nr, part_nr)];
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_get);
/**
* flex_array_get_ptr - pull a ptr back out of the array
* flex_array_put_ptr(). This function should not be called if the
* element in question was not set using the _put_ptr() helper.
*/
-void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
+void *rpl_flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
{
void **tmp;
return *tmp;
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_get_ptr);
static int part_is_free(struct flex_array_part *part)
{
*
* Locking must be provided by the caller.
*/
-int flex_array_shrink(struct flex_array *fa)
+int rpl_flex_array_shrink(struct flex_array *fa)
{
struct flex_array_part *part;
int part_nr;
}
return ret;
}
+EXPORT_SYMBOL_GPL(rpl_flex_array_shrink);
#endif /* Linux version < 3.0.0 */
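
For context on the flex_array renames above: the API stores fixed-size elements in page-sized parts, so a caller allocates the array, preallocates the parts it will touch, then puts and gets elements by index. A sketch of a typical consumer, loosely modeled on how the OVS flow table sizes its bucket array; the bucket type and function name here are illustrative, not from the patch:

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/types.h>

struct bucket { u32 hash; };	/* illustrative element type */

static struct flex_array *make_buckets(unsigned int n)
{
	struct flex_array *buckets;
	struct bucket zero = { .hash = 0 };
	unsigned int i;

	buckets = flex_array_alloc(sizeof(struct bucket), n, GFP_KERNEL);
	if (!buckets)
		return NULL;

	/* Fault in every part up front so the puts below cannot fail. */
	if (flex_array_prealloc(buckets, 0, n, GFP_KERNEL)) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n; i++)
		flex_array_put(buckets, i, &zero, GFP_KERNEL);

	return buckets;	/* read entries back with flex_array_get() */
}
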
return jhash_3words(a, b, c, hashrnd);
}
-u32 __skb_get_hash(struct sk_buff *skb)
+u32 rpl__skb_get_rxhash(struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
#endif
return hash;
}
-EXPORT_SYMBOL_GPL(__skb_get_hash);
+EXPORT_SYMBOL_GPL(rpl__skb_get_rxhash);
+
#endif
genl_notify(skb, net, portid, group, nlh, flags);
#endif
}
+EXPORT_SYMBOL_GPL(rpl_genl_notify);
int rpl___genl_register_family(struct rpl_genl_family *f)
{
return err;
}
+EXPORT_SYMBOL_GPL(rpl___genl_register_family);
+
#endif /* kernel version < 3.13.0 */
*
* This function will add other UDP tunnel headers.
*/
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
- __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
- __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool csum, bool xnet)
+int rpl_geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
+ __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
+ __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+ bool csum, bool xnet)
{
struct genevehdr *gnvh;
int min_headroom;
tos, ttl, df, src_port, dst_port, xnet,
!csum);
}
-EXPORT_SYMBOL_GPL(geneve_xmit_skb);
+EXPORT_SYMBOL_GPL(rpl_geneve_xmit_skb);
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
return gs;
}
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
- geneve_rcv_t *rcv, void *data,
- bool no_share, bool ipv6)
+struct geneve_sock *rpl_geneve_sock_add(struct net *net, __be16 port,
+ geneve_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6)
{
return geneve_socket_create(net, port, rcv, data, ipv6);
}
-EXPORT_SYMBOL_GPL(geneve_sock_add);
+EXPORT_SYMBOL_GPL(rpl_geneve_sock_add);
static void rcu_free_gs(struct rcu_head *rcu)
{
kfree(gs);
}
-void geneve_sock_release(struct geneve_sock *gs)
+void rpl_geneve_sock_release(struct geneve_sock *gs)
{
udp_tunnel_sock_release(gs->sock);
call_rcu(&gs->rcu, rcu_free_gs);
}
-EXPORT_SYMBOL_GPL(geneve_sock_release);
+EXPORT_SYMBOL_GPL(rpl_geneve_sock_release);
.handler = gre_cisco_rcv,
};
-int gre_cisco_register(struct gre_cisco_protocol *newp)
+int rpl_gre_cisco_register(struct gre_cisco_protocol *newp)
{
int err;
return (cmpxchg((struct gre_cisco_protocol **)&gre_cisco_proto, NULL, newp) == NULL) ?
0 : -EBUSY;
}
-EXPORT_SYMBOL_GPL(gre_cisco_register);
+EXPORT_SYMBOL_GPL(rpl_gre_cisco_register);
-int gre_cisco_unregister(struct gre_cisco_protocol *proto)
+int rpl_gre_cisco_unregister(struct gre_cisco_protocol *proto)
{
int ret;
ret = gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
return ret;
}
-EXPORT_SYMBOL_GPL(gre_cisco_unregister);
+EXPORT_SYMBOL_GPL(rpl_gre_cisco_unregister);
#endif /* !HAVE_GRE_CISCO_REGISTER */
skb->len - gre_offset, 0));
}
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
+struct sk_buff *rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
int type = gre_csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE;
gso_fix_segment_t fix_segment;
return ovs_iptunnel_handle_offloads(skb, gre_csum, type, fix_segment);
}
-EXPORT_SYMBOL_GPL(gre_handle_offloads);
+EXPORT_SYMBOL_GPL(rpl_gre_handle_offloads);
static bool is_gre_gso(struct sk_buff *skb)
{
return skb_is_gso(skb);
}
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+void rpl_gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
int hdr_len)
{
struct gre_base_hdr *greh;
ovs_skb_set_inner_protocol(skb, tpi->proto);
}
-EXPORT_SYMBOL_GPL(gre_build_header);
+EXPORT_SYMBOL_GPL(rpl_gre_build_header);
#endif /* CONFIG_NET_IPGRE_DEMUX */
kfree_skb(skb);
return err;
}
+EXPORT_SYMBOL_GPL(rpl_dev_queue_xmit);
#endif /* 3.16 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
}
return ret;
}
+EXPORT_SYMBOL_GPL(rpl_ip_local_out);
+
#endif /* 3.18 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
#define ip_local_out rpl_ip_local_out
-int ip_local_out(struct sk_buff *skb);
+int rpl_ip_local_out(struct sk_buff *skb);
static inline int skb_inner_mac_offset(const struct sk_buff *skb)
{
FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
}
-struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+#define flex_array_alloc rpl_flex_array_alloc
+struct flex_array *rpl_flex_array_alloc(int element_size, unsigned int total,
gfp_t flags);
-int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+
+#define flex_array_prealloc rpl_flex_array_prealloc
+int rpl_flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int nr_elements, gfp_t flags);
-void flex_array_free(struct flex_array *fa);
-void flex_array_free_parts(struct flex_array *fa);
-int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+
+#define flex_array_free rpl_flex_array_free
+void rpl_flex_array_free(struct flex_array *fa);
+
+#define flex_array_free_parts rpl_flex_array_free_parts
+void rpl_flex_array_free_parts(struct flex_array *fa);
+
+#define flex_array_put rpl_flex_array_put
+int rpl_flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags);
-int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
-void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
-int flex_array_shrink(struct flex_array *fa);
-#define flex_array_put_ptr(fa, nr, src, gfp) \
+#define flex_array_clear rpl_flex_array_clear
+int rpl_flex_array_clear(struct flex_array *fa, unsigned int element_nr);
+
+#define flex_array_get rpl_flex_array_get
+void *rpl_flex_array_get(struct flex_array *fa, unsigned int element_nr);
+
+#define flex_array_shrink rpl_flex_array_shrink
+int rpl_flex_array_shrink(struct flex_array *fa);
+
+#define flex_array_put_ptr rpl_flex_array_put_ptr
+#define rpl_flex_array_put_ptr(fa, nr, src, gfp) \
flex_array_put(fa, nr, (void *)&(src), gfp)
-void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);
+#define flex_array_get_ptr rpl_flex_array_get_ptr
+void *rpl_flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);
#endif /* Linux version < 3.0.0 */
#endif /* __LINUX_FLEX_ARRAY_WRAPPER_H */
#ifndef HAVE_SKBUFF_HEADER_HELPERS
#include <linux/skbuff.h>
+
static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_network_header(skb);
#endif
#ifndef net_get_random_once
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
+#define __net_get_random_once rpl___net_get_random_once
+bool rpl___net_get_random_once(void *buf, int nbytes, bool *done,
atomic_t *done_key);
#define ___NET_RANDOM_STATIC_KEY_INIT ATOMIC_INIT(0)
typedef struct sk_buff *(openvswitch_handle_frame_hook_t)(struct sk_buff *skb);
extern openvswitch_handle_frame_hook_t *openvswitch_handle_frame_hook;
-int netdev_rx_handler_register(struct net_device *dev,
- openvswitch_handle_frame_hook_t *hook,
- void *rx_handler_data);
+#define netdev_rx_handler_register rpl_netdev_rx_handler_register
+int rpl_netdev_rx_handler_register(struct net_device *dev,
+ openvswitch_handle_frame_hook_t *hook,
+ void *rx_handler_data);
#else
-int netdev_rx_handler_register(struct net_device *dev,
- struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
- struct sk_buff *skb),
- void *rx_handler_data);
+#define netdev_rx_handler_register rpl_netdev_rx_handler_register
+int rpl_netdev_rx_handler_register(struct net_device *dev,
+ struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
+ struct sk_buff *skb),
+ void *rx_handler_data);
#endif
-void netdev_rx_handler_unregister(struct net_device *dev);
+#define netdev_rx_handler_unregister rpl_netdev_rx_handler_unregister
+void rpl_netdev_rx_handler_unregister(struct net_device *dev);
#endif
#ifndef HAVE_DEV_GET_BY_INDEX_RCU
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define dev_queue_xmit rpl_dev_queue_xmit
-int dev_queue_xmit(struct sk_buff *skb);
+int rpl_dev_queue_xmit(struct sk_buff *skb);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
u8 sh1, sh2;
};
-struct reciprocal_value reciprocal_value(u32 d);
+#define reciprocal_value rpl_reciprocal_value
+struct reciprocal_value rpl_reciprocal_value(u32 d);
#define reciprocal_divide rpl_reciprocal_divide
-static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
+static inline u32 rpl_reciprocal_divide(u32 a, struct reciprocal_value R)
{
u32 t = (u32)(((u64)a * R.m) >> 32);
return (t + ((a - t) >> R.sh1)) >> R.sh2;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) */
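
In the reciprocal_divide() inline shown above, division by a constant d is replaced by one multiply with a precomputed 32-bit reciprocal m plus two shifts, using the (m, sh1, sh2) triple from struct reciprocal_value. The rpl_reciprocal_value() body is elided in this excerpt; the userspace sketch below assumes it follows the upstream lib/reciprocal_div.c algorithm that this backport mirrors, and brute-force checks the pair against ordinary division:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct reciprocal_value { uint32_t m; uint8_t sh1, sh2; };

static struct reciprocal_value reciprocal_value(uint32_t d)
{
	struct reciprocal_value R;
	int l = d > 1 ? 32 - __builtin_clz(d - 1) : 0;	/* fls(d - 1) */
	uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

	R.m = (uint32_t)m;
	R.sh1 = l > 1 ? 1 : l;		/* min(l, 1) */
	R.sh2 = l > 1 ? l - 1 : 0;	/* max(l - 1, 0) */
	return R;
}

static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
	/* Same arithmetic as the inline above: one multiply, two shifts. */
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);
	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

int main(void)
{
	uint32_t d, a;

	for (d = 1; d <= 100; d++) {
		struct reciprocal_value R = reciprocal_value(d);

		for (a = 0; a < 200000; a++)
			assert(reciprocal_divide(a, R) == a / d);
		assert(reciprocal_divide(UINT32_MAX, R) == UINT32_MAX / d);
	}
	printf("reciprocal_divide agrees with '/' for all tested cases\n");
	return 0;
}
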
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
-unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
+#define skb_zerocopy_headlen rpl_skb_zerocopy_headlen
+unsigned int rpl_skb_zerocopy_headlen(const struct sk_buff *from);
#endif
#ifndef HAVE_SKB_ZEROCOPY
#define skb_zerocopy rpl_skb_zerocopy
-int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len,
- int hlen);
+int rpl_skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len,
+ int hlen);
#endif
#ifndef HAVE_SKB_CLEAR_HASH
#ifndef HAVE_SKB_ENSURE_WRITABLE
#define skb_ensure_writable rpl_skb_ensure_writable
-int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int rpl_skb_ensure_writable(struct sk_buff *skb, int write_len);
#endif
#ifndef HAVE_SKB_VLAN_POP
#define skb_vlan_pop rpl_skb_vlan_pop
-int skb_vlan_pop(struct sk_buff *skb);
+int rpl_skb_vlan_pop(struct sk_buff *skb);
#endif
#ifndef HAVE_SKB_VLAN_PUSH
#define skb_vlan_push rpl_skb_vlan_push
-int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+int rpl_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
#endif
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
-void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
- const __be32 *from, const __be32 *to,
- int pseudohdr);
+#define inet_proto_csum_replace16 rpl_inet_proto_csum_replace16
+void rpl_inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+ const __be32 *from, const __be32 *to,
+ int pseudohdr);
#endif
#endif /* checksum.h */
#define genl_family rpl_genl_family
#define genl_notify rpl_genl_notify
-void genl_notify(struct genl_family *family,
- struct sk_buff *skb, struct net *net, u32 portid, u32 group,
- struct nlmsghdr *nlh, gfp_t flags);
+void rpl_genl_notify(struct genl_family *family,
+ struct sk_buff *skb, struct net *net, u32 portid, u32 group,
+ struct nlmsghdr *nlh, gfp_t flags);
static inline void *rpl_genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
struct genl_family *family, int flags, u8 cmd)
#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
- geneve_rcv_t *rcv, void *data,
- bool no_share, bool ipv6);
+#define geneve_sock_add rpl_geneve_sock_add
+struct geneve_sock *rpl_geneve_sock_add(struct net *net, __be16 port,
+ geneve_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6);
-void geneve_sock_release(struct geneve_sock *vs);
+#define geneve_sock_release rpl_geneve_sock_release
+void rpl_geneve_sock_release(struct geneve_sock *vs);
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
- __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
- __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool csum, bool xnet);
+#define geneve_xmit_skb rpl_geneve_xmit_skb
+int rpl_geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
+ __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
+ __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+ bool csum, bool xnet);
#endif /*ifdef CONFIG_INET */
#endif /*ifdef __NET_GENEVE_WRAPPER_H */
};
#define gre_cisco_register rpl_gre_cisco_register
-int gre_cisco_register(struct gre_cisco_protocol *proto);
+int rpl_gre_cisco_register(struct gre_cisco_protocol *proto);
#define gre_cisco_unregister rpl_gre_cisco_unregister
-int gre_cisco_unregister(struct gre_cisco_protocol *proto);
+int rpl_gre_cisco_unregister(struct gre_cisco_protocol *proto);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
struct gre_base_hdr {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)
#define gre_build_header rpl_gre_build_header
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
- int hdr_len);
+void rpl_gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+ int hdr_len);
#define gre_handle_offloads rpl_gre_handle_offloads
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
+struct sk_buff *rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
#define ip_gre_calc_hlen rpl_ip_gre_calc_hlen
static inline int ip_gre_calc_hlen(__be16 o_flags)
void (*fix_segment)(struct sk_buff *));
#define iptunnel_xmit rpl_iptunnel_xmit
-int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
- __be16 df, bool xnet);
+int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
+ __be16 df, bool xnet);
#define iptunnel_pull_header rpl_iptunnel_pull_header
-int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
+int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
#else
#undef TUNNEL_OPTIONS_PRESENT
#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
-bool skb_is_encapsulated(struct sk_buff *skb);
+#define skb_is_encapsulated ovs_skb_is_encapsulated
+bool ovs_skb_is_encapsulated(struct sk_buff *skb);
#endif /* __NET_IP_TUNNELS_H */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
#define ipv6_skip_exthdr rpl_ipv6_skip_exthdr
-extern int ipv6_skip_exthdr(const struct sk_buff *skb, int start,
- u8 *nexthdrp, __be16 *frag_offp);
+extern int rpl_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+ u8 *nexthdrp, __be16 *frag_offp);
#endif
#ifndef HAVE_IP6_FH_F_SKIP_RH
* IP6_FH_F_SKIP_RH.
*/
#define ipv6_find_hdr rpl_ipv6_find_hdr
-extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
- int target, unsigned short *fragoff, int *fragflg);
+extern int rpl_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+ int target, unsigned short *fragoff, int *fragflg);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
#define register_pernet_device rpl_register_pernet_gen_device
#define unregister_pernet_device rpl_unregister_pernet_gen_device
-int compat_init_net(struct net *net, struct rpl_pernet_operations *pnet);
-void compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet);
+#define compat_init_net ovs_compat_init_net
+int ovs_compat_init_net(struct net *net, struct rpl_pernet_operations *pnet);
+#define compat_exit_net ovs_compat_exit_net
+void ovs_compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet);
#define DEFINE_COMPAT_PNET_REG_FUNC(TYPE) \
\
#endif
#ifndef HAVE_UDP_SET_CSUM
-void udp_set_csum(bool nocheck, struct sk_buff *skb,
- __be32 saddr, __be32 daddr, int len);
+#define udp_set_csum rpl_udp_set_csum
+void rpl_udp_set_csum(bool nocheck, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr, int len);
#endif
#endif
};
#define udp_sock_create rpl_udp_sock_create
-int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
- struct socket **sockp);
+int rpl_udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
};
/* Setup the given (UDP) sock to receive UDP encapsulated packets */
-void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
- struct udp_tunnel_sock_cfg *sock_cfg);
+#define setup_udp_tunnel_sock rpl_setup_udp_tunnel_sock
+void rpl_setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ struct udp_tunnel_sock_cfg *sock_cfg);
/* Transmit the skb using UDP encapsulation. */
-int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl,
- __be16 df, __be16 src_port, __be16 dst_port,
- bool xnet, bool nocheck);
+#define udp_tunnel_xmit_skb rpl_udp_tunnel_xmit_skb
+int rpl_udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+ __be16 df, __be16 src_port, __be16 dst_port,
+ bool xnet, bool nocheck);
-void udp_tunnel_sock_release(struct socket *sock);
+#define udp_tunnel_sock_release rpl_udp_tunnel_sock_release
+void rpl_udp_tunnel_sock_release(struct socket *sock);
void ovs_udp_gso(struct sk_buff *skb);
void ovs_udp_csum_gso(struct sk_buff *skb);
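
The declarations above backport the kernel's udp_tunnel API, which the geneve and vxlan compat code in this patch builds on: create a kernel UDP socket from a udp_port_cfg, attach an encapsulation receive hook with setup_udp_tunnel_sock(), and transmit with udp_tunnel_xmit_skb(). A sketch of a consumer under those assumptions, with a hypothetical stub receive handler; the encap_type value follows the generic UDP-encap demux convention and is an assumption, not taken from this excerpt:

#include <linux/skbuff.h>
#include <net/udp_tunnel.h>

/* Hypothetical receive hook: a real one would decapsulate the payload. */
static int toy_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct socket *toy_tunnel_open(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf = {
		.family = AF_INET,
		.local_udp_port = port,
	};
	struct udp_tunnel_sock_cfg tunnel_cfg = {
		.encap_type = 1,	/* assumed: generic UDP encap demux */
		.encap_rcv = toy_encap_rcv,
	};
	struct socket *sock;

	if (udp_sock_create(net, &udp_conf, &sock) < 0)
		return NULL;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	return sock;	/* torn down later with udp_tunnel_sock_release(sock) */
}
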
};
#define vxlan_sock_add rpl_vxlan_sock_add
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
- bool no_share, u32 flags);
+struct vxlan_sock *rpl_vxlan_sock_add(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data,
+ bool no_share, u32 flags);
#define vxlan_sock_release rpl_vxlan_sock_release
-void vxlan_sock_release(struct vxlan_sock *vs);
+void rpl_vxlan_sock_release(struct vxlan_sock *vs);
#define vxlan_xmit_skb rpl_vxlan_xmit_skb
-int vxlan_xmit_skb(struct vxlan_sock *vs,
- struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port,
- struct vxlan_metadata *md, bool xnet, u32 vxflags);
+int rpl_vxlan_xmit_skb(struct vxlan_sock *vs,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port,
+ struct vxlan_metadata *md, bool xnet, u32 vxflags);
#endif /* !HAVE_VXLAN_METADATA */
#endif
}
EXPORT_SYMBOL_GPL(ovs_iptunnel_handle_offloads);
-int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
if (unlikely(!pskb_may_pull(skb, hdr_len)))
return -ENOMEM;
skb->pkt_type = PACKET_HOST;
return 0;
}
-EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+EXPORT_SYMBOL_GPL(rpl_iptunnel_pull_header);
#endif
-bool skb_is_encapsulated(struct sk_buff *skb)
+bool ovs_skb_is_encapsulated(struct sk_buff *skb)
{
/* checking for inner protocol should be sufficient on newer kernel, but
* old kernel just set encapsulation bit.
*/
return ovs_skb_get_inner_protocol(skb) || skb_encapsulation(skb);
}
-EXPORT_SYMBOL_GPL(skb_is_encapsulated);
+EXPORT_SYMBOL_GPL(ovs_skb_is_encapsulated);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
-int compat_init_net(struct net *net, struct rpl_pernet_operations *pnet)
+int ovs_compat_init_net(struct net *net, struct rpl_pernet_operations *pnet)
{
int err;
void *ovs_net = kzalloc(pnet->size, GFP_KERNEL);
kfree(ovs_net);
return err;
}
+EXPORT_SYMBOL_GPL(ovs_compat_init_net);
-void compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet)
+void ovs_compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet)
{
void *ovs_net = net_generic(net, *pnet->id);
pnet->exit(net);
kfree(ovs_net);
}
+EXPORT_SYMBOL_GPL(ovs_compat_exit_net);
+
#endif
return harmonize_features(skb, protocol, features);
}
}
+EXPORT_SYMBOL_GPL(rpl_netif_skb_features);
#endif /* kernel version < 2.6.38 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
return skb_gso;
}
EXPORT_SYMBOL_GPL(rpl_skb_gso_segment);
+
#endif /* kernel version < 3.16.0 */
#include <linux/kernel.h>
#include <asm/div64.h>
+#include <linux/module.h>
#include <linux/reciprocal_div.h>
/*
* include/linux/reciprocal_div.h
*/
-struct reciprocal_value reciprocal_value(u32 d)
+struct reciprocal_value rpl_reciprocal_value(u32 d)
{
struct reciprocal_value R;
u64 m;
return R;
}
+EXPORT_SYMBOL_GPL(rpl_reciprocal_value);
* into skb_zerocopy().
*/
unsigned int
-skb_zerocopy_headlen(const struct sk_buff *from)
+rpl_skb_zerocopy_headlen(const struct sk_buff *from)
{
unsigned int hlen = 0;
return hlen;
}
+EXPORT_SYMBOL_GPL(rpl_skb_zerocopy_headlen);
#ifndef HAVE_SKB_ZEROCOPY
/**
* -EFAULT: skb_copy_bits() found some problem with skb geometry
*/
int
-skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
+rpl_skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
int i, j = 0;
int plen = 0; /* length of skb->head fragment */
return 0;
}
+EXPORT_SYMBOL_GPL(rpl_skb_zerocopy);
#endif
#endif
#ifndef HAVE_SKB_ENSURE_WRITABLE
-int skb_ensure_writable(struct sk_buff *skb, int write_len)
+int rpl_skb_ensure_writable(struct sk_buff *skb, int write_len)
{
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
+EXPORT_SYMBOL_GPL(rpl_skb_ensure_writable);
#endif
#ifndef HAVE_SKB_VLAN_POP
return err;
}
-int skb_vlan_pop(struct sk_buff *skb)
+int rpl_skb_vlan_pop(struct sk_buff *skb)
{
u16 vlan_tci;
__be16 vlan_proto;
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
}
+EXPORT_SYMBOL_GPL(rpl_skb_vlan_pop);
#endif
#ifndef HAVE_SKB_VLAN_PUSH
-int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+int rpl_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
if (skb_vlan_tag_present(skb)) {
unsigned int offset = skb->data - skb_mac_header(skb);
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
}
+EXPORT_SYMBOL_GPL(rpl_skb_vlan_push);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
return 0;
}
EXPORT_SYMBOL(rpl_pskb_expand_head);
+
#endif
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
* for the simple case like when setting the checksum for a UDP tunnel.
*/
-void udp_set_csum(bool nocheck, struct sk_buff *skb,
- __be32 saddr, __be32 daddr, int len)
+void rpl_udp_set_csum(bool nocheck, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr, int len)
{
struct udphdr *uh = udp_hdr(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
+EXPORT_SYMBOL_GPL(rpl_udp_set_csum);
#endif /* Linux version < 3.16 */
#include <net/udp_tunnel.h>
#include <net/net_namespace.h>
-int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
- struct socket **sockp)
+int rpl_udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp)
{
int err;
struct socket *sock = NULL;
*sockp = NULL;
return err;
}
-EXPORT_SYMBOL_GPL(udp_sock_create);
+EXPORT_SYMBOL_GPL(rpl_udp_sock_create);
-void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
- struct udp_tunnel_sock_cfg *cfg)
+void rpl_setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ struct udp_tunnel_sock_cfg *cfg)
{
struct sock *sk = sock->sk;
udp_tunnel_encap_enable(sock);
}
-EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
+EXPORT_SYMBOL_GPL(rpl_setup_udp_tunnel_sock);
void ovs_udp_gso(struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(ovs_udp_csum_gso);
-int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl,
- __be16 df, __be16 src_port, __be16 dst_port,
- bool xnet, bool nocheck)
+int rpl_udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+ __be16 df, __be16 src_port, __be16 dst_port,
+ bool xnet, bool nocheck)
{
struct udphdr *uh;
return iptunnel_xmit(skb->sk, rt, skb, src, dst, IPPROTO_UDP,
tos, ttl, df, xnet);
}
-EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
+EXPORT_SYMBOL_GPL(rpl_udp_tunnel_xmit_skb);
-void udp_tunnel_sock_release(struct socket *sock)
+void rpl_udp_tunnel_sock_release(struct socket *sock)
{
rcu_assign_sk_user_data(sock->sk, NULL);
kernel_sock_shutdown(sock, SHUT_RDWR);
sk_release_kernel(sock->sk);
}
-EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
+EXPORT_SYMBOL_GPL(rpl_udp_tunnel_sock_release);
#endif /* Linux version < 3.20 */
#include <asm/uaccess.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
-void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
- const __be32 *from, const __be32 *to,
- int pseudohdr)
+void rpl_inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+ const __be32 *from, const __be32 *to,
+ int pseudohdr)
{
__be32 diff[] = {
~from[0], ~from[1], ~from[2], ~from[3],
*sum = ~csum_fold(csum_partial(diff, sizeof(diff),
csum_unfold(*sum)));
}
+EXPORT_SYMBOL_GPL(rpl_inet_proto_csum_replace16);
#endif
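
rpl_inet_proto_csum_replace16() above patches an existing one's-complement checksum by summing in the bitwise inverse of the old words plus the new words, instead of recomputing the checksum from scratch (the RFC 1624 incremental-update identity). A small userspace check of that identity on a toy buffer, independent of the kernel helpers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One's-complement checksum of an even-length buffer (full recompute). */
static uint16_t csum16(const uint16_t *p, size_t words)
{
	uint32_t sum = 0;

	while (words--)
		sum += *p++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t data[8] = { 0x1111, 0x2222, 0x3333, 0x4444 };
	uint16_t old = csum16(data, 8);
	uint16_t from = data[2], to = 0xbeef;
	uint32_t sum;

	/* Rewrite one word, then patch the checksum incrementally:
	 * new = ~(~old + ~from + to), the same ~from/+to folding that
	 * the diff[] array performs above. */
	data[2] = to;
	sum = (uint16_t)~old;
	sum += (uint16_t)~from;
	sum += to;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	assert((uint16_t)~sum == csum16(data, 8));
	printf("incremental update matches full recompute: 0x%04x\n",
	       (uint16_t)~sum);
	return 0;
}
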
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
+
+bool rpl___net_get_random_once(void *buf, int nbytes, bool *done,
atomic_t *done_key)
{
static DEFINE_SPINLOCK(lock);
return true;
}
+EXPORT_SYMBOL_GPL(rpl___net_get_random_once);
+
#endif
gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
-int vxlan_xmit_skb(struct vxlan_sock *vs,
- struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port,
- struct vxlan_metadata *md, bool xnet, u32 vxflags)
+int rpl_vxlan_xmit_skb(struct vxlan_sock *vs,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port,
+ struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
struct vxlanhdr *vxh;
int min_headroom;
ttl, df, src_port, dst_port, xnet,
!udp_sum);
}
-EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
+EXPORT_SYMBOL_GPL(rpl_vxlan_xmit_skb);
static void rcu_free_vs(struct rcu_head *rcu)
{
return vs;
}
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
- bool no_share, u32 flags)
+struct vxlan_sock *rpl_vxlan_sock_add(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data,
+ bool no_share, u32 flags)
{
return vxlan_socket_create(net, port, rcv, data, flags);
}
-EXPORT_SYMBOL_GPL(vxlan_sock_add);
+EXPORT_SYMBOL_GPL(rpl_vxlan_sock_add);
-void vxlan_sock_release(struct vxlan_sock *vs)
+void rpl_vxlan_sock_release(struct vxlan_sock *vs)
{
ASSERT_OVSL();
queue_work(system_wq, &vs->del_work);
}
-EXPORT_SYMBOL_GPL(vxlan_sock_release);
+EXPORT_SYMBOL_GPL(rpl_vxlan_sock_release);
#endif /* !USE_UPSTREAM_VXLAN */