Use netdev comment style.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Acked-by: Andy Zhou <azhou@nicira.com>
static struct action_fifo __percpu *action_fifos;
#define EXEC_ACTIONS_LEVEL_LIMIT 4 /* limit used to detect packet
- looping by the network stack */
+ * looping by the network stack
+ */
static DEFINE_PER_CPU(int, exec_actions_level);
static void action_fifo_init(struct action_fifo *fifo)
{
struct rtable *rt;
/* Tunnel configuration keeps DSCP part of TOS bits, But Linux
- * router expect RT_TOS bits only. */
+ * router expects RT_TOS bits only.
+ */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
struct flowi fl = { .nl_u = { .ip4_u = {
};
/* Check if need to build a reply message.
- * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
+ * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
+ */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
unsigned int group)
{
}
/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
- * returned dp pointer valid. */
+ * returned dp pointer valid.
+ */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
struct datapath *dp;
}
/* Only reserve room for attribute header, packet data is added
- * in skb_zerocopy() */
+ * in skb_zerocopy().
+ */
if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
err = -ENOBUFS;
goto out;
/* Normally, setting the skb 'protocol' field would be handled by a
* call to eth_type_trans(), but it assumes there's a sending
- * device, which we may not have. */
+ * device, which we may not have.
+ */
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
packet->protocol = eth->h_proto;
else
}
/* Most of the time we need to allocate a new flow, do it before
- * locking. */
+ * locking.
+ */
new_flow = ovs_flow_alloc();
if (IS_ERR(new_flow)) {
error = PTR_ERR(new_flow);
__skb_pull(skb, 2 * ETH_ALEN);
/* We are going to push all headers that we pull, so no need to
- * update skb->csum here. */
+ * update skb->csum here.
+ */
key->eth.tci = 0;
if (vlan_tx_tag_present(skb))
struct icmphdr *icmp = icmp_hdr(skb);
/* The ICMP type and code fields use the 16-bit
* transport port fields, so we need to store
- * them in 16-bit network byte order. */
+ * them in 16-bit network byte order.
+ */
key->tp.src = htons(icmp->type);
key->tp.dst = htons(icmp->code);
} else {
u64 mask_allowed = key_attrs; /* At most allow all key attributes */
/* The following mask attributes allowed only if they
- * pass the validation tests. */
+ * pass the validation tests.
+ */
mask_allowed &= ~((1ULL << OVS_KEY_ATTR_IPV4)
| (1ULL << OVS_KEY_ATTR_IPV6)
| (1ULL << OVS_KEY_ATTR_TCP)
size_t ovs_tun_key_attr_size(void)
{
/* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
- * updating this function. */
+ * updating this function.
+ */
return nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */
+ nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
+ nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
size_t ovs_key_attr_size(void)
{
/* Whenever adding new OVS_KEY_ FIELDS, we should consider
- * updating this function. */
+ * updating this function.
+ */
BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);
return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
}
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
- * The caller must hold rcu_read_lock for this to be sensible. */
+ * The caller must hold rcu_read_lock for this to be sensible.
+ */
void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
/* No need for locking this function is called from RCU callback or
- * error path. */
+ * error path.
+ */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
struct table_instance *ti = rcu_dereference_raw(table->ti);
* cache entry in mask cache.
* This is per cpu cache and is divided in MC_HASH_SEGS segments.
* In case of a hash collision the entry is hashed in next segment.
- * */
+ */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
const struct sw_flow_key *key,
u32 skb_hash,
table->count--;
/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
- * accessible as long as the RCU read lock is held. */
+ * accessible as long as the RCU read lock is held.
+ */
flow_mask_remove(table, flow->mask);
}
}
/* Initializes the flow module.
- * Returns zero if successful or a negative error code. */
+ * Returns zero if successful or a negative error code.
+ */
int ovs_flow_init(void)
{
BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
ovs_vport_get_stats(vport, &vport_stats);
/* The tx and rx stats need to be swapped because the
- * switch and host OS have opposite perspectives. */
+ * switch and host OS have opposite perspectives.
+ */
stats->rx_packets = vport_stats.tx_packets;
stats->tx_packets = vport_stats.rx_packets;
stats->rx_bytes = vport_stats.tx_bytes;
/* Make our own copy of the packet. Otherwise we will mangle the
* packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
* (No one comes after us, since we tell handle_bridge() that we took
- * the packet.) */
+ * the packet.)
+ */
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return;
enum vport_err_type err_type);
/* List of statically compiled vport implementations. Don't forget to also
- * add yours to the list at the bottom of vport.h. */
+ * add yours to the list at the bottom of vport.h.
+ */
static const struct vport_ops *vport_ops_list[] = {
&ovs_netdev_vport_ops,
&ovs_internal_vport_ops,
const struct ovs_tunnel_info *);
/* List of statically compiled vport implementations. Don't forget to also
- * add yours to the list at the top of vport.c. */
+ * add yours to the list at the top of vport.c.
+ */
extern const struct vport_ops ovs_netdev_vport_ops;
extern const struct vport_ops ovs_internal_vport_ops;
extern const struct vport_ops ovs_geneve_vport_ops;