diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index ea8702397..500e7cc43 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc. + * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -31,8 +31,10 @@ #include #include +#include "bitmap.h" #include "cmap.h" #include "csum.h" +#include "dp-packet.h" #include "dpif.h" #include "dpif-provider.h" #include "dummy.h" @@ -40,10 +42,11 @@ #include "fat-rwlock.h" #include "flow.h" #include "cmap.h" +#include "coverage.h" +#include "hmapx.h" #include "latch.h" #include "list.h" #include "match.h" -#include "meta-flow.h" #include "netdev.h" #include "netdev-dpdk.h" #include "netdev-vport.h" @@ -54,7 +57,6 @@ #include "ofpbuf.h" #include "ovs-numa.h" #include "ovs-rcu.h" -#include "packet-dpif.h" #include "packets.h" #include "poll-loop.h" #include "pvector.h" @@ -63,10 +65,11 @@ #include "shash.h" #include "sset.h" #include "timeval.h" -#include "tnl-arp-cache.h" +#include "tnl-neigh-cache.h" +#include "tnl-ports.h" #include "unixctl.h" #include "util.h" -#include "vlog.h" +#include "openvswitch/vlog.h" VLOG_DEFINE_THIS_MODULE(dpif_netdev); @@ -87,13 +90,18 @@ static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex) static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600); +static struct odp_support dp_netdev_support = { + .max_mpls_depth = SIZE_MAX, + .recirc = true, +}; + /* Stores a miniflow with inline values */ struct netdev_flow_key { uint32_t hash; /* Hash function differs for different users. */ uint32_t len; /* Length of the following miniflow (incl. map). */ struct miniflow mf; - uint32_t buf[FLOW_MAX_PACKET_U32S - MINI_N_INLINE]; + uint64_t buf[FLOW_MAX_PACKET_U64S]; }; /* Exact match cache for frequently used flows @@ -117,7 +125,7 @@ struct netdev_flow_key { * If dp_netdev_input is not called from a pmd thread, a mutex is used. */ -#define EM_FLOW_HASH_SHIFT 10 +#define EM_FLOW_HASH_SHIFT 13 #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT) #define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1) #define EM_FLOW_HASH_SEGS 2 @@ -177,7 +185,6 @@ static bool dpcls_lookup(const struct dpcls *cls, * * dp_netdev_mutex (global) * port_mutex - * flow_mutex */ struct dp_netdev { const struct dpif_class *const class; @@ -186,20 +193,6 @@ struct dp_netdev { struct ovs_refcount ref_cnt; atomic_flag destroyed; - /* Flows. - * - * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding - * changes to 'cls' must be made while still holding the 'flow_mutex'. - */ - struct ovs_mutex flow_mutex; - struct dpcls cls; - struct cmap flow_table OVS_GUARDED; /* Flow table. */ - - /* Statistics. - * - * ovsthread_stats is internally synchronized. */ - struct ovsthread_stats stats; /* Contains 'struct dp_netdev_stats *'. */ - /* Ports. * * Protected by RCU. Take the mutex to add or remove ports. */ @@ -213,6 +206,11 @@ struct dp_netdev { upcall_callback *upcall_cb; /* Callback function for executing upcalls. 
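A note on the exact-match cache defined in this hunk: a packet's 32-bit hash selects EM_FLOW_HASH_SEGS (here 2) candidate slots, one per EM_FLOW_HASH_SHIFT-bit segment of the hash, so a hot flow has two chances to stay cached before eviction. Below is a minimal standalone sketch of that indexing scheme; 'toy_entry' and 'toy_emc_lookup' are simplified stand-ins, not the real emc_entry/emc_lookup, which also compare the stored miniflow rather than only the hash.

    #include <stdint.h>
    #include <stdio.h>

    #define EM_FLOW_HASH_SHIFT 13
    #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
    #define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
    #define EM_FLOW_HASH_SEGS 2

    struct toy_entry {            // stands in for struct emc_entry
        uint32_t key_hash;        // 0 means "empty" in this toy version
        int flow_id;
    };

    static struct toy_entry cache[EM_FLOW_HASH_ENTRIES];

    // Probe one slot per hash segment; return the flow id on a hit, -1 on miss.
    static int toy_emc_lookup(uint32_t hash)
    {
        uint32_t h = hash;
        for (int seg = 0; seg < EM_FLOW_HASH_SEGS; seg++, h >>= EM_FLOW_HASH_SHIFT) {
            struct toy_entry *e = &cache[h & EM_FLOW_HASH_MASK];
            if (e->key_hash == hash) {   // real code also compares the miniflow
                return e->flow_id;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint32_t hash = 0xdeadbeef;

        // Install the flow in its second candidate slot.
        cache[(hash >> EM_FLOW_HASH_SHIFT) & EM_FLOW_HASH_MASK] =
            (struct toy_entry) { .key_hash = hash, .flow_id = 42 };
        printf("lookup: %d\n", toy_emc_lookup(hash));   // prints 42
        return 0;
    }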
*/ void *upcall_aux; + /* Callback function for notifying the purging of dp flows (during + * reseting pmd deletion). */ + dp_purge_callback *dp_purge_cb; + void *dp_purge_aux; + /* Stores all 'struct dp_netdev_pmd_thread's. */ struct cmap poll_threads; @@ -224,9 +222,7 @@ struct dp_netdev { * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */ ovsthread_key_t per_pmd_key; - /* Number of rx queues for each dpdk interface and the cpu mask - * for pin of pmd threads. */ - size_t n_dpdk_rxqs; + /* Cpu mask for pin of pmd threads. */ char *pmd_cmask; uint64_t last_tnl_conf_seq; }; @@ -235,41 +231,48 @@ static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t); enum dp_stat_type { - DP_STAT_HIT, /* Packets that matched in the flow table. */ + DP_STAT_EXACT_HIT, /* Packets that had an exact match (emc). */ + DP_STAT_MASKED_HIT, /* Packets that matched in the flow table. */ DP_STAT_MISS, /* Packets that did not match. */ DP_STAT_LOST, /* Packets not passed up to the client. */ DP_N_STATS }; -/* Contained by struct dp_netdev's 'stats' member. */ -struct dp_netdev_stats { - struct ovs_mutex mutex; /* Protects 'n'. */ - - /* Indexed by DP_STAT_*, protected by 'mutex'. */ - unsigned long long int n[DP_N_STATS] OVS_GUARDED; +enum pmd_cycles_counter_type { + PMD_CYCLES_POLLING, /* Cycles spent polling NICs. */ + PMD_CYCLES_PROCESSING, /* Cycles spent processing packets */ + PMD_N_CYCLES }; - /* A port in a netdev-based datapath. */ struct dp_netdev_port { - struct cmap_node node; /* Node in dp_netdev's 'ports'. */ odp_port_t port_no; struct netdev *netdev; + struct cmap_node node; /* Node in dp_netdev's 'ports'. */ struct netdev_saved_flags *sf; struct netdev_rxq **rxq; struct ovs_refcount ref_cnt; char *type; /* Port type as requested by user. */ + int latest_requested_n_rxq; /* Latest requested from netdev number + of rx queues. */ }; - -/* A flow in dp_netdev's 'flow_table'. +/* Contained by struct dp_netdev_flow's 'stats' member. */ +struct dp_netdev_flow_stats { + atomic_llong used; /* Last used time, in monotonic msecs. */ + atomic_ullong packet_count; /* Number of packets matched. */ + atomic_ullong byte_count; /* Number of bytes matched. */ + atomic_uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */ +}; + +/* A flow in 'dp_netdev_pmd_thread's 'flow_table'. * * * Thread-safety * ============= * * Except near the beginning or ending of its lifespan, rule 'rule' belongs to - * its dp_netdev's classifier. The text below calls this classifier 'cls'. + * its pmd thread's classifier. The text below calls this classifier 'cls'. * * Motivation * ---------- @@ -300,11 +303,13 @@ struct dp_netdev_port { * requires synchronization, as noted in more detail below. */ struct dp_netdev_flow { - bool dead; - - /* Hash table index by unmasked flow. */ - const struct cmap_node node; /* In owning dp_netdev's 'flow_table'. */ const struct flow flow; /* Unmasked flow that created this entry. */ + /* Hash table index by unmasked flow. */ + const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */ + /* 'flow_table'. */ + const ovs_u128 ufid; /* Unique flow identifier. */ + const unsigned pmd_id; /* The 'core_id' of pmd thread owning this */ + /* flow. */ /* Number of references. * The classifier owns one reference. @@ -312,14 +317,20 @@ struct dp_netdev_flow { * reference. */ struct ovs_refcount ref_cnt; - /* Statistics. - * - * Reading or writing these members requires 'mutex'. */ - struct ovsthread_stats stats; /* Contains "struct dp_netdev_flow_stats". 
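The ovsthread_stats bucket being removed here is replaced by the plain-atomics dp_netdev_flow_stats added above: each flow is only ever updated by the single pmd thread that owns it, so the writer can use relaxed load/add/store instead of an atomic read-modify-write, and other threads read the counters with relaxed atomic loads (never torn, possibly slightly stale). A self-contained C11 sketch of that single-writer pattern, with toy names rather than the OVS API:

    #include <stdatomic.h>
    #include <stdio.h>

    struct toy_flow_stats {
        atomic_ullong packet_count;   // written by one owner thread only
        atomic_ullong byte_count;
    };

    static struct toy_flow_stats flow_stats;   // zero-initialized

    // Owner thread: relaxed load + store is enough, since nobody else writes.
    static void toy_stats_update(struct toy_flow_stats *s,
                                 unsigned long long pkts, unsigned long long bytes)
    {
        atomic_store_explicit(&s->packet_count,
            atomic_load_explicit(&s->packet_count, memory_order_relaxed) + pkts,
            memory_order_relaxed);
        atomic_store_explicit(&s->byte_count,
            atomic_load_explicit(&s->byte_count, memory_order_relaxed) + bytes,
            memory_order_relaxed);
    }

    // Any thread: reads are atomic, so values are never torn (just maybe stale).
    static void toy_stats_read(struct toy_flow_stats *s)
    {
        printf("%llu packets, %llu bytes\n",
               atomic_load_explicit(&s->packet_count, memory_order_relaxed),
               atomic_load_explicit(&s->byte_count, memory_order_relaxed));
    }

    int main(void)
    {
        toy_stats_update(&flow_stats, 32, 2048);
        toy_stats_read(&flow_stats);
        return 0;
    }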
*/ + bool dead; + + /* Statistics. */ + struct dp_netdev_flow_stats stats; /* Actions. */ OVSRCU_TYPE(struct dp_netdev_actions *) actions; + /* While processing a group of input packets, the datapath uses the next + * member to store a pointer to the output batch for the flow. It is + * reset after the batch has been sent out (See dp_netdev_queue_batches(), + * packet_batch_init() and packet_batch_execute()). */ + struct packet_batch *batch; + /* Packet classification. */ struct dpcls_rule cr; /* In owning dp_netdev's 'cls'. */ /* 'cr' must be the last member. */ @@ -327,16 +338,8 @@ struct dp_netdev_flow { static void dp_netdev_flow_unref(struct dp_netdev_flow *); static bool dp_netdev_flow_ref(struct dp_netdev_flow *); - -/* Contained by struct dp_netdev_flow's 'stats' member. */ -struct dp_netdev_flow_stats { - struct ovs_mutex mutex; /* Guards all the other members. */ - - long long int used OVS_GUARDED; /* Last used time, in monotonic msecs. */ - long long int packet_count OVS_GUARDED; /* Number of packets matched. */ - long long int byte_count OVS_GUARDED; /* Number of bytes matched. */ - uint16_t tcp_flags OVS_GUARDED; /* Bitwise-OR of seen tcp_flags values. */ -}; +static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t, + struct flow *); /* A set of datapath actions within a "struct dp_netdev_flow". * @@ -348,8 +351,8 @@ struct dp_netdev_flow_stats { struct dp_netdev_actions { /* These members are immutable: they do not change during the struct's * lifetime. */ - struct nlattr *actions; /* Sequence of OVS_ACTION_ATTR_* attributes. */ unsigned int size; /* Size of 'actions', in bytes. */ + struct nlattr actions[]; /* Sequence of OVS_ACTION_ATTR_* attributes. */ }; struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *, @@ -358,20 +361,44 @@ struct dp_netdev_actions *dp_netdev_flow_get_actions( const struct dp_netdev_flow *); static void dp_netdev_actions_free(struct dp_netdev_actions *); +/* Contained by struct dp_netdev_pmd_thread's 'stats' member. */ +struct dp_netdev_pmd_stats { + /* Indexed by DP_STAT_*. */ + atomic_ullong n[DP_N_STATS]; +}; + +/* Contained by struct dp_netdev_pmd_thread's 'cycle' member. */ +struct dp_netdev_pmd_cycles { + /* Indexed by PMD_CYCLES_*. */ + atomic_ullong n[PMD_N_CYCLES]; +}; + +/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */ +struct rxq_poll { + struct dp_netdev_port *port; + struct netdev_rxq *rx; + struct ovs_list node; +}; + /* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate * the performance overhead of interrupt processing. Therefore netdev can * not implement rx-wait for these devices. dpif-netdev needs to poll * these device to check for recv buffer. pmd-thread does polling for - * devices assigned to itself thread. + * devices assigned to itself. * * DPDK used PMD for accessing NIC. * * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for * I/O of all non-pmd threads. There will be no actual thread created * for the instance. - **/ + * + * Each struct has its own flow table and classifier. Packets received + * from managed ports are looked up in the corresponding pmd thread's + * flow table, and are executed with the found actions. + * */ struct dp_netdev_pmd_thread { struct dp_netdev *dp; + struct ovs_refcount ref_cnt; /* Every reference must be refcount'ed. */ struct cmap_node node; /* In 'dp->poll_threads'. */ pthread_cond_t cond; /* For synchronizing pmd thread reload. 
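The cond/cond_mutex pair above works together with the atomic 'change_seq' further down in this struct: to reconfigure a pmd, the main thread bumps the sequence number and sleeps on the condition variable, and the polling loop notices the new value, re-reads its configuration, and signals completion. A simplified, runnable sketch of that handshake; the shape is inferred from dp_netdev_reload_pmd__() and dp_netdev_pmd_reload_done(), and all names here are toys:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint change_seq = 1;          // like PMD_INITIAL_SEQ
    static pthread_mutex_t cond_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int reload_done;

    static void *pmd_main(void *arg)
    {
        (void) arg;
        unsigned seen = 1;                      // the initial sequence number
        for (;;) {                              // stands in for the busy poll loop
            unsigned now = atomic_load_explicit(&change_seq, memory_order_acquire);
            if (now != seen) {                  // main thread asked for a reload
                seen = now;
                // ... a real pmd would re-read its port/queue lists here ...
                pthread_mutex_lock(&cond_mutex);
                reload_done = 1;
                pthread_cond_signal(&cond);     // like dp_netdev_pmd_reload_done()
                pthread_mutex_unlock(&cond_mutex);
                break;                          // toy version: one reload, then exit
            }
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, pmd_main, NULL);

        pthread_mutex_lock(&cond_mutex);        // like dp_netdev_reload_pmd__()
        atomic_fetch_add_explicit(&change_seq, 1, memory_order_release);
        while (!reload_done) {
            pthread_cond_wait(&cond, &cond_mutex);
        }
        pthread_mutex_unlock(&cond_mutex);

        pthread_join(t, NULL);
        puts("pmd reloaded");
        return 0;
    }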
*/ @@ -382,13 +409,46 @@ struct dp_netdev_pmd_thread { * need to be protected (e.g. by 'dp_netdev_mutex'). All other * instances will only be accessed by its own pmd thread. */ struct emc_cache flow_cache; + + /* Classifier and Flow-Table. + * + * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding + * changes to 'cls' must be made while still holding the 'flow_mutex'. + */ + struct ovs_mutex flow_mutex; + struct dpcls cls; + struct cmap flow_table OVS_GUARDED; /* Flow table. */ + + /* Statistics. */ + struct dp_netdev_pmd_stats stats; + + /* Cycles counters */ + struct dp_netdev_pmd_cycles cycles; + + /* Used to count cicles. See 'cycles_counter_end()' */ + unsigned long long last_cycles; + struct latch exit_latch; /* For terminating the pmd thread. */ atomic_uint change_seq; /* For reloading pmd ports. */ pthread_t thread; int index; /* Idx of this pmd thread among pmd*/ /* threads on same numa node. */ - int core_id; /* CPU core id of this pmd thread. */ + unsigned core_id; /* CPU core id of this pmd thread. */ int numa_id; /* numa node id of this pmd thread. */ + atomic_int tx_qid; /* Queue id used by this pmd thread to + * send packets on all netdevs */ + + struct ovs_mutex poll_mutex; /* Mutex for poll_list. */ + /* List of rx queues to poll. */ + struct ovs_list poll_list OVS_GUARDED; + int poll_cnt; /* Number of elemints in poll_list. */ + + /* Only a pmd thread can write on its own 'cycles' and 'stats'. + * The main thread keeps 'stats_zero' and 'cycles_zero' as base + * values and subtracts them from 'stats' and 'cycles' before + * reporting to the user */ + unsigned long long stats_zero[DP_N_STATS]; + uint64_t cycles_zero[PMD_N_CYCLES]; }; #define PMD_INITIAL_SEQ 1 @@ -406,7 +466,6 @@ static int get_port_by_name(struct dp_netdev *dp, const char *devname, struct dp_netdev_port **portp); static void dp_netdev_free(struct dp_netdev *) OVS_REQUIRES(dp_netdev_mutex); -static void dp_netdev_flow_flush(struct dp_netdev *); static int do_add_port(struct dp_netdev *dp, const char *devname, const char *type, odp_port_t port_no) OVS_REQUIRES(dp->port_mutex); @@ -415,24 +474,45 @@ static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *) static int dpif_netdev_open(const struct dpif_class *, const char *name, bool create, struct dpif **); static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, - struct dpif_packet **, int c, + struct dp_packet **, int c, bool may_steal, const struct nlattr *actions, size_t actions_len); static void dp_netdev_input(struct dp_netdev_pmd_thread *, - struct dpif_packet **, int cnt); + struct dp_packet **, int cnt, odp_port_t port_no); +static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *, + struct dp_packet **, int cnt); static void dp_netdev_disable_upcall(struct dp_netdev *); -void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd); +static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, int index, - int core_id, int numa_id); + unsigned core_id, int numa_id); +static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_set_nonpmd(struct dp_netdev *dp); -static struct dp_netdev_pmd_thread *dp_netdev_get_nonpmd(struct dp_netdev *dp); +static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp, + unsigned core_id); +static struct dp_netdev_pmd_thread * +dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos); static void 
dp_netdev_destroy_all_pmds(struct dp_netdev *dp); static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id); static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id); +static void dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd); +static void dp_netdev_del_port_from_pmd(struct dp_netdev_port *port, + struct dp_netdev_pmd_thread *pmd); +static void dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp, + struct dp_netdev_port *port); +static void +dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port); +static void +dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd, + struct dp_netdev_port *port, struct netdev_rxq *rx); +static struct dp_netdev_pmd_thread * +dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id); static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp); +static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd); +static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd); +static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd); static inline bool emc_entry_alive(struct emc_entry *ce); static void emc_clear_entry(struct emc_entry *ce); @@ -442,16 +522,12 @@ emc_cache_init(struct emc_cache *flow_cache) { int i; - BUILD_ASSERT(offsetof(struct miniflow, inline_values) == sizeof(uint64_t)); - flow_cache->sweep_idx = 0; for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) { flow_cache->entries[i].flow = NULL; flow_cache->entries[i].key.hash = 0; - flow_cache->entries[i].key.len - = offsetof(struct miniflow, inline_values); - miniflow_initialize(&flow_cache->entries[i].key.mf, - flow_cache->entries[i].key.buf); + flow_cache->entries[i].key.len = sizeof(struct miniflow); + flowmap_init(&flow_cache->entries[i].key.mf.map); } } @@ -478,10 +554,17 @@ emc_cache_slow_sweep(struct emc_cache *flow_cache) flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK; } +/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */ +bool +dpif_is_netdev(const struct dpif *dpif) +{ + return dpif->dpif_class->open == dpif_netdev_open; +} + static struct dpif_netdev * dpif_netdev_cast(const struct dpif *dpif) { - ovs_assert(dpif->dpif_class->open == dpif_netdev_open); + ovs_assert(dpif_is_netdev(dpif)); return CONTAINER_OF(dpif, struct dpif_netdev, dpif); } @@ -490,6 +573,220 @@ get_dp_netdev(const struct dpif *dpif) { return dpif_netdev_cast(dpif)->dp; } + +enum pmd_info_type { + PMD_INFO_SHOW_STATS, /* Show how cpu cycles are spent. */ + PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */ + PMD_INFO_SHOW_RXQ /* Show poll-lists of pmd threads. */ +}; + +static void +pmd_info_show_stats(struct ds *reply, + struct dp_netdev_pmd_thread *pmd, + unsigned long long stats[DP_N_STATS], + uint64_t cycles[PMD_N_CYCLES]) +{ + unsigned long long total_packets = 0; + uint64_t total_cycles = 0; + int i; + + /* These loops subtracts reference values ('*_zero') from the counters. + * Since loads and stores are relaxed, it might be possible for a '*_zero' + * value to be more recent than the current value we're reading from the + * counter. This is not a big problem, since these numbers are not + * supposed to be too accurate, but we should at least make sure that + * the result is not negative. 
*/ + for (i = 0; i < DP_N_STATS; i++) { + if (stats[i] > pmd->stats_zero[i]) { + stats[i] -= pmd->stats_zero[i]; + } else { + stats[i] = 0; + } + + if (i != DP_STAT_LOST) { + /* Lost packets are already included in DP_STAT_MISS */ + total_packets += stats[i]; + } + } + + for (i = 0; i < PMD_N_CYCLES; i++) { + if (cycles[i] > pmd->cycles_zero[i]) { + cycles[i] -= pmd->cycles_zero[i]; + } else { + cycles[i] = 0; + } + + total_cycles += cycles[i]; + } + + ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID) + ? "main thread" : "pmd thread"); + + if (pmd->numa_id != OVS_NUMA_UNSPEC) { + ds_put_format(reply, " numa_id %d", pmd->numa_id); + } + if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) { + ds_put_format(reply, " core_id %u", pmd->core_id); + } + ds_put_cstr(reply, ":\n"); + + ds_put_format(reply, + "\temc hits:%llu\n\tmegaflow hits:%llu\n" + "\tmiss:%llu\n\tlost:%llu\n", + stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT], + stats[DP_STAT_MISS], stats[DP_STAT_LOST]); + + if (total_cycles == 0) { + return; + } + + ds_put_format(reply, + "\tpolling cycles:%"PRIu64" (%.02f%%)\n" + "\tprocessing cycles:%"PRIu64" (%.02f%%)\n", + cycles[PMD_CYCLES_POLLING], + cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100, + cycles[PMD_CYCLES_PROCESSING], + cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100); + + if (total_packets == 0) { + return; + } + + ds_put_format(reply, + "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n", + total_cycles / (double)total_packets, + total_cycles, total_packets); + + ds_put_format(reply, + "\tavg processing cycles per packet: " + "%.02f (%"PRIu64"/%llu)\n", + cycles[PMD_CYCLES_PROCESSING] / (double)total_packets, + cycles[PMD_CYCLES_PROCESSING], total_packets); +} + +static void +pmd_info_clear_stats(struct ds *reply OVS_UNUSED, + struct dp_netdev_pmd_thread *pmd, + unsigned long long stats[DP_N_STATS], + uint64_t cycles[PMD_N_CYCLES]) +{ + int i; + + /* We cannot write 'stats' and 'cycles' (because they're written by other + * threads) and we shouldn't change 'stats' (because they're used to count + * datapath stats, which must not be cleared here). 
Instead, we save the + * current values and subtract them from the values to be displayed in the + * future */ + for (i = 0; i < DP_N_STATS; i++) { + pmd->stats_zero[i] = stats[i]; + } + for (i = 0; i < PMD_N_CYCLES; i++) { + pmd->cycles_zero[i] = cycles[i]; + } +} + +static void +pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd) +{ + if (pmd->core_id != NON_PMD_CORE_ID) { + struct rxq_poll *poll; + const char *prev_name = NULL; + + ds_put_format(reply, "pmd thread numa_id %d core_id %u:\n", + pmd->numa_id, pmd->core_id); + + ovs_mutex_lock(&pmd->poll_mutex); + LIST_FOR_EACH (poll, node, &pmd->poll_list) { + const char *name = netdev_get_name(poll->port->netdev); + + if (!prev_name || strcmp(name, prev_name)) { + if (prev_name) { + ds_put_cstr(reply, "\n"); + } + ds_put_format(reply, "\tport: %s\tqueue-id:", + netdev_get_name(poll->port->netdev)); + } + ds_put_format(reply, " %d", netdev_rxq_get_queue_id(poll->rx)); + prev_name = name; + } + ovs_mutex_unlock(&pmd->poll_mutex); + ds_put_cstr(reply, "\n"); + } +} + +static void +dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[], + void *aux) +{ + struct ds reply = DS_EMPTY_INITIALIZER; + struct dp_netdev_pmd_thread *pmd; + struct dp_netdev *dp = NULL; + enum pmd_info_type type = *(enum pmd_info_type *) aux; + + ovs_mutex_lock(&dp_netdev_mutex); + + if (argc == 2) { + dp = shash_find_data(&dp_netdevs, argv[1]); + } else if (shash_count(&dp_netdevs) == 1) { + /* There's only one datapath */ + dp = shash_first(&dp_netdevs)->data; + } + + if (!dp) { + ovs_mutex_unlock(&dp_netdev_mutex); + unixctl_command_reply_error(conn, + "please specify an existing datapath"); + return; + } + + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + if (type == PMD_INFO_SHOW_RXQ) { + pmd_info_show_rxq(&reply, pmd); + } else { + unsigned long long stats[DP_N_STATS]; + uint64_t cycles[PMD_N_CYCLES]; + int i; + + /* Read current stats and cycle counters */ + for (i = 0; i < ARRAY_SIZE(stats); i++) { + atomic_read_relaxed(&pmd->stats.n[i], &stats[i]); + } + for (i = 0; i < ARRAY_SIZE(cycles); i++) { + atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]); + } + + if (type == PMD_INFO_CLEAR_STATS) { + pmd_info_clear_stats(&reply, pmd, stats, cycles); + } else if (type == PMD_INFO_SHOW_STATS) { + pmd_info_show_stats(&reply, pmd, stats, cycles); + } + } + } + + ovs_mutex_unlock(&dp_netdev_mutex); + + unixctl_command_reply(conn, ds_cstr(&reply)); + ds_destroy(&reply); +} + +static int +dpif_netdev_init(void) +{ + static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS, + clear_aux = PMD_INFO_CLEAR_STATS, + poll_aux = PMD_INFO_SHOW_RXQ; + + unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]", + 0, 1, dpif_netdev_pmd_info, + (void *)&show_aux); + unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]", + 0, 1, dpif_netdev_pmd_info, + (void *)&clear_aux); + unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]", + 0, 1, dpif_netdev_pmd_info, + (void *)&poll_aux); + return 0; +} static int dpif_netdev_enumerate(struct sset *all_dps, @@ -600,12 +897,6 @@ create_dp_netdev(const char *name, const struct dpif_class *class, ovs_refcount_init(&dp->ref_cnt); atomic_flag_clear(&dp->destroyed); - ovs_mutex_init(&dp->flow_mutex); - dpcls_init(&dp->cls); - cmap_init(&dp->flow_table); - - ovsthread_stats_init(&dp->stats); - ovs_mutex_init(&dp->port_mutex); cmap_init(&dp->ports); dp->port_seq = seq_create(); @@ -620,10 +911,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class, 
ovs_mutex_init_recursive(&dp->non_pmd_mutex); ovsthread_key_create(&dp->per_pmd_key, NULL); - /* Reserves the core NON_PMD_CORE_ID for all non-pmd threads. */ - ovs_numa_try_pin_core_specific(NON_PMD_CORE_ID); dp_netdev_set_nonpmd(dp); - dp->n_dpdk_rxqs = NR_QUEUE; ovs_mutex_lock(&dp->port_mutex); error = do_add_port(dp, name, "internal", ODPP_LOCAL); @@ -682,32 +970,21 @@ dp_netdev_free(struct dp_netdev *dp) OVS_REQUIRES(dp_netdev_mutex) { struct dp_netdev_port *port; - struct dp_netdev_stats *bucket; - int i; shash_find_and_delete(&dp_netdevs, dp->name); dp_netdev_destroy_all_pmds(dp); - cmap_destroy(&dp->poll_threads); ovs_mutex_destroy(&dp->non_pmd_mutex); ovsthread_key_delete(dp->per_pmd_key); - dp_netdev_flow_flush(dp); ovs_mutex_lock(&dp->port_mutex); CMAP_FOR_EACH (port, node, &dp->ports) { + /* PMD threads are destroyed here. do_del_port() cannot quiesce */ do_del_port(dp, port); } ovs_mutex_unlock(&dp->port_mutex); + cmap_destroy(&dp->poll_threads); - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) { - ovs_mutex_destroy(&bucket->mutex); - free_cacheline(bucket); - } - ovsthread_stats_destroy(&dp->stats); - - dpcls_destroy(&dp->cls); - cmap_destroy(&dp->flow_table); - ovs_mutex_destroy(&dp->flow_mutex); seq_destroy(dp->port_seq); cmap_destroy(&dp->ports); @@ -757,22 +1034,40 @@ dpif_netdev_destroy(struct dpif *dpif) return 0; } +/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed + * load/store semantics. While the increment is not atomic, the load and + * store operations are, making it impossible to read inconsistent values. + * + * This is used to update thread local stats counters. */ +static void +non_atomic_ullong_add(atomic_ullong *var, unsigned long long n) +{ + unsigned long long tmp; + + atomic_read_relaxed(var, &tmp); + tmp += n; + atomic_store_relaxed(var, tmp); +} + static int dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats) { struct dp_netdev *dp = get_dp_netdev(dpif); - struct dp_netdev_stats *bucket; - size_t i; - - stats->n_flows = cmap_count(&dp->flow_table); + struct dp_netdev_pmd_thread *pmd; - stats->n_hit = stats->n_missed = stats->n_lost = 0; - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) { - ovs_mutex_lock(&bucket->mutex); - stats->n_hit += bucket->n[DP_STAT_HIT]; - stats->n_missed += bucket->n[DP_STAT_MISS]; - stats->n_lost += bucket->n[DP_STAT_LOST]; - ovs_mutex_unlock(&bucket->mutex); + stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0; + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + unsigned long long n; + stats->n_flows += cmap_count(&pmd->flow_table); + + atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n); + stats->n_hit += n; + atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n); + stats->n_hit += n; + atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n); + stats->n_missed += n; + atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n); + stats->n_lost += n; } stats->n_masks = UINT32_MAX; stats->n_mask_hit = UINT64_MAX; @@ -795,18 +1090,6 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd) ovs_mutex_unlock(&pmd->cond_mutex); } -/* Causes all pmd threads to reload its tx/rx devices. - * Must be called after adding/removing ports. 
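dpif_netdev_get_stats() above now aggregates per-pmd counters instead of locking global stat buckets, folding both DP_STAT_EXACT_HIT (emc) and DP_STAT_MASKED_HIT (megaflow) into n_hit. A toy version of that aggregation over an assumed array of per-thread counter blocks:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { STAT_EXACT_HIT, STAT_MASKED_HIT, STAT_MISS, STAT_LOST, N_STATS };

    struct toy_pmd {
        atomic_ullong n[N_STATS];   // written only by the owning thread
    };

    #define N_PMDS 4
    static struct toy_pmd pmds[N_PMDS];

    int main(void)
    {
        unsigned long long n_hit = 0, n_missed = 0, n_lost = 0;

        // Fake some traffic on one pmd.
        atomic_store(&pmds[1].n[STAT_EXACT_HIT], 10);
        atomic_store(&pmds[1].n[STAT_MASKED_HIT], 5);
        atomic_store(&pmds[1].n[STAT_MISS], 2);

        for (int i = 0; i < N_PMDS; i++) {
            // Relaxed reads: counters may lag slightly, but are never torn.
            n_hit += atomic_load_explicit(&pmds[i].n[STAT_EXACT_HIT],
                                          memory_order_relaxed);
            n_hit += atomic_load_explicit(&pmds[i].n[STAT_MASKED_HIT],
                                          memory_order_relaxed);
            n_missed += atomic_load_explicit(&pmds[i].n[STAT_MISS],
                                             memory_order_relaxed);
            n_lost += atomic_load_explicit(&pmds[i].n[STAT_LOST],
                                           memory_order_relaxed);
        }
        printf("hit=%llu missed=%llu lost=%llu\n", n_hit, n_missed, n_lost);
        return 0;
    }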
*/ -static void -dp_netdev_reload_pmds(struct dp_netdev *dp) -{ - struct dp_netdev_pmd_thread *pmd; - - CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { - dp_netdev_reload_pmd__(pmd); - } -} - static uint32_t hash_port_no(odp_port_t port_no) { @@ -826,7 +1109,10 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type, int error; int i; - /* XXX reject devices already in some dp_netdev. */ + /* Reject devices already in 'dp'. */ + if (!get_port_by_name(dp, devname, &port)) { + return EEXIST; + } /* Open and validate network device. */ open_type = dpif_netdev_port_open_type(dp->class, type); @@ -851,8 +1137,10 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type, return ENOENT; } /* There can only be ovs_numa_get_n_cores() pmd threads, - * so creates a txq for each. */ - error = netdev_set_multiq(netdev, n_cores, dp->n_dpdk_rxqs); + * so creates a txq for each, and one extra for the non + * pmd threads. */ + error = netdev_set_multiq(netdev, n_cores + 1, + netdev_requested_n_rxq(netdev)); if (error && (error != EOPNOTSUPP)) { VLOG_ERR("%s, cannot set multiq", devname); return errno; @@ -863,6 +1151,7 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type, port->netdev = netdev; port->rxq = xmalloc(sizeof *port->rxq * netdev_n_rxq(netdev)); port->type = xstrdup(type); + port->latest_requested_n_rxq = netdev_requested_n_rxq(netdev); for (i = 0; i < netdev_n_rxq(netdev); i++) { error = netdev_rxq_open(netdev, &port->rxq[i], i); if (error @@ -894,8 +1183,7 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type, cmap_insert(&dp->ports, &port->node, hash_port_no(port_no)); if (netdev_is_pmd(netdev)) { - dp_netdev_set_pmds_on_numa(dp, netdev_get_numa_id(netdev)); - dp_netdev_reload_pmds(dp); + dp_netdev_add_port_to_pmds(dp, port); } seq_change(dp->port_seq); @@ -992,16 +1280,6 @@ port_ref(struct dp_netdev_port *port) } } -static bool -port_try_ref(struct dp_netdev_port *port) -{ - if (port) { - return ovs_refcount_try_ref_rcu(&port->ref_cnt); - } - - return false; -} - static void port_unref(struct dp_netdev_port *port) { @@ -1037,6 +1315,13 @@ get_port_by_name(struct dp_netdev *dp, return ENOENT; } +static int +get_n_pmd_threads(struct dp_netdev *dp) +{ + /* There is one non pmd thread in dp->poll_threads */ + return cmap_count(&dp->poll_threads) - 1; +} + static int get_n_pmd_threads_on_numa(struct dp_netdev *dp, int numa_id) { @@ -1079,12 +1364,15 @@ do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port) if (netdev_is_pmd(port->netdev)) { int numa_id = netdev_get_numa_id(port->netdev); + /* PMD threads can not be on invalid numa node. */ + ovs_assert(ovs_numa_numa_id_is_valid(numa_id)); /* If there is no netdev on the numa node, deletes the pmd threads - * for that numa. Else, just reloads the queues. */ + * for that numa. Else, deletes the queues from polling lists. 
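Worth spelling out the queue arithmetic used by do_add_port() above: netdev_set_multiq() is asked for ovs_numa_get_n_cores() + 1 tx queues, one per possible pmd core plus one shared by all non-pmd threads (which serialize on 'non_pmd_mutex'). A small sketch of that sizing and an assumed queue-id assignment; the exact mapping to 'tx_qid' is an illustration, not the code's literal policy:

    #include <stdio.h>

    #define NON_PMD_CORE_ID ((unsigned) -1)   // stand-in sentinel for "not a pmd"

    // With n_cores pmd-capable cores there are n_cores + 1 tx queues:
    // queue i for pmd thread i, queue n_cores shared by everything else.
    static int toy_tx_qid(unsigned pmd_index, int n_cores)
    {
        return pmd_index == NON_PMD_CORE_ID ? n_cores : (int) pmd_index;
    }

    int main(void)
    {
        int n_cores = 4;                      // pretend ovs_numa_get_n_cores() == 4

        printf("n_txq = %d\n", n_cores + 1);  // what netdev_set_multiq() is asked for
        printf("pmd 0 -> txq %d\n", toy_tx_qid(0, n_cores));
        printf("pmd 3 -> txq %d\n", toy_tx_qid(3, n_cores));
        printf("non-pmd -> txq %d\n", toy_tx_qid(NON_PMD_CORE_ID, n_cores));
        return 0;
    }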
*/ if (!has_pmd_port_for_numa(dp, numa_id)) { dp_netdev_del_pmds_on_numa(dp, numa_id); + } else { + dp_netdev_del_port_from_all_pmds(dp, port); } - dp_netdev_reload_pmds(dp); } port_unref(port); @@ -1136,15 +1424,6 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname, static void dp_netdev_flow_free(struct dp_netdev_flow *flow) { - struct dp_netdev_flow_stats *bucket; - size_t i; - - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &flow->stats) { - ovs_mutex_destroy(&bucket->mutex); - free_cacheline(bucket); - } - ovsthread_stats_destroy(&flow->stats); - dp_netdev_actions_free(dp_netdev_flow_get_actions(flow)); free(flow); } @@ -1156,37 +1435,50 @@ static void dp_netdev_flow_unref(struct dp_netdev_flow *flow) } } +static uint32_t +dp_netdev_flow_hash(const ovs_u128 *ufid) +{ + return ufid->u32[0]; +} + static void -dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow) - OVS_REQUIRES(dp->flow_mutex) +dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd, + struct dp_netdev_flow *flow) + OVS_REQUIRES(pmd->flow_mutex) { struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node); - dpcls_remove(&dp->cls, &flow->cr); - cmap_remove(&dp->flow_table, node, flow_hash(&flow->flow, 0)); + dpcls_remove(&pmd->cls, &flow->cr); + flow->cr.mask = NULL; /* Accessing rule's mask after this is not safe. */ + + cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid)); flow->dead = true; dp_netdev_flow_unref(flow); } static void -dp_netdev_flow_flush(struct dp_netdev *dp) +dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd) { struct dp_netdev_flow *netdev_flow; - ovs_mutex_lock(&dp->flow_mutex); - CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) { - dp_netdev_remove_flow(dp, netdev_flow); + ovs_mutex_lock(&pmd->flow_mutex); + CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) { + dp_netdev_pmd_remove_flow(pmd, netdev_flow); } - ovs_mutex_unlock(&dp->flow_mutex); + ovs_mutex_unlock(&pmd->flow_mutex); } static int dpif_netdev_flow_flush(struct dpif *dpif) { struct dp_netdev *dp = get_dp_netdev(dpif); + struct dp_netdev_pmd_thread *pmd; + + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + dp_netdev_pmd_flow_flush(pmd); + } - dp_netdev_flow_flush(dp); return 0; } @@ -1287,22 +1579,14 @@ static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow) * miniflow_extract(), if the map is different the miniflow is different. * Therefore we can be faster by comparing the map and the miniflow in a * single memcmp(). - * _ netdev_flow_key's miniflow has always inline values. - * - These functions can be inlined by the compiler. - * - * The following assertions make sure that what we're doing with miniflow is - * safe - */ -BUILD_ASSERT_DECL(offsetof(struct miniflow, inline_values) - == sizeof(uint64_t)); + * - These functions can be inlined by the compiler. 
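As the comment above notes, once the map is stored next to the packed values, key equality is a single length check plus one memcmp spanning both. A standalone toy key demonstrating the layout trick, simplified to one 64-bit map word instead of the real multi-word flowmap:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_key {
        uint32_t hash;       // not part of the comparison
        uint32_t len;        // bytes of map + values below
        uint64_t map;        // which fields are present
        uint64_t values[4];  // packed values for the bits set in 'map'
    };

    // One memcmp covers 'map' and the packed values: if the maps differ the
    // keys differ, so there is no need to decode the maps first.
    static bool toy_key_equal(const struct toy_key *a, const struct toy_key *b)
    {
        return a->len == b->len && !memcmp(&a->map, &b->map, a->len);
    }

    int main(void)
    {
        struct toy_key a = { .len = 8 + 16, .map = 0x5, .values = { 1, 2 } };
        struct toy_key b = a;

        printf("equal: %d\n", toy_key_equal(&a, &b));   // 1
        b.values[1] = 3;
        printf("equal: %d\n", toy_key_equal(&a, &b));   // 0
        return 0;
    }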
*/ -/* Given the number of bits set in the miniflow map, returns the size of the +/* Given the number of bits set in miniflow's maps, returns the size of the * 'netdev_flow_key.mf' */ -static inline uint32_t -netdev_flow_key_size(uint32_t flow_u32s) +static inline size_t +netdev_flow_key_size(size_t flow_u64s) { - return offsetof(struct miniflow, inline_values) + - MINIFLOW_VALUES_SIZE(flow_u32s); + return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s); } static inline bool @@ -1336,18 +1620,16 @@ static void netdev_flow_key_from_flow(struct netdev_flow_key *dst, const struct flow *src) { - struct ofpbuf packet; + struct dp_packet packet; uint64_t buf_stub[512 / 8]; - struct pkt_metadata md = pkt_metadata_from_flow(src); - - miniflow_initialize(&dst->mf, dst->buf); - ofpbuf_use_stub(&packet, buf_stub, sizeof buf_stub); + dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub); + pkt_metadata_from_flow(&packet.md, src); flow_compose(&packet, src); - miniflow_extract(&packet, &md, &dst->mf); - ofpbuf_uninit(&packet); + miniflow_extract(&packet, &dst->mf); + dp_packet_uninit(&packet); - dst->len = netdev_flow_key_size(count_1bits(dst->mf.map)); + dst->len = netdev_flow_key_size(miniflow_n_values(&dst->mf)); dst->hash = 0; /* Not computed yet. */ } @@ -1356,67 +1638,62 @@ static inline void netdev_flow_mask_init(struct netdev_flow_key *mask, const struct match *match) { - const uint32_t *mask_u32 = (const uint32_t *) &match->wc.masks; - uint32_t *dst = mask->mf.inline_values; - uint64_t map, mask_map = 0; + uint64_t *dst = miniflow_values(&mask->mf); + struct flowmap fmap; uint32_t hash = 0; - int n; + size_t idx; /* Only check masks that make sense for the flow. */ - map = flow_wc_map(&match->flow); + flow_wc_map(&match->flow, &fmap); + flowmap_init(&mask->mf.map); - while (map) { - uint64_t rm1bit = rightmost_1bit(map); - int i = raw_ctz(map); + FLOWMAP_FOR_EACH_INDEX(idx, fmap) { + uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx); - if (mask_u32[i]) { - mask_map |= rm1bit; - *dst++ = mask_u32[i]; - hash = hash_add(hash, mask_u32[i]); + if (mask_u64) { + flowmap_set(&mask->mf.map, idx, 1); + *dst++ = mask_u64; + hash = hash_add64(hash, mask_u64); } - map -= rm1bit; } - mask->mf.values_inline = true; - mask->mf.map = mask_map; + map_t map; - hash = hash_add(hash, mask_map); - hash = hash_add(hash, mask_map >> 32); + FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) { + hash = hash_add64(hash, map); + } - n = dst - mask->mf.inline_values; + size_t n = dst - miniflow_get_values(&mask->mf); - mask->hash = hash_finish(hash, n * 4); + mask->hash = hash_finish(hash, n * 8); mask->len = netdev_flow_key_size(n); } -/* Initializes 'dst' as a copy of 'src' masked with 'mask'. */ +/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */ static inline void netdev_flow_key_init_masked(struct netdev_flow_key *dst, const struct flow *flow, const struct netdev_flow_key *mask) { - uint32_t *dst_u32 = dst->mf.inline_values; - const uint32_t *mask_u32 = mask->mf.inline_values; + uint64_t *dst_u64 = miniflow_values(&dst->mf); + const uint64_t *mask_u64 = miniflow_get_values(&mask->mf); uint32_t hash = 0; - uint32_t value; + uint64_t value; dst->len = mask->len; - dst->mf.values_inline = true; - dst->mf.map = mask->mf.map; + dst->mf = mask->mf; /* Copy maps. 
*/ - FLOW_FOR_EACH_IN_MAP(value, flow, mask->mf.map) { - *dst_u32 = value & *mask_u32++; - hash = hash_add(hash, *dst_u32++); + FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) { + *dst_u64 = value & *mask_u64++; + hash = hash_add64(hash, *dst_u64++); } - dst->hash = hash_finish(hash, (dst_u32 - dst->mf.inline_values) * 4); + dst->hash = hash_finish(hash, + (dst_u64 - miniflow_get_values(&dst->mf)) * 8); } -/* Iterate through all netdev_flow_key u32 values specified by 'MAP' */ -#define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \ - for (struct mf_for_each_in_map_aux aux__ \ - = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP }; \ - mf_get_next_in_map(&aux__, &(VALUE)); \ - ) +/* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */ +#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \ + MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP) /* Returns a hash value for the bits of 'key' where there are 1-bits in * 'mask'. */ @@ -1424,15 +1701,15 @@ static inline uint32_t netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key, const struct netdev_flow_key *mask) { - const uint32_t *p = mask->mf.inline_values; + const uint64_t *p = miniflow_get_values(&mask->mf); uint32_t hash = 0; - uint32_t key_u32; + uint64_t value; - NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u32, key, mask->mf.map) { - hash = hash_add(hash, key_u32 & *p++); + NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) { + hash = hash_add64(hash, value & *p++); } - return hash_finish(hash, (p - mask->mf.inline_values) * 4); + return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8); } static inline bool @@ -1518,27 +1795,40 @@ emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key) } static struct dp_netdev_flow * -dp_netdev_lookup_flow(const struct dp_netdev *dp, - const struct netdev_flow_key *key) +dp_netdev_pmd_lookup_flow(const struct dp_netdev_pmd_thread *pmd, + const struct netdev_flow_key *key) { struct dp_netdev_flow *netdev_flow; struct dpcls_rule *rule; - dpcls_lookup(&dp->cls, key, &rule, 1); + dpcls_lookup(&pmd->cls, key, &rule, 1); netdev_flow = dp_netdev_flow_cast(rule); return netdev_flow; } static struct dp_netdev_flow * -dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow) +dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd, + const ovs_u128 *ufidp, const struct nlattr *key, + size_t key_len) { struct dp_netdev_flow *netdev_flow; + struct flow flow; + ovs_u128 ufid; + + /* If a UFID is not provided, determine one based on the key. 
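When no UFID accompanies the request, the code after this comment hashes the flow key to synthesize one, and dp_netdev_flow_hash() above then indexes the cmap by just the first 32 bits of the 128-bit value. A toy version of that fallback; 'toy_flow_hash' is a stand-in for dpif_flow_hash(), not its real algorithm:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct { uint32_t u32[4]; } toy_u128;   // stands in for ovs_u128

    // Stand-in for dpif_flow_hash(): any decent 128-bit hash works here.
    static toy_u128 toy_flow_hash(const void *key, size_t len)
    {
        toy_u128 h = { { 0x9e3779b9, 0, 0, 0 } };
        const uint8_t *p = key;

        for (size_t i = 0; i < len; i++) {
            h.u32[i % 4] = h.u32[i % 4] * 31 + p[i];
        }
        return h;
    }

    // Like dp_netdev_flow_hash(): the table hash is just ufid->u32[0].
    static uint32_t toy_table_hash(const toy_u128 *ufid)
    {
        return ufid->u32[0];
    }

    int main(void)
    {
        const char key[] = "in_port(1),eth_type(0x0800)";
        toy_u128 ufid = toy_flow_hash(key, sizeof key);   // no UFID given: derive it

        printf("cmap hash: %08" PRIx32 "\n", toy_table_hash(&ufid));
        return 0;
    }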
*/ + if (!ufidp && key && key_len + && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow)) { + dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid); + ufidp = &ufid; + } - CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0), - &dp->flow_table) { - if (flow_equal(&netdev_flow->flow, flow)) { - return netdev_flow; + if (ufidp) { + CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp), + &pmd->flow_table) { + if (ovs_u128_equals(&netdev_flow->ufid, ufidp)) { + return netdev_flow; + } } } @@ -1546,21 +1836,24 @@ dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow) } static void -get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow, +get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_, struct dpif_flow_stats *stats) { - struct dp_netdev_flow_stats *bucket; - size_t i; + struct dp_netdev_flow *netdev_flow; + unsigned long long n; + long long used; + uint16_t flags; - memset(stats, 0, sizeof *stats); - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &netdev_flow->stats) { - ovs_mutex_lock(&bucket->mutex); - stats->n_packets += bucket->packet_count; - stats->n_bytes += bucket->byte_count; - stats->used = MAX(stats->used, bucket->used); - stats->tcp_flags |= bucket->tcp_flags; - ovs_mutex_unlock(&bucket->mutex); - } + netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_); + + atomic_read_relaxed(&netdev_flow->stats.packet_count, &n); + stats->n_packets = n; + atomic_read_relaxed(&netdev_flow->stats.byte_count, &n); + stats->n_bytes = n; + atomic_read_relaxed(&netdev_flow->stats.used, &used); + stats->used = used; + atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); + stats->tcp_flags = flags; } /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for @@ -1568,39 +1861,48 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow, * 'mask_buf'. Actions will be returned without copying, by relying on RCU to * protect them. 
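The zero-copy promise above leans on RCU: readers fetch the current dp_netdev_actions pointer and may keep using it while a writer publishes a replacement, with the old block freed only after a grace period via ovsrcu_postpone(). A much-simplified publish-side sketch; real deferral needs the RCU machinery, so this single-threaded toy frees the old copy immediately, which would be unsafe with concurrent readers:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_actions {
        unsigned int size;
        char data[];         // like the new trailing 'struct nlattr actions[]'
    };

    static _Atomic(struct toy_actions *) cur_actions;

    static struct toy_actions *toy_actions_create(const void *buf, unsigned int size)
    {
        struct toy_actions *a = malloc(sizeof *a + size);  // one allocation
        a->size = size;
        memcpy(a->data, buf, size);
        return a;
    }

    static void toy_actions_replace(const void *buf, unsigned int size)
    {
        struct toy_actions *next = toy_actions_create(buf, size);
        struct toy_actions *old = atomic_exchange(&cur_actions, next);

        // Real code: ovsrcu_postpone(free, old) so in-flight readers stay safe.
        free(old);
    }

    int main(void)
    {
        toy_actions_replace("output:1", 9);
        toy_actions_replace("output:2", 9);
        printf("current: %s\n", atomic_load(&cur_actions)->data);
        return 0;
    }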
*/ static void -dp_netdev_flow_to_dpif_flow(const struct dpif *dpif, - const struct dp_netdev_flow *netdev_flow, +dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow, struct ofpbuf *key_buf, struct ofpbuf *mask_buf, - struct dpif_flow *flow) + struct dpif_flow *flow, bool terse) { - struct flow_wildcards wc; - struct dp_netdev_actions *actions; - size_t offset; - - miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks); - - /* Key */ - offset = ofpbuf_size(key_buf); - flow->key = ofpbuf_tail(key_buf); - odp_flow_key_from_flow(key_buf, &netdev_flow->flow, &wc.masks, - netdev_flow->flow.in_port.odp_port, true); - flow->key_len = ofpbuf_size(key_buf) - offset; - - /* Mask */ - offset = ofpbuf_size(mask_buf); - flow->mask = ofpbuf_tail(mask_buf); - odp_flow_key_from_mask(mask_buf, &wc.masks, &netdev_flow->flow, - odp_to_u32(wc.masks.in_port.odp_port), - SIZE_MAX, true); - flow->mask_len = ofpbuf_size(mask_buf) - offset; - - /* Actions */ - actions = dp_netdev_flow_get_actions(netdev_flow); - flow->actions = actions->actions; - flow->actions_len = actions->size; - - dpif_flow_hash(dpif, &netdev_flow->flow, sizeof netdev_flow->flow, - &flow->ufid); + if (terse) { + memset(flow, 0, sizeof *flow); + } else { + struct flow_wildcards wc; + struct dp_netdev_actions *actions; + size_t offset; + struct odp_flow_key_parms odp_parms = { + .flow = &netdev_flow->flow, + .mask = &wc.masks, + .support = dp_netdev_support, + }; + + miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks); + + /* Key */ + offset = key_buf->size; + flow->key = ofpbuf_tail(key_buf); + odp_parms.odp_in_port = netdev_flow->flow.in_port.odp_port; + odp_flow_key_from_flow(&odp_parms, key_buf); + flow->key_len = key_buf->size - offset; + + /* Mask */ + offset = mask_buf->size; + flow->mask = ofpbuf_tail(mask_buf); + odp_parms.odp_in_port = wc.masks.in_port.odp_port; + odp_parms.key_buf = key_buf; + odp_flow_key_from_mask(&odp_parms, mask_buf); + flow->mask_len = mask_buf->size - offset; + + /* Actions */ + actions = dp_netdev_flow_get_actions(netdev_flow); + flow->actions = actions->actions; + flow->actions_len = actions->size; + } + + flow->ufid = netdev_flow->ufid; + flow->ufid_present = true; + flow->pmd_id = netdev_flow->pmd_id; get_dpif_flow_stats(netdev_flow, &flow->stats); } @@ -1608,57 +1910,32 @@ static int dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len, const struct nlattr *mask_key, uint32_t mask_key_len, const struct flow *flow, - struct flow *mask) -{ - if (mask_key_len) { - enum odp_key_fitness fitness; - - fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow); - if (fitness) { - /* This should not happen: it indicates that - * odp_flow_key_from_mask() and odp_flow_key_to_mask() - * disagree on the acceptable form of a mask. Log the problem - * as an error, with enough details to enable debugging. */ - static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); - - if (!VLOG_DROP_ERR(&rl)) { - struct ds s; - - ds_init(&s); - odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s, - true); - VLOG_ERR("internal error parsing flow mask %s (%s)", - ds_cstr(&s), odp_key_fitness_to_string(fitness)); - ds_destroy(&s); - } + struct flow_wildcards *wc) +{ + enum odp_key_fitness fitness; - return EINVAL; - } - } else { - enum mf_field_id id; - /* No mask key, unwildcard everything except fields whose - * prerequisities are not met. */ - memset(mask, 0x0, sizeof *mask); - - for (id = 0; id < MFF_N_IDS; ++id) { - /* Skip registers and metadata. 
*/ - if (!(id >= MFF_REG0 && id < MFF_REG0 + FLOW_N_REGS) - && id != MFF_METADATA) { - const struct mf_field *mf = mf_from_id(id); - if (mf_are_prereqs_ok(mf, flow)) { - mf_mask_field(mf, mask); - } - } + fitness = odp_flow_key_to_mask_udpif(mask_key, mask_key_len, key, + key_len, wc, flow); + if (fitness) { + /* This should not happen: it indicates that + * odp_flow_key_from_mask() and odp_flow_key_to_mask() + * disagree on the acceptable form of a mask. Log the problem + * as an error, with enough details to enable debugging. */ + static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); + + if (!VLOG_DROP_ERR(&rl)) { + struct ds s; + + ds_init(&s); + odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s, + true); + VLOG_ERR("internal error parsing flow mask %s (%s)", + ds_cstr(&s), odp_key_fitness_to_string(fitness)); + ds_destroy(&s); } - } - /* Force unwildcard the in_port. - * - * We need to do this even in the case where we unwildcard "everything" - * above because "everything" only includes the 16-bit OpenFlow port number - * mask->in_port.ofp_port, which only covers half of the 32-bit datapath - * port number mask->in_port.odp_port. */ - mask->in_port.odp_port = u32_to_odp(UINT32_MAX); + return EINVAL; + } return 0; } @@ -1669,7 +1946,7 @@ dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len, { odp_port_t in_port; - if (odp_flow_key_to_flow(key, key_len, flow)) { + if (odp_flow_key_to_flow_udpif(key, key_len, flow)) { /* This should not happen: it indicates that odp_flow_key_from_flow() * and odp_flow_key_to_flow() disagree on the acceptable form of a * flow. Log the problem as an error, with enough details to enable @@ -1693,6 +1970,12 @@ dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len, return EINVAL; } + /* Userspace datapath doesn't support conntrack. */ + if (flow->ct_state || flow->ct_zone || flow->ct_mark + || !ovs_u128_is_zero(&flow->ct_label)) { + return EINVAL; + } + return 0; } @@ -1701,60 +1984,72 @@ dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; - struct flow key; - int error; + struct dp_netdev_pmd_thread *pmd; + unsigned pmd_id = get->pmd_id == PMD_ID_NULL + ? NON_PMD_CORE_ID : get->pmd_id; + int error = 0; - error = dpif_netdev_flow_from_nlattrs(get->key, get->key_len, &key); - if (error) { - return error; + pmd = dp_netdev_get_pmd(dp, pmd_id); + if (!pmd) { + return EINVAL; } - netdev_flow = dp_netdev_find_flow(dp, &key); - + netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key, + get->key_len); if (netdev_flow) { - dp_netdev_flow_to_dpif_flow(dpif, netdev_flow, get->buffer, - get->buffer, get->flow); - } else { + dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer, + get->flow, false); + } else { error = ENOENT; } + dp_netdev_pmd_unref(pmd); + return error; } static struct dp_netdev_flow * -dp_netdev_flow_add(struct dp_netdev *dp, struct match *match, +dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd, + struct match *match, const ovs_u128 *ufid, const struct nlattr *actions, size_t actions_len) - OVS_REQUIRES(dp->flow_mutex) + OVS_REQUIRES(pmd->flow_mutex) { struct dp_netdev_flow *flow; struct netdev_flow_key mask; netdev_flow_mask_init(&mask, match); /* Make sure wc does not have metadata. 
*/ - ovs_assert(!(mask.mf.map & (MINIFLOW_MAP(metadata) | MINIFLOW_MAP(regs)))); + ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata) + && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs)); /* Do not allocate extra space. */ flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len); + memset(&flow->stats, 0, sizeof flow->stats); flow->dead = false; + flow->batch = NULL; + *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id; *CONST_CAST(struct flow *, &flow->flow) = match->flow; + *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid; ovs_refcount_init(&flow->ref_cnt); - ovsthread_stats_init(&flow->stats); ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len)); - cmap_insert(&dp->flow_table, - CONST_CAST(struct cmap_node *, &flow->node), - flow_hash(&flow->flow, 0)); netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask); - dpcls_insert(&dp->cls, &flow->cr, &mask); + dpcls_insert(&pmd->cls, &flow->cr, &mask); + + cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node), + dp_netdev_flow_hash(&flow->ufid)); if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) { struct match match; struct ds ds = DS_EMPTY_INITIALIZER; + match.tun_md.valid = false; match.flow = flow->flow; miniflow_expand(&flow->cr.mask->mf, &match.wc.masks); ds_put_cstr(&ds, "flow_add: "); + odp_format_ufid(ufid, &ds); + ds_put_cstr(&ds, " "); match_format(&match, &ds, OFP_DEFAULT_PRIORITY); ds_put_cstr(&ds, ", actions:"); format_odp_actions(&ds, actions, actions_len); @@ -1767,29 +2062,17 @@ dp_netdev_flow_add(struct dp_netdev *dp, struct match *match, return flow; } -static void -clear_stats(struct dp_netdev_flow *netdev_flow) -{ - struct dp_netdev_flow_stats *bucket; - size_t i; - - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &netdev_flow->stats) { - ovs_mutex_lock(&bucket->mutex); - bucket->used = 0; - bucket->packet_count = 0; - bucket->byte_count = 0; - bucket->tcp_flags = 0; - ovs_mutex_unlock(&bucket->mutex); - } -} - static int dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; struct netdev_flow_key key; + struct dp_netdev_pmd_thread *pmd; struct match match; + ovs_u128 ufid; + unsigned pmd_id = put->pmd_id == PMD_ID_NULL + ? NON_PMD_CORE_ID : put->pmd_id; int error; error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow); @@ -1798,25 +2081,37 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) } error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len, put->mask, put->mask_len, - &match.flow, &match.wc.masks); + &match.flow, &match.wc); if (error) { return error; } - /* Must produce a netdev_flow_key for lookup. + pmd = dp_netdev_get_pmd(dp, pmd_id); + if (!pmd) { + return EINVAL; + } + + /* Must produce a netdev_flow_key for lookup. * This interface is no longer performance critical, since it is not used * for upcall processing any more. 
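dp_netdev_flow_add() above allocates 'sizeof *flow - sizeof flow->cr.flow.mf + mask.len' bytes, which only works because 'cr' (and inside it the miniflow) is the struct's last member. A reduced sketch of that resize-the-tail allocation pattern, with the same caveat as the real code that the variable-length part must stay last:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_miniflow {
        unsigned int len;       // bytes actually used in buf[]
        unsigned long buf[8];   // worst-case inline space, mostly unused
    };

    struct toy_flow {
        int dead;
        struct toy_miniflow mf; // must be last: its tail is resized per flow
    };

    static struct toy_flow *toy_flow_create(const void *vals, unsigned int len)
    {
        // Allocate only what this flow's miniflow really needs.
        struct toy_flow *f = malloc(sizeof *f - sizeof f->mf.buf + len);

        f->dead = 0;
        f->mf.len = len;
        memcpy(f->mf.buf, vals, len);
        return f;
    }

    int main(void)
    {
        unsigned long two[] = { 1, 2 };
        struct toy_flow *f = toy_flow_create(two, sizeof two);

        printf("allocated %zu instead of %zu bytes\n",
               sizeof *f - sizeof f->mf.buf + sizeof two, sizeof *f);
        free(f);
        return 0;
    }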
*/ netdev_flow_key_from_flow(&key, &match.flow); - ovs_mutex_lock(&dp->flow_mutex); - netdev_flow = dp_netdev_lookup_flow(dp, &key); + if (put->ufid) { + ufid = *put->ufid; + } else { + dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid); + } + + ovs_mutex_lock(&pmd->flow_mutex); + netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &key); if (!netdev_flow) { if (put->flags & DPIF_FP_CREATE) { - if (cmap_count(&dp->flow_table) < MAX_FLOWS) { + if (cmap_count(&pmd->flow_table) < MAX_FLOWS) { if (put->stats) { memset(put->stats, 0, sizeof *put->stats); } - dp_netdev_flow_add(dp, &match, put->actions, put->actions_len); + dp_netdev_flow_add(pmd, &match, &ufid, put->actions, + put->actions_len); error = 0; } else { error = EFBIG; @@ -1840,7 +2135,16 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) get_dpif_flow_stats(netdev_flow, put->stats); } if (put->flags & DPIF_FP_ZERO_STATS) { - clear_stats(netdev_flow); + /* XXX: The userspace datapath uses thread local statistics + * (for flows), which should be updated only by the owning + * thread. Since we cannot write on stats memory here, + * we choose not to support this flag. Please note: + * - This feature is currently used only by dpctl commands with + * option --clear. + * - Should the need arise, this operation can be implemented + * by keeping a base value (to be update here) for each + * counter, and subtracting it before outputting the stats */ + error = EOPNOTSUPP; } ovsrcu_postpone(dp_netdev_actions_free, old_actions); @@ -1851,7 +2155,8 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) error = EINVAL; } } - ovs_mutex_unlock(&dp->flow_mutex); + ovs_mutex_unlock(&pmd->flow_mutex); + dp_netdev_pmd_unref(pmd); return error; } @@ -1861,32 +2166,38 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; - struct flow key; - int error; + struct dp_netdev_pmd_thread *pmd; + unsigned pmd_id = del->pmd_id == PMD_ID_NULL + ? 
NON_PMD_CORE_ID : del->pmd_id; + int error = 0; - error = dpif_netdev_flow_from_nlattrs(del->key, del->key_len, &key); - if (error) { - return error; + pmd = dp_netdev_get_pmd(dp, pmd_id); + if (!pmd) { + return EINVAL; } - ovs_mutex_lock(&dp->flow_mutex); - netdev_flow = dp_netdev_find_flow(dp, &key); + ovs_mutex_lock(&pmd->flow_mutex); + netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key, + del->key_len); if (netdev_flow) { if (del->stats) { get_dpif_flow_stats(netdev_flow, del->stats); } - dp_netdev_remove_flow(dp, netdev_flow); + dp_netdev_pmd_remove_flow(pmd, netdev_flow); } else { error = ENOENT; } - ovs_mutex_unlock(&dp->flow_mutex); + ovs_mutex_unlock(&pmd->flow_mutex); + dp_netdev_pmd_unref(pmd); return error; } struct dpif_netdev_flow_dump { struct dpif_flow_dump up; - struct cmap_position pos; + struct cmap_position poll_thread_pos; + struct cmap_position flow_pos; + struct dp_netdev_pmd_thread *cur_pmd; int status; struct ovs_mutex mutex; }; @@ -1898,14 +2209,13 @@ dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump) } static struct dpif_flow_dump * -dpif_netdev_flow_dump_create(const struct dpif *dpif_) +dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse) { struct dpif_netdev_flow_dump *dump; - dump = xmalloc(sizeof *dump); + dump = xzalloc(sizeof *dump); dpif_flow_dump_init(&dump->up, dpif_); - memset(&dump->pos, 0, sizeof dump->pos); - dump->status = 0; + dump->up.terse = terse; ovs_mutex_init(&dump->mutex); return &dump->up; @@ -1962,26 +2272,58 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_, struct dpif_netdev_flow_dump_thread *thread = dpif_netdev_flow_dump_thread_cast(thread_); struct dpif_netdev_flow_dump *dump = thread->dump; - struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif); struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH]; - struct dp_netdev *dp = get_dp_netdev(&dpif->dpif); int n_flows = 0; int i; ovs_mutex_lock(&dump->mutex); if (!dump->status) { - for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH); - n_flows++) { - struct cmap_node *node; + struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif); + struct dp_netdev *dp = get_dp_netdev(&dpif->dpif); + struct dp_netdev_pmd_thread *pmd = dump->cur_pmd; + int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH); + + /* First call to dump_next(), extracts the first pmd thread. + * If there is no pmd thread, returns immediately. */ + if (!pmd) { + pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); + if (!pmd) { + ovs_mutex_unlock(&dump->mutex); + return n_flows; - node = cmap_next_position(&dp->flow_table, &dump->pos); - if (!node) { - dump->status = EOF; - break; } - netdev_flows[n_flows] = CONTAINER_OF(node, struct dp_netdev_flow, - node); } + + do { + for (n_flows = 0; n_flows < flow_limit; n_flows++) { + struct cmap_node *node; + + node = cmap_next_position(&pmd->flow_table, &dump->flow_pos); + if (!node) { + break; + } + netdev_flows[n_flows] = CONTAINER_OF(node, + struct dp_netdev_flow, + node); + } + /* When finishing dumping the current pmd thread, moves to + * the next. */ + if (n_flows < flow_limit) { + memset(&dump->flow_pos, 0, sizeof dump->flow_pos); + dp_netdev_pmd_unref(pmd); + pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); + if (!pmd) { + dump->status = EOF; + break; + } + } + /* Keeps the reference to next caller. */ + dump->cur_pmd = pmd; + + /* If the current dump is empty, do not exit the loop, since the + * remaining pmds could have flows to be dumped. Just dumps again + * on the new 'pmd'. 
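The dump loop above keeps two cursors, one over pmd threads and one inside the current thread's flow table, resets the inner cursor when moving to the next thread, and keeps looping while a thread contributes nothing. A skeleton of that two-level resumable iteration over toy arrays:

    #include <stdio.h>

    #define N_THREADS 3
    #define BATCH 4

    static int flows[N_THREADS][5];
    static int n_flows_per_thread[N_THREADS] = { 0, 5, 2 };   // thread 0 is empty

    struct dump_pos { int thread; int flow; };   // like the two cmap_positions

    // Fill up to BATCH entries; returns how many were produced, 0 at EOF.
    static int dump_next(struct dump_pos *pos, int out[BATCH])
    {
        int n = 0;

        while (pos->thread < N_THREADS) {
            while (n < BATCH && pos->flow < n_flows_per_thread[pos->thread]) {
                out[n++] = flows[pos->thread][pos->flow++];
            }
            if (n == BATCH) {
                break;              // batch full; resume here on the next call
            }
            pos->flow = 0;          // finished this thread: move to the next,
            pos->thread++;          // and keep looping even if n is still 0
            if (n) {
                break;              // partial batch is fine once non-empty
            }
        }
        return n;
    }

    int main(void)
    {
        struct dump_pos pos = { 0, 0 };
        int out[BATCH], n;

        while ((n = dump_next(&pos, out)) > 0) {
            printf("batch of %d\n", n);
        }
        return 0;
    }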
*/ + } while (!n_flows); } ovs_mutex_unlock(&dump->mutex); @@ -1994,7 +2336,8 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_, ofpbuf_use_stack(&key, keybuf, sizeof *keybuf); ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf); - dp_netdev_flow_to_dpif_flow(&dpif->dpif, netdev_flow, &key, &mask, f); + dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f, + dump->up.terse); } return n_flows; @@ -2006,41 +2349,37 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_pmd_thread *pmd; - struct dpif_packet packet, *pp; + struct dp_packet *pp; - if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN || - ofpbuf_size(execute->packet) > UINT16_MAX) { + if (dp_packet_size(execute->packet) < ETH_HEADER_LEN || + dp_packet_size(execute->packet) > UINT16_MAX) { return EINVAL; } - packet.ofpbuf = *execute->packet; - packet.md = execute->md; - pp = &packet; - /* Tries finding the 'pmd'. If NULL is returned, that means * the current thread is a non-pmd thread and should use - * dp_netdev_get_nonpmd(). */ + * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */ pmd = ovsthread_getspecific(dp->per_pmd_key); if (!pmd) { - pmd = dp_netdev_get_nonpmd(dp); + pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID); } /* If the current thread is non-pmd thread, acquires * the 'non_pmd_mutex'. */ if (pmd->core_id == NON_PMD_CORE_ID) { ovs_mutex_lock(&dp->non_pmd_mutex); + ovs_mutex_lock(&dp->port_mutex); } + + pp = execute->packet; dp_netdev_execute_actions(pmd, &pp, 1, false, execute->actions, execute->actions_len); if (pmd->core_id == NON_PMD_CORE_ID) { + dp_netdev_pmd_unref(pmd); + ovs_mutex_unlock(&dp->port_mutex); ovs_mutex_unlock(&dp->non_pmd_mutex); } - /* Even though may_steal is set to false, some actions could modify or - * reallocate the ofpbuf memory. We need to pass those changes to the - * caller */ - *execute->packet = packet.ofpbuf; - execute->md = packet.md; return 0; } @@ -2075,32 +2414,42 @@ dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops) /* Returns true if the configuration for rx queues or cpu mask * is changed. */ static bool -pmd_config_changed(const struct dp_netdev *dp, size_t rxqs, const char *cmask) +pmd_config_changed(const struct dp_netdev *dp, const char *cmask) { - if (dp->n_dpdk_rxqs != rxqs) { - return true; - } else { - if (dp->pmd_cmask != NULL && cmask != NULL) { - return strcmp(dp->pmd_cmask, cmask); - } else { - return (dp->pmd_cmask != NULL || cmask != NULL); + struct dp_netdev_port *port; + + CMAP_FOR_EACH (port, node, &dp->ports) { + struct netdev *netdev = port->netdev; + int requested_n_rxq = netdev_requested_n_rxq(netdev); + if (netdev_is_pmd(netdev) + && port->latest_requested_n_rxq != requested_n_rxq) { + return true; } } + + if (dp->pmd_cmask != NULL && cmask != NULL) { + return strcmp(dp->pmd_cmask, cmask); + } else { + return (dp->pmd_cmask != NULL || cmask != NULL); + } } /* Resets pmd threads if the configuration for 'rxq's or cpu mask changes. 
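One detail of pmd_config_changed() above deserves isolation: the cpu mask must read as changed when exactly one of the old and new strings is NULL, so strcmp() only runs when both exist. That null-safe comparison idiom on its own:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    // Returns true if the configured cpu mask differs from the requested one,
    // treating NULL as "no mask configured".
    static bool cmask_changed(const char *old_mask, const char *new_mask)
    {
        if (old_mask != NULL && new_mask != NULL) {
            return strcmp(old_mask, new_mask) != 0;
        }
        return old_mask != NULL || new_mask != NULL;   // exactly one is set
    }

    int main(void)
    {
        printf("%d\n", cmask_changed(NULL, NULL));     // 0: still unset
        printf("%d\n", cmask_changed(NULL, "0xf"));    // 1: newly set
        printf("%d\n", cmask_changed("0xf", "0xf"));   // 0: unchanged
        printf("%d\n", cmask_changed("0xf", "0x3"));   // 1: different mask
        return 0;
    }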
*/ static int -dpif_netdev_pmd_set(struct dpif *dpif, unsigned int n_rxqs, const char *cmask) +dpif_netdev_pmd_set(struct dpif *dpif, const char *cmask) { struct dp_netdev *dp = get_dp_netdev(dpif); - if (pmd_config_changed(dp, n_rxqs, cmask)) { + if (pmd_config_changed(dp, cmask)) { struct dp_netdev_port *port; dp_netdev_destroy_all_pmds(dp); CMAP_FOR_EACH (port, node, &dp->ports) { - if (netdev_is_pmd(port->netdev)) { + struct netdev *netdev = port->netdev; + int requested_n_rxq = netdev_requested_n_rxq(netdev); + if (netdev_is_pmd(port->netdev) + && port->latest_requested_n_rxq != requested_n_rxq) { int i, err; /* Closes the existing 'rxq's. */ @@ -2110,15 +2459,16 @@ dpif_netdev_pmd_set(struct dpif *dpif, unsigned int n_rxqs, const char *cmask) } /* Sets the new rx queue config. */ - err = netdev_set_multiq(port->netdev, ovs_numa_get_n_cores(), - n_rxqs); + err = netdev_set_multiq(port->netdev, + ovs_numa_get_n_cores() + 1, + requested_n_rxq); if (err && (err != EOPNOTSUPP)) { VLOG_ERR("Failed to set dpdk interface %s rx_queue to:" " %u", netdev_get_name(port->netdev), - n_rxqs); + requested_n_rxq); return err; } - + port->latest_requested_n_rxq = requested_n_rxq; /* If the set_multiq() above succeeds, reopens the 'rxq's. */ port->rxq = xrealloc(port->rxq, sizeof *port->rxq * netdev_n_rxq(port->netdev)); @@ -2127,8 +2477,6 @@ dpif_netdev_pmd_set(struct dpif *dpif, unsigned int n_rxqs, const char *cmask) } } } - dp->n_dpdk_rxqs = n_rxqs; - /* Reconfigures the cpu mask. */ ovs_numa_set_cpu_mask(cmask); free(dp->pmd_cmask); @@ -2152,16 +2500,15 @@ dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED, } -/* Creates and returns a new 'struct dp_netdev_actions', with a reference count - * of 1, whose actions are a copy of from the 'ofpacts_len' bytes of - * 'ofpacts'. */ +/* Creates and returns a new 'struct dp_netdev_actions', whose actions are + * a copy of the 'ofpacts_len' bytes of 'ofpacts'. */ struct dp_netdev_actions * dp_netdev_actions_create(const struct nlattr *actions, size_t size) { struct dp_netdev_actions *netdev_actions; - netdev_actions = xmalloc(sizeof *netdev_actions); - netdev_actions->actions = xmemdup(actions, size); + netdev_actions = xmalloc(sizeof *netdev_actions + size); + memcpy(netdev_actions->actions, actions, size); netdev_actions->size = size; return netdev_actions; @@ -2176,30 +2523,60 @@ dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow) static void dp_netdev_actions_free(struct dp_netdev_actions *actions) { - free(actions->actions); free(actions); } +static inline unsigned long long +cycles_counter(void) +{ +#ifdef DPDK_NETDEV + return rte_get_tsc_cycles(); +#else + return 0; +#endif +} + +/* Fake mutex to make sure that the calls to cycles_count_* are balanced */ +extern struct ovs_mutex cycles_counter_fake_mutex; + +/* Start counting cycles. 
Must be followed by 'cycles_count_end()' */ +static inline void +cycles_count_start(struct dp_netdev_pmd_thread *pmd) + OVS_ACQUIRES(&cycles_counter_fake_mutex) + OVS_NO_THREAD_SAFETY_ANALYSIS +{ + pmd->last_cycles = cycles_counter(); +} + +/* Stop counting cycles and add them to the counter 'type' */ +static inline void +cycles_count_end(struct dp_netdev_pmd_thread *pmd, + enum pmd_cycles_counter_type type) + OVS_RELEASES(&cycles_counter_fake_mutex) + OVS_NO_THREAD_SAFETY_ANALYSIS +{ + unsigned long long interval = cycles_counter() - pmd->last_cycles; + + non_atomic_ullong_add(&pmd->cycles.n[type], interval); +} static void dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd, struct dp_netdev_port *port, struct netdev_rxq *rxq) { - struct dpif_packet *packets[NETDEV_MAX_RX_BATCH]; + struct dp_packet *packets[NETDEV_MAX_BURST]; int error, cnt; + cycles_count_start(pmd); error = netdev_rxq_recv(rxq, packets, &cnt); + cycles_count_end(pmd, PMD_CYCLES_POLLING); if (!error) { - int i; - *recirc_depth_get() = 0; - /* XXX: initialize md in netdev implementation. */ - for (i = 0; i < cnt; i++) { - packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no); - } - dp_netdev_input(pmd, packets, cnt); + cycles_count_start(pmd); + dp_netdev_input(pmd, packets, cnt, port->port_no); + cycles_count_end(pmd, PMD_CYCLES_PROCESSING); } else if (error != EAGAIN && error != EOPNOTSUPP) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); @@ -2214,7 +2591,8 @@ dpif_netdev_run(struct dpif *dpif) { struct dp_netdev_port *port; struct dp_netdev *dp = get_dp_netdev(dpif); - struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_nonpmd(dp); + struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_pmd(dp, + NON_PMD_CORE_ID); uint64_t new_tnl_seq; ovs_mutex_lock(&dp->non_pmd_mutex); @@ -2228,7 +2606,10 @@ dpif_netdev_run(struct dpif *dpif) } } ovs_mutex_unlock(&dp->non_pmd_mutex); - tnl_arp_cache_run(); + dp_netdev_pmd_unref(non_pmd); + + tnl_neigh_cache_run(); + tnl_port_map_run(); new_tnl_seq = seq_read(tnl_conf_seq); if (dp->last_tnl_conf_seq != new_tnl_seq) { @@ -2258,56 +2639,29 @@ dpif_netdev_wait(struct dpif *dpif) seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq); } -struct rxq_poll { - struct dp_netdev_port *port; - struct netdev_rxq *rx; -}; - static int pmd_load_queues(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **ppoll_list, int poll_cnt) + OVS_REQUIRES(pmd->poll_mutex) { struct rxq_poll *poll_list = *ppoll_list; - struct dp_netdev_port *port; - int n_pmds_on_numa, index, i; + struct rxq_poll *poll; + int i; - /* Simple scheduler for netdev rx polling. */ for (i = 0; i < poll_cnt; i++) { port_unref(poll_list[i].port); } - poll_cnt = 0; - n_pmds_on_numa = get_n_pmd_threads_on_numa(pmd->dp, pmd->numa_id); - index = 0; + poll_list = xrealloc(poll_list, pmd->poll_cnt * sizeof *poll_list); - CMAP_FOR_EACH (port, node, &pmd->dp->ports) { - /* Calls port_try_ref() to prevent the main thread - * from deleting the port. */ - if (port_try_ref(port)) { - if (netdev_is_pmd(port->netdev) - && netdev_get_numa_id(port->netdev) == pmd->numa_id) { - int i; - - for (i = 0; i < netdev_n_rxq(port->netdev); i++) { - if ((index % n_pmds_on_numa) == pmd->index) { - poll_list = xrealloc(poll_list, - sizeof *poll_list * (poll_cnt + 1)); - - port_ref(port); - poll_list[poll_cnt].port = port; - poll_list[poll_cnt].rx = port->rxq[i]; - poll_cnt++; - } - index++; - } - } - /* Unrefs the port_try_ref(). 
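The cycles_count_start()/cycles_count_end() pair introduced above brackets the polling and processing phases, and the extern fake mutex exists only so Clang's thread-safety analysis can verify that every start is paired with an end. A self-contained model of the same accounting, assuming an x86 rdtsc and falling back to 0 elsewhere, just as the non-DPDK build above does; all names are illustrative:

#include <stdint.h>

enum counter { POLLING, PROCESSING, N_COUNTERS };

struct pmd_model {
    uint64_t last;                      /* Timestamp at count_start(). */
    uint64_t cycles[N_COUNTERS];        /* Accumulated cycles by phase. */
};

static inline uint64_t
cycles_now(void)
{
#if defined(__x86_64__) || defined(__i386__)
    uint32_t lo, hi;
    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
    return ((uint64_t) hi << 32) | lo;
#else
    return 0;                           /* Mirrors the non-DPDK fallback. */
#endif
}

static inline void
count_start(struct pmd_model *pmd)
{
    pmd->last = cycles_now();
}

static inline void
count_end(struct pmd_model *pmd, enum counter type)
{
    pmd->cycles[type] += cycles_now() - pmd->last;
}

/* Usage mirrors dp_netdev_process_rxq_port(): bracket the receive call
 * with POLLING, and the input path with PROCESSING. */
int
main(void)
{
    struct pmd_model pmd = { 0, { 0 } };

    count_start(&pmd);
    /* n = poll_rx_queue(...);         hypothetical receive */
    count_end(&pmd, POLLING);

    count_start(&pmd);
    /* process_packets(...);           hypothetical input path */
    count_end(&pmd, PROCESSING);
    return 0;
}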
*/ - port_unref(port); - } + i = 0; + LIST_FOR_EACH (poll, node, &pmd->poll_list) { + port_ref(poll->port); + poll_list[i++] = *poll; } *ppoll_list = poll_list; - return poll_cnt; + return pmd->poll_cnt; } static void * @@ -2328,15 +2682,23 @@ pmd_thread_main(void *f_) pmd_thread_setaffinity_cpu(pmd->core_id); reload: emc_cache_init(&pmd->flow_cache); + + ovs_mutex_lock(&pmd->poll_mutex); poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt); + ovs_mutex_unlock(&pmd->poll_mutex); + + /* List port/core affinity */ + for (i = 0; i < poll_cnt; i++) { + VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n", + pmd->core_id, netdev_get_name(poll_list[i].port->netdev), + netdev_rxq_get_queue_id(poll_list[i].rx)); + } /* Signal here to make sure the pmd finishes * reloading the updated configuration. */ dp_netdev_pmd_reload_done(pmd); for (;;) { - int i; - for (i = 0; i < poll_cnt; i++) { dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx); } @@ -2347,6 +2709,7 @@ reload: lc = 0; emc_cache_slow_sweep(&pmd->flow_cache); + coverage_try_clear(); ovsrcu_quiesce(); atomic_read_relaxed(&pmd->change_seq, &seq); @@ -2364,7 +2727,7 @@ reload: } for (i = 0; i < poll_cnt; i++) { - port_unref(poll_list[i].port); + port_unref(poll_list[i].port); } dp_netdev_pmd_reload_done(pmd); @@ -2403,7 +2766,7 @@ dpif_netdev_enable_upcall(struct dpif *dpif) dp_netdev_enable_upcall(dp); } -void +static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd) { ovs_mutex_lock(&pmd->cond_mutex); @@ -2411,18 +2774,23 @@ dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd) ovs_mutex_unlock(&pmd->cond_mutex); } -/* Returns the pointer to the dp_netdev_pmd_thread for non-pmd threads. */ +/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns + * the pointer if succeeds, otherwise, NULL. + * + * Caller must unrefs the returned reference. */ static struct dp_netdev_pmd_thread * -dp_netdev_get_nonpmd(struct dp_netdev *dp) +dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id) { struct dp_netdev_pmd_thread *pmd; const struct cmap_node *pnode; - pnode = cmap_find(&dp->poll_threads, hash_int(NON_PMD_CORE_ID, 0)); - ovs_assert(pnode); + pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0)); + if (!pnode) { + return NULL; + } pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node); - return pmd; + return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL; } /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */ @@ -2436,19 +2804,67 @@ dp_netdev_set_nonpmd(struct dp_netdev *dp) OVS_NUMA_UNSPEC); } +/* Caller must have valid pointer to 'pmd'. */ +static bool +dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd) +{ + return ovs_refcount_try_ref_rcu(&pmd->ref_cnt); +} + +static void +dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd) +{ + if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) { + ovsrcu_postpone(dp_netdev_destroy_pmd, pmd); + } +} + +/* Given cmap position 'pos', tries to ref the next node. If try_ref() + * fails, keeps checking for next node until reaching the end of cmap. + * + * Caller must unrefs the returned reference. */ +static struct dp_netdev_pmd_thread * +dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos) +{ + struct dp_netdev_pmd_thread *next; + + do { + struct cmap_node *node; + + node = cmap_next_position(&dp->poll_threads, pos); + next = node ? 
CONTAINER_OF(node, struct dp_netdev_pmd_thread, node) + : NULL; + } while (next && !dp_netdev_pmd_try_ref(next)); + + return next; +} + /* Configures the 'pmd' based on the input argument. */ static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, - int index, int core_id, int numa_id) + int index, unsigned core_id, int numa_id) { pmd->dp = dp; pmd->index = index; pmd->core_id = core_id; pmd->numa_id = numa_id; + pmd->poll_cnt = 0; + + atomic_init(&pmd->tx_qid, + (core_id == NON_PMD_CORE_ID) + ? ovs_numa_get_n_cores() + : get_n_pmd_threads(dp)); + + ovs_refcount_init(&pmd->ref_cnt); latch_init(&pmd->exit_latch); atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ); xpthread_cond_init(&pmd->cond, NULL); ovs_mutex_init(&pmd->cond_mutex); + ovs_mutex_init(&pmd->flow_mutex); + ovs_mutex_init(&pmd->poll_mutex); + dpcls_init(&pmd->cls); + cmap_init(&pmd->flow_table); + list_init(&pmd->poll_list); /* init the 'flow_cache' since there is no * actual thread created for NON_PMD_CORE_ID. */ if (core_id == NON_PMD_CORE_ID) { @@ -2458,13 +2874,27 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, hash_int(core_id, 0)); } -/* Stops the pmd thread, removes it from the 'dp->poll_threads' - * and destroys the struct. */ static void -dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd) +dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd) +{ + dp_netdev_pmd_flow_flush(pmd); + dpcls_destroy(&pmd->cls); + cmap_destroy(&pmd->flow_table); + ovs_mutex_destroy(&pmd->flow_mutex); + latch_destroy(&pmd->exit_latch); + xpthread_cond_destroy(&pmd->cond); + ovs_mutex_destroy(&pmd->cond_mutex); + ovs_mutex_destroy(&pmd->poll_mutex); + free(pmd); +} + +/* Stops the pmd thread, removes it from the 'dp->poll_threads', + * and unrefs the struct. */ +static void +dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd) { /* Uninit the 'flow_cache' since there is - * no actual thread uninit it. */ + * no actual thread uninit it for NON_PMD_CORE_ID. */ if (pmd->core_id == NON_PMD_CORE_ID) { emc_cache_uninit(&pmd->flow_cache); } else { @@ -2473,11 +2903,17 @@ dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd) ovs_numa_unpin_core(pmd->core_id); xpthread_join(pmd->thread, NULL); } + + /* Unref all ports and free poll_list. */ + dp_netdev_pmd_clear_poll_list(pmd); + + /* Purges the 'pmd''s flows after stopping the thread, but before + * destroying the flows, so that the flow stats can be collected. */ + if (dp->dp_purge_cb) { + dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id); + } cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0)); - latch_destroy(&pmd->exit_latch); - xpthread_cond_destroy(&pmd->cond); - ovs_mutex_destroy(&pmd->cond_mutex); - free(pmd); + dp_netdev_pmd_unref(pmd); } /* Destroys all pmd threads. */ @@ -2485,23 +2921,202 @@ static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp) { struct dp_netdev_pmd_thread *pmd; + struct dp_netdev_pmd_thread **pmd_list; + size_t k = 0, n_pmds; + + n_pmds = cmap_count(&dp->poll_threads); + pmd_list = xcalloc(n_pmds, sizeof *pmd_list); CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { - dp_netdev_del_pmd(pmd); + /* We cannot call dp_netdev_del_pmd(), since it alters + * 'dp->poll_threads' (while we're iterating it) and it + * might quiesce. */ + ovs_assert(k < n_pmds); + pmd_list[k++] = pmd; + } + + for (size_t i = 0; i < k; i++) { + dp_netdev_del_pmd(dp, pmd_list[i]); } + free(pmd_list); } -/* Deletes all pmd threads on numa node 'numa_id'. 
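dp_netdev_destroy_all_pmds() above snapshots the threads into an array before deleting any of them, because deletion mutates 'dp->poll_threads' mid-iteration and may quiesce. The same collect-then-delete pattern in a generic, self-contained form (illustrative types, not the OVS cmap):

#include <assert.h>
#include <stdlib.h>

struct item { int id; };
struct registry { struct item **slots; size_t n; };

/* Unlinks 'it' from 'r' and frees it.  Mutates 'r', which is exactly why
 * it must not run while 'r' is being iterated directly. */
static void
item_delete(struct registry *r, struct item *it)
{
    for (size_t i = 0; i < r->n; i++) {
        if (r->slots[i] == it) {
            r->slots[i] = r->slots[--r->n];
            free(it);
            return;
        }
    }
}

static void
destroy_all(struct registry *r)
{
    size_t n = r->n, k = 0;
    struct item **list = calloc(n, sizeof *list);

    /* Pass 1: snapshot the members without mutating the container. */
    for (size_t i = 0; i < r->n; i++) {
        assert(k < n);
        list[k++] = r->slots[i];
    }

    /* Pass 2: now each deletion may safely mutate the container. */
    for (size_t i = 0; i < k; i++) {
        item_delete(r, list[i]);
    }
    free(list);
}

int
main(void)
{
    struct item *items[] = { malloc(sizeof **items), malloc(sizeof **items) };
    struct registry r = { items, 2 };

    destroy_all(&r);
    return r.n == 0 ? 0 : 1;
}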
*/ +/* Deletes all pmd threads on numa node 'numa_id' and + * fixes tx_qids of other threads to keep them sequential. */ static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id) { struct dp_netdev_pmd_thread *pmd; + int n_pmds_on_numa, n_pmds; + int *free_idx, k = 0; + struct dp_netdev_pmd_thread **pmd_list; + + n_pmds_on_numa = get_n_pmd_threads_on_numa(dp, numa_id); + free_idx = xcalloc(n_pmds_on_numa, sizeof *free_idx); + pmd_list = xcalloc(n_pmds_on_numa, sizeof *pmd_list); CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + /* We cannot call dp_netdev_del_pmd(), since it alters + * 'dp->poll_threads' (while we're iterating it) and it + * might quiesce. */ if (pmd->numa_id == numa_id) { - dp_netdev_del_pmd(pmd); + atomic_read_relaxed(&pmd->tx_qid, &free_idx[k]); + pmd_list[k] = pmd; + ovs_assert(k < n_pmds_on_numa); + k++; + } + } + + for (int i = 0; i < k; i++) { + dp_netdev_del_pmd(dp, pmd_list[i]); + } + + n_pmds = get_n_pmd_threads(dp); + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + int old_tx_qid; + + atomic_read_relaxed(&pmd->tx_qid, &old_tx_qid); + + if (old_tx_qid >= n_pmds) { + int new_tx_qid = free_idx[--k]; + + atomic_store_relaxed(&pmd->tx_qid, new_tx_qid); + } + } + + free(pmd_list); + free(free_idx); +} + +/* Deletes all rx queues from pmd->poll_list. */ +static void +dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd) +{ + struct rxq_poll *poll; + + ovs_mutex_lock(&pmd->poll_mutex); + LIST_FOR_EACH_POP (poll, node, &pmd->poll_list) { + port_unref(poll->port); + free(poll); + } + pmd->poll_cnt = 0; + ovs_mutex_unlock(&pmd->poll_mutex); +} + +/* Deletes all rx queues of 'port' from poll_list of pmd thread and + * reloads it if poll_list was changed. */ +static void +dp_netdev_del_port_from_pmd(struct dp_netdev_port *port, + struct dp_netdev_pmd_thread *pmd) +{ + struct rxq_poll *poll, *next; + bool found = false; + + ovs_mutex_lock(&pmd->poll_mutex); + LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) { + if (poll->port == port) { + found = true; + port_unref(poll->port); + list_remove(&poll->node); + pmd->poll_cnt--; + free(poll); + } + } + ovs_mutex_unlock(&pmd->poll_mutex); + if (found) { + dp_netdev_reload_pmd__(pmd); + } +} + +/* Deletes all rx queues of 'port' from all pmd threads of dp and + * reloads them if needed. */ +static void +dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp, + struct dp_netdev_port *port) +{ + int numa_id = netdev_get_numa_id(port->netdev); + struct dp_netdev_pmd_thread *pmd; + + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + if (pmd->numa_id == numa_id) { + dp_netdev_del_port_from_pmd(port, pmd); + } + } +} + +/* Returns PMD thread from this numa node with fewer rx queues to poll. + * Returns NULL if there is no PMD threads on this numa node. + * Can be called safely only by main thread. */ +static struct dp_netdev_pmd_thread * +dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id) +{ + int min_cnt = -1; + struct dp_netdev_pmd_thread *pmd, *res = NULL; + + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + if (pmd->numa_id == numa_id + && (min_cnt > pmd->poll_cnt || res == NULL)) { + min_cnt = pmd->poll_cnt; + res = pmd; + } + } + + return res; +} + +/* Adds rx queue to poll_list of PMD thread. 
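dp_netdev_less_loaded_pmd_on_numa() above is a single linear scan that keeps the candidate with the smallest poll count. The same selection in isolation, with an illustrative 'struct worker' standing in for the pmd thread:

#include <stddef.h>

struct worker { int numa_id; int poll_cnt; };

static struct worker *
least_loaded_on_numa(struct worker *workers, size_t n, int numa_id)
{
    struct worker *res = NULL;

    for (size_t i = 0; i < n; i++) {
        struct worker *w = &workers[i];

        if (w->numa_id == numa_id && (!res || w->poll_cnt < res->poll_cnt)) {
            res = w;
        }
    }
    return res;         /* NULL if the numa node has no workers at all. */
}

int
main(void)
{
    struct worker w[] = { { 0, 3 }, { 0, 1 }, { 1, 0 } };
    struct worker *pick = least_loaded_on_numa(w, 3, 0);

    return pick == &w[1] ? 0 : 1;   /* Picks the numa-0 worker with load 1. */
}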
*/ +static void +dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd, + struct dp_netdev_port *port, struct netdev_rxq *rx) + OVS_REQUIRES(pmd->poll_mutex) +{ + struct rxq_poll *poll = xmalloc(sizeof *poll); + + port_ref(port); + poll->port = port; + poll->rx = rx; + + list_push_back(&pmd->poll_list, &poll->node); + pmd->poll_cnt++; +} + +/* Distributes all rx queues of 'port' between all PMD threads and reloads + * them if needed. */ +static void +dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port) +{ + int numa_id = netdev_get_numa_id(port->netdev); + struct dp_netdev_pmd_thread *pmd; + struct hmapx to_reload; + struct hmapx_node *node; + int i; + + hmapx_init(&to_reload); + /* Cannot create pmd threads for invalid numa node. */ + ovs_assert(ovs_numa_numa_id_is_valid(numa_id)); + + for (i = 0; i < netdev_n_rxq(port->netdev); i++) { + pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id); + if (!pmd) { + /* There is no pmd threads on this numa node. */ + dp_netdev_set_pmds_on_numa(dp, numa_id); + /* Assigning of rx queues done. */ + break; } + + ovs_mutex_lock(&pmd->poll_mutex); + dp_netdev_add_rxq_to_pmd(pmd, port, port->rxq[i]); + ovs_mutex_unlock(&pmd->poll_mutex); + + hmapx_add(&to_reload, pmd); } + + HMAPX_FOR_EACH (node, &to_reload) { + pmd = (struct dp_netdev_pmd_thread *) node->data; + dp_netdev_reload_pmd__(pmd); + } + + hmapx_destroy(&to_reload); } /* Checks the numa node id of 'netdev' and starts pmd threads for @@ -2523,7 +3138,9 @@ dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id) * in which 'netdev' is on, do nothing. Else, creates the * pmd threads for the numa node. */ if (!n_pmds) { - int can_have, n_unpinned, i; + int can_have, n_unpinned, i, index = 0; + struct dp_netdev_pmd_thread **pmds; + struct dp_netdev_port *port; n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id); if (!n_unpinned) { @@ -2535,28 +3152,37 @@ dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id) /* If cpu mask is specified, uses all unpinned cores, otherwise * tries creating NR_PMD_THREADS pmd threads. */ can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS); + pmds = xzalloc(can_have * sizeof *pmds); for (i = 0; i < can_have; i++) { - struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd); - int core_id = ovs_numa_get_unpinned_core_on_numa(numa_id); + unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id); + pmds[i] = xzalloc(sizeof **pmds); + dp_netdev_configure_pmd(pmds[i], dp, i, core_id, numa_id); + } + + /* Distributes rx queues of this numa node between new pmd threads. */ + CMAP_FOR_EACH (port, node, &dp->ports) { + if (netdev_is_pmd(port->netdev) + && netdev_get_numa_id(port->netdev) == numa_id) { + for (i = 0; i < netdev_n_rxq(port->netdev); i++) { + /* Make thread-safety analyser happy. */ + ovs_mutex_lock(&pmds[index]->poll_mutex); + dp_netdev_add_rxq_to_pmd(pmds[index], port, port->rxq[i]); + ovs_mutex_unlock(&pmds[index]->poll_mutex); + index = (index + 1) % can_have; + } + } + } - dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id); - /* Each thread will distribute all devices rx-queues among - * themselves. */ - pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd); + /* Actual start of pmd threads. 
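The hmapx above deduplicates reload targets: a pmd thread that receives several of the port's queues is still reloaded only once. A sketch of that collect-distinct-then-act idea using a trivial fixed-size pointer set (illustrative, not the OVS hmapx API):

#include <stddef.h>
#include <stdio.h>

#define MAX_WORKERS 64

struct ptr_set { const void *ptrs[MAX_WORKERS]; size_t n; };

static void
ptr_set_add(struct ptr_set *set, const void *p)
{
    for (size_t i = 0; i < set->n; i++) {
        if (set->ptrs[i] == p) {
            return;             /* Already queued for reload. */
        }
    }
    set->ptrs[set->n++] = p;
}

int
main(void)
{
    struct ptr_set to_reload = { .n = 0 };
    int pmd_a, pmd_b;

    /* Distributing three queues may pick the same worker twice; the set
     * still ends up with each distinct worker once. */
    ptr_set_add(&to_reload, &pmd_a);
    ptr_set_add(&to_reload, &pmd_b);
    ptr_set_add(&to_reload, &pmd_a);

    printf("%zu workers to reload\n", to_reload.n);     /* Prints 2. */
    return 0;
}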
*/ + for (i = 0; i < can_have; i++) { + pmds[i]->thread = ovs_thread_create("pmd", pmd_thread_main, pmds[i]); } + free(pmds); VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id); } } -static void * -dp_netdev_flow_stats_new_cb(void) -{ - struct dp_netdev_flow_stats *bucket = xzalloc_cacheline(sizeof *bucket); - ovs_mutex_init(&bucket->mutex); - return bucket; -} - /* Called after pmd threads config change. Restarts pmd threads with * new configuration. */ static void @@ -2580,95 +3206,144 @@ dpif_netdev_get_datapath_version(void) } static void -dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, - int cnt, int size, - uint16_t tcp_flags) +dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size, + uint16_t tcp_flags, long long now) { - long long int now = time_msec(); - struct dp_netdev_flow_stats *bucket; - - bucket = ovsthread_stats_bucket_get(&netdev_flow->stats, - dp_netdev_flow_stats_new_cb); + uint16_t flags; - ovs_mutex_lock(&bucket->mutex); - bucket->used = MAX(now, bucket->used); - bucket->packet_count += cnt; - bucket->byte_count += size; - bucket->tcp_flags |= tcp_flags; - ovs_mutex_unlock(&bucket->mutex); -} - -static void * -dp_netdev_stats_new_cb(void) -{ - struct dp_netdev_stats *bucket = xzalloc_cacheline(sizeof *bucket); - ovs_mutex_init(&bucket->mutex); - return bucket; + atomic_store_relaxed(&netdev_flow->stats.used, now); + non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt); + non_atomic_ullong_add(&netdev_flow->stats.byte_count, size); + atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); + flags |= tcp_flags; + atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags); } static void -dp_netdev_count_packet(struct dp_netdev *dp, enum dp_stat_type type, int cnt) +dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd, + enum dp_stat_type type, int cnt) { - struct dp_netdev_stats *bucket; - - bucket = ovsthread_stats_bucket_get(&dp->stats, dp_netdev_stats_new_cb); - ovs_mutex_lock(&bucket->mutex); - bucket->n[type] += cnt; - ovs_mutex_unlock(&bucket->mutex); + non_atomic_ullong_add(&pmd->stats.n[type], cnt); } static int -dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_, +dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_, struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid, enum dpif_upcall_type type, const struct nlattr *userdata, struct ofpbuf *actions, struct ofpbuf *put_actions) { - struct ofpbuf *packet = &packet_->ofpbuf; - - if (type == DPIF_UC_MISS) { - dp_netdev_count_packet(dp, DP_STAT_MISS, 1); - } + struct dp_netdev *dp = pmd->dp; + struct flow_tnl orig_tunnel; + int err; if (OVS_UNLIKELY(!dp->upcall_cb)) { return ENODEV; } + /* Upcall processing expects the Geneve options to be in the translated + * format but we need to retain the raw format for datapath use. 
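dp_netdev_flow_used() above relies on each flow's stats having a single writer (the owning pmd thread): non_atomic_ullong_add() does a plain load/add/store on relaxed atomics instead of an atomic read-modify-write, while other threads still read untorn values. A C11 sketch of that single-writer pattern, with illustrative names:

#include <stdatomic.h>
#include <stdint.h>

struct flow_stats {
    atomic_ullong packet_count;
    atomic_ullong byte_count;
    atomic_ushort tcp_flags;            /* Bitwise-OR of seen TCP flags. */
};

/* Writer side: must be called only from the thread owning 'stats'. */
static void
stats_update(struct flow_stats *stats, unsigned long long pkts,
             unsigned long long bytes, uint16_t tcp_flags)
{
    unsigned long long v;
    unsigned short f;

    v = atomic_load_explicit(&stats->packet_count, memory_order_relaxed);
    atomic_store_explicit(&stats->packet_count, v + pkts,
                          memory_order_relaxed);
    v = atomic_load_explicit(&stats->byte_count, memory_order_relaxed);
    atomic_store_explicit(&stats->byte_count, v + bytes,
                          memory_order_relaxed);
    f = atomic_load_explicit(&stats->tcp_flags, memory_order_relaxed);
    atomic_store_explicit(&stats->tcp_flags, f | tcp_flags,
                          memory_order_relaxed);
}

int
main(void)
{
    struct flow_stats stats = { 0, 0, 0 };

    stats_update(&stats, 1, 64, 0x12);
    return 0;
}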
*/ + orig_tunnel.flags = flow->tunnel.flags; + if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) { + orig_tunnel.metadata.present.len = flow->tunnel.metadata.present.len; + memcpy(orig_tunnel.metadata.opts.gnv, flow->tunnel.metadata.opts.gnv, + flow->tunnel.metadata.present.len); + err = tun_metadata_from_geneve_udpif(&orig_tunnel, &orig_tunnel, + &flow->tunnel); + if (err) { + return err; + } + } + if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) { struct ds ds = DS_EMPTY_INITIALIZER; - struct ofpbuf key; char *packet_str; + struct ofpbuf key; + struct odp_flow_key_parms odp_parms = { + .flow = flow, + .mask = &wc->masks, + .odp_in_port = flow->in_port.odp_port, + .support = dp_netdev_support, + }; ofpbuf_init(&key, 0); - odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port, - true); + odp_flow_key_from_flow(&odp_parms, &key); + packet_str = ofp_packet_to_string(dp_packet_data(packet_), + dp_packet_size(packet_)); - packet_str = ofp_packet_to_string(ofpbuf_data(packet), - ofpbuf_size(packet)); - - odp_flow_key_format(ofpbuf_data(&key), ofpbuf_size(&key), &ds); + odp_flow_key_format(key.data, key.size, &ds); VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name, dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str); ofpbuf_uninit(&key); free(packet_str); + ds_destroy(&ds); } - return dp->upcall_cb(packet, flow, ufid, type, userdata, actions, wc, - put_actions, dp->upcall_aux); + err = dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata, + actions, wc, put_actions, dp->upcall_aux); + if (err && err != ENOSPC) { + return err; + } + + /* Translate tunnel metadata masks to datapath format. */ + if (wc) { + if (wc->masks.tunnel.metadata.present.map) { + struct geneve_opt opts[TLV_TOT_OPT_SIZE / + sizeof(struct geneve_opt)]; + + if (orig_tunnel.flags & FLOW_TNL_F_UDPIF) { + tun_metadata_to_geneve_udpif_mask(&flow->tunnel, + &wc->masks.tunnel, + orig_tunnel.metadata.opts.gnv, + orig_tunnel.metadata.present.len, + opts); + } else { + orig_tunnel.metadata.present.len = 0; + } + + memset(&wc->masks.tunnel.metadata, 0, + sizeof wc->masks.tunnel.metadata); + memcpy(&wc->masks.tunnel.metadata.opts.gnv, opts, + orig_tunnel.metadata.present.len); + } + wc->masks.tunnel.metadata.present.len = 0xff; + } + + /* Restore tunnel metadata. We need to use the saved options to ensure + * that any unknown options are not lost. The generated mask will have + * the same structure, matching on types and lengths but wildcarding + * option data we don't care about. 
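The snapshot taken into 'orig_tunnel' above is what makes this restore possible once the upcall has rewritten the tunnel metadata. The same save/call/restore shape in a generic, self-contained form (both functions are illustrative stand-ins, not the OVS API):

#include <string.h>

struct tun_opts { unsigned char data[256]; unsigned int len; };

/* Stand-in for the upcall: may translate or overwrite '*opts' in place. */
static int
invoke_upcall(struct tun_opts *opts)
{
    memset(opts->data, 0, sizeof opts->data);   /* Pretend translation. */
    return 0;
}

static int
upcall_preserving_raw_opts(struct tun_opts *opts)
{
    struct tun_opts saved = *opts;      /* Snapshot the raw format. */
    int err = invoke_upcall(opts);      /* Runs on the translated form. */

    if (!err) {
        *opts = saved;                  /* Hand the raw format back. */
    }
    return err;
}

int
main(void)
{
    struct tun_opts opts = { .len = 4 };

    return upcall_preserving_raw_opts(&opts);
}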
*/ + if (orig_tunnel.flags & FLOW_TNL_F_UDPIF) { + memcpy(&flow->tunnel.metadata.opts.gnv, orig_tunnel.metadata.opts.gnv, + orig_tunnel.metadata.present.len); + flow->tunnel.metadata.present.len = orig_tunnel.metadata.present.len; + flow->tunnel.flags |= FLOW_TNL_F_UDPIF; + } + + return err; } static inline uint32_t -dpif_netdev_packet_get_dp_hash(struct dpif_packet *packet, - const struct miniflow *mf) +dpif_netdev_packet_get_rss_hash(struct dp_packet *packet, + const struct miniflow *mf) { - uint32_t hash; + uint32_t hash, recirc_depth; - hash = dpif_packet_get_dp_hash(packet); - if (OVS_UNLIKELY(!hash)) { + if (OVS_LIKELY(dp_packet_rss_valid(packet))) { + hash = dp_packet_get_rss_hash(packet); + } else { hash = miniflow_hash_5tuple(mf, 0); - dpif_packet_set_dp_hash(packet, hash); + dp_packet_set_rss_hash(packet, hash); + } + + /* The RSS hash must account for the recirculation depth to avoid + * collisions in the exact match cache */ + recirc_depth = *recirc_depth_get_unsafe(); + if (OVS_UNLIKELY(recirc_depth)) { + hash = hash_finish(hash, recirc_depth); + dp_packet_set_rss_hash(packet, hash); } return hash; } @@ -2680,23 +3355,24 @@ struct packet_batch { struct dp_netdev_flow *flow; - struct dpif_packet *packets[NETDEV_MAX_RX_BATCH]; + struct dp_packet *packets[NETDEV_MAX_BURST]; }; static inline void -packet_batch_update(struct packet_batch *batch, struct dpif_packet *packet, +packet_batch_update(struct packet_batch *batch, struct dp_packet *packet, const struct miniflow *mf) { batch->tcp_flags |= miniflow_get_tcp_flags(mf); batch->packets[batch->packet_count++] = packet; - batch->byte_count += ofpbuf_size(&packet->ofpbuf); + batch->byte_count += dp_packet_size(packet); } static inline void packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow) { - batch->flow = flow; + flow->batch = batch; + batch->flow = flow; batch->packet_count = 0; batch->byte_count = 0; batch->tcp_flags = 0; @@ -2704,139 +3380,124 @@ packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow) static inline void packet_batch_execute(struct packet_batch *batch, - struct dp_netdev_pmd_thread *pmd) + struct dp_netdev_pmd_thread *pmd, + long long now) { struct dp_netdev_actions *actions; struct dp_netdev_flow *flow = batch->flow; - dp_netdev_flow_used(batch->flow, batch->packet_count, batch->byte_count, - batch->tcp_flags); + dp_netdev_flow_used(flow, batch->packet_count, batch->byte_count, + batch->tcp_flags, now); actions = dp_netdev_flow_get_actions(flow); dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true, actions->actions, actions->size); - - dp_netdev_count_packet(pmd->dp, DP_STAT_HIT, batch->packet_count); } -static inline bool -dp_netdev_queue_batches(struct dpif_packet *pkt, +static inline void +dp_netdev_queue_batches(struct dp_packet *pkt, struct dp_netdev_flow *flow, const struct miniflow *mf, - struct packet_batch *batches, size_t *n_batches, - size_t max_batches) -{ - struct packet_batch *batch = NULL; - int j; - - if (OVS_UNLIKELY(!flow)) { - return false; - } - /* XXX: This O(n^2) algortihm makes sense if we're operating under the - * assumption that the number of distinct flows (and therefore the - * number of distinct batches) is quite small. If this turns out not - * to be the case, it may make sense to pre sort based on the - * netdev_flow pointer. That done we can get the appropriate batching - * in O(n * log(n)) instead. 
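The new dp_netdev_queue_batches() above replaces that removed O(n^2) scan by caching a back-pointer: each flow remembers its batch for the duration of one input pass, so grouping n packets into batches is O(n). A simplified, self-contained sketch of the technique (types are illustrative):

#include <stddef.h>

#define MAX_BATCHES 32

struct flow;
struct batch { struct flow *flow; size_t n_packets; };
struct flow  { struct batch *batch; };  /* NULL outside an input pass. */

static void
queue_packet(struct flow *flow, struct batch *batches, size_t *n_batches)
{
    struct batch *b = flow->batch;

    if (!b) {                           /* First packet of this flow. */
        b = &batches[(*n_batches)++];
        b->flow = flow;
        b->n_packets = 0;
        flow->batch = b;
    }
    b->n_packets++;
}

int
main(void)
{
    struct flow f1 = { NULL }, f2 = { NULL };
    struct batch batches[MAX_BATCHES];
    size_t n_batches = 0;

    queue_packet(&f1, batches, &n_batches);
    queue_packet(&f2, batches, &n_batches);
    queue_packet(&f1, batches, &n_batches);

    /* n_batches == 2 and batches[0].n_packets == 2.  Afterwards the
     * caller must reset f1.batch = f2.batch = NULL, exactly as
     * dp_netdev_input__() does, or the next pass would see stale
     * pointers. */
    return n_batches == 2 ? 0 : 1;
}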
*/ - for (j = *n_batches - 1; j >= 0; j--) { - if (batches[j].flow == flow) { - batch = &batches[j]; - packet_batch_update(batch, pkt, mf); - return true; - } - } - if (OVS_UNLIKELY(*n_batches >= max_batches)) { - return false; + struct packet_batch *batches, size_t *n_batches) +{ + struct packet_batch *batch = flow->batch; + + if (OVS_UNLIKELY(!batch)) { + batch = &batches[(*n_batches)++]; + packet_batch_init(batch, flow); } - batch = &batches[(*n_batches)++]; - packet_batch_init(batch, flow); packet_batch_update(batch, pkt, mf); - return true; -} - -static inline void -dpif_packet_swap(struct dpif_packet **a, struct dpif_packet **b) -{ - struct dpif_packet *tmp = *a; - *a = *b; - *b = tmp; } /* Try to process all ('cnt') the 'packets' using only the exact match cache - * 'flow_cache'. If a flow is not found for a packet 'packets[i]', or if there - * is no matching batch for a packet's flow, the miniflow is copied into 'keys' - * and the packet pointer is moved at the beginning of the 'packets' array. + * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the + * miniflow is copied into 'keys' and the packet pointer is moved at the + * beginning of the 'packets' array. * * The function returns the number of packets that needs to be processed in the * 'packets' array (they have been moved to the beginning of the vector). + * + * If 'md_is_valid' is false, the metadata in 'packets' is not valid and must be + * initialized by this function using 'port_no'. */ static inline size_t -emc_processing(struct dp_netdev_pmd_thread *pmd, struct dpif_packet **packets, - size_t cnt, struct netdev_flow_key *keys) +emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet **packets, + size_t cnt, struct netdev_flow_key *keys, + struct packet_batch batches[], size_t *n_batches, + bool md_is_valid, odp_port_t port_no) { - struct netdev_flow_key key; - struct packet_batch batches[4]; struct emc_cache *flow_cache = &pmd->flow_cache; - size_t n_batches, i; - size_t notfound_cnt = 0; + struct netdev_flow_key *key = &keys[0]; + size_t i, n_missed = 0, n_dropped = 0; - n_batches = 0; - miniflow_initialize(&key.mf, key.buf); for (i = 0; i < cnt; i++) { struct dp_netdev_flow *flow; + struct dp_packet *packet = packets[i]; - if (OVS_UNLIKELY(ofpbuf_size(&packets[i]->ofpbuf) < ETH_HEADER_LEN)) { - dpif_packet_delete(packets[i]); + if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) { + dp_packet_delete(packet); + n_dropped++; continue; } - miniflow_extract(&packets[i]->ofpbuf, &packets[i]->md, &key.mf); - key.len = 0; /* Not computed yet. */ - key.hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.mf); - - flow = emc_lookup(flow_cache, &key); - if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i], flow, &key.mf, - batches, &n_batches, - ARRAY_SIZE(batches)))) { - if (i != notfound_cnt) { - dpif_packet_swap(&packets[i], &packets[notfound_cnt]); - } + if (i != cnt - 1) { + /* Prefetch next packet data and metadata. */ + OVS_PREFETCH(dp_packet_data(packets[i+1])); + pkt_metadata_prefetch_init(&packets[i+1]->md); + } - keys[notfound_cnt++] = key; + if (!md_is_valid) { + pkt_metadata_init(&packet->md, port_no); + } + miniflow_extract(packet, &key->mf); + key->len = 0; /* Not computed yet. */ + key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf); + + flow = emc_lookup(flow_cache, key); + if (OVS_LIKELY(flow)) { + dp_netdev_queue_batches(packet, flow, &key->mf, batches, + n_batches); + } else { + /* Exact match cache missed. 
Group missed packets together at + * the beginning of the 'packets' array. */ + packets[n_missed] = packet; + /* 'key[n_missed]' contains the key of the current packet and it + * must be returned to the caller. The next key should be extracted + * to 'keys[n_missed + 1]'. */ + key = &keys[++n_missed]; } } - for (i = 0; i < n_batches; i++) { - packet_batch_execute(&batches[i], pmd); - } + dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, cnt - n_dropped - n_missed); - return notfound_cnt; + return n_missed; } static inline void fast_path_processing(struct dp_netdev_pmd_thread *pmd, - struct dpif_packet **packets, size_t cnt, - struct netdev_flow_key *keys) + struct dp_packet **packets, size_t cnt, + struct netdev_flow_key *keys, + struct packet_batch batches[], size_t *n_batches) { #if !defined(__CHECKER__) && !defined(_WIN32) const size_t PKT_ARRAY_SIZE = cnt; #else /* Sparse or MSVC doesn't like variable length array. */ - enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH }; + enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; #endif - struct packet_batch batches[PKT_ARRAY_SIZE]; struct dpcls_rule *rules[PKT_ARRAY_SIZE]; struct dp_netdev *dp = pmd->dp; struct emc_cache *flow_cache = &pmd->flow_cache; - size_t n_batches, i; + int miss_cnt = 0, lost_cnt = 0; bool any_miss; + size_t i; for (i = 0; i < cnt; i++) { /* Key length is needed in all the cases, hash computed on demand. */ - keys[i].len = netdev_flow_key_size(count_1bits(keys[i].mf.map)); + keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf)); } - any_miss = !dpcls_lookup(&dp->cls, keys, rules, cnt); + any_miss = !dpcls_lookup(&pmd->cls, keys, rules, cnt); if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) { uint64_t actions_stub[512 / 8], slow_stub[512 / 8]; struct ofpbuf actions, put_actions; @@ -2858,36 +3519,47 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, /* It's possible that an earlier slow path execution installed * a rule covering this flow. In this case, it's a lot cheaper * to catch it here than execute a miss. */ - netdev_flow = dp_netdev_lookup_flow(dp, &keys[i]); + netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]); if (netdev_flow) { rules[i] = &netdev_flow->cr; continue; } + miss_cnt++; + + match.tun_md.valid = false; miniflow_expand(&keys[i].mf, &match.flow); ofpbuf_clear(&actions); ofpbuf_clear(&put_actions); dpif_flow_hash(dp->dpif, &match.flow, sizeof match.flow, &ufid); - error = dp_netdev_upcall(dp, packets[i], &match.flow, &match.wc, + error = dp_netdev_upcall(pmd, packets[i], &match.flow, &match.wc, &ufid, DPIF_UC_MISS, NULL, &actions, &put_actions); if (OVS_UNLIKELY(error && error != ENOSPC)) { + dp_packet_delete(packets[i]); + lost_cnt++; continue; } + /* The Netlink encoding of datapath flow keys cannot express + * wildcarding the presence of a VLAN tag. Instead, a missing VLAN + * tag is interpreted as exact match on the fact that there is no + * VLAN. Unless we refactor a lot of code that translates between + * Netlink and struct flow representations, we have to do the same + * here. */ + if (!match.wc.masks.vlan_tci) { + match.wc.masks.vlan_tci = htons(0xffff); + } + /* We can't allow the packet batching in the next loop to execute * the actions. Otherwise, if there are any slow path actions, * we'll send the packet up twice. */ dp_netdev_execute_actions(pmd, &packets[i], 1, true, - ofpbuf_data(&actions), - ofpbuf_size(&actions)); - - add_actions = ofpbuf_size(&put_actions) - ? 
&put_actions - : &actions; + actions.data, actions.size); + add_actions = put_actions.size ? &put_actions : &actions; if (OVS_LIKELY(error != ENOSPC)) { /* XXX: There's a race window where a flow covering this packet * could have already been installed since we last did the flow @@ -2895,14 +3567,14 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, * mutex lock outside the loop, but that's an awful long time * to be locking everyone out of making flow installs. If we * move to a per-core classifier, it would be reasonable. */ - ovs_mutex_lock(&dp->flow_mutex); - netdev_flow = dp_netdev_lookup_flow(dp, &keys[i]); + ovs_mutex_lock(&pmd->flow_mutex); + netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]); if (OVS_LIKELY(!netdev_flow)) { - netdev_flow = dp_netdev_flow_add(dp, &match, - ofpbuf_data(add_actions), - ofpbuf_size(add_actions)); + netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid, + add_actions->data, + add_actions->size); } - ovs_mutex_unlock(&dp->flow_mutex); + ovs_mutex_unlock(&pmd->flow_mutex); emc_insert(flow_cache, &keys[i], netdev_flow); } @@ -2911,22 +3583,19 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, ofpbuf_uninit(&actions); ofpbuf_uninit(&put_actions); fat_rwlock_unlock(&dp->upcall_rwlock); + dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt); } else if (OVS_UNLIKELY(any_miss)) { - int dropped_cnt = 0; - for (i = 0; i < cnt; i++) { if (OVS_UNLIKELY(!rules[i])) { - dpif_packet_delete(packets[i]); - dropped_cnt++; + dp_packet_delete(packets[i]); + lost_cnt++; + miss_cnt++; } } - - dp_netdev_count_packet(dp, DP_STAT_LOST, dropped_cnt); } - n_batches = 0; for (i = 0; i < cnt; i++) { - struct dpif_packet *packet = packets[i]; + struct dp_packet *packet = packets[i]; struct dp_netdev_flow *flow; if (OVS_UNLIKELY(!rules[i])) { @@ -2936,38 +3605,80 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, flow = dp_netdev_flow_cast(rules[i]); emc_insert(flow_cache, &keys[i], flow); - dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, - &n_batches, ARRAY_SIZE(batches)); + dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches); } - for (i = 0; i < n_batches; i++) { - packet_batch_execute(&batches[i], pmd); - } + dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt); + dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt); + dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt); } +/* Packets enter the datapath from a port (or from recirculation) here. + * + * For performance reasons a caller may choose not to initialize the metadata + * in 'packets': in this case 'mdinit' is false and this function needs to + * initialize it using 'port_no'. If the metadata in 'packets' is already + * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */ static void -dp_netdev_input(struct dp_netdev_pmd_thread *pmd, - struct dpif_packet **packets, int cnt) +dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, + struct dp_packet **packets, int cnt, + bool md_is_valid, odp_port_t port_no) { #if !defined(__CHECKER__) && !defined(_WIN32) const size_t PKT_ARRAY_SIZE = cnt; #else /* Sparse or MSVC doesn't like variable length array. 
*/ - enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH }; + enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; #endif struct netdev_flow_key keys[PKT_ARRAY_SIZE]; - size_t newcnt; + struct packet_batch batches[PKT_ARRAY_SIZE]; + long long now = time_msec(); + size_t newcnt, n_batches, i; - newcnt = emc_processing(pmd, packets, cnt, keys); + n_batches = 0; + newcnt = emc_processing(pmd, packets, cnt, keys, batches, &n_batches, + md_is_valid, port_no); if (OVS_UNLIKELY(newcnt)) { - fast_path_processing(pmd, packets, newcnt, keys); + fast_path_processing(pmd, packets, newcnt, keys, batches, &n_batches); + } + + for (i = 0; i < n_batches; i++) { + batches[i].flow->batch = NULL; + } + + for (i = 0; i < n_batches; i++) { + packet_batch_execute(&batches[i], pmd, now); } } +static void +dp_netdev_input(struct dp_netdev_pmd_thread *pmd, + struct dp_packet **packets, int cnt, + odp_port_t port_no) +{ + dp_netdev_input__(pmd, packets, cnt, false, port_no); +} + +static void +dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, + struct dp_packet **packets, int cnt) +{ + dp_netdev_input__(pmd, packets, cnt, true, 0); +} + struct dp_netdev_execute_aux { struct dp_netdev_pmd_thread *pmd; }; +static void +dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, + void *aux) +{ + struct dp_netdev *dp = get_dp_netdev(dpif); + dp->dp_purge_aux = aux; + dp->dp_purge_cb = cb; +} + static void dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux) @@ -2978,13 +3689,13 @@ dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, } static void -dp_netdev_drop_packets(struct dpif_packet ** packets, int cnt, bool may_steal) +dp_netdev_drop_packets(struct dp_packet **packets, int cnt, bool may_steal) { if (may_steal) { int i; for (i = 0; i < cnt; i++) { - dpif_packet_delete(packets[i]); + dp_packet_delete(packets[i]); } } } @@ -2992,7 +3703,7 @@ dp_netdev_drop_packets(struct dpif_packet ** packets, int cnt, bool may_steal) static int push_tnl_action(const struct dp_netdev *dp, const struct nlattr *attr, - struct dpif_packet **packets, int cnt) + struct dp_packet **packets, int cnt) { struct dp_netdev_port *tun_port; const struct ovs_action_push_tnl *data; @@ -3009,25 +3720,25 @@ push_tnl_action(const struct dp_netdev *dp, } static void -dp_netdev_clone_pkt_batch(struct dpif_packet **tnl_pkt, - struct dpif_packet **packets, int cnt) +dp_netdev_clone_pkt_batch(struct dp_packet **dst_pkts, + struct dp_packet **src_pkts, int cnt) { int i; for (i = 0; i < cnt; i++) { - tnl_pkt[i] = dpif_packet_clone(packets[i]); + dst_pkts[i] = dp_packet_clone(src_pkts[i]); } } static void -dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, +dp_execute_cb(void *aux_, struct dp_packet **packets, int cnt, const struct nlattr *a, bool may_steal) OVS_NO_THREAD_SAFETY_ANALYSIS { struct dp_netdev_execute_aux *aux = aux_; uint32_t *depth = recirc_depth_get(); - struct dp_netdev_pmd_thread *pmd= aux->pmd; - struct dp_netdev *dp= pmd->dp; + struct dp_netdev_pmd_thread *pmd = aux->pmd; + struct dp_netdev *dp = pmd->dp; int type = nl_attr_type(a); struct dp_netdev_port *p; int i; @@ -3036,14 +3747,18 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, case OVS_ACTION_ATTR_OUTPUT: p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a))); if (OVS_LIKELY(p)) { - netdev_send(p->netdev, pmd->core_id, packets, cnt, may_steal); + int tx_qid; + + atomic_read_relaxed(&pmd->tx_qid, &tx_qid); + + netdev_send(p->netdev, tx_qid, packets, cnt, may_steal); return; } break; case 
OVS_ACTION_ATTR_TUNNEL_PUSH: if (*depth < MAX_RECIRC_DEPTH) { - struct dpif_packet *tnl_pkt[NETDEV_MAX_RX_BATCH]; + struct dp_packet *tnl_pkt[NETDEV_MAX_BURST]; int err; if (!may_steal) { @@ -3054,7 +3769,7 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, err = push_tnl_action(dp, a, packets, cnt); if (!err) { (*depth)++; - dp_netdev_input(pmd, packets, cnt); + dp_netdev_recirculate(pmd, packets, cnt); (*depth)--; } else { dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal); @@ -3069,7 +3784,7 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, p = dp_netdev_lookup_port(dp, portno); if (p) { - struct dpif_packet *tnl_pkt[NETDEV_MAX_RX_BATCH]; + struct dp_packet *tnl_pkt[NETDEV_MAX_BURST]; int err; if (!may_steal) { @@ -3085,7 +3800,7 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, } (*depth)++; - dp_netdev_input(pmd, packets, cnt); + dp_netdev_recirculate(pmd, packets, cnt); (*depth)--; } else { dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal); @@ -3110,17 +3825,16 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, ofpbuf_clear(&actions); - flow_extract(&packets[i]->ofpbuf, &packets[i]->md, &flow); + flow_extract(packets[i], &flow); dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid); - error = dp_netdev_upcall(dp, packets[i], &flow, NULL, &ufid, + error = dp_netdev_upcall(pmd, packets[i], &flow, NULL, &ufid, DPIF_UC_ACTION, userdata,&actions, NULL); if (!error || error == ENOSPC) { dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal, - ofpbuf_data(&actions), - ofpbuf_size(&actions)); + actions.data, actions.size); } else if (may_steal) { - dpif_packet_delete(packets[i]); + dp_packet_delete(packets[i]); } } ofpbuf_uninit(&actions); @@ -3132,21 +3846,19 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, case OVS_ACTION_ATTR_RECIRC: if (*depth < MAX_RECIRC_DEPTH) { + struct dp_packet *recirc_pkts[NETDEV_MAX_BURST]; - (*depth)++; - for (i = 0; i < cnt; i++) { - struct dpif_packet *recirc_pkt; - - recirc_pkt = (may_steal) ? packets[i] - : dpif_packet_clone(packets[i]); - - recirc_pkt->md.recirc_id = nl_attr_get_u32(a); - - /* Hash is private to each packet */ - recirc_pkt->md.dp_hash = dpif_packet_get_dp_hash(packets[i]); + if (!may_steal) { + dp_netdev_clone_pkt_batch(recirc_pkts, packets, cnt); + packets = recirc_pkts; + } - dp_netdev_input(pmd, &recirc_pkt, 1); + for (i = 0; i < cnt; i++) { + packets[i]->md.recirc_id = nl_attr_get_u32(a); } + + (*depth)++; + dp_netdev_recirculate(pmd, packets, cnt); (*depth)--; return; @@ -3155,6 +3867,13 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, VLOG_WARN("Packet dropped. Max recirculation depth exceeded."); break; + case OVS_ACTION_ATTR_CT: + /* If a flow with this action is slow-pathed, datapath assistance is + * required to implement it. However, we don't support this action + * in the userspace datapath. 
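The OVS_ACTION_ATTR_RECIRC case above tags each packet with the new recirc_id and re-enters the input path, guarded by a per-thread depth counter so recirculation cannot recurse without bound. A self-contained sketch of that shape, where process_packets() and MAX_DEPTH are illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>

#define MAX_DEPTH 5

static _Thread_local unsigned depth;    /* Per-thread recursion depth. */

struct pkt { uint32_t recirc_id; };

/* Stand-in for the input path, which would classify the packets and may
 * call recirculate() again. */
static void
process_packets(struct pkt *pkts, int cnt)
{
    (void) pkts;
    (void) cnt;
}

static void
recirculate(struct pkt *pkts, int cnt, uint32_t recirc_id)
{
    if (depth < MAX_DEPTH) {
        for (int i = 0; i < cnt; i++) {
            pkts[i].recirc_id = recirc_id;
        }
        depth++;
        process_packets(pkts, cnt);
        depth--;
    } else {
        fprintf(stderr, "drop: max recirculation depth exceeded\n");
    }
}

int
main(void)
{
    struct pkt pkts[2] = { { 0 }, { 0 } };

    recirculate(pkts, 2, 42);
    return 0;
}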
*/ + VLOG_WARN("Cannot execute conntrack action in userspace."); + break; + case OVS_ACTION_ATTR_PUSH_VLAN: case OVS_ACTION_ATTR_POP_VLAN: case OVS_ACTION_ATTR_PUSH_MPLS: @@ -3173,7 +3892,7 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt, static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, - struct dpif_packet **packets, int cnt, + struct dp_packet **packets, int cnt, bool may_steal, const struct nlattr *actions, size_t actions_len) { @@ -3185,6 +3904,7 @@ dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, const struct dpif_class dpif_netdev_class = { "netdev", + dpif_netdev_init, dpif_netdev_enumerate, dpif_netdev_port_open_type, dpif_netdev_open, @@ -3217,10 +3937,15 @@ const struct dpif_class dpif_netdev_class = { NULL, /* recv */ NULL, /* recv_wait */ NULL, /* recv_purge */ + dpif_netdev_register_dp_purge_cb, dpif_netdev_register_upcall_cb, dpif_netdev_enable_upcall, dpif_netdev_disable_upcall, dpif_netdev_get_datapath_version, + NULL, /* ct_dump_start */ + NULL, /* ct_dump_next */ + NULL, /* ct_dump_done */ + NULL, /* ct_flush */ }; static void @@ -3317,21 +4042,36 @@ dpif_dummy_register__(const char *type) dp_register_provider(class); } +static void +dpif_dummy_override(const char *type) +{ + int error; + + /* + * Ignore EAFNOSUPPORT to allow --enable-dummy=system with + * a userland-only build. It's useful for testsuite. + */ + error = dp_unregister_provider(type); + if (error == 0 || error == EAFNOSUPPORT) { + dpif_dummy_register__(type); + } +} + void -dpif_dummy_register(bool override) +dpif_dummy_register(enum dummy_level level) { - if (override) { + if (level == DUMMY_OVERRIDE_ALL) { struct sset types; const char *type; sset_init(&types); dp_enumerate_types(&types); SSET_FOR_EACH (type, &types) { - if (!dp_unregister_provider(type)) { - dpif_dummy_register__(type); - } + dpif_dummy_override(type); } sset_destroy(&types); + } else if (level == DUMMY_OVERRIDE_SYSTEM) { + dpif_dummy_override("system"); } dpif_dummy_register__("dummy"); @@ -3385,6 +4125,7 @@ dpcls_destroy(struct dpcls *cls) struct dpcls_subtable *subtable; CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) { + ovs_assert(cmap_count(&subtable->rules) == 0); dpcls_destroy_subtable(cls, subtable); } cmap_destroy(&cls->subtables_map); @@ -3451,20 +4192,18 @@ dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule) } } -/* Returns true if 'target' satisifies 'key' in 'mask', that is, if each 1-bit - * in 'mask' the values in 'key' and 'target' are the same. - * - * Note: 'key' and 'mask' have the same mask, and 'key' is already masked. */ +/* Returns true if 'target' satisfies 'key' in 'mask', that is, if each 1-bit + * in 'mask' the values in 'key' and 'target' are the same. 
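Because each rule's key is stored pre-masked, the check below reduces to (value & mask) == key, word by word over the words the subtable cares about. A worked, self-contained example of that comparison (the helper is illustrative):

#include <assert.h>
#include <stdint.h>

static int
masked_match(const uint64_t *target, const uint64_t *key,
             const uint64_t *mask, int n_words)
{
    for (int i = 0; i < n_words; i++) {
        if ((target[i] & mask[i]) != key[i]) {
            return 0;
        }
    }
    return 1;
}

int
main(void)
{
    uint64_t mask[] = { 0xffff000000000000ULL };
    uint64_t key[]  = { 0x1234000000000000ULL };        /* Pre-masked. */
    uint64_t hit[]  = { 0x1234abcdabcdabcdULL };
    uint64_t miss[] = { 0x4321abcdabcdabcdULL };

    assert(masked_match(hit, key, mask, 1));
    assert(!masked_match(miss, key, mask, 1));
    return 0;
}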
*/ static inline bool dpcls_rule_matches_key(const struct dpcls_rule *rule, const struct netdev_flow_key *target) { - const uint32_t *keyp = rule->flow.mf.inline_values; - const uint32_t *maskp = rule->mask->mf.inline_values; - uint32_t target_u32; + const uint64_t *keyp = miniflow_get_values(&rule->flow.mf); + const uint64_t *maskp = miniflow_get_values(&rule->mask->mf); + uint64_t value; - NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u32, target, rule->flow.mf.map) { - if (OVS_UNLIKELY((target_u32 & *maskp++) != *keyp++)) { + NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) { + if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) { return false; } } @@ -3492,7 +4231,7 @@ dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[], #if !defined(__CHECKER__) && !defined(_WIN32) const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS); #else - enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_RX_BATCH, MAP_BITS) }; + enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_BURST, MAP_BITS) }; #endif map_type maps[N_MAPS]; struct dpcls_subtable *subtable; @@ -3522,14 +4261,14 @@ dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[], } /* Compute hashes for the remaining keys. */ - ULONG_FOR_EACH_1(i, map) { + ULLONG_FOR_EACH_1(i, map) { hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i], &subtable->mask); } /* Lookup. */ map = cmap_find_batch(&subtable->rules, map, hashes, nodes); /* Check results. */ - ULONG_FOR_EACH_1(i, map) { + ULLONG_FOR_EACH_1(i, map) { struct dpcls_rule *rule; CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) { @@ -3538,7 +4277,7 @@ dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[], goto next; } } - ULONG_SET0(map, i); /* Did not match. */ + ULLONG_SET0(map, i); /* Did not match. */ next: ; /* Keep Sparse happy. */ }
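dpcls_lookup() above drives its batches with a 64-bit bitmap: ULLONG_FOR_EACH_1 visits each set bit (one per still-unmatched packet) and ULLONG_SET0 clears a bit once its packet is resolved, so each subtable only touches the packets that still need a match. A standalone sketch of that iteration pattern; the macro below has an equivalent shape but is not the real definition from OVS's util.h:

#include <stdint.h>
#include <stdio.h>

#define ULLONG_FOR_EACH_1(IDX, MAP)                        \
    for (uint64_t map__ = (MAP);                           \
         map__ && (((IDX) = __builtin_ctzll(map__)), 1);   \
         map__ &= map__ - 1)            /* Clear lowest set bit. */

int
main(void)
{
    uint64_t pending = (1ULL << 0) | (1ULL << 3) | (1ULL << 7);
    int i;

    ULLONG_FOR_EACH_1 (i, pending) {
        printf("lookup packet %d\n", i);        /* Visits 0, 3, 7. */
    }
    return 0;
}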