X-Git-Url: http://git.cascardo.eti.br/?a=blobdiff_plain;f=lib%2Fdpif-netdev.c;h=d1465462fd847e8165726ce0c9d8bc1958c5709b;hb=35303d715b1f0db46e6a27146815061a60385dc6;hp=f184a35a1667e4ccbed6a36c0cd500fb61781b3a;hpb=53e1d6f1ef360cb1b2daa70f4e65f9f5c02db2f9;p=cascardo%2Fovs.git diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index f184a35a1..d1465462f 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -33,6 +33,7 @@ #include "cmap.h" #include "csum.h" +#include "dp-packet.h" #include "dpif.h" #include "dpif-provider.h" #include "dummy.h" @@ -54,7 +55,6 @@ #include "ofpbuf.h" #include "ovs-numa.h" #include "ovs-rcu.h" -#include "packet-dpif.h" #include "packets.h" #include "poll-loop.h" #include "pvector.h" @@ -66,7 +66,7 @@ #include "tnl-arp-cache.h" #include "unixctl.h" #include "util.h" -#include "vlog.h" +#include "openvswitch/vlog.h" VLOG_DEFINE_THIS_MODULE(dpif_netdev); @@ -93,7 +93,7 @@ struct netdev_flow_key { uint32_t hash; /* Hash function differs for different users. */ uint32_t len; /* Length of the following miniflow (incl. map). */ struct miniflow mf; - uint32_t buf[FLOW_MAX_PACKET_U32S - MINI_N_INLINE]; + uint64_t buf[FLOW_MAX_PACKET_U64S - MINI_N_INLINE]; }; /* Exact match cache for frequently used flows @@ -117,7 +117,7 @@ struct netdev_flow_key { * If dp_netdev_input is not called from a pmd thread, a mutex is used. */ -#define EM_FLOW_HASH_SHIFT 10 +#define EM_FLOW_HASH_SHIFT 13 #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT) #define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1) #define EM_FLOW_HASH_SEGS 2 @@ -177,7 +177,6 @@ static bool dpcls_lookup(const struct dpcls *cls, * * dp_netdev_mutex (global) * port_mutex - * flow_mutex */ struct dp_netdev { const struct dpif_class *const class; @@ -186,20 +185,6 @@ struct dp_netdev { struct ovs_refcount ref_cnt; atomic_flag destroyed; - /* Flows. - * - * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding - * changes to 'cls' must be made while still holding the 'flow_mutex'. - */ - struct ovs_mutex flow_mutex; - struct dpcls cls; - struct cmap flow_table OVS_GUARDED; /* Flow table. */ - - /* Statistics. - * - * ovsthread_stats is internally synchronized. */ - struct ovsthread_stats stats; /* Contains 'struct dp_netdev_stats *'. */ - /* Ports. * * Protected by RCU. Take the mutex to add or remove ports. */ @@ -235,41 +220,46 @@ static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t); enum dp_stat_type { - DP_STAT_HIT, /* Packets that matched in the flow table. */ + DP_STAT_EXACT_HIT, /* Packets that had an exact match (emc). */ + DP_STAT_MASKED_HIT, /* Packets that matched in the flow table. */ DP_STAT_MISS, /* Packets that did not match. */ DP_STAT_LOST, /* Packets not passed up to the client. */ DP_N_STATS }; -/* Contained by struct dp_netdev's 'stats' member. */ -struct dp_netdev_stats { - struct ovs_mutex mutex; /* Protects 'n'. */ - - /* Indexed by DP_STAT_*, protected by 'mutex'. */ - unsigned long long int n[DP_N_STATS] OVS_GUARDED; +enum pmd_cycles_counter_type { + PMD_CYCLES_POLLING, /* Cycles spent polling NICs. */ + PMD_CYCLES_PROCESSING, /* Cycles spent processing packets */ + PMD_N_CYCLES }; - /* A port in a netdev-based datapath. */ struct dp_netdev_port { - struct cmap_node node; /* Node in dp_netdev's 'ports'. */ odp_port_t port_no; struct netdev *netdev; + struct cmap_node node; /* Node in dp_netdev's 'ports'. */ struct netdev_saved_flags *sf; struct netdev_rxq **rxq; struct ovs_refcount ref_cnt; char *type; /* Port type as requested by user. 
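The exact match cache above grows from 1024 to 8192 entries (EM_FLOW_HASH_SHIFT 10 -> 13), and each lookup still probes EM_FLOW_HASH_SEGS = 2 candidate slots taken from different slices of the packet hash. A standalone sketch of how those candidate slots fall out of the constants (illustrative only, not the datapath's own probe macro):

    #include <inttypes.h>
    #include <stdio.h>

    /* Constants as defined in the patch above. */
    #define EM_FLOW_HASH_SHIFT   13
    #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
    #define EM_FLOW_HASH_MASK    (EM_FLOW_HASH_ENTRIES - 1)
    #define EM_FLOW_HASH_SEGS    2

    /* Illustrative only: each candidate slot comes from a different
     * EM_FLOW_HASH_SHIFT-bit slice of the packet's hash. */
    static void
    emc_candidate_slots(uint32_t hash, uint32_t slots[EM_FLOW_HASH_SEGS])
    {
        int seg;

        for (seg = 0; seg < EM_FLOW_HASH_SEGS; seg++) {
            slots[seg] = hash & EM_FLOW_HASH_MASK;
            hash >>= EM_FLOW_HASH_SHIFT;
        }
    }

    int
    main(void)
    {
        uint32_t slots[EM_FLOW_HASH_SEGS];

        emc_candidate_slots(0x9e3779b9, slots);
        printf("candidate slots: %"PRIu32", %"PRIu32"\n", slots[0], slots[1]);
        return 0;
    }
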
*/ }; - -/* A flow in dp_netdev's 'flow_table'. +/* Contained by struct dp_netdev_flow's 'stats' member. */ +struct dp_netdev_flow_stats { + atomic_llong used; /* Last used time, in monotonic msecs. */ + atomic_ullong packet_count; /* Number of packets matched. */ + atomic_ullong byte_count; /* Number of bytes matched. */ + atomic_uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */ +}; + +/* A flow in 'dp_netdev_pmd_thread's 'flow_table'. * * * Thread-safety * ============= * * Except near the beginning or ending of its lifespan, rule 'rule' belongs to - * its dp_netdev's classifier. The text below calls this classifier 'cls'. + * its pmd thread's classifier. The text below calls this classifier 'cls'. * * Motivation * ---------- @@ -300,11 +290,13 @@ struct dp_netdev_port { * requires synchronization, as noted in more detail below. */ struct dp_netdev_flow { - bool dead; - - /* Hash table index by unmasked flow. */ - const struct cmap_node node; /* In owning dp_netdev's 'flow_table'. */ const struct flow flow; /* Unmasked flow that created this entry. */ + /* Hash table index by unmasked flow. */ + const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */ + /* 'flow_table'. */ + const ovs_u128 ufid; /* Unique flow identifier. */ + const unsigned pmd_id; /* The 'core_id' of pmd thread owning this */ + /* flow. */ /* Number of references. * The classifier owns one reference. @@ -312,14 +304,20 @@ struct dp_netdev_flow { * reference. */ struct ovs_refcount ref_cnt; - /* Statistics. - * - * Reading or writing these members requires 'mutex'. */ - struct ovsthread_stats stats; /* Contains "struct dp_netdev_flow_stats". */ + bool dead; + + /* Statistics. */ + struct dp_netdev_flow_stats stats; /* Actions. */ OVSRCU_TYPE(struct dp_netdev_actions *) actions; + /* While processing a group of input packets, the datapath uses the next + * member to store a pointer to the output batch for the flow. It is + * reset after the batch has been sent out (See dp_netdev_queue_batches(), + * packet_batch_init() and packet_batch_execute()). */ + struct packet_batch *batch; + /* Packet classification. */ struct dpcls_rule cr; /* In owning dp_netdev's 'cls'. */ /* 'cr' must be the last member. */ @@ -327,16 +325,8 @@ struct dp_netdev_flow { static void dp_netdev_flow_unref(struct dp_netdev_flow *); static bool dp_netdev_flow_ref(struct dp_netdev_flow *); - -/* Contained by struct dp_netdev_flow's 'stats' member. */ -struct dp_netdev_flow_stats { - struct ovs_mutex mutex; /* Guards all the other members. */ - - long long int used OVS_GUARDED; /* Last used time, in monotonic msecs. */ - long long int packet_count OVS_GUARDED; /* Number of packets matched. */ - long long int byte_count OVS_GUARDED; /* Number of bytes matched. */ - uint16_t tcp_flags OVS_GUARDED; /* Bitwise-OR of seen tcp_flags values. */ -}; +static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t, + struct flow *); /* A set of datapath actions within a "struct dp_netdev_flow". * @@ -348,8 +338,8 @@ struct dp_netdev_flow_stats { struct dp_netdev_actions { /* These members are immutable: they do not change during the struct's * lifetime. */ - struct nlattr *actions; /* Sequence of OVS_ACTION_ATTR_* attributes. */ unsigned int size; /* Size of 'actions', in bytes. */ + struct nlattr actions[]; /* Sequence of OVS_ACTION_ATTR_* attributes. 
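The 'actions' member above becomes a flexible array at the end of the struct, so dp_netdev_actions_create() further down in this patch can place the header and the attribute bytes in one allocation instead of two. A generic standalone sketch of that layout (the names here are made up; this is not the OVS helper):

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical example of the single-allocation pattern: the payload is
     * copied straight after the header, so the whole object is freed with one
     * free() and read without chasing a second pointer. */
    struct blob {
        size_t size;            /* Size of 'data', in bytes. */
        unsigned char data[];   /* Flexible array member, allocated inline. */
    };

    static struct blob *
    blob_create(const void *src, size_t size)
    {
        struct blob *b = malloc(sizeof *b + size);

        if (b) {
            b->size = size;
            memcpy(b->data, src, size);
        }
        return b;
    }
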
*/ }; struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *, @@ -358,20 +348,37 @@ struct dp_netdev_actions *dp_netdev_flow_get_actions( const struct dp_netdev_flow *); static void dp_netdev_actions_free(struct dp_netdev_actions *); +/* Contained by struct dp_netdev_pmd_thread's 'stats' member. */ +struct dp_netdev_pmd_stats { + /* Indexed by DP_STAT_*. */ + atomic_ullong n[DP_N_STATS]; +}; + +/* Contained by struct dp_netdev_pmd_thread's 'cycle' member. */ +struct dp_netdev_pmd_cycles { + /* Indexed by PMD_CYCLES_*. */ + atomic_ullong n[PMD_N_CYCLES]; +}; + /* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate * the performance overhead of interrupt processing. Therefore netdev can * not implement rx-wait for these devices. dpif-netdev needs to poll * these device to check for recv buffer. pmd-thread does polling for - * devices assigned to itself thread. + * devices assigned to itself. * * DPDK used PMD for accessing NIC. * * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for * I/O of all non-pmd threads. There will be no actual thread created * for the instance. - **/ + * + * Each struct has its own flow table and classifier. Packets received + * from managed ports are looked up in the corresponding pmd thread's + * flow table, and are executed with the found actions. + * */ struct dp_netdev_pmd_thread { struct dp_netdev *dp; + struct ovs_refcount ref_cnt; /* Every reference must be refcount'ed. */ struct cmap_node node; /* In 'dp->poll_threads'. */ pthread_cond_t cond; /* For synchronizing pmd thread reload. */ @@ -382,13 +389,41 @@ struct dp_netdev_pmd_thread { * need to be protected (e.g. by 'dp_netdev_mutex'). All other * instances will only be accessed by its own pmd thread. */ struct emc_cache flow_cache; + + /* Classifier and Flow-Table. + * + * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding + * changes to 'cls' must be made while still holding the 'flow_mutex'. + */ + struct ovs_mutex flow_mutex; + struct dpcls cls; + struct cmap flow_table OVS_GUARDED; /* Flow table. */ + + /* Statistics. */ + struct dp_netdev_pmd_stats stats; + + /* Cycles counters */ + struct dp_netdev_pmd_cycles cycles; + + /* Used to count cicles. See 'cycles_counter_end()' */ + unsigned long long last_cycles; + struct latch exit_latch; /* For terminating the pmd thread. */ atomic_uint change_seq; /* For reloading pmd ports. */ pthread_t thread; int index; /* Idx of this pmd thread among pmd*/ /* threads on same numa node. */ - int core_id; /* CPU core id of this pmd thread. */ + unsigned core_id; /* CPU core id of this pmd thread. */ int numa_id; /* numa node id of this pmd thread. */ + int tx_qid; /* Queue id used by this pmd thread to + * send packets on all netdevs */ + + /* Only a pmd thread can write on its own 'cycles' and 'stats'. 
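Because pmd threads can now be created and destroyed at runtime, every lookup of a 'struct dp_netdev_pmd_thread' has to go through the new reference counting. A minimal usage sketch, assuming the helpers this patch declares below (dp_netdev_get_pmd() returns a referenced thread or NULL, and each successful lookup must be paired with dp_netdev_pmd_unref()); it is not code from the patch itself:

    static int
    example_with_pmd(struct dp_netdev *dp, unsigned core_id)
    {
        struct dp_netdev_pmd_thread *pmd = dp_netdev_get_pmd(dp, core_id);

        if (!pmd) {
            return EINVAL;      /* No pmd thread on that core. */
        }
        /* 'pmd' cannot be destroyed while we hold the reference. */
        dp_netdev_pmd_unref(pmd);
        return 0;
    }
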
+ * The main thread keeps 'stats_zero' and 'cycles_zero' as base + * values and subtracts them from 'stats' and 'cycles' before + * reporting to the user */ + unsigned long long stats_zero[DP_N_STATS]; + uint64_t cycles_zero[PMD_N_CYCLES]; }; #define PMD_INITIAL_SEQ 1 @@ -406,7 +441,6 @@ static int get_port_by_name(struct dp_netdev *dp, const char *devname, struct dp_netdev_port **portp); static void dp_netdev_free(struct dp_netdev *) OVS_REQUIRES(dp_netdev_mutex); -static void dp_netdev_flow_flush(struct dp_netdev *); static int do_add_port(struct dp_netdev *dp, const char *devname, const char *type, odp_port_t port_no) OVS_REQUIRES(dp->port_mutex); @@ -415,24 +449,31 @@ static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *) static int dpif_netdev_open(const struct dpif_class *, const char *name, bool create, struct dpif **); static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, - struct dpif_packet **, int c, + struct dp_packet **, int c, bool may_steal, const struct nlattr *actions, size_t actions_len); static void dp_netdev_input(struct dp_netdev_pmd_thread *, - struct dpif_packet **, int cnt); + struct dp_packet **, int cnt); static void dp_netdev_disable_upcall(struct dp_netdev *); void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, int index, - int core_id, int numa_id); + unsigned core_id, int numa_id); +static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_set_nonpmd(struct dp_netdev *dp); -static struct dp_netdev_pmd_thread *dp_netdev_get_nonpmd(struct dp_netdev *dp); +static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp, + unsigned core_id); +static struct dp_netdev_pmd_thread * +dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos); static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp); static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id); static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id); static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp); +static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd); +static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd); +static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd); static inline bool emc_entry_alive(struct emc_entry *ce); static void emc_clear_entry(struct emc_entry *ce); @@ -478,10 +519,17 @@ emc_cache_slow_sweep(struct emc_cache *flow_cache) flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK; } +/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. 
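The 'stats_zero'/'cycles_zero' baselines just described are what make the pmd-stats-clear command added later in this patch possible without writing to another thread's counters: the reported figure is the current counter minus the snapshot taken at the last clear, clamped at zero because the relaxed reads give no ordering guarantee between the two values. A one-line standalone illustration:

    #include <stdint.h>

    /* Displayed value = current counter - snapshot taken at the last clear,
     * never allowed to go negative. */
    static uint64_t
    displayed_value(uint64_t current, uint64_t zero_snapshot)
    {
        return current > zero_snapshot ? current - zero_snapshot : 0;
    }
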
*/ +bool +dpif_is_netdev(const struct dpif *dpif) +{ + return dpif->dpif_class->open == dpif_netdev_open; +} + static struct dpif_netdev * dpif_netdev_cast(const struct dpif *dpif) { - ovs_assert(dpif->dpif_class->open == dpif_netdev_open); + ovs_assert(dpif_is_netdev(dpif)); return CONTAINER_OF(dpif, struct dpif_netdev, dpif); } @@ -490,6 +538,182 @@ get_dp_netdev(const struct dpif *dpif) { return dpif_netdev_cast(dpif)->dp; } + +enum pmd_info_type { + PMD_INFO_SHOW_STATS, /* show how cpu cycles are spent */ + PMD_INFO_CLEAR_STATS /* set the cycles count to 0 */ +}; + +static void +pmd_info_show_stats(struct ds *reply, + struct dp_netdev_pmd_thread *pmd, + unsigned long long stats[DP_N_STATS], + uint64_t cycles[PMD_N_CYCLES]) +{ + unsigned long long total_packets = 0; + uint64_t total_cycles = 0; + int i; + + /* These loops subtracts reference values ('*_zero') from the counters. + * Since loads and stores are relaxed, it might be possible for a '*_zero' + * value to be more recent than the current value we're reading from the + * counter. This is not a big problem, since these numbers are not + * supposed to be too accurate, but we should at least make sure that + * the result is not negative. */ + for (i = 0; i < DP_N_STATS; i++) { + if (stats[i] > pmd->stats_zero[i]) { + stats[i] -= pmd->stats_zero[i]; + } else { + stats[i] = 0; + } + + if (i != DP_STAT_LOST) { + /* Lost packets are already included in DP_STAT_MISS */ + total_packets += stats[i]; + } + } + + for (i = 0; i < PMD_N_CYCLES; i++) { + if (cycles[i] > pmd->cycles_zero[i]) { + cycles[i] -= pmd->cycles_zero[i]; + } else { + cycles[i] = 0; + } + + total_cycles += cycles[i]; + } + + ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID) + ? "main thread" : "pmd thread"); + + if (pmd->numa_id != OVS_NUMA_UNSPEC) { + ds_put_format(reply, " numa_id %d", pmd->numa_id); + } + if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) { + ds_put_format(reply, " core_id %u", pmd->core_id); + } + ds_put_cstr(reply, ":\n"); + + ds_put_format(reply, + "\temc hits:%llu\n\tmegaflow hits:%llu\n" + "\tmiss:%llu\n\tlost:%llu\n", + stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT], + stats[DP_STAT_MISS], stats[DP_STAT_LOST]); + + if (total_cycles == 0) { + return; + } + + ds_put_format(reply, + "\tpolling cycles:%"PRIu64" (%.02f%%)\n" + "\tprocessing cycles:%"PRIu64" (%.02f%%)\n", + cycles[PMD_CYCLES_POLLING], + cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100, + cycles[PMD_CYCLES_PROCESSING], + cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100); + + if (total_packets == 0) { + return; + } + + ds_put_format(reply, + "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n", + total_cycles / (double)total_packets, + total_cycles, total_packets); + + ds_put_format(reply, + "\tavg processing cycles per packet: " + "%.02f (%"PRIu64"/%llu)\n", + cycles[PMD_CYCLES_PROCESSING] / (double)total_packets, + cycles[PMD_CYCLES_PROCESSING], total_packets); +} + +static void +pmd_info_clear_stats(struct ds *reply OVS_UNUSED, + struct dp_netdev_pmd_thread *pmd, + unsigned long long stats[DP_N_STATS], + uint64_t cycles[PMD_N_CYCLES]) +{ + int i; + + /* We cannot write 'stats' and 'cycles' (because they're written by other + * threads) and we shouldn't change 'stats' (because they're used to count + * datapath stats, which must not be cleared here). 
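pmd_info_show_stats() above prints the raw counters plus a few derived figures. With made-up numbers, the arithmetic looks like this (purely illustrative; none of these values come from the patch):

    #include <stdio.h>

    int
    main(void)
    {
        /* Hypothetical counters after subtracting the '*_zero' baselines. */
        unsigned long long hits = 900000, misses = 100000;
        unsigned long long polling = 6000000000ULL, processing = 2000000000ULL;
        unsigned long long packets = hits + misses;
        unsigned long long cycles = polling + processing;

        printf("polling:    %.02f%%\n", polling / (double) cycles * 100);
        printf("processing: %.02f%%\n", processing / (double) cycles * 100);
        printf("avg cycles per packet:            %.02f\n",
               cycles / (double) packets);
        printf("avg processing cycles per packet: %.02f\n",
               processing / (double) packets);
        return 0;
    }

The two unixctl commands registered below can be invoked at runtime with ovs-appctl, e.g. "ovs-appctl dpif-netdev/pmd-stats-show".
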
Instead, we save the + * current values and subtract them from the values to be displayed in the + * future */ + for (i = 0; i < DP_N_STATS; i++) { + pmd->stats_zero[i] = stats[i]; + } + for (i = 0; i < PMD_N_CYCLES; i++) { + pmd->cycles_zero[i] = cycles[i]; + } +} + +static void +dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[], + void *aux) +{ + struct ds reply = DS_EMPTY_INITIALIZER; + struct dp_netdev_pmd_thread *pmd; + struct dp_netdev *dp = NULL; + enum pmd_info_type type = *(enum pmd_info_type *) aux; + + ovs_mutex_lock(&dp_netdev_mutex); + + if (argc == 2) { + dp = shash_find_data(&dp_netdevs, argv[1]); + } else if (shash_count(&dp_netdevs) == 1) { + /* There's only one datapath */ + dp = shash_first(&dp_netdevs)->data; + } + + if (!dp) { + ovs_mutex_unlock(&dp_netdev_mutex); + unixctl_command_reply_error(conn, + "please specify an existing datapath"); + return; + } + + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + unsigned long long stats[DP_N_STATS]; + uint64_t cycles[PMD_N_CYCLES]; + int i; + + /* Read current stats and cycle counters */ + for (i = 0; i < ARRAY_SIZE(stats); i++) { + atomic_read_relaxed(&pmd->stats.n[i], &stats[i]); + } + for (i = 0; i < ARRAY_SIZE(cycles); i++) { + atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]); + } + + if (type == PMD_INFO_CLEAR_STATS) { + pmd_info_clear_stats(&reply, pmd, stats, cycles); + } else if (type == PMD_INFO_SHOW_STATS) { + pmd_info_show_stats(&reply, pmd, stats, cycles); + } + } + + ovs_mutex_unlock(&dp_netdev_mutex); + + unixctl_command_reply(conn, ds_cstr(&reply)); + ds_destroy(&reply); +} + +static int +dpif_netdev_init(void) +{ + static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS, + clear_aux = PMD_INFO_CLEAR_STATS; + + unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]", + 0, 1, dpif_netdev_pmd_info, + (void *)&show_aux); + unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]", + 0, 1, dpif_netdev_pmd_info, + (void *)&clear_aux); + return 0; +} static int dpif_netdev_enumerate(struct sset *all_dps, @@ -600,12 +824,6 @@ create_dp_netdev(const char *name, const struct dpif_class *class, ovs_refcount_init(&dp->ref_cnt); atomic_flag_clear(&dp->destroyed); - ovs_mutex_init(&dp->flow_mutex); - dpcls_init(&dp->cls); - cmap_init(&dp->flow_table); - - ovsthread_stats_init(&dp->stats); - ovs_mutex_init(&dp->port_mutex); cmap_init(&dp->ports); dp->port_seq = seq_create(); @@ -620,8 +838,6 @@ create_dp_netdev(const char *name, const struct dpif_class *class, ovs_mutex_init_recursive(&dp->non_pmd_mutex); ovsthread_key_create(&dp->per_pmd_key, NULL); - /* Reserves the core NON_PMD_CORE_ID for all non-pmd threads. 
*/ - ovs_numa_try_pin_core_specific(NON_PMD_CORE_ID); dp_netdev_set_nonpmd(dp); dp->n_dpdk_rxqs = NR_QUEUE; @@ -682,8 +898,6 @@ dp_netdev_free(struct dp_netdev *dp) OVS_REQUIRES(dp_netdev_mutex) { struct dp_netdev_port *port; - struct dp_netdev_stats *bucket; - int i; shash_find_and_delete(&dp_netdevs, dp->name); @@ -692,22 +906,12 @@ dp_netdev_free(struct dp_netdev *dp) ovs_mutex_destroy(&dp->non_pmd_mutex); ovsthread_key_delete(dp->per_pmd_key); - dp_netdev_flow_flush(dp); ovs_mutex_lock(&dp->port_mutex); CMAP_FOR_EACH (port, node, &dp->ports) { do_del_port(dp, port); } ovs_mutex_unlock(&dp->port_mutex); - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) { - ovs_mutex_destroy(&bucket->mutex); - free_cacheline(bucket); - } - ovsthread_stats_destroy(&dp->stats); - - dpcls_destroy(&dp->cls); - cmap_destroy(&dp->flow_table); - ovs_mutex_destroy(&dp->flow_mutex); seq_destroy(dp->port_seq); cmap_destroy(&dp->ports); @@ -757,22 +961,40 @@ dpif_netdev_destroy(struct dpif *dpif) return 0; } +/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed + * load/store semantics. While the increment is not atomic, the load and + * store operations are, making it impossible to read inconsistent values. + * + * This is used to update thread local stats counters. */ +static void +non_atomic_ullong_add(atomic_ullong *var, unsigned long long n) +{ + unsigned long long tmp; + + atomic_read_relaxed(var, &tmp); + tmp += n; + atomic_store_relaxed(var, tmp); +} + static int dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats) { struct dp_netdev *dp = get_dp_netdev(dpif); - struct dp_netdev_stats *bucket; - size_t i; - - stats->n_flows = cmap_count(&dp->flow_table); + struct dp_netdev_pmd_thread *pmd; - stats->n_hit = stats->n_missed = stats->n_lost = 0; - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) { - ovs_mutex_lock(&bucket->mutex); - stats->n_hit += bucket->n[DP_STAT_HIT]; - stats->n_missed += bucket->n[DP_STAT_MISS]; - stats->n_lost += bucket->n[DP_STAT_LOST]; - ovs_mutex_unlock(&bucket->mutex); + stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0; + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + unsigned long long n; + stats->n_flows += cmap_count(&pmd->flow_table); + + atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n); + stats->n_hit += n; + atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n); + stats->n_hit += n; + atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n); + stats->n_missed += n; + atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n); + stats->n_lost += n; } stats->n_masks = UINT32_MAX; stats->n_mask_hit = UINT64_MAX; @@ -826,7 +1048,10 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type, int error; int i; - /* XXX reject devices already in some dp_netdev. */ + /* Reject devices already in 'dp'. */ + if (!get_port_by_name(dp, devname, &port)) { + return EEXIST; + } /* Open and validate network device. */ open_type = dpif_netdev_port_open_type(dp->class, type); @@ -851,8 +1076,9 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type, return ENOENT; } /* There can only be ovs_numa_get_n_cores() pmd threads, - * so creates a txq for each. */ - error = netdev_set_multiq(netdev, n_cores, dp->n_dpdk_rxqs); + * so creates a txq for each, and one extra for the non + * pmd threads. 
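non_atomic_ullong_add() above relies on there being exactly one writer per counter (the owning pmd thread); readers such as dpif_netdev_get_stats() use relaxed atomic loads, so they may see a slightly stale value but never a torn one. A standalone analogue using C11 atomics as a stand-in for the ovs-atomic wrappers:

    #include <stdatomic.h>

    static _Atomic unsigned long long counter;

    /* Owning thread only: relaxed load-add-store; the increment itself is not
     * atomic, but single-writer semantics make that safe. */
    static void
    owner_thread_add(unsigned long long n)
    {
        unsigned long long tmp = atomic_load_explicit(&counter,
                                                      memory_order_relaxed);
        atomic_store_explicit(&counter, tmp + n, memory_order_relaxed);
    }

    /* Any thread: a relaxed atomic load can be stale but never torn. */
    static unsigned long long
    any_thread_read(void)
    {
        return atomic_load_explicit(&counter, memory_order_relaxed);
    }
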
*/ + error = netdev_set_multiq(netdev, n_cores + 1, dp->n_dpdk_rxqs); if (error && (error != EOPNOTSUPP)) { VLOG_ERR("%s, cannot set multiq", devname); return errno; @@ -1136,15 +1362,6 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname, static void dp_netdev_flow_free(struct dp_netdev_flow *flow) { - struct dp_netdev_flow_stats *bucket; - size_t i; - - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &flow->stats) { - ovs_mutex_destroy(&bucket->mutex); - free_cacheline(bucket); - } - ovsthread_stats_destroy(&flow->stats); - dp_netdev_actions_free(dp_netdev_flow_get_actions(flow)); free(flow); } @@ -1156,37 +1373,48 @@ static void dp_netdev_flow_unref(struct dp_netdev_flow *flow) } } +static uint32_t +dp_netdev_flow_hash(const ovs_u128 *ufid) +{ + return ufid->u32[0]; +} + static void -dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow) - OVS_REQUIRES(dp->flow_mutex) +dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd, + struct dp_netdev_flow *flow) + OVS_REQUIRES(pmd->flow_mutex) { struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node); - dpcls_remove(&dp->cls, &flow->cr); - cmap_remove(&dp->flow_table, node, flow_hash(&flow->flow, 0)); + dpcls_remove(&pmd->cls, &flow->cr); + cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid)); flow->dead = true; dp_netdev_flow_unref(flow); } static void -dp_netdev_flow_flush(struct dp_netdev *dp) +dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd) { struct dp_netdev_flow *netdev_flow; - ovs_mutex_lock(&dp->flow_mutex); - CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) { - dp_netdev_remove_flow(dp, netdev_flow); + ovs_mutex_lock(&pmd->flow_mutex); + CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) { + dp_netdev_pmd_remove_flow(pmd, netdev_flow); } - ovs_mutex_unlock(&dp->flow_mutex); + ovs_mutex_unlock(&pmd->flow_mutex); } static int dpif_netdev_flow_flush(struct dpif *dpif) { struct dp_netdev *dp = get_dp_netdev(dpif); + struct dp_netdev_pmd_thread *pmd; + + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { + dp_netdev_pmd_flow_flush(pmd); + } - dp_netdev_flow_flush(dp); return 0; } @@ -1336,16 +1564,16 @@ static void netdev_flow_key_from_flow(struct netdev_flow_key *dst, const struct flow *src) { - struct ofpbuf packet; + struct dp_packet packet; uint64_t buf_stub[512 / 8]; - struct pkt_metadata md = pkt_metadata_from_flow(src); miniflow_initialize(&dst->mf, dst->buf); - ofpbuf_use_stub(&packet, buf_stub, sizeof buf_stub); + dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub); + pkt_metadata_from_flow(&packet.md, src); flow_compose(&packet, src); - miniflow_extract(&packet, &md, &dst->mf); - ofpbuf_uninit(&packet); + miniflow_extract(&packet, &dst->mf); + dp_packet_uninit(&packet); dst->len = netdev_flow_key_size(count_1bits(dst->mf.map)); dst->hash = 0; /* Not computed yet. 
*/ @@ -1356,8 +1584,8 @@ static inline void netdev_flow_mask_init(struct netdev_flow_key *mask, const struct match *match) { - const uint32_t *mask_u32 = (const uint32_t *) &match->wc.masks; - uint32_t *dst = mask->mf.inline_values; + const uint64_t *mask_u64 = (const uint64_t *) &match->wc.masks; + uint64_t *dst = mask->mf.inline_values; uint64_t map, mask_map = 0; uint32_t hash = 0; int n; @@ -1369,10 +1597,10 @@ netdev_flow_mask_init(struct netdev_flow_key *mask, uint64_t rm1bit = rightmost_1bit(map); int i = raw_ctz(map); - if (mask_u32[i]) { + if (mask_u64[i]) { mask_map |= rm1bit; - *dst++ = mask_u32[i]; - hash = hash_add(hash, mask_u32[i]); + *dst++ = mask_u64[i]; + hash = hash_add64(hash, mask_u64[i]); } map -= rm1bit; } @@ -1380,12 +1608,11 @@ netdev_flow_mask_init(struct netdev_flow_key *mask, mask->mf.values_inline = true; mask->mf.map = mask_map; - hash = hash_add(hash, mask_map); - hash = hash_add(hash, mask_map >> 32); + hash = hash_add64(hash, mask_map); n = dst - mask->mf.inline_values; - mask->hash = hash_finish(hash, n * 4); + mask->hash = hash_finish(hash, n * 8); mask->len = netdev_flow_key_size(n); } @@ -1395,23 +1622,23 @@ netdev_flow_key_init_masked(struct netdev_flow_key *dst, const struct flow *flow, const struct netdev_flow_key *mask) { - uint32_t *dst_u32 = dst->mf.inline_values; - const uint32_t *mask_u32 = mask->mf.inline_values; + uint64_t *dst_u64 = dst->mf.inline_values; + const uint64_t *mask_u64 = mask->mf.inline_values; uint32_t hash = 0; - uint32_t value; + uint64_t value; dst->len = mask->len; dst->mf.values_inline = true; dst->mf.map = mask->mf.map; FLOW_FOR_EACH_IN_MAP(value, flow, mask->mf.map) { - *dst_u32 = value & *mask_u32++; - hash = hash_add(hash, *dst_u32++); + *dst_u64 = value & *mask_u64++; + hash = hash_add64(hash, *dst_u64++); } - dst->hash = hash_finish(hash, (dst_u32 - dst->mf.inline_values) * 4); + dst->hash = hash_finish(hash, (dst_u64 - dst->mf.inline_values) * 8); } -/* Iterate through all netdev_flow_key u32 values specified by 'MAP' */ +/* Iterate through all netdev_flow_key u64 values specified by 'MAP' */ #define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \ for (struct mf_for_each_in_map_aux aux__ \ = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP }; \ @@ -1424,15 +1651,15 @@ static inline uint32_t netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key, const struct netdev_flow_key *mask) { - const uint32_t *p = mask->mf.inline_values; + const uint64_t *p = mask->mf.inline_values; uint32_t hash = 0; - uint32_t key_u32; + uint64_t key_u64; - NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u32, key, mask->mf.map) { - hash = hash_add(hash, key_u32 & *p++); + NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u64, key, mask->mf.map) { + hash = hash_add64(hash, key_u64 & *p++); } - return hash_finish(hash, (p - mask->mf.inline_values) * 4); + return hash_finish(hash, (p - mask->mf.inline_values) * 8); } static inline bool @@ -1518,27 +1745,40 @@ emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key) } static struct dp_netdev_flow * -dp_netdev_lookup_flow(const struct dp_netdev *dp, - const struct netdev_flow_key *key) +dp_netdev_pmd_lookup_flow(const struct dp_netdev_pmd_thread *pmd, + const struct netdev_flow_key *key) { struct dp_netdev_flow *netdev_flow; struct dpcls_rule *rule; - dpcls_lookup(&dp->cls, key, &rule, 1); + dpcls_lookup(&pmd->cls, key, &rule, 1); netdev_flow = dp_netdev_flow_cast(rule); return netdev_flow; } static struct dp_netdev_flow * -dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow) 
+dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd, + const ovs_u128 *ufidp, const struct nlattr *key, + size_t key_len) { struct dp_netdev_flow *netdev_flow; + struct flow flow; + ovs_u128 ufid; + + /* If a UFID is not provided, determine one based on the key. */ + if (!ufidp && key && key_len + && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow)) { + dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid); + ufidp = &ufid; + } - CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0), - &dp->flow_table) { - if (flow_equal(&netdev_flow->flow, flow)) { - return netdev_flow; + if (ufidp) { + CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp), + &pmd->flow_table) { + if (ovs_u128_equals(&netdev_flow->ufid, ufidp)) { + return netdev_flow; + } } } @@ -1546,41 +1786,74 @@ dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow) } static void -get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow, +get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_, struct dpif_flow_stats *stats) { - struct dp_netdev_flow_stats *bucket; - size_t i; - - memset(stats, 0, sizeof *stats); - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &netdev_flow->stats) { - ovs_mutex_lock(&bucket->mutex); - stats->n_packets += bucket->packet_count; - stats->n_bytes += bucket->byte_count; - stats->used = MAX(stats->used, bucket->used); - stats->tcp_flags |= bucket->tcp_flags; - ovs_mutex_unlock(&bucket->mutex); - } -} - + struct dp_netdev_flow *netdev_flow; + unsigned long long n; + long long used; + uint16_t flags; + + netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_); + + atomic_read_relaxed(&netdev_flow->stats.packet_count, &n); + stats->n_packets = n; + atomic_read_relaxed(&netdev_flow->stats.byte_count, &n); + stats->n_bytes = n; + atomic_read_relaxed(&netdev_flow->stats.used, &used); + stats->used = used; + atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); + stats->tcp_flags = flags; +} + +/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for + * storing the netlink-formatted key/mask. 'key_buf' may be the same as + * 'mask_buf'. Actions will be returned without copying, by relying on RCU to + * protect them. 
*/ static void dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow, - struct ofpbuf *buffer, struct dpif_flow *flow) + struct ofpbuf *key_buf, struct ofpbuf *mask_buf, + struct dpif_flow *flow, bool terse) { - struct flow_wildcards wc; - struct dp_netdev_actions *actions; - - miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks); - odp_flow_key_from_mask(buffer, &wc.masks, &netdev_flow->flow, - odp_to_u32(wc.masks.in_port.odp_port), - SIZE_MAX, true); - flow->mask = ofpbuf_data(buffer); - flow->mask_len = ofpbuf_size(buffer); + if (terse) { + memset(flow, 0, sizeof *flow); + } else { + struct flow_wildcards wc; + struct dp_netdev_actions *actions; + size_t offset; + struct odp_flow_key_parms odp_parms = { + .flow = &netdev_flow->flow, + .mask = &wc.masks, + .recirc = true, + .max_mpls_depth = SIZE_MAX, + }; - actions = dp_netdev_flow_get_actions(netdev_flow); - flow->actions = actions->actions; - flow->actions_len = actions->size; + miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks); + /* Key */ + offset = key_buf->size; + flow->key = ofpbuf_tail(key_buf); + odp_parms.odp_in_port = netdev_flow->flow.in_port.odp_port; + odp_flow_key_from_flow(&odp_parms, key_buf); + flow->key_len = key_buf->size - offset; + + /* Mask */ + offset = mask_buf->size; + flow->mask = ofpbuf_tail(mask_buf); + odp_parms.odp_in_port = wc.masks.in_port.odp_port; + odp_parms.key_buf = key_buf; + odp_flow_key_from_mask(&odp_parms, mask_buf); + flow->mask_len = mask_buf->size - offset; + + /* Actions */ + actions = dp_netdev_flow_get_actions(netdev_flow); + flow->actions = actions->actions; + flow->actions_len = actions->size; + } + + flow->ufid = netdev_flow->ufid; + flow->ufid_present = true; + flow->pmd_id = netdev_flow->pmd_id; get_dpif_flow_stats(netdev_flow, &flow->stats); } @@ -1593,7 +1866,8 @@ dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len, if (mask_key_len) { enum odp_key_fitness fitness; - fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow); + fitness = odp_flow_key_to_mask(mask_key, mask_key_len, key, key_len, + mask, flow); if (fitness) { /* This should not happen: it indicates that * odp_flow_key_from_mask() and odp_flow_key_to_mask() @@ -1681,29 +1955,35 @@ dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; - struct flow key; - int error; + struct dp_netdev_pmd_thread *pmd; + unsigned pmd_id = get->pmd_id == PMD_ID_NULL + ? 
NON_PMD_CORE_ID : get->pmd_id; + int error = 0; - error = dpif_netdev_flow_from_nlattrs(get->key, get->key_len, &key); - if (error) { - return error; + pmd = dp_netdev_get_pmd(dp, pmd_id); + if (!pmd) { + return EINVAL; } - netdev_flow = dp_netdev_find_flow(dp, &key); - + netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key, + get->key_len); if (netdev_flow) { - dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->flow); - } else { + dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer, + get->flow, false); + } else { error = ENOENT; } + dp_netdev_pmd_unref(pmd); + return error; } static struct dp_netdev_flow * -dp_netdev_flow_add(struct dp_netdev *dp, struct match *match, +dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd, + struct match *match, const ovs_u128 *ufid, const struct nlattr *actions, size_t actions_len) - OVS_REQUIRES(dp->flow_mutex) + OVS_REQUIRES(pmd->flow_mutex) { struct dp_netdev_flow *flow; struct netdev_flow_key mask; @@ -1714,17 +1994,20 @@ dp_netdev_flow_add(struct dp_netdev *dp, struct match *match, /* Do not allocate extra space. */ flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len); + memset(&flow->stats, 0, sizeof flow->stats); flow->dead = false; + flow->batch = NULL; + *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id; *CONST_CAST(struct flow *, &flow->flow) = match->flow; + *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid; ovs_refcount_init(&flow->ref_cnt); - ovsthread_stats_init(&flow->stats); ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len)); - cmap_insert(&dp->flow_table, - CONST_CAST(struct cmap_node *, &flow->node), - flow_hash(&flow->flow, 0)); netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask); - dpcls_insert(&dp->cls, &flow->cr, &mask); + dpcls_insert(&pmd->cls, &flow->cr, &mask); + + cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node), + dp_netdev_flow_hash(&flow->ufid)); if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) { struct match match; @@ -1734,6 +2017,8 @@ dp_netdev_flow_add(struct dp_netdev *dp, struct match *match, miniflow_expand(&flow->cr.mask->mf, &match.wc.masks); ds_put_cstr(&ds, "flow_add: "); + odp_format_ufid(ufid, &ds); + ds_put_cstr(&ds, " "); match_format(&match, &ds, OFP_DEFAULT_PRIORITY); ds_put_cstr(&ds, ", actions:"); format_odp_actions(&ds, actions, actions_len); @@ -1746,29 +2031,17 @@ dp_netdev_flow_add(struct dp_netdev *dp, struct match *match, return flow; } -static void -clear_stats(struct dp_netdev_flow *netdev_flow) -{ - struct dp_netdev_flow_stats *bucket; - size_t i; - - OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &netdev_flow->stats) { - ovs_mutex_lock(&bucket->mutex); - bucket->used = 0; - bucket->packet_count = 0; - bucket->byte_count = 0; - bucket->tcp_flags = 0; - ovs_mutex_unlock(&bucket->mutex); - } -} - static int dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; struct netdev_flow_key key; + struct dp_netdev_pmd_thread *pmd; struct match match; + ovs_u128 ufid; + unsigned pmd_id = put->pmd_id == PMD_ID_NULL + ? NON_PMD_CORE_ID : put->pmd_id; int error; error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow); @@ -1782,20 +2055,32 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) return error; } + pmd = dp_netdev_get_pmd(dp, pmd_id); + if (!pmd) { + return EINVAL; + } + /* Must produce a netdev_flow_key for lookup. 
* This interface is no longer performance critical, since it is not used * for upcall processing any more. */ netdev_flow_key_from_flow(&key, &match.flow); - ovs_mutex_lock(&dp->flow_mutex); - netdev_flow = dp_netdev_lookup_flow(dp, &key); + if (put->ufid) { + ufid = *put->ufid; + } else { + dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid); + } + + ovs_mutex_lock(&pmd->flow_mutex); + netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &key); if (!netdev_flow) { if (put->flags & DPIF_FP_CREATE) { - if (cmap_count(&dp->flow_table) < MAX_FLOWS) { + if (cmap_count(&pmd->flow_table) < MAX_FLOWS) { if (put->stats) { memset(put->stats, 0, sizeof *put->stats); } - dp_netdev_flow_add(dp, &match, put->actions, put->actions_len); + dp_netdev_flow_add(pmd, &match, &ufid, put->actions, + put->actions_len); error = 0; } else { error = EFBIG; @@ -1819,7 +2104,16 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) get_dpif_flow_stats(netdev_flow, put->stats); } if (put->flags & DPIF_FP_ZERO_STATS) { - clear_stats(netdev_flow); + /* XXX: The userspace datapath uses thread local statistics + * (for flows), which should be updated only by the owning + * thread. Since we cannot write on stats memory here, + * we choose not to support this flag. Please note: + * - This feature is currently used only by dpctl commands with + * option --clear. + * - Should the need arise, this operation can be implemented + * by keeping a base value (to be update here) for each + * counter, and subtracting it before outputting the stats */ + error = EOPNOTSUPP; } ovsrcu_postpone(dp_netdev_actions_free, old_actions); @@ -1830,7 +2124,8 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) error = EINVAL; } } - ovs_mutex_unlock(&dp->flow_mutex); + ovs_mutex_unlock(&pmd->flow_mutex); + dp_netdev_pmd_unref(pmd); return error; } @@ -1840,32 +2135,38 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; - struct flow key; - int error; + struct dp_netdev_pmd_thread *pmd; + unsigned pmd_id = del->pmd_id == PMD_ID_NULL + ? 
NON_PMD_CORE_ID : del->pmd_id; + int error = 0; - error = dpif_netdev_flow_from_nlattrs(del->key, del->key_len, &key); - if (error) { - return error; + pmd = dp_netdev_get_pmd(dp, pmd_id); + if (!pmd) { + return EINVAL; } - ovs_mutex_lock(&dp->flow_mutex); - netdev_flow = dp_netdev_find_flow(dp, &key); + ovs_mutex_lock(&pmd->flow_mutex); + netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key, + del->key_len); if (netdev_flow) { if (del->stats) { get_dpif_flow_stats(netdev_flow, del->stats); } - dp_netdev_remove_flow(dp, netdev_flow); + dp_netdev_pmd_remove_flow(pmd, netdev_flow); } else { error = ENOENT; } - ovs_mutex_unlock(&dp->flow_mutex); + ovs_mutex_unlock(&pmd->flow_mutex); + dp_netdev_pmd_unref(pmd); return error; } struct dpif_netdev_flow_dump { struct dpif_flow_dump up; - struct cmap_position pos; + struct cmap_position poll_thread_pos; + struct cmap_position flow_pos; + struct dp_netdev_pmd_thread *cur_pmd; int status; struct ovs_mutex mutex; }; @@ -1877,14 +2178,13 @@ dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump) } static struct dpif_flow_dump * -dpif_netdev_flow_dump_create(const struct dpif *dpif_) +dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse) { struct dpif_netdev_flow_dump *dump; - dump = xmalloc(sizeof *dump); + dump = xzalloc(sizeof *dump); dpif_flow_dump_init(&dump->up, dpif_); - memset(&dump->pos, 0, sizeof dump->pos); - dump->status = 0; + dump->up.terse = terse; ovs_mutex_init(&dump->mutex); return &dump->up; @@ -1941,26 +2241,58 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_, struct dpif_netdev_flow_dump_thread *thread = dpif_netdev_flow_dump_thread_cast(thread_); struct dpif_netdev_flow_dump *dump = thread->dump; - struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif); struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH]; - struct dp_netdev *dp = get_dp_netdev(&dpif->dpif); int n_flows = 0; int i; ovs_mutex_lock(&dump->mutex); if (!dump->status) { - for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH); - n_flows++) { - struct cmap_node *node; + struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif); + struct dp_netdev *dp = get_dp_netdev(&dpif->dpif); + struct dp_netdev_pmd_thread *pmd = dump->cur_pmd; + int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH); + + /* First call to dump_next(), extracts the first pmd thread. + * If there is no pmd thread, returns immediately. */ + if (!pmd) { + pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); + if (!pmd) { + ovs_mutex_unlock(&dump->mutex); + return n_flows; - node = cmap_next_position(&dp->flow_table, &dump->pos); - if (!node) { - dump->status = EOF; - break; } - netdev_flows[n_flows] = CONTAINER_OF(node, struct dp_netdev_flow, - node); } + + do { + for (n_flows = 0; n_flows < flow_limit; n_flows++) { + struct cmap_node *node; + + node = cmap_next_position(&pmd->flow_table, &dump->flow_pos); + if (!node) { + break; + } + netdev_flows[n_flows] = CONTAINER_OF(node, + struct dp_netdev_flow, + node); + } + /* When finishing dumping the current pmd thread, moves to + * the next. */ + if (n_flows < flow_limit) { + memset(&dump->flow_pos, 0, sizeof dump->flow_pos); + dp_netdev_pmd_unref(pmd); + pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); + if (!pmd) { + dump->status = EOF; + break; + } + } + /* Keeps the reference to next caller. */ + dump->cur_pmd = pmd; + + /* If the current dump is empty, do not exit the loop, since the + * remaining pmds could have flows to be dumped. Just dumps again + * on the new 'pmd'. 
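With flows spread across per-pmd tables, the dump above keeps two cursors: one over 'dp->poll_threads' and one over the current pmd's 'flow_table', holding a reference on the pmd while its table is walked. A condensed sketch of the same pattern, assuming the helpers introduced elsewhere in this patch (it is not the dump code itself):

    static void
    walk_all_flows(struct dp_netdev *dp,
                   void (*cb)(struct dp_netdev_flow *, void *aux), void *aux)
    {
        struct cmap_position pmd_pos;
        struct dp_netdev_pmd_thread *pmd;

        memset(&pmd_pos, 0, sizeof pmd_pos);
        while ((pmd = dp_netdev_pmd_get_next(dp, &pmd_pos))) {
            struct cmap_position flow_pos;
            struct cmap_node *node;

            memset(&flow_pos, 0, sizeof flow_pos);
            while ((node = cmap_next_position(&pmd->flow_table, &flow_pos))) {
                cb(CONTAINER_OF(node, struct dp_netdev_flow, node), aux);
            }
            dp_netdev_pmd_unref(pmd);
        }
    }
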
*/ + } while (!n_flows); } ovs_mutex_unlock(&dump->mutex); @@ -1969,34 +2301,12 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_, struct odputil_keybuf *keybuf = &thread->keybuf[i]; struct dp_netdev_flow *netdev_flow = netdev_flows[i]; struct dpif_flow *f = &flows[i]; - struct dp_netdev_actions *dp_actions; - struct flow_wildcards wc; - struct ofpbuf buf; - - miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks); - - /* Key. */ - ofpbuf_use_stack(&buf, keybuf, sizeof *keybuf); - odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks, - netdev_flow->flow.in_port.odp_port, true); - f->key = ofpbuf_data(&buf); - f->key_len = ofpbuf_size(&buf); - - /* Mask. */ - ofpbuf_use_stack(&buf, maskbuf, sizeof *maskbuf); - odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow, - odp_to_u32(wc.masks.in_port.odp_port), - SIZE_MAX, true); - f->mask = ofpbuf_data(&buf); - f->mask_len = ofpbuf_size(&buf); - - /* Actions. */ - dp_actions = dp_netdev_flow_get_actions(netdev_flow); - f->actions = dp_actions->actions; - f->actions_len = dp_actions->size; + struct ofpbuf key, mask; - /* Stats. */ - get_dpif_flow_stats(netdev_flow, &f->stats); + ofpbuf_use_stack(&key, keybuf, sizeof *keybuf); + ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf); + dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f, + dump->up.terse); } return n_flows; @@ -2008,41 +2318,37 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_pmd_thread *pmd; - struct dpif_packet packet, *pp; + struct dp_packet *pp; - if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN || - ofpbuf_size(execute->packet) > UINT16_MAX) { + if (dp_packet_size(execute->packet) < ETH_HEADER_LEN || + dp_packet_size(execute->packet) > UINT16_MAX) { return EINVAL; } - packet.ofpbuf = *execute->packet; - packet.md = execute->md; - pp = &packet; - /* Tries finding the 'pmd'. If NULL is returned, that means * the current thread is a non-pmd thread and should use - * dp_netdev_get_nonpmd(). */ + * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */ pmd = ovsthread_getspecific(dp->per_pmd_key); if (!pmd) { - pmd = dp_netdev_get_nonpmd(dp); + pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID); } /* If the current thread is non-pmd thread, acquires * the 'non_pmd_mutex'. */ if (pmd->core_id == NON_PMD_CORE_ID) { ovs_mutex_lock(&dp->non_pmd_mutex); + ovs_mutex_lock(&dp->port_mutex); } + + pp = execute->packet; dp_netdev_execute_actions(pmd, &pp, 1, false, execute->actions, execute->actions_len); if (pmd->core_id == NON_PMD_CORE_ID) { + dp_netdev_pmd_unref(pmd); + ovs_mutex_unlock(&dp->port_mutex); ovs_mutex_unlock(&dp->non_pmd_mutex); } - /* Even though may_steal is set to false, some actions could modify or - * reallocate the ofpbuf memory. We need to pass those changes to the - * caller */ - *execute->packet = packet.ofpbuf; - execute->md = packet.md; return 0; } @@ -2112,7 +2418,8 @@ dpif_netdev_pmd_set(struct dpif *dpif, unsigned int n_rxqs, const char *cmask) } /* Sets the new rx queue config. */ - err = netdev_set_multiq(port->netdev, ovs_numa_get_n_cores(), + err = netdev_set_multiq(port->netdev, + ovs_numa_get_n_cores() + 1, n_rxqs); if (err && (err != EOPNOTSUPP)) { VLOG_ERR("Failed to set dpdk interface %s rx_queue to:" @@ -2154,16 +2461,15 @@ dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED, } -/* Creates and returns a new 'struct dp_netdev_actions', with a reference count - * of 1, whose actions are a copy of from the 'ofpacts_len' bytes of - * 'ofpacts'. 
*/ +/* Creates and returns a new 'struct dp_netdev_actions', whose actions are + * a copy of the 'ofpacts_len' bytes of 'ofpacts'. */ struct dp_netdev_actions * dp_netdev_actions_create(const struct nlattr *actions, size_t size) { struct dp_netdev_actions *netdev_actions; - netdev_actions = xmalloc(sizeof *netdev_actions); - netdev_actions->actions = xmemdup(actions, size); + netdev_actions = xmalloc(sizeof *netdev_actions + size); + memcpy(netdev_actions->actions, actions, size); netdev_actions->size = size; return netdev_actions; @@ -2178,20 +2484,54 @@ dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow) static void dp_netdev_actions_free(struct dp_netdev_actions *actions) { - free(actions->actions); free(actions); } +static inline unsigned long long +cycles_counter(void) +{ +#ifdef DPDK_NETDEV + return rte_get_tsc_cycles(); +#else + return 0; +#endif +} + +/* Fake mutex to make sure that the calls to cycles_count_* are balanced */ +extern struct ovs_mutex cycles_counter_fake_mutex; + +/* Start counting cycles. Must be followed by 'cycles_count_end()' */ +static inline void +cycles_count_start(struct dp_netdev_pmd_thread *pmd) + OVS_ACQUIRES(&cycles_counter_fake_mutex) + OVS_NO_THREAD_SAFETY_ANALYSIS +{ + pmd->last_cycles = cycles_counter(); +} + +/* Stop counting cycles and add them to the counter 'type' */ +static inline void +cycles_count_end(struct dp_netdev_pmd_thread *pmd, + enum pmd_cycles_counter_type type) + OVS_RELEASES(&cycles_counter_fake_mutex) + OVS_NO_THREAD_SAFETY_ANALYSIS +{ + unsigned long long interval = cycles_counter() - pmd->last_cycles; + + non_atomic_ullong_add(&pmd->cycles.n[type], interval); +} static void dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd, struct dp_netdev_port *port, struct netdev_rxq *rxq) { - struct dpif_packet *packets[NETDEV_MAX_RX_BATCH]; + struct dp_packet *packets[NETDEV_MAX_BURST]; int error, cnt; + cycles_count_start(pmd); error = netdev_rxq_recv(rxq, packets, &cnt); + cycles_count_end(pmd, PMD_CYCLES_POLLING); if (!error) { int i; @@ -2199,9 +2539,11 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd, /* XXX: initialize md in netdev implementation. */ for (i = 0; i < cnt; i++) { - packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no); + pkt_metadata_init(&packets[i]->md, port->port_no); } + cycles_count_start(pmd); dp_netdev_input(pmd, packets, cnt); + cycles_count_end(pmd, PMD_CYCLES_PROCESSING); } else if (error != EAGAIN && error != EOPNOTSUPP) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); @@ -2216,7 +2558,8 @@ dpif_netdev_run(struct dpif *dpif) { struct dp_netdev_port *port; struct dp_netdev *dp = get_dp_netdev(dpif); - struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_nonpmd(dp); + struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_pmd(dp, + NON_PMD_CORE_ID); uint64_t new_tnl_seq; ovs_mutex_lock(&dp->non_pmd_mutex); @@ -2230,6 +2573,8 @@ dpif_netdev_run(struct dpif *dpif) } } ovs_mutex_unlock(&dp->non_pmd_mutex); + dp_netdev_pmd_unref(non_pmd); + tnl_arp_cache_run(); new_tnl_seq = seq_read(tnl_conf_seq); @@ -2332,6 +2677,11 @@ reload: emc_cache_init(&pmd->flow_cache); poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt); + /* List port/core affinity */ + for (i = 0; i < poll_cnt; i++) { + VLOG_INFO("Core %d processing port \'%s\'\n", pmd->core_id, netdev_get_name(poll_list[i].port->netdev)); + } + /* Signal here to make sure the pmd finishes * reloading the updated configuration. 
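Every measured region is bracketed by cycles_count_start()/cycles_count_end() the way dp_netdev_process_rxq_port() does above; the fake mutex "acquired" and "released" by that pair means an unbalanced call is flagged when OVS is built with Clang's thread-safety analysis. A minimal sketch (do_some_work() is a placeholder, not a real function):

    static void
    measure_region_example(struct dp_netdev_pmd_thread *pmd)
    {
        cycles_count_start(pmd);
        do_some_work();                 /* hypothetical work being measured */
        cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
    }
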
*/ dp_netdev_pmd_reload_done(pmd); @@ -2413,18 +2763,23 @@ dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd) ovs_mutex_unlock(&pmd->cond_mutex); } -/* Returns the pointer to the dp_netdev_pmd_thread for non-pmd threads. */ +/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns + * the pointer if succeeds, otherwise, NULL. + * + * Caller must unrefs the returned reference. */ static struct dp_netdev_pmd_thread * -dp_netdev_get_nonpmd(struct dp_netdev *dp) +dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id) { struct dp_netdev_pmd_thread *pmd; const struct cmap_node *pnode; - pnode = cmap_find(&dp->poll_threads, hash_int(NON_PMD_CORE_ID, 0)); - ovs_assert(pnode); + pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0)); + if (!pnode) { + return NULL; + } pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node); - return pmd; + return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL; } /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */ @@ -2438,19 +2793,70 @@ dp_netdev_set_nonpmd(struct dp_netdev *dp) OVS_NUMA_UNSPEC); } +/* Caller must have valid pointer to 'pmd'. */ +static bool +dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd) +{ + return ovs_refcount_try_ref_rcu(&pmd->ref_cnt); +} + +static void +dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd) +{ + if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) { + ovsrcu_postpone(dp_netdev_destroy_pmd, pmd); + } +} + +/* Given cmap position 'pos', tries to ref the next node. If try_ref() + * fails, keeps checking for next node until reaching the end of cmap. + * + * Caller must unrefs the returned reference. */ +static struct dp_netdev_pmd_thread * +dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos) +{ + struct dp_netdev_pmd_thread *next; + + do { + struct cmap_node *node; + + node = cmap_next_position(&dp->poll_threads, pos); + next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node) + : NULL; + } while (next && !dp_netdev_pmd_try_ref(next)); + + return next; +} + +static int +core_id_to_qid(unsigned core_id) +{ + if (core_id != NON_PMD_CORE_ID) { + return core_id; + } else { + return ovs_numa_get_n_cores(); + } +} + /* Configures the 'pmd' based on the input argument. */ static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, - int index, int core_id, int numa_id) + int index, unsigned core_id, int numa_id) { pmd->dp = dp; pmd->index = index; pmd->core_id = core_id; + pmd->tx_qid = core_id_to_qid(core_id); pmd->numa_id = numa_id; + + ovs_refcount_init(&pmd->ref_cnt); latch_init(&pmd->exit_latch); atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ); xpthread_cond_init(&pmd->cond, NULL); ovs_mutex_init(&pmd->cond_mutex); + ovs_mutex_init(&pmd->flow_mutex); + dpcls_init(&pmd->cls); + cmap_init(&pmd->flow_table); /* init the 'flow_cache' since there is no * actual thread created for NON_PMD_CORE_ID. */ if (core_id == NON_PMD_CORE_ID) { @@ -2460,13 +2866,26 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, hash_int(core_id, 0)); } -/* Stops the pmd thread, removes it from the 'dp->poll_threads' - * and destroys the struct. 
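As a worked example (the core count here is made up): on a host where ovs_numa_get_n_cores() returns 8, netdev_set_multiq() earlier in this patch asks each port for 8 + 1 = 9 transmit queues. A pmd thread pinned to core 3 then gets tx_qid 3, since core_id_to_qid() maps a pmd's core id straight to its queue, while the main thread and any other non-pmd thread share the extra queue 8, so pmd threads never contend with each other or with non-pmd threads for a transmit queue.
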
*/ +static void +dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd) +{ + dp_netdev_pmd_flow_flush(pmd); + dpcls_destroy(&pmd->cls); + cmap_destroy(&pmd->flow_table); + ovs_mutex_destroy(&pmd->flow_mutex); + latch_destroy(&pmd->exit_latch); + xpthread_cond_destroy(&pmd->cond); + ovs_mutex_destroy(&pmd->cond_mutex); + free(pmd); +} + +/* Stops the pmd thread, removes it from the 'dp->poll_threads', + * and unrefs the struct. */ static void dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd) { /* Uninit the 'flow_cache' since there is - * no actual thread uninit it. */ + * no actual thread uninit it for NON_PMD_CORE_ID. */ if (pmd->core_id == NON_PMD_CORE_ID) { emc_cache_uninit(&pmd->flow_cache); } else { @@ -2476,10 +2895,7 @@ dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd) xpthread_join(pmd->thread, NULL); } cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0)); - latch_destroy(&pmd->exit_latch); - xpthread_cond_destroy(&pmd->cond); - ovs_mutex_destroy(&pmd->cond_mutex); - free(pmd); + dp_netdev_pmd_unref(pmd); } /* Destroys all pmd threads. */ @@ -2539,7 +2955,7 @@ dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id) can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS); for (i = 0; i < can_have; i++) { struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd); - int core_id = ovs_numa_get_unpinned_core_on_numa(numa_id); + unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id); dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id); /* Each thread will distribute all devices rx-queues among @@ -2551,14 +2967,6 @@ dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id) } -static void * -dp_netdev_flow_stats_new_cb(void) -{ - struct dp_netdev_flow_stats *bucket = xzalloc_cacheline(sizeof *bucket); - ovs_mutex_init(&bucket->mutex); - return bucket; -} - /* Called after pmd threads config change. Restarts pmd threads with * new configuration. 
*/ static void @@ -2582,54 +2990,33 @@ dpif_netdev_get_datapath_version(void) } static void -dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, - int cnt, int size, - uint16_t tcp_flags) +dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size, + uint16_t tcp_flags, long long now) { - long long int now = time_msec(); - struct dp_netdev_flow_stats *bucket; + uint16_t flags; - bucket = ovsthread_stats_bucket_get(&netdev_flow->stats, - dp_netdev_flow_stats_new_cb); - - ovs_mutex_lock(&bucket->mutex); - bucket->used = MAX(now, bucket->used); - bucket->packet_count += cnt; - bucket->byte_count += size; - bucket->tcp_flags |= tcp_flags; - ovs_mutex_unlock(&bucket->mutex); -} - -static void * -dp_netdev_stats_new_cb(void) -{ - struct dp_netdev_stats *bucket = xzalloc_cacheline(sizeof *bucket); - ovs_mutex_init(&bucket->mutex); - return bucket; + atomic_store_relaxed(&netdev_flow->stats.used, now); + non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt); + non_atomic_ullong_add(&netdev_flow->stats.byte_count, size); + atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); + flags |= tcp_flags; + atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags); } static void -dp_netdev_count_packet(struct dp_netdev *dp, enum dp_stat_type type, int cnt) +dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd, + enum dp_stat_type type, int cnt) { - struct dp_netdev_stats *bucket; - - bucket = ovsthread_stats_bucket_get(&dp->stats, dp_netdev_stats_new_cb); - ovs_mutex_lock(&bucket->mutex); - bucket->n[type] += cnt; - ovs_mutex_unlock(&bucket->mutex); + non_atomic_ullong_add(&pmd->stats.n[type], cnt); } static int -dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_, - struct flow *flow, struct flow_wildcards *wc, +dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_, + struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid, enum dpif_upcall_type type, const struct nlattr *userdata, struct ofpbuf *actions, struct ofpbuf *put_actions) { - struct ofpbuf *packet = &packet_->ofpbuf; - - if (type == DPIF_UC_MISS) { - dp_netdev_count_packet(dp, DP_STAT_MISS, 1); - } + struct dp_netdev *dp = pmd->dp; if (OVS_UNLIKELY(!dp->upcall_cb)) { return ENODEV; @@ -2637,40 +3024,53 @@ dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_, if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) { struct ds ds = DS_EMPTY_INITIALIZER; - struct ofpbuf key; char *packet_str; + struct ofpbuf key; + struct odp_flow_key_parms odp_parms = { + .flow = flow, + .mask = &wc->masks, + .odp_in_port = flow->in_port.odp_port, + .recirc = true, + }; ofpbuf_init(&key, 0); - odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port, - true); + odp_flow_key_from_flow(&odp_parms, &key); + packet_str = ofp_packet_to_string(dp_packet_data(packet_), + dp_packet_size(packet_)); - packet_str = ofp_packet_to_string(ofpbuf_data(packet), - ofpbuf_size(packet)); - - odp_flow_key_format(ofpbuf_data(&key), ofpbuf_size(&key), &ds); + odp_flow_key_format(key.data, key.size, &ds); VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name, dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str); ofpbuf_uninit(&key); free(packet_str); + ds_destroy(&ds); } - return dp->upcall_cb(packet, flow, type, userdata, actions, wc, - put_actions, dp->upcall_aux); + return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata, + actions, wc, put_actions, dp->upcall_aux); } static inline uint32_t -dpif_netdev_packet_get_dp_hash(struct dpif_packet *packet, - const struct miniflow 
 static int
-dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_,
-                 struct flow *flow, struct flow_wildcards *wc,
+dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
+                 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
                  enum dpif_upcall_type type, const struct nlattr *userdata,
                  struct ofpbuf *actions, struct ofpbuf *put_actions)
 {
-    struct ofpbuf *packet = &packet_->ofpbuf;
-
-    if (type == DPIF_UC_MISS) {
-        dp_netdev_count_packet(dp, DP_STAT_MISS, 1);
-    }
+    struct dp_netdev *dp = pmd->dp;
 
     if (OVS_UNLIKELY(!dp->upcall_cb)) {
         return ENODEV;
@@ -2637,40 +3024,53 @@ dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_,
     if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
         struct ds ds = DS_EMPTY_INITIALIZER;
-        struct ofpbuf key;
         char *packet_str;
+        struct ofpbuf key;
+        struct odp_flow_key_parms odp_parms = {
+            .flow = flow,
+            .mask = &wc->masks,
+            .odp_in_port = flow->in_port.odp_port,
+            .recirc = true,
+        };
 
         ofpbuf_init(&key, 0);
-        odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
-                               true);
+        odp_flow_key_from_flow(&odp_parms, &key);
+        packet_str = ofp_packet_to_string(dp_packet_data(packet_),
+                                          dp_packet_size(packet_));
 
-        packet_str = ofp_packet_to_string(ofpbuf_data(packet),
-                                          ofpbuf_size(packet));
-
-        odp_flow_key_format(ofpbuf_data(&key), ofpbuf_size(&key), &ds);
+        odp_flow_key_format(key.data, key.size, &ds);
 
         VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
                  dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);
 
         ofpbuf_uninit(&key);
         free(packet_str);
+        ds_destroy(&ds);
     }
 
-    return dp->upcall_cb(packet, flow, type, userdata, actions, wc,
-                         put_actions, dp->upcall_aux);
+    return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
+                         actions, wc, put_actions, dp->upcall_aux);
 }
 
 static inline uint32_t
-dpif_netdev_packet_get_dp_hash(struct dpif_packet *packet,
-                               const struct miniflow *mf)
+dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
+                                const struct miniflow *mf)
 {
-    uint32_t hash;
+    uint32_t hash, recirc_depth;
 
-    hash = dpif_packet_get_dp_hash(packet);
+    hash = dp_packet_get_rss_hash(packet);
     if (OVS_UNLIKELY(!hash)) {
         hash = miniflow_hash_5tuple(mf, 0);
-        dpif_packet_set_dp_hash(packet, hash);
+        dp_packet_set_rss_hash(packet, hash);
+    }
+
+    /* The RSS hash must account for the recirculation depth to avoid
+     * collisions in the exact match cache */
+    recirc_depth = *recirc_depth_get_unsafe();
+    if (OVS_UNLIKELY(recirc_depth)) {
+        hash = hash_finish(hash, recirc_depth);
+        dp_packet_set_rss_hash(packet, hash);
     }
 
     return hash;
 }
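The comment in the function above gives the reason the recirculation depth is folded into the RSS hash: without it, the first and second pass of the same packet would land on the same exact-match-cache entry. hash_finish() is OVS's own mixer; the standalone sketch below uses a simple stand-in just to show the effect (mix32() is not OVS's hash_finish()).

    #include <inttypes.h>
    #include <stdio.h>

    static uint32_t
    mix32(uint32_t hash, uint32_t extra)
    {
        /* Arbitrary mixing step standing in for hash_finish(). */
        hash ^= extra;
        hash *= 0x85ebca6bu;
        hash ^= hash >> 13;
        return hash;
    }

    int
    main(void)
    {
        uint32_t rss = 0x5a5a1234;       /* Hash as reported by the NIC. */
        uint32_t depth;

        for (depth = 0; depth <= 2; depth++) {
            uint32_t h = depth ? mix32(rss, depth) : rss;

            printf("recirc depth %" PRIu32 " -> cache hash %08" PRIx32 "\n",
                   depth, h);
        }
        return 0;
    }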
@@ -2682,23 +3082,24 @@ struct packet_batch {
     struct dp_netdev_flow *flow;
 
-    struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
+    struct dp_packet *packets[NETDEV_MAX_BURST];
 };
 
 static inline void
-packet_batch_update(struct packet_batch *batch, struct dpif_packet *packet,
+packet_batch_update(struct packet_batch *batch, struct dp_packet *packet,
                     const struct miniflow *mf)
 {
     batch->tcp_flags |= miniflow_get_tcp_flags(mf);
     batch->packets[batch->packet_count++] = packet;
-    batch->byte_count += ofpbuf_size(&packet->ofpbuf);
+    batch->byte_count += dp_packet_size(packet);
 }
 
 static inline void
 packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow)
 {
-    batch->flow = flow;
+    flow->batch = batch;
+    batch->flow = flow;
     batch->packet_count = 0;
     batch->byte_count = 0;
     batch->tcp_flags = 0;
@@ -2706,142 +3107,127 @@ packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow)
 static inline void
 packet_batch_execute(struct packet_batch *batch,
-                     struct dp_netdev_pmd_thread *pmd)
+                     struct dp_netdev_pmd_thread *pmd,
+                     long long now)
 {
     struct dp_netdev_actions *actions;
     struct dp_netdev_flow *flow = batch->flow;
 
-    dp_netdev_flow_used(batch->flow, batch->packet_count, batch->byte_count,
-                        batch->tcp_flags);
+    dp_netdev_flow_used(flow, batch->packet_count, batch->byte_count,
+                        batch->tcp_flags, now);
 
     actions = dp_netdev_flow_get_actions(flow);
 
     dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
                               actions->actions, actions->size);
-
-    dp_netdev_count_packet(pmd->dp, DP_STAT_HIT, batch->packet_count);
 }
 
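packet_batch_init() now records the open batch in flow->batch, which lets dp_netdev_queue_batches() (next hunk) append to an existing batch in constant time instead of scanning a small batch array for every packet. The same "cache the open batch on the key object" idea, reduced to a standalone sketch with made-up types:

    #include <stddef.h>
    #include <stdio.h>

    #define BATCH_SIZE 8

    struct flow {
        int id;
        struct batch *batch;          /* NULL when no batch is open. */
    };

    struct batch {
        struct flow *flow;
        int n;
        int items[BATCH_SIZE];
    };

    static void
    queue_item(struct flow *flow, int item,
               struct batch batches[], size_t *n_batches)
    {
        struct batch *b = flow->batch;

        if (!b) {                     /* First item for this flow. */
            b = &batches[(*n_batches)++];
            b->flow = flow;
            b->n = 0;
            flow->batch = b;
        }
        b->items[b->n++] = item;
    }

    int
    main(void)
    {
        struct flow flows[2] = { { .id = 1 }, { .id = 2 } };
        struct batch batches[2];
        size_t n_batches = 0, i;
        int owner[6] = { 0, 1, 0, 0, 1, 0 };   /* Flow index per packet. */

        for (i = 0; i < 6; i++) {
            queue_item(&flows[owner[i]], (int) i, batches, &n_batches);
        }
        for (i = 0; i < n_batches; i++) {
            printf("flow %d: %d packets\n", batches[i].flow->id, batches[i].n);
            batches[i].flow->batch = NULL;     /* Reset after flushing. */
        }
        return 0;
    }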
-static inline bool
-dp_netdev_queue_batches(struct dpif_packet *pkt,
+static inline void
+dp_netdev_queue_batches(struct dp_packet *pkt,
                         struct dp_netdev_flow *flow, const struct miniflow *mf,
-                        struct packet_batch *batches, size_t *n_batches,
-                        size_t max_batches)
-{
-    struct packet_batch *batch = NULL;
-    int j;
-
-    if (OVS_UNLIKELY(!flow)) {
-        return false;
-    }
-    /* XXX: This O(n^2) algortihm makes sense if we're operating under the
-     * assumption that the number of distinct flows (and therefore the
-     * number of distinct batches) is quite small.  If this turns out not
-     * to be the case, it may make sense to pre sort based on the
-     * netdev_flow pointer.  That done we can get the appropriate batching
-     * in O(n * log(n)) instead. */
-    for (j = *n_batches - 1; j >= 0; j--) {
-        if (batches[j].flow == flow) {
-            batch = &batches[j];
-            packet_batch_update(batch, pkt, mf);
-            return true;
-        }
-    }
-    if (OVS_UNLIKELY(*n_batches >= max_batches)) {
-        return false;
+                        struct packet_batch *batches, size_t *n_batches)
+{
+    struct packet_batch *batch = flow->batch;
+
+    if (OVS_LIKELY(batch)) {
+        packet_batch_update(batch, pkt, mf);
+        return;
     }
 
     batch = &batches[(*n_batches)++];
     packet_batch_init(batch, flow);
     packet_batch_update(batch, pkt, mf);
-    return true;
 }
 
 static inline void
-dpif_packet_swap(struct dpif_packet **a, struct dpif_packet **b)
+dp_packet_swap(struct dp_packet **a, struct dp_packet **b)
 {
-    struct dpif_packet *tmp = *a;
+    struct dp_packet *tmp = *a;
     *a = *b;
     *b = tmp;
 }
 
 /* Try to process all ('cnt') the 'packets' using only the exact match cache
- * 'flow_cache'. If a flow is not found for a packet 'packets[i]', or if there
- * is no matching batch for a packet's flow, the miniflow is copied into 'keys'
- * and the packet pointer is moved at the beginning of the 'packets' array.
+ * 'flow_cache'. If a flow is not found for a packet 'packets[i]', the
+ * miniflow is copied into 'keys' and the packet pointer is moved at the
+ * beginning of the 'packets' array.
  *
 * The function returns the number of packets that needs to be processed in the
 * 'packets' array (they have been moved to the beginning of the vector).
 */
 static inline size_t
-emc_processing(struct dp_netdev_pmd_thread *pmd, struct dpif_packet **packets,
-               size_t cnt, struct netdev_flow_key *keys)
+emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet **packets,
+               size_t cnt, struct netdev_flow_key *keys,
+               struct packet_batch batches[], size_t *n_batches)
 {
-    struct netdev_flow_key key;
-    struct packet_batch batches[4];
     struct emc_cache *flow_cache = &pmd->flow_cache;
-    size_t n_batches, i;
-    size_t notfound_cnt = 0;
+    struct netdev_flow_key key;
+    size_t i, notfound_cnt = 0;
 
-    n_batches = 0;
     miniflow_initialize(&key.mf, key.buf);
     for (i = 0; i < cnt; i++) {
         struct dp_netdev_flow *flow;
 
-        if (OVS_UNLIKELY(ofpbuf_size(&packets[i]->ofpbuf) < ETH_HEADER_LEN)) {
-            dpif_packet_delete(packets[i]);
+        if (OVS_UNLIKELY(dp_packet_size(packets[i]) < ETH_HEADER_LEN)) {
+            dp_packet_delete(packets[i]);
             continue;
         }
 
-        miniflow_extract(&packets[i]->ofpbuf, &packets[i]->md, &key.mf);
+        if (i != cnt - 1) {
+            /* Prefetch next packet data */
+            OVS_PREFETCH(dp_packet_data(packets[i+1]));
+        }
+
+        miniflow_extract(packets[i], &key.mf);
         key.len = 0; /* Not computed yet. */
-        key.hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.mf);
+        key.hash = dpif_netdev_packet_get_rss_hash(packets[i], &key.mf);
 
         flow = emc_lookup(flow_cache, &key);
-        if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i], flow, &key.mf,
-                                                  batches, &n_batches,
-                                                  ARRAY_SIZE(batches)))) {
+        if (OVS_LIKELY(flow)) {
+            dp_netdev_queue_batches(packets[i], flow, &key.mf, batches,
+                                    n_batches);
+        } else {
             if (i != notfound_cnt) {
-                dpif_packet_swap(&packets[i], &packets[notfound_cnt]);
+                dp_packet_swap(&packets[i], &packets[notfound_cnt]);
             }
 
             keys[notfound_cnt++] = key;
         }
     }
 
-    for (i = 0; i < n_batches; i++) {
-        packet_batch_execute(&batches[i], pmd);
-    }
+    dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, cnt - notfound_cnt);
 
     return notfound_cnt;
 }
 
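emc_processing() now returns with the EMC misses compacted to the front of 'packets' (and their keys in 'keys'), so fast_path_processing() can run on a dense prefix of the burst. The swap-to-front partition it uses, shown standalone on plain integers:

    #include <stddef.h>
    #include <stdio.h>

    static void
    swap_int(int *a, int *b)
    {
        int tmp = *a;

        *a = *b;
        *b = tmp;
    }

    int
    main(void)
    {
        int pkts[] = { 2, 7, 4, 9, 6, 3 };   /* Odd values "miss" the cache. */
        size_t cnt = sizeof pkts / sizeof pkts[0];
        size_t i, notfound_cnt = 0;

        for (i = 0; i < cnt; i++) {
            if (pkts[i] % 2) {               /* Miss: keep for the fast path. */
                if (i != notfound_cnt) {
                    swap_int(&pkts[i], &pkts[notfound_cnt]);
                }
                notfound_cnt++;
            }
        }

        printf("%zu of %zu packets need the fast path:", notfound_cnt, cnt);
        for (i = 0; i < notfound_cnt; i++) {
            printf(" %d", pkts[i]);
        }
        putchar('\n');
        return 0;
    }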
 static inline void
 fast_path_processing(struct dp_netdev_pmd_thread *pmd,
-                     struct dpif_packet **packets, size_t cnt,
-                     struct netdev_flow_key *keys)
+                     struct dp_packet **packets, size_t cnt,
+                     struct netdev_flow_key *keys,
+                     struct packet_batch batches[], size_t *n_batches)
 {
 #if !defined(__CHECKER__) && !defined(_WIN32)
     const size_t PKT_ARRAY_SIZE = cnt;
 #else
     /* Sparse or MSVC doesn't like variable length array. */
-    enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
+    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
 #endif
-    struct packet_batch batches[PKT_ARRAY_SIZE];
     struct dpcls_rule *rules[PKT_ARRAY_SIZE];
     struct dp_netdev *dp = pmd->dp;
     struct emc_cache *flow_cache = &pmd->flow_cache;
-    size_t n_batches, i;
+    int miss_cnt = 0, lost_cnt = 0;
     bool any_miss;
+    size_t i;
 
     for (i = 0; i < cnt; i++) {
         /* Key length is needed in all the cases, hash computed on demand. */
         keys[i].len = netdev_flow_key_size(count_1bits(keys[i].mf.map));
     }
-    any_miss = !dpcls_lookup(&dp->cls, keys, rules, cnt);
+    any_miss = !dpcls_lookup(&pmd->cls, keys, rules, cnt);
     if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
         uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
         struct ofpbuf actions, put_actions;
+        ovs_u128 ufid;
 
         ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
         ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);
@@ -2859,21 +3245,26 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
             /* It's possible that an earlier slow path execution installed
              * a rule covering this flow.  In this case, it's a lot cheaper
              * to catch it here than execute a miss. */
-            netdev_flow = dp_netdev_lookup_flow(dp, &keys[i]);
+            netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]);
             if (netdev_flow) {
                 rules[i] = &netdev_flow->cr;
                 continue;
             }
 
+            miss_cnt++;
+
             miniflow_expand(&keys[i].mf, &match.flow);
 
             ofpbuf_clear(&actions);
             ofpbuf_clear(&put_actions);
 
-            error = dp_netdev_upcall(dp, packets[i], &match.flow, &match.wc,
-                                     DPIF_UC_MISS, NULL, &actions,
+            dpif_flow_hash(dp->dpif, &match.flow, sizeof match.flow, &ufid);
+            error = dp_netdev_upcall(pmd, packets[i], &match.flow, &match.wc,
+                                     &ufid, DPIF_UC_MISS, NULL, &actions,
                                      &put_actions);
             if (OVS_UNLIKELY(error && error != ENOSPC)) {
+                dp_packet_delete(packets[i]);
+                lost_cnt++;
                 continue;
             }
 
@@ -2881,13 +3272,9 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
              * the actions.  Otherwise, if there are any slow path actions,
              * we'll send the packet up twice. */
             dp_netdev_execute_actions(pmd, &packets[i], 1, true,
-                                      ofpbuf_data(&actions),
-                                      ofpbuf_size(&actions));
-
-            add_actions = ofpbuf_size(&put_actions)
-                          ? &put_actions
-                          : &actions;
+                                      actions.data, actions.size);
 
+            add_actions = put_actions.size ? &put_actions : &actions;
             if (OVS_LIKELY(error != ENOSPC)) {
                 /* XXX: There's a race window where a flow covering this packet
                  * could have already been installed since we last did the flow
@@ -2895,14 +3282,14 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
                  * mutex lock outside the loop, but that's an awful long time
                  * to be locking everyone out of making flow installs.  If we
                  * move to a per-core classifier, it would be reasonable. */
-                ovs_mutex_lock(&dp->flow_mutex);
-                netdev_flow = dp_netdev_lookup_flow(dp, &keys[i]);
+                ovs_mutex_lock(&pmd->flow_mutex);
+                netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]);
                 if (OVS_LIKELY(!netdev_flow)) {
-                    netdev_flow = dp_netdev_flow_add(dp, &match,
-                                                     ofpbuf_data(add_actions),
-                                                     ofpbuf_size(add_actions));
+                    netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
+                                                     add_actions->data,
+                                                     add_actions->size);
                 }
-                ovs_mutex_unlock(&dp->flow_mutex);
+                ovs_mutex_unlock(&pmd->flow_mutex);
 
                 emc_insert(flow_cache, &keys[i], netdev_flow);
             }
@@ -2911,22 +3298,19 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
         ofpbuf_uninit(&actions);
         ofpbuf_uninit(&put_actions);
         fat_rwlock_unlock(&dp->upcall_rwlock);
+        dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
     } else if (OVS_UNLIKELY(any_miss)) {
-        int dropped_cnt = 0;
-
         for (i = 0; i < cnt; i++) {
             if (OVS_UNLIKELY(!rules[i])) {
-                dpif_packet_delete(packets[i]);
-                dropped_cnt++;
+                dp_packet_delete(packets[i]);
+                lost_cnt++;
+                miss_cnt++;
             }
         }
-
-        dp_netdev_count_packet(dp, DP_STAT_LOST, dropped_cnt);
     }
 
-    n_batches = 0;
     for (i = 0; i < cnt; i++) {
-        struct dpif_packet *packet = packets[i];
+        struct dp_packet *packet = packets[i];
         struct dp_netdev_flow *flow;
 
         if (OVS_UNLIKELY(!rules[i])) {
@@ -2936,31 +3320,41 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
         flow = dp_netdev_flow_cast(rules[i]);
 
         emc_insert(flow_cache, &keys[i], flow);
-        dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches,
-                                &n_batches, ARRAY_SIZE(batches));
+        dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
     }
 
-    for (i = 0; i < n_batches; i++) {
-        packet_batch_execute(&batches[i], pmd);
-    }
+    dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
+    dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
+    dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
 }
 
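When an upcall produces actions to install, the code above looks the flow up a second time after taking pmd->flow_mutex, because another slow-path execution may have installed the same flow since the lock-free lookup. That "check, lock, re-check, insert" shape, reduced to a standalone sketch (the single-slot "table" below is purely illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t install_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int *installed;               /* NULL until a "flow" is installed. */

    static int *
    install_if_missing(int value)
    {
        int *flow = installed;           /* Cheap, lock-free check. */

        if (!flow) {
            pthread_mutex_lock(&install_mutex);
            flow = installed;            /* Re-check: we may have raced. */
            if (!flow) {
                flow = malloc(sizeof *flow);   /* Error handling omitted. */
                *flow = value;
                installed = flow;
            }
            pthread_mutex_unlock(&install_mutex);
        }
        return flow;
    }

    int
    main(void)
    {
        printf("first caller installs %d\n", *install_if_missing(42));
        printf("second caller reuses %d\n", *install_if_missing(99));
        return 0;
    }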
 static void
 dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
-                struct dpif_packet **packets, int cnt)
+                struct dp_packet **packets, int cnt)
 {
 #if !defined(__CHECKER__) && !defined(_WIN32)
     const size_t PKT_ARRAY_SIZE = cnt;
 #else
     /* Sparse or MSVC doesn't like variable length array. */
-    enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
+    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
 #endif
     struct netdev_flow_key keys[PKT_ARRAY_SIZE];
-    size_t newcnt;
+    struct packet_batch batches[PKT_ARRAY_SIZE];
+    long long now = time_msec();
+    size_t newcnt, n_batches, i;
 
-    newcnt = emc_processing(pmd, packets, cnt, keys);
+    n_batches = 0;
+    newcnt = emc_processing(pmd, packets, cnt, keys, batches, &n_batches);
     if (OVS_UNLIKELY(newcnt)) {
-        fast_path_processing(pmd, packets, newcnt, keys);
+        fast_path_processing(pmd, packets, newcnt, keys, batches, &n_batches);
+    }
+
+    for (i = 0; i < n_batches; i++) {
+        batches[i].flow->batch = NULL;
+    }
+
+    for (i = 0; i < n_batches; i++) {
+        packet_batch_execute(&batches[i], pmd, now);
     }
 }
 
@@ -2978,13 +3372,13 @@ dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
 }
 
 static void
-dp_netdev_drop_packets(struct dpif_packet ** packets, int cnt, bool may_steal)
+dp_netdev_drop_packets(struct dp_packet **packets, int cnt, bool may_steal)
 {
     if (may_steal) {
         int i;
 
         for (i = 0; i < cnt; i++) {
-            dpif_packet_delete(packets[i]);
+            dp_packet_delete(packets[i]);
        }
    }
 }
@@ -2992,7 +3386,7 @@ dp_netdev_drop_packets(struct dpif_packet ** packets, int cnt, bool may_steal)
 static int
 push_tnl_action(const struct dp_netdev *dp,
                 const struct nlattr *attr,
-                struct dpif_packet **packets, int cnt)
+                struct dp_packet **packets, int cnt)
 {
     struct dp_netdev_port *tun_port;
     const struct ovs_action_push_tnl *data;
@@ -3009,25 +3403,25 @@ push_tnl_action(const struct dp_netdev *dp,
 }
 
 static void
-dp_netdev_clone_pkt_batch(struct dpif_packet **tnl_pkt,
-                          struct dpif_packet **packets, int cnt)
+dp_netdev_clone_pkt_batch(struct dp_packet **dst_pkts,
+                          struct dp_packet **src_pkts, int cnt)
 {
     int i;
 
     for (i = 0; i < cnt; i++) {
-        tnl_pkt[i] = dpif_packet_clone(packets[i]);
+        dst_pkts[i] = dp_packet_clone(src_pkts[i]);
     }
 }
 
 static void
-dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
+dp_execute_cb(void *aux_, struct dp_packet **packets, int cnt,
               const struct nlattr *a, bool may_steal)
     OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     struct dp_netdev_execute_aux *aux = aux_;
     uint32_t *depth = recirc_depth_get();
-    struct dp_netdev_pmd_thread *pmd= aux->pmd;
-    struct dp_netdev *dp= pmd->dp;
+    struct dp_netdev_pmd_thread *pmd = aux->pmd;
+    struct dp_netdev *dp = pmd->dp;
     int type = nl_attr_type(a);
     struct dp_netdev_port *p;
     int i;
@@ -3036,14 +3430,14 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
     case OVS_ACTION_ATTR_OUTPUT:
         p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
         if (OVS_LIKELY(p)) {
-            netdev_send(p->netdev, pmd->core_id, packets, cnt, may_steal);
+            netdev_send(p->netdev, pmd->tx_qid, packets, cnt, may_steal);
             return;
         }
         break;
 
     case OVS_ACTION_ATTR_TUNNEL_PUSH:
         if (*depth < MAX_RECIRC_DEPTH) {
-            struct dpif_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
+            struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
             int err;
 
             if (!may_steal) {
@@ -3069,7 +3463,7 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
 
             p = dp_netdev_lookup_port(dp, portno);
             if (p) {
-                struct dpif_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
+                struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
                 int err;
 
                 if (!may_steal) {
@@ -3100,6 +3494,7 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
             const struct nlattr *userdata;
             struct ofpbuf actions;
             struct flow flow;
+            ovs_u128 ufid;
 
             userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
             ofpbuf_init(&actions, 0);
@@ -3109,16 +3504,16 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
                 ofpbuf_clear(&actions);
 
-                flow_extract(&packets[i]->ofpbuf, &packets[i]->md, &flow);
-                error = dp_netdev_upcall(dp, packets[i], &flow, NULL,
-                                         DPIF_UC_ACTION, userdata, &actions,
+                flow_extract(packets[i], &flow);
+                dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
+                error = dp_netdev_upcall(pmd, packets[i], &flow, NULL, &ufid,
+                                         DPIF_UC_ACTION, userdata,&actions,
                                          NULL);
                 if (!error || error == ENOSPC) {
                     dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal,
-                                              ofpbuf_data(&actions),
-                                              ofpbuf_size(&actions));
+                                              actions.data, actions.size);
                 } else if (may_steal) {
-                    dpif_packet_delete(packets[i]);
+                    dp_packet_delete(packets[i]);
                 }
             }
             ofpbuf_uninit(&actions);
@@ -3130,21 +3525,19 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
     case OVS_ACTION_ATTR_RECIRC:
         if (*depth < MAX_RECIRC_DEPTH) {
+            struct dp_packet *recirc_pkts[NETDEV_MAX_BURST];
 
-            (*depth)++;
-            for (i = 0; i < cnt; i++) {
-                struct dpif_packet *recirc_pkt;
-
-                recirc_pkt = (may_steal) ? packets[i]
-                                         : dpif_packet_clone(packets[i]);
-
-                recirc_pkt->md.recirc_id = nl_attr_get_u32(a);
-
-                /* Hash is private to each packet */
-                recirc_pkt->md.dp_hash = dpif_packet_get_dp_hash(packets[i]);
+            if (!may_steal) {
+                dp_netdev_clone_pkt_batch(recirc_pkts, packets, cnt);
+                packets = recirc_pkts;
+            }
 
-                dp_netdev_input(pmd, &recirc_pkt, 1);
+            for (i = 0; i < cnt; i++) {
+                packets[i]->md.recirc_id = nl_attr_get_u32(a);
             }
+
+            (*depth)++;
+            dp_netdev_input(pmd, packets, cnt);
             (*depth)--;
 
             return;
@@ -3171,7 +3564,7 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
 
 static void
 dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
-                          struct dpif_packet **packets, int cnt,
+                          struct dp_packet **packets, int cnt,
                           bool may_steal,
                           const struct nlattr *actions, size_t actions_len)
 {
@@ -3183,6 +3576,7 @@ dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
 
 const struct dpif_class dpif_netdev_class = {
     "netdev",
+    dpif_netdev_init,
     dpif_netdev_enumerate,
     dpif_netdev_port_open_type,
     dpif_netdev_open,
@@ -3315,21 +3709,29 @@ dpif_dummy_register__(const char *type)
     dp_register_provider(class);
 }
 
+static void
+dpif_dummy_override(const char *type)
+{
+    if (!dp_unregister_provider(type)) {
+        dpif_dummy_register__(type);
+    }
+}
+
 void
-dpif_dummy_register(bool override)
+dpif_dummy_register(enum dummy_level level)
 {
-    if (override) {
+    if (level == DUMMY_OVERRIDE_ALL) {
         struct sset types;
         const char *type;
 
         sset_init(&types);
         dp_enumerate_types(&types);
         SSET_FOR_EACH (type, &types) {
-            if (!dp_unregister_provider(type)) {
-                dpif_dummy_register__(type);
-            }
+            dpif_dummy_override(type);
        }
         sset_destroy(&types);
+    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
+        dpif_dummy_override("system");
    }
 
     dpif_dummy_register__("dummy");
@@ -3457,12 +3859,12 @@ static inline bool
 dpcls_rule_matches_key(const struct dpcls_rule *rule,
                        const struct netdev_flow_key *target)
 {
-    const uint32_t *keyp = rule->flow.mf.inline_values;
-    const uint32_t *maskp = rule->mask->mf.inline_values;
-    uint32_t target_u32;
+    const uint64_t *keyp = rule->flow.mf.inline_values;
+    const uint64_t *maskp = rule->mask->mf.inline_values;
+    uint64_t target_u64;
 
-    NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u32, target, rule->flow.mf.map) {
-        if (OVS_UNLIKELY((target_u32 & *maskp++) != *keyp++)) {
+    NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u64, target, rule->flow.mf.map) {
+        if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
             return false;
         }
     }
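dpcls_rule_matches_key() now walks the flow key as 64-bit words and accepts a packet when every word matches the rule's key under the subtable mask. The core check, shown standalone on fixed arrays:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool
    masked_match(const uint64_t *pkt, const uint64_t *key,
                 const uint64_t *mask, size_t n)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            /* A wildcarded bit has mask 0, so it can never fail the test. */
            if ((pkt[i] & mask[i]) != key[i]) {
                return false;
            }
        }
        return true;
    }

    int
    main(void)
    {
        uint64_t pkt[]  = { 0x0a000001ffffffffULL, 0x1234ULL };
        uint64_t mask[] = { 0xffffffff00000000ULL, 0x0ULL };
        uint64_t key[]  = { 0x0a00000100000000ULL, 0x0ULL };

        printf("match: %s\n", masked_match(pkt, key, mask, 2) ? "yes" : "no");
        return 0;
    }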
@@ -3490,7 +3892,7 @@ dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[],
 #if !defined(__CHECKER__) && !defined(_WIN32)
     const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS);
 #else
-    enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_RX_BATCH, MAP_BITS) };
+    enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_BURST, MAP_BITS) };
 #endif
     map_type maps[N_MAPS];
     struct dpcls_subtable *subtable;
@@ -3520,14 +3922,14 @@ dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[],
         }
 
         /* Compute hashes for the remaining keys. */
-        ULONG_FOR_EACH_1(i, map) {
+        ULLONG_FOR_EACH_1(i, map) {
            hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i],
                                                     &subtable->mask);
         }
 
         /* Lookup. */
         map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
 
         /* Check results. */
-        ULONG_FOR_EACH_1(i, map) {
+        ULLONG_FOR_EACH_1(i, map) {
             struct dpcls_rule *rule;
 
             CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
@@ -3536,7 +3938,7 @@ dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[],
                     goto next;
                 }
             }
-            ULONG_SET0(map, i); /* Did not match. */
+            ULLONG_SET0(map, i); /* Did not match. */
         next:
             ; /* Keep Sparse happy. */
         }
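dpcls_lookup() tracks which packets of the burst still lack a match with a word-sized bit-map: ULLONG_FOR_EACH_1() visits each set bit and ULLONG_SET0() clears a bit once that packet is resolved. The usual way such an iterator is built, with a count-trailing-zeros builtin, standalone (assumes GCC/Clang for __builtin_ctzll):

    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Bits 0, 5 and 42 stand for packets still waiting for a match. */
        uint64_t map = (UINT64_C(1) << 0) | (UINT64_C(1) << 5)
                       | (UINT64_C(1) << 42);

        while (map) {
            int i = __builtin_ctzll(map);      /* Index of lowest set bit. */

            printf("packet %d needs another subtable lookup\n", i);
            map &= map - 1;                    /* Clear it, like ULLONG_SET0. */
        }
        return 0;
    }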