From: Jesse Gross
Date: Mon, 30 Jun 2014 20:43:25 +0000 (-0700)
Subject: datapath: Change u64_stats_* to use _irq instead of _bh().
X-Git-Tag: v2.3~45
X-Git-Url: http://git.cascardo.eti.br/?p=cascardo%2Fovs.git;a=commitdiff_plain;h=6b0618950b27a0a125e4b09611206f6d97bb9ce6

datapath: Change u64_stats_* to use _irq instead of _bh().

The upstream u64_stats API has been changed to remove the _bh()
versions and switch all consumers to use the IRQ-safe variants
instead. This was done to be safe for netpoll-generated packets,
which can occur in hard IRQ context. From a safety perspective,
this doesn't directly affect OVS since it doesn't support netpoll.
However, this change has been backported to older kernels, so OVS
needs to use the new API in order to compile.

Signed-off-by: Jesse Gross
Acked-by: Pritesh Kothari
Acked-by: Pravin B Shelar
---

diff --git a/acinclude.m4 b/acinclude.m4
index cd0434690..3d6de2884 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -324,6 +324,8 @@ AC_DEFUN([OVS_CHECK_LINUX_COMPAT], [
   OVS_GREP_IFELSE([$KSRC/include/linux/percpu.h], [this_cpu_ptr])
 
+  OVS_GREP_IFELSE([$KSRC/include/linux/u64_stats_sync.h], [u64_stats_fetch_begin_irq])
+
   OVS_GREP_IFELSE([$KSRC/include/linux/openvswitch.h], [openvswitch_handle_frame_hook],
                   [OVS_DEFINE([HAVE_RHEL_OVS_HOOK])])

diff --git a/datapath/datapath.c b/datapath/datapath.c
index a4d6473e6..ad445bbcb 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -672,9 +672,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
 		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+			start = u64_stats_fetch_begin_irq(&percpu_stats->sync);
 			local_stats = *percpu_stats;
-		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&percpu_stats->sync, start));
 
 		stats->n_hit += local_stats.n_hit;
 		stats->n_missed += local_stats.n_missed;

diff --git a/datapath/linux/compat/include/linux/u64_stats_sync.h b/datapath/linux/compat/include/linux/u64_stats_sync.h
index 234fd9156..9342f73d0 100644
--- a/datapath/linux/compat/include/linux/u64_stats_sync.h
+++ b/datapath/linux/compat/include/linux/u64_stats_sync.h
@@ -3,7 +3,8 @@
 
 #include <linux/version.h>
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+#if defined(HAVE_U64_STATS_FETCH_BEGIN_IRQ) && \
+    LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)
 #include_next <linux/u64_stats_sync.h>
 #else
 
@@ -33,8 +34,8 @@
  * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
  * read partial values)
  *
- * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
- *    u64_stats_fetch_retry_bh() helpers
+ * 7) For irq or softirq uses, readers can use u64_stats_fetch_begin_irq() and
+ *    u64_stats_fetch_retry_irq() helpers
  *
  * Usage :
 
@@ -73,6 +74,12 @@ struct u64_stats_sync {
 #endif
 };
 
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+# define u64_stats_init(syncp) seqcount_init(syncp.seq)
+#else
+# define u64_stats_init(syncp) do { } while (0)
+#endif
+
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 
@@ -113,46 +120,36 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 }
 
 /*
- * In case softirq handlers can update u64 counters, readers can use following helpers
+ * In case irq handlers can update u64 counters, readers can use following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable BH.
+ * - UP 32bit must disable irqs.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
 #else
 #if BITS_PER_LONG==32
-	local_bh_disable();
+	local_irq_disable();
 #endif
 	return 0;
 #endif
 }
 
-static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
 #else
 #if BITS_PER_LONG==32
-	local_bh_enable();
+	local_irq_enable();
 #endif
 	return false;
 #endif
 }
 
-#endif /* Linux kernel < 2.6.36 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
-
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-# define u64_stats_init(syncp) seqcount_init(syncp.seq)
-#else
-# define u64_stats_init(syncp) do { } while (0)
-#endif
-
-#endif
+#endif /* !HAVE_U64_STATS_FETCH_BEGIN_IRQ || kernel < 3.13 */
 
 #endif /* _LINUX_U64_STATS_SYNC_WRAPPER_H */

diff --git a/datapath/vport.c b/datapath/vport.c
index 0dcecd048..ce46f5b74 100644
--- a/datapath/vport.c
+++ b/datapath/vport.c
@@ -309,9 +309,9 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
+			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
 			local_stats = *percpu_stats;
-		} while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
 
 		stats->rx_bytes += local_stats.rx_bytes;
 		stats->rx_packets += local_stats.rx_packets;
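
For reference, the reader pattern this patch converts is sketched below. This is an
illustrative, minimal sketch only, not part of the patch: the struct and function names
(my_pcpu_stats, my_get_stats) are hypothetical, but the u64_stats_fetch_begin_irq() /
u64_stats_fetch_retry_irq() calls are the IRQ-safe helpers the datapath now uses in
get_dp_stats() and ovs_vport_get_stats().

	/* Illustrative sketch; names other than the u64_stats_* API are hypothetical. */
	#include <linux/types.h>
	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	struct my_pcpu_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	static void my_get_stats(struct my_pcpu_stats __percpu *pcpu,
				 u64 *packets, u64 *bytes)
	{
		int i;

		*packets = 0;
		*bytes = 0;

		for_each_possible_cpu(i) {
			const struct my_pcpu_stats *stats = per_cpu_ptr(pcpu, i);
			u64 tpackets, tbytes;
			unsigned int start;

			do {
				/* IRQ-safe begin/retry pair: re-read the snapshot
				 * until the writer-side sequence count is stable,
				 * which is safe even if the writer can run from
				 * hard IRQ context (e.g. netpoll).
				 */
				start = u64_stats_fetch_begin_irq(&stats->syncp);
				tpackets = stats->packets;
				tbytes = stats->bytes;
			} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

			*packets += tpackets;
			*bytes += tbytes;
		}
	}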