For performance-critical threads like pmd threads, we currently make them
never call coverage_clear() to avoid contention over the global mutex
'coverage_mutex'. So, even though pmd threads still keep updating their
thread-local coverage counts, those counts are never attributed to the
global totals. But it is useful to have them available.

This commit makes those counts available by implementing a non-contending
version of the clear function, coverage_try_clear(). The new function uses
ovs_mutex_trylock() and returns immediately if the mutex cannot be
acquired. Since threads like the pmd threads are always busy-looping, the
lock will eventually be acquired on a later pass. (A standalone sketch of
this pattern follows the sign-off block below.)

Requested-by: Ilya Maximets <i.maximets@samsung.com>
Signed-off-by: Alex Wang <alexw@nicira.com>
Acked-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Ben Pfaff <blp@nicira.com>
Acked-by: Daniele Di Proietto <diproiettod@vmware.com>
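
As an editorial aside (not part of the patch), here is a minimal,
self-contained sketch of the try-lock flush pattern the new function relies
on, written against plain pthreads rather than OVS's ovs-thread wrappers;
every name in it is hypothetical:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t stats_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long long global_total;
    static __thread unsigned long long local_count;

    /* Attempts to fold the thread-local count into the global total.
     * Returns false without blocking if the mutex is busy; a busy-looping
     * caller simply retries on a later iteration, so the flush happens
     * eventually. */
    static bool
    try_flush_local_count(void)
    {
        if (pthread_mutex_trylock(&stats_mutex)) {
            return false;   /* Trylock returned nonzero: mutex is busy. */
        }
        global_total += local_count;
        local_count = 0;
        pthread_mutex_unlock(&stats_mutex);
        return true;
    }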

--- a/lib/coverage.c
+++ b/lib/coverage.c
 /* Runs approximately every COVERAGE_CLEAR_INTERVAL amount of time to
  * synchronize per-thread counters with global counters. Every thread maintains
- * a separate timer to ensure all counters are periodically aggregated. */
-void
-coverage_clear(void)
+ * a separate timer to ensure all counters are periodically aggregated.
+ *
+ * Uses 'ovs_mutex_trylock()' if 'trylock' is true. This is to prevent
+ * multiple performance-critical threads contending over the
+ * 'coverage_mutex'. */
+static void
+coverage_clear__(bool trylock)
 {
     long long int now, *thread_time;

     now = time_msec();
     thread_time = coverage_clear_time_get();

     /* Initialize the coverage_clear_time. */
     if (*thread_time == LLONG_MIN) {
         *thread_time = now + COVERAGE_CLEAR_INTERVAL;
     }

     if (now >= *thread_time) {
         size_t i;

-        ovs_mutex_lock(&coverage_mutex);
+        if (trylock) {
+            /* Returns if the lock cannot be acquired. */
+            if (ovs_mutex_trylock(&coverage_mutex)) {
+                return;
+            }
+        } else {
+            ovs_mutex_lock(&coverage_mutex);
+        }
+
         for (i = 0; i < n_coverage_counters; i++) {
             struct coverage_counter *c = coverage_counters[i];
             c->total += c->count();
         }
         ovs_mutex_unlock(&coverage_mutex);
         *thread_time = now + COVERAGE_CLEAR_INTERVAL;
     }
 }

+void
+coverage_clear(void)
+{
+    coverage_clear__(false);
+}
+
+void
+coverage_try_clear(void)
+{
+    coverage_clear__(true);
+}
+
 /* Runs approximately every COVERAGE_RUN_INTERVAL amount of time to update the
  * coverage counters' 'min' and 'hr' array. 'min' array is for cumulating
  * per second counts into per minute count. 'hr' array is for cumulating per

--- a/lib/coverage.h
+++ b/lib/coverage.h
 void coverage_init(void);
 void coverage_log(void);
 void coverage_clear(void);
+void coverage_try_clear(void);
 void coverage_run(void);

 #endif /* coverage.h */
#include "fat-rwlock.h"
#include "flow.h"
#include "cmap.h"
#include "fat-rwlock.h"
#include "flow.h"
#include "cmap.h"
#include "latch.h"
#include "list.h"
#include "match.h"
#include "latch.h"
#include "list.h"
#include "match.h"

             lc = 0;

+            coverage_try_clear();
             emc_cache_slow_sweep(&pmd->flow_cache);
             ovsrcu_quiesce();

             atomic_read_relaxed(&pmd->change_seq, &seq);
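
To close with one more editorial illustration (not part of the patch): the
flush above is attempted only once per 1024 loop iterations, and a contended
attempt is simply dropped, so a pmd thread converges on flushing its counts
without ever blocking the hot path. A stripped-down version of the loop,
with hypothetical helpers:

    /* Stripped-down pmd-style poll loop (process_rx_batches() is a
     * hypothetical stand-in for the real packet-processing code):
     * heavyweight housekeeping runs once every 1024 iterations, and the
     * coverage flush degrades to a no-op whenever 'coverage_mutex' is
     * contended. */
    static void
    poll_loop(struct dp_netdev_pmd_thread *pmd)
    {
        unsigned int lc = 0;

        for (;;) {
            process_rx_batches(pmd);    /* Hot path; runs every pass. */

            if (lc++ > 1024) {
                lc = 0;
                coverage_try_clear();   /* Skipped if the mutex is busy. */
                ovsrcu_quiesce();       /* RCU grace-period bookkeeping. */
            }
        }
    }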