#include "compiler.h"
#include "fatal-signal.h"
#include "hash.h"
-#include "list.h"
+#include "openvswitch/list.h"
#include "netdev-dpdk.h"
#include "ovs-rcu.h"
#include "poll-loop.h"
xpthread_rwlockattr_destroy(&attr);
}
+/* Provides an error-checking wrapper around pthread_cond_wait().
+ *
+ * If the wait can take a significant amount of time, consider bracketing this
+ * call with calls to ovsrcu_quiesce_start() and ovsrcu_quiesce_end(). */
void
ovs_mutex_cond_wait(pthread_cond_t *cond, const struct ovs_mutex *mutex_)
{
struct ovs_mutex *mutex = CONST_CAST(struct ovs_mutex *, mutex_);
int error;
- ovsrcu_quiesce_start();
error = pthread_cond_wait(cond, &mutex->lock);
- ovsrcu_quiesce_end();
if (OVS_UNLIKELY(error)) {
ovs_abort(error, "pthread_cond_wait failed");
pthread_t
ovs_thread_create(const char *name, void *(*start)(void *), void *arg)
{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
struct ovsthread_aux *aux;
pthread_t thread;
int error;
forbid_forking("multiple threads exist");
- multithreaded = true;
- ovsrcu_quiesce_end();
+ if (ovsthread_once_start(&once)) {
+ /* The first call to this function has to happen in the main thread.
+ * Before the process becomes multithreaded we make sure that the
+ * main thread is considered non-quiescent.
+ *
+ * For other threads this is done in ovs_thread_wrapper(), but the
+ * main thread has no such wrapper.
+ *
+ * There's no reason to call ovsrcu_quiesce_end() in subsequent
+ * invocations of this function, and it might introduce problems
+ * for other threads. */
+ ovsrcu_quiesce_end();
+ ovsthread_once_done(&once);
+ }
+
+ multithreaded = true;
aux = xmalloc(sizeof *aux);
aux->start = start;
aux->arg = arg;
}
\f
-/* Parses /proc/cpuinfo for the total number of physical cores on this system
- * across all CPU packages, not counting hyper-threads.
- *
- * Sets *n_cores to the total number of cores on this system, or 0 if the
+/* Returns the total number of cores available to this process, or 0 if the
* number cannot be determined. */
-static void
-parse_cpuinfo(long int *n_cores)
-{
- static const char file_name[] = "/proc/cpuinfo";
- char line[128];
- uint64_t cpu = 0; /* Support up to 64 CPU packages on a single system. */
- long int cores = 0;
- FILE *stream;
-
- stream = fopen(file_name, "r");
- if (!stream) {
- VLOG_DBG("%s: open failed (%s)", file_name, ovs_strerror(errno));
- return;
- }
-
- while (fgets(line, sizeof line, stream)) {
- unsigned int id;
-
- /* Find the next CPU package. */
- if (ovs_scan(line, "physical id%*[^:]: %u", &id)) {
- if (id > 63) {
- VLOG_WARN("Counted over 64 CPU packages on this system. "
- "Parsing %s for core count may be inaccurate.",
- file_name);
- cores = 0;
- break;
- }
-
- if (cpu & (1ULL << id)) {
- /* We've already counted this package's cores. */
- continue;
- }
- cpu |= 1ULL << id;
-
- /* Find the number of cores for this package. */
- while (fgets(line, sizeof line, stream)) {
- int count;
-
- if (ovs_scan(line, "cpu cores%*[^:]: %u", &count)) {
- cores += count;
- break;
- }
- }
- }
- }
- fclose(stream);
-
- *n_cores = cores;
-}
-
-/* Returns the total number of cores on this system, or 0 if the number cannot
- * be determined.
- *
- * Tries not to count hyper-threads, but may be inaccurate - particularly on
- * platforms that do not provide /proc/cpuinfo, but also if /proc/cpuinfo is
- * formatted different to the layout that parse_cpuinfo() expects. */
int
count_cpu_cores(void)
{
if (ovsthread_once_start(&once)) {
#ifndef _WIN32
- parse_cpuinfo(&n_cores);
- if (!n_cores) {
- n_cores = sysconf(_SC_NPROCESSORS_ONLN);
+ n_cores = sysconf(_SC_NPROCESSORS_ONLN);
+#ifdef __linux__
+ if (n_cores > 0) {
+ cpu_set_t *set = CPU_ALLOC(n_cores);
+
+ if (set) {
+ size_t size = CPU_ALLOC_SIZE(n_cores);
+
+ if (!sched_getaffinity(0, size, set)) {
+ n_cores = CPU_COUNT_S(size, set);
+ }
+ CPU_FREE(set);
+ }
}
+#endif
#else
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
int i;
ovs_mutex_lock(&key_mutex);
- list_remove(&slots->list_node);
+ ovs_list_remove(&slots->list_node);
LIST_FOR_EACH (key, list_node, &inuse_keys) {
void *value = clear_slot(slots, key->index);
if (value && key->destructor) {
}
ovs_mutex_lock(&key_mutex);
- if (list_is_empty(&free_keys)) {
+ if (ovs_list_is_empty(&free_keys)) {
key = xmalloc(sizeof *key);
key->index = n_keys++;
if (key->index >= MAX_KEYS) {
abort();
}
} else {
- key = CONTAINER_OF(list_pop_back(&free_keys),
+ key = CONTAINER_OF(ovs_list_pop_back(&free_keys),
struct ovsthread_key, list_node);
}
- list_push_back(&inuse_keys, &key->list_node);
+ ovs_list_push_back(&inuse_keys, &key->list_node);
key->destructor = destructor;
ovs_mutex_unlock(&key_mutex);
ovs_mutex_lock(&key_mutex);
/* Move 'key' from 'inuse_keys' to 'free_keys'. */
- list_remove(&key->list_node);
- list_push_back(&free_keys, &key->list_node);
+ ovs_list_remove(&key->list_node);
+ ovs_list_push_back(&free_keys, &key->list_node);
/* Clear this slot in all threads. */
LIST_FOR_EACH (slots, list_node, &slots_list) {
ovs_mutex_lock(&key_mutex);
pthread_setspecific(tsd_key, slots);
- list_push_back(&slots_list, &slots->list_node);
+ ovs_list_push_back(&slots_list, &slots->list_node);
ovs_mutex_unlock(&key_mutex);
}