#include "flow.h"
#include "cmap.h"
#include "coverage.h"
+#include "hmapx.h"
#include "latch.h"
#include "list.h"
#include "match.h"
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp);
static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id);
+static void dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd);
+static void dp_netdev_del_port_from_pmd(struct dp_netdev_port *port,
+ struct dp_netdev_pmd_thread *pmd);
+static void dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
+ struct dp_netdev_port *port);
+static void
+dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port);
static void
dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_port *port, struct netdev_rxq *rx);
cmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
if (netdev_is_pmd(netdev)) {
- int numa_id = netdev_get_numa_id(netdev);
- struct dp_netdev_pmd_thread *pmd;
-
- /* Cannot create pmd threads for invalid numa node. */
- ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
-
- for (i = 0; i < netdev_n_rxq(netdev); i++) {
- pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id);
- if (!pmd) {
- /* There is no pmd threads on this numa node. */
- dp_netdev_set_pmds_on_numa(dp, numa_id);
- /* Assigning of rx queues done. */
- break;
- }
-
- ovs_mutex_lock(&pmd->poll_mutex);
- dp_netdev_add_rxq_to_pmd(pmd, port, port->rxq[i]);
- ovs_mutex_unlock(&pmd->poll_mutex);
- dp_netdev_reload_pmd__(pmd);
- }
+ dp_netdev_add_port_to_pmds(dp, port);
}
seq_change(dp->port_seq);
if (!has_pmd_port_for_numa(dp, numa_id)) {
dp_netdev_del_pmds_on_numa(dp, numa_id);
} else {
- struct dp_netdev_pmd_thread *pmd;
- struct rxq_poll *poll, *next;
-
- CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
- if (pmd->numa_id == numa_id) {
- bool found = false;
-
- ovs_mutex_lock(&pmd->poll_mutex);
- LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) {
- if (poll->port == port) {
- found = true;
- port_unref(poll->port);
- list_remove(&poll->node);
- pmd->poll_cnt--;
- free(poll);
- }
- }
- ovs_mutex_unlock(&pmd->poll_mutex);
- if (found) {
- dp_netdev_reload_pmd__(pmd);
- }
- }
- }
+ dp_netdev_del_port_from_all_pmds(dp, port);
}
}
static void
dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
{
- struct rxq_poll *poll;
-
/* Uninit the 'flow_cache' since there is
* no actual thread uninit it for NON_PMD_CORE_ID. */
if (pmd->core_id == NON_PMD_CORE_ID) {
}
/* Unref all ports and free poll_list. */
- LIST_FOR_EACH_POP (poll, node, &pmd->poll_list) {
- port_unref(poll->port);
- free(poll);
- }
+ dp_netdev_pmd_clear_poll_list(pmd);
/* Purges the 'pmd''s flows after stopping the thread, but before
* destroying the flows, so that the flow stats can be collected. */
free(free_idx);
}
+/* Deletes all rx queues from pmd->poll_list, dropping the port reference
+ * taken for each entry, and resets poll_cnt to zero.
+ * Takes and releases 'pmd->poll_mutex'; does not reload the pmd thread. */
+static void
+dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd)
+{
+ struct rxq_poll *poll;
+
+ ovs_mutex_lock(&pmd->poll_mutex);
+ /* Pop every entry: unref the polled port, then free the node itself. */
+ LIST_FOR_EACH_POP (poll, node, &pmd->poll_list) {
+ port_unref(poll->port);
+ free(poll);
+ }
+ pmd->poll_cnt = 0;
+ ovs_mutex_unlock(&pmd->poll_mutex);
+}
+
+/* Deletes all rx queues of 'port' from poll_list of pmd thread and
+ * reloads it if poll_list was changed.
+ * Takes and releases 'pmd->poll_mutex'.  A port may contribute several
+ * poll_list entries (one per rx queue), so the whole list is scanned. */
+static void
+dp_netdev_del_port_from_pmd(struct dp_netdev_port *port,
+ struct dp_netdev_pmd_thread *pmd)
+{
+ struct rxq_poll *poll, *next;
+ bool found = false;
+
+ ovs_mutex_lock(&pmd->poll_mutex);
+ /* SAFE variant is required: entries are removed while iterating. */
+ LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) {
+ if (poll->port == port) {
+ found = true;
+ port_unref(poll->port);
+ list_remove(&poll->node);
+ pmd->poll_cnt--;
+ free(poll);
+ }
+ }
+ ovs_mutex_unlock(&pmd->poll_mutex);
+ /* Reload only if something was actually removed, to avoid an
+ * unnecessary pmd thread restart. */
+ if (found) {
+ dp_netdev_reload_pmd__(pmd);
+ }
+}
+
+/* Deletes all rx queues of 'port' from all pmd threads of dp and
+ * reloads them if needed.
+ * Only pmd threads on the port's own NUMA node are examined, since rx
+ * queues are only ever assigned to pmds on that node. */
+static void
+dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
+ struct dp_netdev_port *port)
+{
+ int numa_id = netdev_get_numa_id(port->netdev);
+ struct dp_netdev_pmd_thread *pmd;
+
+ CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+ if (pmd->numa_id == numa_id) {
+ dp_netdev_del_port_from_pmd(port, pmd);
+ }
+ }
+}
+
/* Returns PMD thread from this numa node with fewer rx queues to poll.
* Returns NULL if there is no PMD threads on this numa node.
* Can be called safely only by main thread. */
pmd->poll_cnt++;
}
+/* Distributes all rx queues of 'port' between all PMD threads on the
+ * port's NUMA node and reloads them if needed.
+ * Each affected pmd is collected in a set so it is reloaded only once,
+ * even if it receives several of the port's rx queues. */
+static void
+dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port)
+{
+ int numa_id = netdev_get_numa_id(port->netdev);
+ struct dp_netdev_pmd_thread *pmd;
+ struct hmapx to_reload;
+ struct hmapx_node *node;
+ int i;
+
+ hmapx_init(&to_reload);
+ /* Cannot create pmd threads for invalid numa node. */
+ ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
+
+ /* Assign each rx queue to the currently least loaded pmd. */
+ for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id);
+ if (!pmd) {
+ /* There are no pmd threads on this numa node yet; creating
+ * them also assigns the rx queues, so we are done. */
+ dp_netdev_set_pmds_on_numa(dp, numa_id);
+ /* Assigning of rx queues done. */
+ break;
+ }
+
+ ovs_mutex_lock(&pmd->poll_mutex);
+ dp_netdev_add_rxq_to_pmd(pmd, port, port->rxq[i]);
+ ovs_mutex_unlock(&pmd->poll_mutex);
+
+ /* Defer the reload: the same pmd may appear for several queues. */
+ hmapx_add(&to_reload, pmd);
+ }
+
+ /* Reload each affected pmd exactly once. */
+ HMAPX_FOR_EACH (node, &to_reload) {
+ pmd = (struct dp_netdev_pmd_thread *) node->data;
+ dp_netdev_reload_pmd__(pmd);
+ }
+
+ hmapx_destroy(&to_reload);
+}
+
/* Checks the numa node id of 'netdev' and starts pmd threads for
* the numa node. */
static void