COVERAGE_DEFINE(dpif_execute_with_help);
static const struct dpif_class *base_dpif_classes[] = {
-#ifdef __linux__
- &dpif_linux_class,
+#if defined(__linux__) || defined(_WIN32)
+ &dpif_netlink_class,
#endif
&dpif_netdev_class,
};
* meaningful. */
static void
dpif_execute_helper_cb(void *aux_, struct dpif_packet **packets, int cnt,
- struct pkt_metadata *md,
const struct nlattr *action, bool may_steal OVS_UNUSED)
{
struct dpif_execute_helper_aux *aux = aux_;
int type = nl_attr_type(action);
- struct ofpbuf * packet = &packets[0]->ofpbuf;
+ struct ofpbuf *packet = &packets[0]->ofpbuf;
+ struct pkt_metadata *md = &packets[0]->md;
ovs_assert(cnt == 1);
execute.packet = packet;
execute.md = *md;
execute.needs_help = false;
+ execute.probe = false;
aux->error = dpif_execute(aux->dpif, &execute);
log_execute_message(aux->dpif, &execute, true, aux->error);
case OVS_ACTION_ATTR_PUSH_MPLS:
case OVS_ACTION_ATTR_POP_MPLS:
case OVS_ACTION_ATTR_SET:
+ case OVS_ACTION_ATTR_SET_MASKED:
case OVS_ACTION_ATTR_SAMPLE:
case OVS_ACTION_ATTR_UNSPEC:
case __OVS_ACTION_ATTR_MAX:
COVERAGE_INC(dpif_execute_with_help);
packet.ofpbuf = *execute->packet;
+ packet.md = execute->md;
pp = &packet;
- odp_execute_actions(&aux, &pp, 1, false, &execute->md, execute->actions,
+ odp_execute_actions(&aux, &pp, 1, false, execute->actions,
execute->actions_len, dpif_execute_helper_cb);
/* Even though may_steal is set to false, some actions could modify or
* reallocate the ofpbuf memory. We need to pass those changes to the
* caller */
*execute->packet = packet.ofpbuf;
+ execute->md = packet.md;
return aux.error;
}
}
void
-dpif_register_upcall_cb(struct dpif *dpif, exec_upcall_cb *cb)
+dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
{
if (dpif->dpif_class->register_upcall_cb) {
- dpif->dpif_class->register_upcall_cb(dpif, cb);
+ dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
}
}
}
}
+/* If 'dpif' creates its own I/O polling threads, refreshes poll threads
+ * configuration. */
+int
+dpif_poll_threads_set(struct dpif *dpif, unsigned int n_rxqs,
+ const char *cmask)
+{
+ int error = 0;
+
+ if (dpif->dpif_class->poll_threads_set) {
+ error = dpif->dpif_class->poll_threads_set(dpif, n_rxqs, cmask);
+ if (error) {
+ log_operation(dpif, "poll_threads_set", error);
+ }
+ }
+
+ return error;
+}
+
/* Polls for an upcall from 'dpif' for an upcall handler. Since there
 * can be multiple poll loops, 'handler_id' is needed as index to
* identify the corresponding poll loop. If successful, stores the upcall
log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
int error)
{
- if (should_log_flow_message(error)) {
+ if (should_log_flow_message(error) && !(put->flags & DPIF_FP_PROBE)) {
struct ds s;
ds_init(&s);
* called after the dpif_provider's '->execute' function, which is allowed to
* modify execute->packet and execute->md. In practice, though:
*
- * - dpif-linux doesn't modify execute->packet or execute->md.
+ * - dpif-netlink doesn't modify execute->packet or execute->md.
*
* - dpif-netdev does modify them but it is less likely to have problems
* because it is built into ovs-vswitchd and cannot have version skew,
log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
bool subexecute, int error)
{
- if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))) {
+ if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
+ && !execute->probe) {
struct ds ds = DS_EMPTY_INITIALIZER;
char *packet;