static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);
+static struct odp_support dp_netdev_support = {
+ .max_mpls_depth = SIZE_MAX,
+ .recirc = true,
+};
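/* Illustrative context, not from the patch itself: 'dp_netdev_support'
 * advertises what this userspace datapath can handle when flows are
 * serialized, and is consumed through struct odp_flow_key_parms later in
 * this file, e.g. an unbounded MPLS label stack and recirculation. */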
+
/* Stores a miniflow with inline values */
struct netdev_flow_key {
uint32_t hash; /* Hash function differs for different users. */
uint32_t len; /* Length of the following miniflow (incl. map). */
struct miniflow mf;
- uint64_t buf[FLOW_MAX_PACKET_U64S - MINI_N_INLINE];
+ uint64_t buf[FLOW_MAX_PACKET_U64S];
};
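/* A layout sanity sketch, not part of the patch: 'buf' is expected to
 * follow 'mf' with no padding, so a miniflow's packed values start
 * immediately after its two maps and 'len' can cover both at once. */
BUILD_ASSERT_DECL(offsetof(struct netdev_flow_key, buf)
                  == offsetof(struct netdev_flow_key, mf)
                     + sizeof(struct miniflow));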
/* Exact match cache for frequently used flows
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
- struct pkt_metadata md;
+ odp_port_t port_no;
struct netdev *netdev;
struct cmap_node node; /* Node in dp_netdev's 'ports'. */
struct netdev_saved_flags *sf;
/* threads on same numa node. */
unsigned core_id; /* CPU core id of this pmd thread. */
int numa_id; /* numa node id of this pmd thread. */
+    int tx_qid;                 /* Queue id used by this pmd thread to
+                                 * send packets on all netdevs. */
/* Only a pmd thread can write on its own 'cycles' and 'stats'.
* The main thread keeps 'stats_zero' and 'cycles_zero' as base
{
int i;
- BUILD_ASSERT(offsetof(struct miniflow, inline_values) == sizeof(uint64_t));
+ BUILD_ASSERT(sizeof(struct miniflow) == 2 * sizeof(uint64_t));
flow_cache->sweep_idx = 0;
for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
flow_cache->entries[i].flow = NULL;
flow_cache->entries[i].key.hash = 0;
- flow_cache->entries[i].key.len
- = offsetof(struct miniflow, inline_values);
- miniflow_initialize(&flow_cache->entries[i].key.mf,
- flow_cache->entries[i].key.buf);
+ flow_cache->entries[i].key.len = sizeof(struct miniflow);
+ flow_cache->entries[i].key.mf.tnl_map = 0;
+ flow_cache->entries[i].key.mf.pkt_map = 0;
}
}
flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
+/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
+bool
+dpif_is_netdev(const struct dpif *dpif)
+{
+ return dpif->dpif_class->open == dpif_netdev_open;
+}
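/* Illustrative usage, not from the patch: this predicate lets callers
 * special-case the userspace datapath without reaching into dpif
 * internals, e.g.:
 *
 *     if (dpif_is_netdev(dpif)) {
 *         ... netdev-datapath-only handling ...
 *     }
 */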
+
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
- ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
+ ovs_assert(dpif_is_netdev(dpif));
return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}
return ENOENT;
}
/* There can only be ovs_numa_get_n_cores() pmd threads,
- * so creates a txq for each. */
- error = netdev_set_multiq(netdev, n_cores, dp->n_dpdk_rxqs);
+ * so we create a txq for each, and one extra for the
+ * non-pmd threads. */
+ error = netdev_set_multiq(netdev, n_cores + 1, dp->n_dpdk_rxqs);
if (error && (error != EOPNOTSUPP)) {
VLOG_ERR("%s, cannot set multiq", devname);
        return error;
}
}
port = xzalloc(sizeof *port);
- port->md = PKT_METADATA_INITIALIZER(port_no);
+ port->port_no = port_no;
port->netdev = netdev;
port->rxq = xmalloc(sizeof *port->rxq * netdev_n_rxq(netdev));
port->type = xstrdup(type);
struct dp_netdev_port *port;
CMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
- if (port->md.in_port.odp_port == port_no) {
+ if (port->port_no == port_no) {
return port;
}
}
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
OVS_REQUIRES(dp->port_mutex)
{
- cmap_remove(&dp->ports, &port->node,
- hash_odp_port(port->md.in_port.odp_port));
+ cmap_remove(&dp->ports, &port->node, hash_odp_port(port->port_no));
seq_change(dp->port_seq);
if (netdev_is_pmd(port->netdev)) {
int numa_id = netdev_get_numa_id(port->netdev);
{
dpif_port->name = xstrdup(netdev_get_name(port->netdev));
dpif_port->type = xstrdup(port->type);
- dpif_port->port_no = port->md.in_port.odp_port;
+ dpif_port->port_no = port->port_no;
}
static int
struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
dpcls_remove(&pmd->cls, &flow->cr);
+ flow->cr.mask = NULL; /* Accessing rule's mask after this is not safe. */
+
cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid));
flow->dead = true;
state->name = xstrdup(netdev_get_name(port->netdev));
dpif_port->name = state->name;
dpif_port->type = port->type;
- dpif_port->port_no = port->md.in_port.odp_port;
+ dpif_port->port_no = port->port_no;
retval = 0;
} else {
* miniflow_extract(), if the map is different the miniflow is different.
* Therefore we can be faster by comparing the map and the miniflow in a
* single memcmp().
- * _ netdev_flow_key's miniflow has always inline values.
* - These functions can be inlined by the compiler.
*
* The following assertions make sure that what we're doing with miniflow is
- * safe
+ * safe.
*/
-BUILD_ASSERT_DECL(offsetof(struct miniflow, inline_values)
- == sizeof(uint64_t));
+BUILD_ASSERT_DECL(sizeof(struct miniflow) == 2 * sizeof(uint64_t));
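/* A minimal sketch of the single-memcmp comparison described above,
 * assuming the netdev_flow_key layout from this file; this helper name
 * is hypothetical and not part of the patch.  Because the maps sit
 * directly in front of the packed values, one memcmp over 'len' bytes
 * compares both at once. */
static inline bool
netdev_flow_key_equal_sketch(const struct netdev_flow_key *a,
                             const struct netdev_flow_key *b)
{
    /* 'b->len' may not be computed yet, so use 'a->len'. */
    return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len);
}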
-/* Given the number of bits set in the miniflow map, returns the size of the
+/* Given the number of bits set in miniflow's maps, returns the size of the
* 'netdev_flow_key.mf' */
-static inline uint32_t
-netdev_flow_key_size(uint32_t flow_u32s)
+static inline size_t
+netdev_flow_key_size(size_t flow_u64s)
{
- return offsetof(struct miniflow, inline_values) +
- MINIFLOW_VALUES_SIZE(flow_u32s);
+ return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s);
}
static inline bool
struct dp_packet packet;
uint64_t buf_stub[512 / 8];
- miniflow_initialize(&dst->mf, dst->buf);
-
dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub);
pkt_metadata_from_flow(&packet.md, src);
flow_compose(&packet, src);
miniflow_extract(&packet, &dst->mf);
dp_packet_uninit(&packet);
- dst->len = netdev_flow_key_size(count_1bits(dst->mf.map));
+ dst->len = netdev_flow_key_size(miniflow_n_values(&dst->mf));
dst->hash = 0; /* Not computed yet. */
}
const struct match *match)
{
const uint64_t *mask_u64 = (const uint64_t *) &match->wc.masks;
- uint64_t *dst = mask->mf.inline_values;
- uint64_t map, mask_map = 0;
+ uint64_t *dst = miniflow_values(&mask->mf);
+ struct miniflow maps;
+ uint64_t map;
uint32_t hash = 0;
int n;
/* Only check masks that make sense for the flow. */
- map = flow_wc_map(&match->flow);
+ flow_wc_map(&match->flow, &maps);
+ memset(&mask->mf, 0, sizeof mask->mf); /* Clear maps. */
+ map = maps.tnl_map;
while (map) {
uint64_t rm1bit = rightmost_1bit(map);
int i = raw_ctz(map);
if (mask_u64[i]) {
- mask_map |= rm1bit;
+ mask->mf.tnl_map |= rm1bit;
*dst++ = mask_u64[i];
hash = hash_add64(hash, mask_u64[i]);
}
map -= rm1bit;
}
+ mask_u64 += FLOW_TNL_U64S;
+ map = maps.pkt_map;
+ while (map) {
+ uint64_t rm1bit = rightmost_1bit(map);
+ int i = raw_ctz(map);
- mask->mf.values_inline = true;
- mask->mf.map = mask_map;
+ if (mask_u64[i]) {
+ mask->mf.pkt_map |= rm1bit;
+ *dst++ = mask_u64[i];
+ hash = hash_add64(hash, mask_u64[i]);
+ }
+ map -= rm1bit;
+ }
- hash = hash_add64(hash, mask_map);
+ hash = hash_add64(hash, mask->mf.tnl_map);
+ hash = hash_add64(hash, mask->mf.pkt_map);
- n = dst - mask->mf.inline_values;
+ n = dst - miniflow_get_values(&mask->mf);
mask->hash = hash_finish(hash, n * 8);
mask->len = netdev_flow_key_size(n);
}
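/* Illustrative helper, not part of the patch: with the split maps, a
 * struct flow viewed as a u64 array is addressed in two halves.  Bit i
 * of 'tnl_map' selects u64 i (the tunnel metadata); bit i of 'pkt_map'
 * selects u64 FLOW_TNL_U64S + i (the rest of the flow).  This is why
 * netdev_flow_mask_init() above advances 'mask_u64' by FLOW_TNL_U64S
 * between the two loops. */
static inline uint64_t
flow_u64_at(const struct flow *flow, bool tnl, int i)
{
    const uint64_t *u64s = (const uint64_t *) flow;

    return u64s[tnl ? i : FLOW_TNL_U64S + i];
}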
-/* Initializes 'dst' as a copy of 'src' masked with 'mask'. */
+/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
static inline void
netdev_flow_key_init_masked(struct netdev_flow_key *dst,
const struct flow *flow,
const struct netdev_flow_key *mask)
{
- uint64_t *dst_u64 = dst->mf.inline_values;
- const uint64_t *mask_u64 = mask->mf.inline_values;
+ uint64_t *dst_u64 = miniflow_values(&dst->mf);
+ const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
uint32_t hash = 0;
uint64_t value;
dst->len = mask->len;
- dst->mf.values_inline = true;
- dst->mf.map = mask->mf.map;
+ dst->mf = mask->mf; /* Copy maps. */
- FLOW_FOR_EACH_IN_MAP(value, flow, mask->mf.map) {
+ FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf) {
*dst_u64 = value & *mask_u64++;
hash = hash_add64(hash, *dst_u64++);
}
- dst->hash = hash_finish(hash, (dst_u64 - dst->mf.inline_values) * 8);
+ dst->hash = hash_finish(hash,
+ (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
}
-/* Iterate through all netdev_flow_key u64 values specified by 'MAP' */
-#define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \
- for (struct mf_for_each_in_map_aux aux__ \
- = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP }; \
- mf_get_next_in_map(&aux__, &(VALUE)); \
- )
+/* Iterate through netdev_flow_key TNL u64 values specified by 'MAPS'. */
+#define NETDEV_FLOW_KEY_FOR_EACH_IN_TNL_MAP(VALUE, KEY, MAPS) \
+ MINIFLOW_FOR_EACH_IN_TNL_MAP(VALUE, &(KEY)->mf, MAPS)
+
+/* Iterate through netdev_flow_key PKT u64 values specified by 'MAPS'. */
+#define NETDEV_FLOW_KEY_FOR_EACH_IN_PKT_MAP(VALUE, KEY, MAPS) \
+ MINIFLOW_FOR_EACH_IN_PKT_MAP(VALUE, &(KEY)->mf, MAPS)
/* Returns a hash value for the bits of 'key' where there are 1-bits in
* 'mask'. */
netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
const struct netdev_flow_key *mask)
{
- const uint64_t *p = mask->mf.inline_values;
+ const uint64_t *p = miniflow_get_values(&mask->mf);
uint32_t hash = 0;
uint64_t key_u64;
- NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u64, key, mask->mf.map) {
+ NETDEV_FLOW_KEY_FOR_EACH_IN_TNL_MAP(key_u64, key, mask->mf) {
+ hash = hash_add64(hash, key_u64 & *p++);
+ }
+ NETDEV_FLOW_KEY_FOR_EACH_IN_PKT_MAP(key_u64, key, mask->mf) {
hash = hash_add64(hash, key_u64 & *p++);
}
- return hash_finish(hash, (p - mask->mf.inline_values) * 8);
+ return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
}
static inline bool
if (ufidp) {
CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp),
&pmd->flow_table) {
- if (ovs_u128_equal(&netdev_flow->ufid, ufidp)) {
+ if (ovs_u128_equals(&netdev_flow->ufid, ufidp)) {
return netdev_flow;
}
}
struct flow_wildcards wc;
struct dp_netdev_actions *actions;
size_t offset;
+ struct odp_flow_key_parms odp_parms = {
+ .flow = &netdev_flow->flow,
+ .mask = &wc.masks,
+ .support = dp_netdev_support,
+ };
miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
/* Key */
offset = key_buf->size;
flow->key = ofpbuf_tail(key_buf);
- odp_flow_key_from_flow(key_buf, &netdev_flow->flow, &wc.masks,
- netdev_flow->flow.in_port.odp_port, true);
+ odp_parms.odp_in_port = netdev_flow->flow.in_port.odp_port;
+ odp_flow_key_from_flow(&odp_parms, key_buf);
flow->key_len = key_buf->size - offset;
/* Mask */
offset = mask_buf->size;
flow->mask = ofpbuf_tail(mask_buf);
- odp_flow_key_from_mask(mask_buf, &wc.masks, &netdev_flow->flow,
- odp_to_u32(wc.masks.in_port.odp_port),
- SIZE_MAX, true);
+ odp_parms.odp_in_port = wc.masks.in_port.odp_port;
+ odp_parms.key_buf = key_buf;
+ odp_flow_key_from_mask(&odp_parms, mask_buf);
flow->mask_len = mask_buf->size - offset;
/* Actions */
if (mask_key_len) {
enum odp_key_fitness fitness;
- fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow);
+ fitness = odp_flow_key_to_mask(mask_key, mask_key_len, key, key_len,
+ mask, flow);
if (fitness) {
/* This should not happen: it indicates that
* odp_flow_key_from_mask() and odp_flow_key_to_mask()
for (id = 0; id < MFF_N_IDS; ++id) {
/* Skip registers and metadata. */
if (!(id >= MFF_REG0 && id < MFF_REG0 + FLOW_N_REGS)
+ && !(id >= MFF_XREG0 && id < MFF_XREG0 + FLOW_N_XREGS)
&& id != MFF_METADATA) {
const struct mf_field *mf = mf_from_id(id);
if (mf_are_prereqs_ok(mf, flow)) {
* mask->in_port.ofp_port, which only covers half of the 32-bit datapath
* port number mask->in_port.odp_port. */
mask->in_port.odp_port = u32_to_odp(UINT32_MAX);
-
return 0;
}
netdev_flow_mask_init(&mask, match);
/* Make sure wc does not have metadata. */
- ovs_assert(!(mask.mf.map & (MINIFLOW_MAP(metadata) | MINIFLOW_MAP(regs))));
+ ovs_assert(!(mask.mf.pkt_map
+ & (MINIFLOW_PKT_MAP(metadata) | MINIFLOW_PKT_MAP(regs))));
/* Do not allocate extra space. */
flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
}
/* Sets the new rx queue config. */
- err = netdev_set_multiq(port->netdev, ovs_numa_get_n_cores(),
+ err = netdev_set_multiq(port->netdev,
+ ovs_numa_get_n_cores() + 1,
n_rxqs);
if (err && (err != EOPNOTSUPP)) {
VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
/* XXX: initialize md in netdev implementation. */
for (i = 0; i < cnt; i++) {
- packets[i]->md = port->md;
+ pkt_metadata_init(&packets[i]->md, port->port_no);
}
cycles_count_start(pmd);
dp_netdev_input(pmd, packets, cnt);
emc_cache_init(&pmd->flow_cache);
poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);
+    /* List port/core affinity. */
+    for (i = 0; i < poll_cnt; i++) {
+        VLOG_INFO("Core %d processing port '%s'", pmd->core_id,
+                  netdev_get_name(poll_list[i].port->netdev));
+    }
+
/* Signal here to make sure the pmd finishes
* reloading the updated configuration. */
dp_netdev_pmd_reload_done(pmd);
return next;
}
+static int
+core_id_to_qid(unsigned core_id)
+{
+ if (core_id != NON_PMD_CORE_ID) {
+ return core_id;
+ } else {
+ return ovs_numa_get_n_cores();
+ }
+}
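/* Illustrative example, not from the patch: on a host with 4 cores, pmd
 * threads pinned to cores 0..3 use tx queue ids 0..3 and the single
 * non-pmd thread uses tx queue id 4.  This matches the 'n_cores + 1'
 * queues requested from netdev_set_multiq() above, so each thread can
 * send on its own queue without contention. */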
+
/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
pmd->dp = dp;
pmd->index = index;
pmd->core_id = core_id;
+ pmd->tx_qid = core_id_to_qid(core_id);
pmd->numa_id = numa_id;
ovs_refcount_init(&pmd->ref_cnt);
* pmd threads for the numa node. */
if (!n_pmds) {
int can_have, n_unpinned, i;
+ struct dp_netdev_pmd_thread **pmds;
n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
if (!n_unpinned) {
/* If cpu mask is specified, uses all unpinned cores, otherwise
* tries creating NR_PMD_THREADS pmd threads. */
can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
+ pmds = xzalloc(can_have * sizeof *pmds);
for (i = 0; i < can_have; i++) {
- struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
-
- dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
+ pmds[i] = xzalloc(sizeof **pmds);
+ dp_netdev_configure_pmd(pmds[i], dp, i, core_id, numa_id);
+ }
+    /* The pmd thread code needs to see all the other configured pmd
+     * threads on the same numa node.  That's why we call
+     * 'dp_netdev_configure_pmd()' on all the threads and then we
+     * actually start them. */
+ for (i = 0; i < can_have; i++) {
        /* Each thread will distribute all devices' rx-queues among
         * themselves. */
- pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
+ pmds[i]->thread = ovs_thread_create("pmd", pmd_thread_main, pmds[i]);
}
+ free(pmds);
VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
}
}
struct ds ds = DS_EMPTY_INITIALIZER;
char *packet_str;
struct ofpbuf key;
+ struct odp_flow_key_parms odp_parms = {
+ .flow = flow,
+ .mask = &wc->masks,
+ .odp_in_port = flow->in_port.odp_port,
+ .support = dp_netdev_support,
+ };
ofpbuf_init(&key, 0);
- odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
- true);
+ odp_flow_key_from_flow(&odp_parms, &key);
packet_str = ofp_packet_to_string(dp_packet_data(packet_),
dp_packet_size(packet_));
struct netdev_flow_key key;
size_t i, notfound_cnt = 0;
- miniflow_initialize(&key.mf, key.buf);
for (i = 0; i < cnt; i++) {
struct dp_netdev_flow *flow;
continue;
}
+        if (i != cnt - 1) {
+            /* Prefetch next packet's data. */
+            OVS_PREFETCH(dp_packet_data(packets[i + 1]));
+        }
+
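/* Illustrative note, not part of the patch: the prefetch above pulls
 * packet 'i + 1' toward the cache while miniflow_extract() below is
 * still parsing packet 'i', hiding memory latency across iterations. */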
miniflow_extract(packets[i], &key.mf);
key.len = 0; /* Not computed yet. */
key.hash = dpif_netdev_packet_get_rss_hash(packets[i], &key.mf);
for (i = 0; i < cnt; i++) {
/* Key length is needed in all the cases, hash computed on demand. */
- keys[i].len = netdev_flow_key_size(count_1bits(keys[i].mf.map));
+ keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
}
any_miss = !dpcls_lookup(&pmd->cls, keys, rules, cnt);
if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
}
static void
-dp_netdev_drop_packets(struct dp_packet ** packets, int cnt, bool may_steal)
+dp_netdev_drop_packets(struct dp_packet **packets, int cnt, bool may_steal)
{
if (may_steal) {
int i;
case OVS_ACTION_ATTR_OUTPUT:
p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
if (OVS_LIKELY(p)) {
- netdev_send(p->netdev, pmd->core_id, packets, cnt, may_steal);
+ netdev_send(p->netdev, pmd->tx_qid, packets, cnt, may_steal);
return;
}
break;
}
/* Remove old port. */
- cmap_remove(&dp->ports, &old_port->node, hash_port_no(old_port->md.in_port.odp_port));
+ cmap_remove(&dp->ports, &old_port->node, hash_port_no(old_port->port_no));
ovsrcu_postpone(free, old_port);
/* Insert new port (cmap semantics mean we cannot re-insert 'old_port'). */
new_port = xmemdup(old_port, sizeof *old_port);
- new_port->md.in_port.odp_port = port_no;
+ new_port->port_no = port_no;
cmap_insert(&dp->ports, &new_port->node, hash_port_no(port_no));
seq_change(dp->port_seq);
ovs_mutex_lock(&dp->port_mutex);
if (get_port_by_name(dp, argv[2], &port)) {
unixctl_command_reply_error(conn, "unknown port");
- } else if (port->md.in_port.odp_port == ODPP_LOCAL) {
+ } else if (port->port_no == ODPP_LOCAL) {
unixctl_command_reply_error(conn, "can't delete local port");
} else {
do_del_port(dp, port);
dp_register_provider(class);
}
+static void
+dpif_dummy_override(const char *type)
+{
+ if (!dp_unregister_provider(type)) {
+ dpif_dummy_register__(type);
+ }
+}
+
void
-dpif_dummy_register(bool override)
+dpif_dummy_register(enum dummy_level level)
{
- if (override) {
+ if (level == DUMMY_OVERRIDE_ALL) {
struct sset types;
const char *type;
sset_init(&types);
dp_enumerate_types(&types);
SSET_FOR_EACH (type, &types) {
- if (!dp_unregister_provider(type)) {
- dpif_dummy_register__(type);
- }
+ dpif_dummy_override(type);
}
sset_destroy(&types);
+ } else if (level == DUMMY_OVERRIDE_SYSTEM) {
+ dpif_dummy_override("system");
}
dpif_dummy_register__("dummy");
struct dpcls_subtable *subtable;
CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
+ ovs_assert(cmap_count(&subtable->rules) == 0);
dpcls_destroy_subtable(cls, subtable);
}
cmap_destroy(&cls->subtables_map);
}
}
-/* Returns true if 'target' satisifies 'key' in 'mask', that is, if each 1-bit
- * in 'mask' the values in 'key' and 'target' are the same.
- *
- * Note: 'key' and 'mask' have the same mask, and 'key' is already masked. */
+/* Returns true if 'target' satisfies 'key' in 'mask', that is, if for each
+ * 1-bit in 'mask' the values in 'key' and 'target' are the same. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
const struct netdev_flow_key *target)
{
- const uint64_t *keyp = rule->flow.mf.inline_values;
- const uint64_t *maskp = rule->mask->mf.inline_values;
+ const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
+ const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
uint64_t target_u64;
- NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u64, target, rule->flow.mf.map) {
+ NETDEV_FLOW_KEY_FOR_EACH_IN_TNL_MAP(target_u64, target, rule->flow.mf) {
+ if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
+ return false;
+ }
+ }
+ NETDEV_FLOW_KEY_FOR_EACH_IN_PKT_MAP(target_u64, target, rule->flow.mf) {
if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
return false;
}
}
/* Compute hashes for the remaining keys. */
- ULONG_FOR_EACH_1(i, map) {
+ ULLONG_FOR_EACH_1(i, map) {
hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i],
&subtable->mask);
}
/* Lookup. */
map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
/* Check results. */
- ULONG_FOR_EACH_1(i, map) {
+ ULLONG_FOR_EACH_1(i, map) {
struct dpcls_rule *rule;
CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
goto next;
}
}
- ULONG_SET0(map, i); /* Did not match. */
+ ULLONG_SET0(map, i); /* Did not match. */
next:
; /* Keep Sparse happy. */
}
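/* Illustrative note, not part of the patch: 'map' carries one bit per
 * packet in the batch.  cmap_find_batch() returns the subset of bits
 * whose hashes found at least one candidate node, and ULLONG_SET0()
 * clears bits whose candidates fail dpcls_rule_matches_key(), so the
 * bits still set afterwards identify packets this subtable matched. */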