/* This sequence number should be incremented whenever anything involving flows
* or the wildcarding of flows changes. This will cause build assertion
* failures in places which likely need to be updated. */
-#define FLOW_WC_SEQ 26
+#define FLOW_WC_SEQ 27
+/* Number of Open vSwitch extension 32-bit registers. */
#define FLOW_N_REGS 8
BUILD_ASSERT_DECL(FLOW_N_REGS <= NXM_NX_MAX_REGS);
+/* Number of OpenFlow 1.5+ 64-bit registers.
+ *
+ * Each of these overlays a pair of Open vSwitch 32-bit registers, so there
+ * are half as many of them. */
+#define FLOW_N_XREGS (FLOW_N_REGS / 2)
+
/* Used for struct flow's dl_type member for frames that have no Ethernet
* type, that is, pure 802.2 frames. */
#define FLOW_DL_TYPE_NONE 0x5ff
#define FLOW_TNL_F_DONT_FRAGMENT (1 << 0)
#define FLOW_TNL_F_CSUM (1 << 1)
#define FLOW_TNL_F_KEY (1 << 2)
+#define FLOW_TNL_F_OAM (1 << 3)
const char *flow_tun_flag_to_string(uint32_t flags);
/* L4 */
ovs_be16 tp_src; /* TCP/UDP/SCTP source port. */
- ovs_be16 tp_dst; /* TCP/UDP/SCTP destination port.
- * Keep last for the BUILD_ASSERT_DECL below */
+ ovs_be16 tp_dst; /* TCP/UDP/SCTP destination port. */
+ ovs_be32 igmp_group_ip4; /* IGMP group IPv4 address */
uint32_t dp_hash; /* Datapath computed hash value. The exact
- computation is opaque to the user space.*/
+ * computation is opaque to the user space.
+ * Keep last for BUILD_ASSERT_DECL below. */
};
BUILD_ASSERT_DECL(sizeof(struct flow) % 4 == 0);
/* Remember to update FLOW_WC_SEQ when changing 'struct flow'. */
BUILD_ASSERT_DECL(offsetof(struct flow, dp_hash) + sizeof(uint32_t)
- == sizeof(struct flow_tnl) + 172
- && FLOW_WC_SEQ == 26);
+ == sizeof(struct flow_tnl) + 176
+ && FLOW_WC_SEQ == 27);
/* Incremental points at which flow classification may be performed in
* segments.
void flow_compose(struct ofpbuf *, const struct flow *);
+/* Returns the value of 64-bit "extended register" 'idx', assembled from the
+ * pair of 32-bit registers that overlay it: regs[idx * 2] supplies the high
+ * 32 bits, regs[idx * 2 + 1] the low 32 bits. */
+static inline uint64_t
+flow_get_xreg(const struct flow *flow, int idx)
+{
+    return ((uint64_t) flow->regs[idx * 2] << 32) | flow->regs[idx * 2 + 1];
+}
+
+/* Stores 64-bit 'value' into "extended register" 'idx', splitting it across
+ * the overlaid pair of 32-bit registers: the high half goes into
+ * regs[idx * 2], the (truncated) low half into regs[idx * 2 + 1]. */
+static inline void
+flow_set_xreg(struct flow *flow, int idx, uint64_t value)
+{
+    flow->regs[idx * 2] = value >> 32;
+    flow->regs[idx * 2 + 1] = value;
+}
+
static inline int
flow_compare_3way(const struct flow *a, const struct flow *b)
{
{
return hash_int(odp_to_u32(odp_port), 0);
}
-
-uint32_t flow_hash_in_minimask(const struct flow *, const struct minimask *,
- uint32_t basis);
-uint32_t flow_hash_in_minimask_range(const struct flow *,
- const struct minimask *,
- uint8_t start, uint8_t end,
- uint32_t *basis);
\f
/* Wildcards for a flow.
*
void flow_wildcards_set_reg_mask(struct flow_wildcards *,
int idx, uint32_t mask);
+void flow_wildcards_set_xreg_mask(struct flow_wildcards *,
+ int idx, uint64_t mask);
void flow_wildcards_and(struct flow_wildcards *dst,
const struct flow_wildcards *src1,
const struct flow_wildcards *src2);
bool flow_wildcards_has_extra(const struct flow_wildcards *,
const struct flow_wildcards *);
-
-void flow_wildcards_fold_minimask(struct flow_wildcards *,
- const struct minimask *);
-void flow_wildcards_fold_minimask_range(struct flow_wildcards *,
- const struct minimask *,
- uint8_t start, uint8_t end);
-
uint32_t flow_wildcards_hash(const struct flow_wildcards *, uint32_t basis);
bool flow_wildcards_equal(const struct flow_wildcards *,
const struct flow_wildcards *);
/* Compressed flow. */
#define MINI_N_INLINE (sizeof(void *) == 4 ? 7 : 8)
-BUILD_ASSERT_DECL(FLOW_U32S <= 64);
+BUILD_ASSERT_DECL(FLOW_U32S <= 63);
/* A sparse representation of a "struct flow".
*
*
* The 'map' member holds one bit for each uint32_t in a "struct flow". Each
* 0-bit indicates that the corresponding uint32_t is zero, each 1-bit that it
- * *may* be nonzero.
- *
- * 'values' points to the start of an array that has one element for each 1-bit
- * in 'map'. The least-numbered 1-bit is in values[0], the next 1-bit is in
- * values[1], and so on.
- *
- * 'values' may point to a few different locations:
- *
- * - If 'map' has MINI_N_INLINE or fewer 1-bits, it may point to
- * 'inline_values'. One hopes that this is the common case.
+ * *may* be nonzero (see below how this applies to minimasks).
*
- * - If 'map' has more than MINI_N_INLINE 1-bits, it may point to memory
- * allocated with malloc().
+ * The 'values_inline' boolean member indicates that the values are at
+ * 'inline_values'. If 'values_inline' is zero, then the values are
+ * stored separately, pointed to by 'offline_values'. In either case, the
+ * values form an array that has one element for each 1-bit in 'map'. The
+ * least-numbered 1-bit is in the first element of the values array, the
+ * next 1-bit is in the next array element, and so on.
*
- * - The caller could provide storage on the stack for situations where
- * that makes sense. So far that's only proved useful for
- * minimask_combine(), but the principle works elsewhere.
- *
- * Elements in 'values' are allowed to be zero. This is useful for "struct
+ * Elements in the values array are allowed to be zero. This is useful for "struct
* minimatch", for which ensuring that the miniflow and minimask members have
* same 'map' allows optimization. This allowance applies only to a miniflow
* that is not a mask. That is, a minimask may NOT have zero elements in
* its 'values'.
*/
struct miniflow {
-    uint64_t map;
-    uint32_t *values;
-    uint32_t inline_values[MINI_N_INLINE];
+    uint64_t map:63;           /* One bit per uint32_t in "struct flow". */
+    uint64_t values_inline:1;  /* Nonzero iff values live in 'inline_values'. */
+    union {
+        uint32_t *offline_values;              /* Storage outside the struct. */
+        uint32_t inline_values[MINI_N_INLINE]; /* Small, common case. */
+    };
};
+#define MINIFLOW_VALUES_SIZE(COUNT) ((COUNT) * sizeof(uint32_t))
+
+/* Returns a writable pointer to 'mf''s values array, whether it is stored
+ * inline or offline. */
+static inline uint32_t *miniflow_values(struct miniflow *mf)
+{
+    return OVS_LIKELY(mf->values_inline)
+        ? mf->inline_values : mf->offline_values;
+}
+
+/* Returns a read-only pointer to 'mf''s values array, whether it is stored
+ * inline or offline. */
+static inline const uint32_t *miniflow_get_values(const struct miniflow *mf)
+{
+    return OVS_LIKELY(mf->values_inline)
+        ? mf->inline_values : mf->offline_values;
+}
+
+/* Same as miniflow_get_values(); named for symmetry with the ovs_be32
+ * accessor variant. */
+static inline const uint32_t *miniflow_get_u32_values(const struct miniflow *mf)
+{
+    return miniflow_get_values(mf);
+}
+
+/* Returns 'mf''s values array reinterpreted as big-endian 32-bit values
+ * (OVS_FORCE suppresses sparse's endianness warning for the cast). */
+static inline const ovs_be32 *miniflow_get_be32_values(const struct miniflow *mf)
+{
+    return (OVS_FORCE const ovs_be32 *)miniflow_get_values(mf);
+}
+
/* This is useful for initializing a miniflow for a miniflow_extract() call. */
static inline void miniflow_initialize(struct miniflow *mf,
                                       uint32_t buf[FLOW_U32S])
{
    mf->map = 0;
-    mf->values = buf;
+    /* If 'buf' is the storage immediately following 'mf' in memory, the
+     * values count as inline; otherwise remember the external buffer. */
+    mf->values_inline = (buf == (uint32_t *)(mf + 1));
+    if (!mf->values_inline) {
+        mf->offline_values = buf;
+    }
}
struct pkt_metadata;
void miniflow_init_with_minimask(struct miniflow *, const struct flow *,
const struct minimask *);
void miniflow_clone(struct miniflow *, const struct miniflow *);
+void miniflow_clone_inline(struct miniflow *, const struct miniflow *,
+ size_t n_values);
void miniflow_move(struct miniflow *dst, struct miniflow *);
void miniflow_destroy(struct miniflow *);
void miniflow_expand(const struct miniflow *, struct flow *);
+/* Returns the uint32_t at u32 offset 'index' within 'flow', viewing the flow
+ * as a flat array of 32-bit words (sizeof(struct flow) is asserted to be a
+ * multiple of 4 elsewhere in this header). */
+static inline uint32_t flow_u32_value(const struct flow *flow, size_t index)
+{
+    return ((uint32_t *)(flow))[index];
+}
+
+/* Returns a writable pointer to the uint32_t at u32 offset 'index' within
+ * 'flow'. */
+static inline uint32_t *flow_u32_lvalue(struct flow *flow, size_t index)
+{
+    return &((uint32_t *)(flow))[index];
+}
+
+/* If 'map' is nonzero, stores into '*value' the flow u32 indicated by 'map''s
+ * rightmost 1-bit and returns true; otherwise returns false.  Loop helper for
+ * FLOW_FOR_EACH_IN_MAP. */
+static inline bool
+flow_get_next_in_map(const struct flow *flow, uint64_t map, uint32_t *value)
+{
+    if (map) {
+        *value = flow_u32_value(flow, raw_ctz(map));
+        return true;
+    }
+    return false;
+}
+
+/* Iterate through all flow u32 values specified by 'MAP'.  'MAP' is evaluated
+ * once, into the loop-local 'map__' (requires C99 for-declarations). */
+#define FLOW_FOR_EACH_IN_MAP(VALUE, FLOW, MAP)         \
+    for (uint64_t map__ = (MAP);                       \
+         flow_get_next_in_map(FLOW, map__, &(VALUE)); \
+         map__ = zero_rightmost_1bit(map__))
+
+/* Iterate through all struct flow u32 indices specified by 'MAP'.  NOTE(review):
+ * termination relies on ctz64(0) >= FLOW_U32S -- confirm ctz64's zero-input
+ * behavior. */
+#define MAP_FOR_EACH_INDEX(U32IDX, MAP)         \
+    for (uint64_t map__ = (MAP);                \
+         ((U32IDX) = ctz64(map__)) < FLOW_U32S; \
+         map__ = zero_rightmost_1bit(map__))
+
+
#define FLOW_U32_SIZE(FIELD) \
DIV_ROUND_UP(sizeof(((struct flow *)0)->FIELD), sizeof(uint32_t))
return rm1bit != 0;
}
-/* Iterate through all miniflow u32 values specified by the 'MAP'.
+/* Iterate through all miniflow u32 values specified by 'MAP'.
 * This works as the first statement in a block. */
#define MINIFLOW_FOR_EACH_IN_MAP(VALUE, FLOW, MAP) \
- const uint32_t *fp_ = (FLOW)->values; \
+ const uint32_t *fp_ = miniflow_get_u32_values(FLOW); \
uint64_t rm1bit_, fmap_, map_; \
for (fmap_ = (FLOW)->map, map_ = (MAP), rm1bit_ = rightmost_1bit(map_); \
mf_get_next_in_map(&fmap_, rm1bit_, &fp_, &(VALUE)); \
#define MINIFLOW_GET_TYPE(MF, TYPE, OFS) \
(((MF)->map & (UINT64_C(1) << (OFS) / 4)) \
? ((OVS_FORCE const TYPE *) \
- ((MF)->values \
+ (miniflow_get_u32_values(MF) \
+ count_1bits((MF)->map & ((UINT64_C(1) << (OFS) / 4) - 1)))) \
[(OFS) % 4 / sizeof(TYPE)] \
: 0) \
bool miniflow_equal_flow_in_minimask(const struct miniflow *a,
const struct flow *b,
const struct minimask *);
-uint32_t miniflow_hash(const struct miniflow *, uint32_t basis);
-uint32_t miniflow_hash_in_minimask(const struct miniflow *,
- const struct minimask *, uint32_t basis);
-uint64_t miniflow_get_map_in_range(const struct miniflow *miniflow,
- uint8_t start, uint8_t end,
- unsigned int *offset);
uint32_t miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis);
\f
static inline ovs_be64 minimask_get_metadata_mask(const struct minimask *);
bool minimask_equal(const struct minimask *a, const struct minimask *b);
-uint32_t minimask_hash(const struct minimask *, uint32_t basis);
-
bool minimask_has_extra(const struct minimask *, const struct minimask *);
-bool minimask_is_catchall(const struct minimask *);
+
\f
+/* Returns true if 'mask' matches every packet, false if 'mask' fixes any bits
+ * or fields. */
+static inline bool
+minimask_is_catchall(const struct minimask *mask)
+{
+    /* For every 1-bit in mask's map, the corresponding value is non-zero,
+     * so the only way the mask can not fix any bits or fields is for the
+     * map to be zero. */
+    return mask->masks.map == 0;
+}
/* Returns the VID within the vlan_tci member of the "struct flow" represented
* by 'flow'. */
return miniflow_get_metadata(&mask->masks);
}
+/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
+ * fields in 'dst', storing the result in 'dst'. */
+static inline void
+flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
+{
+    uint32_t *dst_u32 = (uint32_t *) dst;
+    const uint32_t *p = miniflow_get_u32_values(src);
+    uint64_t map;
+
+    /* Each 1-bit in 'map' names a u32 within 'dst'; the corresponding packed
+     * value from 'src' is OR'd into it. */
+    for (map = src->map; map; map = zero_rightmost_1bit(map)) {
+        dst_u32[raw_ctz(map)] |= *p++;
+    }
+}
+
static inline struct pkt_metadata
pkt_metadata_from_flow(const struct flow *flow)
{