/*
- * Copyright (c) 2012, 2013, 2014 Nicira, Inc.
+ * Copyright (c) 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "collectors.h"
#include "flow.h"
#include "hash.h"
-#include "hmap.h"
-#include "list.h"
-#include "ofpbuf.h"
+#include "openvswitch/hmap.h"
+#include "netdev.h"
+#include "openvswitch/list.h"
+#include "openvswitch/ofpbuf.h"
#include "ofproto.h"
#include "ofproto-dpif.h"
+#include "dp-packet.h"
#include "packets.h"
#include "poll-loop.h"
#include "sset.h"
#include "util.h"
#include "timeval.h"
-#include "util.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(ipfix);
/* Cf. IETF RFC 5101 Section 10.3.4. */
#define IPFIX_DEFAULT_COLLECTOR_PORT 4739
+/* Cf. IETF RFC 5881 Section 8. */
+#define BFD_CONTROL_DEST_PORT 3784
+#define BFD_ECHO_DEST_PORT 3785
+
+/* Classification of a sampled packet by L3 protocol and parse outcome,
+ * used to update the per-type IPFIX statistics counters. */
+enum ipfix_sampled_packet_type {
+ IPFIX_SAMPLED_PKT_UNKNOWN = 0x00,
+ IPFIX_SAMPLED_PKT_IPV4_OK = 0x01,
+ IPFIX_SAMPLED_PKT_IPV6_OK = 0x02,
+ IPFIX_SAMPLED_PKT_IPV4_ERROR = 0x03,
+ IPFIX_SAMPLED_PKT_IPV6_ERROR = 0x04,
+ IPFIX_SAMPLED_PKT_OTHERS = 0x05
+};
+
/* The standard layer2SegmentId (ID 351) element is included in vDS to send
* the VxLAN tunnel's VNI. It is 64-bit long, the most significant byte is
* used to indicate the type of tunnel (0x01 = VxLAN, 0x02 = GRE) and the three
* least significant bytes hold the value of the layer 2 overlay network
* segment identifier: a 24-bit VxLAN tunnel's VNI or a 24-bit GRE tunnel's
- * TNI. This is not compatible with GRE-64, as implemented in OVS, as its
- * tunnel IDs are 64-bit.
+ * TNI. This is not compatible with STT, as implemented in OVS, as
+ * its tunnel IDs are 64-bit.
*
* Two new enterprise information elements are defined which are similar to
* laryerSegmentId but support 64-bit IDs:
DPIF_IPFIX_TUNNEL_VXLAN = 0x01,
DPIF_IPFIX_TUNNEL_GRE = 0x02,
DPIF_IPFIX_TUNNEL_LISP = 0x03,
+ DPIF_IPFIX_TUNNEL_STT = 0x04,
DPIF_IPFIX_TUNNEL_IPSEC_GRE = 0x05,
DPIF_IPFIX_TUNNEL_GENEVE = 0x07,
NUM_DPIF_IPFIX_TUNNEL
};
+typedef struct ofputil_ipfix_stats ofproto_ipfix_stats;
+
struct dpif_ipfix_port {
struct hmap_node hmap_node; /* In struct dpif_ipfix's "tunnel_ports" hmap. */
struct ofport *ofport; /* To retrieve port stats. */
struct ovs_list cache_flow_start_timestamp_list; /* ipfix_flow_cache_entry. */
uint32_t cache_active_timeout; /* In seconds. */
uint32_t cache_max_flows;
+ char *virtual_obs_id;
+ uint8_t virtual_obs_len;
+
+ ofproto_ipfix_stats stats;
};
struct dpif_ipfix_bridge_exporter {
struct ipfix_data_record_flow_key_common {
ovs_be32 observation_point_id; /* OBSERVATION_POINT_ID */
uint8_t flow_direction; /* FLOW_DIRECTION */
- uint8_t source_mac_address[ETH_ADDR_LEN]; /* SOURCE_MAC_ADDRESS */
- uint8_t destination_mac_address[ETH_ADDR_LEN]; /* DESTINATION_MAC_ADDRESS */
+ struct eth_addr source_mac_address; /* SOURCE_MAC_ADDRESS */
+ struct eth_addr destination_mac_address; /* DESTINATION_MAC_ADDRESS */
ovs_be16 ethernet_type; /* ETHERNET_TYPE */
uint8_t ethernet_header_length; /* ETHERNET_HEADER_LENGTH */
});
IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_VXLAN */
IPPROTO_GRE, /* DPIF_IPFIX_TUNNEL_GRE */
IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_LISP*/
- 0 , /* reserved */
+ IPPROTO_TCP, /* DPIF_IPFIX_TUNNEL_STT*/
IPPROTO_GRE, /* DPIF_IPFIX_TUNNEL_IPSEC_GRE */
0 , /* reserved */
IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_GENEVE*/
});
BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_ip) == 32);
+/*
+ * Refer to RFC 7011, the length of Variable length element is 0~65535:
+ * In most cases, it should be less than 255 octets:
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Length (< 255)| Information Element |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ... continuing as needed |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * When it is greater than or equal to 255 octets:
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | 255 | Length (0 to 65535) | IE |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ... continuing as needed |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *
+ * Now, only the virtual_obs_id whose length < 255 is implemented.
+ */
+
+#define IPFIX_VIRTUAL_OBS_MAX_LEN 254
+
/*
* support tunnel key for:
 * VxLAN: 24-bit VNI,
- * GRE: 32- or 64-bit key,
+ * GRE: 32-bit key,
* LISP: 24-bit instance ID
+ * STT: 64-bit key
*/
#define MAX_TUNNEL_KEY_LEN 8
static void dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter *, bool);
+/* Returns true if 'a' and 'b' are both NULL or are equal strings. */
+static bool
+nullable_string_is_equal(const char *a, const char *b)
+{
+ return a ? b && !strcmp(a, b) : !b;
+}
+
static bool
ofproto_ipfix_bridge_exporter_options_equal(
const struct ofproto_ipfix_bridge_exporter_options *a,
&& a->enable_tunnel_sampling == b->enable_tunnel_sampling
&& a->enable_input_sampling == b->enable_input_sampling
&& a->enable_output_sampling == b->enable_output_sampling
- && sset_equals(&a->targets, &b->targets));
+ && sset_equals(&a->targets, &b->targets)
+ && nullable_string_is_equal(a->virtual_obs_id, b->virtual_obs_id));
}
static struct ofproto_ipfix_bridge_exporter_options *
struct ofproto_ipfix_bridge_exporter_options *new =
xmemdup(old, sizeof *old);
sset_clone(&new->targets, &old->targets);
+ new->virtual_obs_id = nullable_xstrdup(old->virtual_obs_id);
return new;
}
{
if (options) {
sset_destroy(&options->targets);
+ free(options->virtual_obs_id);
free(options);
}
}
return (a->collector_set_id == b->collector_set_id
&& a->cache_active_timeout == b->cache_active_timeout
&& a->cache_max_flows == b->cache_max_flows
- && sset_equals(&a->targets, &b->targets));
+ && a->enable_tunnel_sampling == b->enable_tunnel_sampling
+ && sset_equals(&a->targets, &b->targets)
+ && nullable_string_is_equal(a->virtual_obs_id, b->virtual_obs_id));
}
static struct ofproto_ipfix_flow_exporter_options *
struct ofproto_ipfix_flow_exporter_options *new =
xmemdup(old, sizeof *old);
sset_clone(&new->targets, &old->targets);
+ new->virtual_obs_id = nullable_xstrdup(old->virtual_obs_id);
return new;
}
{
if (options) {
sset_destroy(&options->targets);
+ free(options->virtual_obs_id);
free(options);
}
}
{
exporter->collectors = NULL;
exporter->seq_number = 1;
- exporter->last_template_set_time = TIME_MIN;
+ exporter->last_template_set_time = 0;
hmap_init(&exporter->cache_flow_key_map);
- list_init(&exporter->cache_flow_start_timestamp_list);
+ ovs_list_init(&exporter->cache_flow_start_timestamp_list);
exporter->cache_active_timeout = 0;
exporter->cache_max_flows = 0;
+ exporter->virtual_obs_id = NULL;
+ exporter->virtual_obs_len = 0;
}
static void
collectors_destroy(exporter->collectors);
exporter->collectors = NULL;
exporter->seq_number = 1;
- exporter->last_template_set_time = TIME_MIN;
+ exporter->last_template_set_time = 0;
exporter->cache_active_timeout = 0;
exporter->cache_max_flows = 0;
+ free(exporter->virtual_obs_id);
+ exporter->virtual_obs_id = NULL;
+ exporter->virtual_obs_len = 0;
}
static void
dpif_ipfix_exporter_set_options(struct dpif_ipfix_exporter *exporter,
const struct sset *targets,
const uint32_t cache_active_timeout,
- const uint32_t cache_max_flows)
+ const uint32_t cache_max_flows,
+ const char *virtual_obs_id)
{
+ size_t virtual_obs_len;
collectors_destroy(exporter->collectors);
collectors_create(targets, IPFIX_DEFAULT_COLLECTOR_PORT,
&exporter->collectors);
}
exporter->cache_active_timeout = cache_active_timeout;
exporter->cache_max_flows = cache_max_flows;
+ virtual_obs_len = virtual_obs_id ? strlen(virtual_obs_id) : 0;
+ if (virtual_obs_len > IPFIX_VIRTUAL_OBS_MAX_LEN) {
+ VLOG_WARN_RL(&rl, "Virtual obsevation ID too long (%d bytes), "
+ "should not be longer than %d bytes.",
+ exporter->virtual_obs_len, IPFIX_VIRTUAL_OBS_MAX_LEN);
+ dpif_ipfix_exporter_clear(exporter);
+ return false;
+ }
+ exporter->virtual_obs_len = virtual_obs_len;
+ exporter->virtual_obs_id = nullable_xstrdup(virtual_obs_id);
return true;
}
/* 32-bit key gre */
dip->tunnel_type = DPIF_IPFIX_TUNNEL_GRE;
dip->tunnel_key_length = 4;
- } else if (strcmp(type, "gre64") == 0) {
- /* 64-bit key gre */
- dip->tunnel_type = DPIF_IPFIX_TUNNEL_GRE;
- dip->tunnel_key_length = 8;
} else if (strcmp(type, "ipsec_gre") == 0) {
/* 32-bit key ipsec_gre */
dip->tunnel_type = DPIF_IPFIX_TUNNEL_IPSEC_GRE;
dip->tunnel_key_length = 4;
- } else if (strcmp(type, "ipsec_gre64") == 0) {
- /* 64-bit key ipsec_gre */
- dip->tunnel_type = DPIF_IPFIX_TUNNEL_IPSEC_GRE;
- dip->tunnel_key_length = 8;
} else if (strcmp(type, "vxlan") == 0) {
dip->tunnel_type = DPIF_IPFIX_TUNNEL_VXLAN;
dip->tunnel_key_length = 3;
} else if (strcmp(type, "geneve") == 0) {
dip->tunnel_type = DPIF_IPFIX_TUNNEL_GENEVE;
dip->tunnel_key_length = 3;
+ } else if (strcmp(type, "stt") == 0) {
+ dip->tunnel_type = DPIF_IPFIX_TUNNEL_STT;
+ dip->tunnel_key_length = 8;
} else {
free(dip);
goto out;
< sset_count(&options->targets)) {
if (!dpif_ipfix_exporter_set_options(
&exporter->exporter, &options->targets,
- options->cache_active_timeout, options->cache_max_flows)) {
+ options->cache_active_timeout, options->cache_max_flows,
+ options->virtual_obs_id)) {
return;
}
}
< sset_count(&options->targets)) {
if (!dpif_ipfix_exporter_set_options(
&exporter->exporter, &options->targets,
- options->cache_active_timeout, options->cache_max_flows)) {
+ options->cache_active_timeout, options->cache_max_flows,
+ options->virtual_obs_id)) {
return false;
}
}
dpif_ipfix_get_bridge_exporter_input_sampling(const struct dpif_ipfix *di)
OVS_EXCLUDED(mutex)
{
- bool ret = true;
+ bool ret = false;
ovs_mutex_lock(&mutex);
if (di->bridge_exporter.options) {
ret = di->bridge_exporter.options->enable_input_sampling;
dpif_ipfix_get_bridge_exporter_output_sampling(const struct dpif_ipfix *di)
OVS_EXCLUDED(mutex)
{
- bool ret = true;
+ bool ret = false;
ovs_mutex_lock(&mutex);
if (di->bridge_exporter.options) {
ret = di->bridge_exporter.options->enable_output_sampling;
return ret;
}
+/* Returns true if the flow exporter identified by 'collector_set_id' in
+ * 'di' exists and has tunnel sampling enabled in its options, otherwise
+ * false. */
+bool
+dpif_ipfix_get_flow_exporter_tunnel_sampling(const struct dpif_ipfix *di,
+ const uint32_t collector_set_id)
+ OVS_EXCLUDED(mutex)
+{
+ ovs_mutex_lock(&mutex);
+ struct dpif_ipfix_flow_exporter_map_node *node
+ = dpif_ipfix_find_flow_exporter_map_node(di, collector_set_id);
+ bool ret = (node
+ && node->exporter.options
+ && node->exporter.options->enable_tunnel_sampling);
+ ovs_mutex_unlock(&mutex);
+
+ return ret;
+}
+
static void
dpif_ipfix_clear(struct dpif_ipfix *di) OVS_REQUIRES(mutex)
{
- struct dpif_ipfix_flow_exporter_map_node *exp_node, *exp_next;
+ struct dpif_ipfix_flow_exporter_map_node *exp_node;
struct dpif_ipfix_port *dip, *next;
dpif_ipfix_bridge_exporter_clear(&di->bridge_exporter);
- HMAP_FOR_EACH_SAFE (exp_node, exp_next, node, &di->flow_exporter_map) {
- hmap_remove(&di->flow_exporter_map, &exp_node->node);
+ HMAP_FOR_EACH_POP (exp_node, node, &di->flow_exporter_map) {
dpif_ipfix_flow_exporter_destroy(&exp_node->exporter);
free(exp_node);
}
static void
ipfix_init_header(uint32_t export_time_sec, uint32_t seq_number,
- uint32_t obs_domain_id, struct ofpbuf *msg)
+ uint32_t obs_domain_id, struct dp_packet *msg)
{
struct ipfix_header *hdr;
- hdr = ofpbuf_put_zeros(msg, sizeof *hdr);
+ hdr = dp_packet_put_zeros(msg, sizeof *hdr);
hdr->version = htons(IPFIX_VERSION);
hdr->length = htons(sizeof *hdr); /* Updated in ipfix_send_msg. */
hdr->export_time = htonl(export_time_sec);
hdr->obs_domain_id = htonl(obs_domain_id);
}
-static void
-ipfix_send_msg(const struct collectors *collectors, struct ofpbuf *msg)
+static size_t
+ipfix_send_msg(const struct collectors *collectors, struct dp_packet *msg)
{
struct ipfix_header *hdr;
+ size_t tx_errors;
/* Adjust the length in the header. */
- hdr = ofpbuf_data(msg);
- hdr->length = htons(ofpbuf_size(msg));
+ hdr = dp_packet_data(msg);
+ hdr->length = htons(dp_packet_size(msg));
- collectors_send(collectors, ofpbuf_data(msg), ofpbuf_size(msg));
- ofpbuf_set_size(msg, 0);
+ tx_errors = collectors_send(collectors,
+ dp_packet_data(msg), dp_packet_size(msg));
+ dp_packet_set_size(msg, 0);
+
+ return tx_errors;
}
static uint16_t
ipfix_define_template_entity(enum ipfix_entity_id id,
enum ipfix_entity_size size,
enum ipfix_entity_enterprise enterprise,
- struct ofpbuf *msg)
+ struct dp_packet *msg)
{
struct ipfix_template_field_specifier *field;
size_t field_size;
/* No enterprise number */
field_size = sizeof *field - sizeof(ovs_be32);
}
- field = ofpbuf_put_zeros(msg, field_size);
+ field = dp_packet_put_zeros(msg, field_size);
field->element_id = htons(id);
if (size) {
field->field_length = htons(size);
static uint16_t
ipfix_define_template_fields(enum ipfix_proto_l2 l2, enum ipfix_proto_l3 l3,
enum ipfix_proto_l4 l4, enum ipfix_proto_tunnel tunnel,
- struct ofpbuf *msg)
+ bool virtual_obs_id_set,
+ struct dp_packet *msg)
{
uint16_t count = 0;
DEF(TUNNEL_KEY);
}
- /* 2. Flow aggregated data. */
+ /* 2. Virtual observation ID, which is not a part of flow key. */
+ if (virtual_obs_id_set) {
+ DEF(VIRTUAL_OBS_ID);
+ }
+
+ /* 3. Flow aggregated data. */
DEF(FLOW_START_DELTA_MICROSECONDS);
DEF(FLOW_END_DELTA_MICROSECONDS);
DEF(MINIMUM_IP_TOTAL_LENGTH);
DEF(MAXIMUM_IP_TOTAL_LENGTH);
}
-
-
#undef DEF
return count;
static void
ipfix_init_template_msg(void *msg_stub, uint32_t export_time_sec,
uint32_t seq_number, uint32_t obs_domain_id,
- struct ofpbuf *msg, size_t *set_hdr_offset)
+ struct dp_packet *msg, size_t *set_hdr_offset)
{
struct ipfix_set_header *set_hdr;
- ofpbuf_use_stub(msg, msg_stub, sizeof msg_stub);
+ dp_packet_use_stub(msg, msg_stub, sizeof msg_stub);
ipfix_init_header(export_time_sec, seq_number, obs_domain_id, msg);
- *set_hdr_offset = ofpbuf_size(msg);
+ *set_hdr_offset = dp_packet_size(msg);
/* Add a Template Set. */
- set_hdr = ofpbuf_put_zeros(msg, sizeof *set_hdr);
+ set_hdr = dp_packet_put_zeros(msg, sizeof *set_hdr);
set_hdr->set_id = htons(IPFIX_SET_ID_TEMPLATE);
}
-static void
+static size_t
ipfix_send_template_msg(const struct collectors *collectors,
- struct ofpbuf *msg, size_t set_hdr_offset)
+ struct dp_packet *msg, size_t set_hdr_offset)
{
struct ipfix_set_header *set_hdr;
+ size_t tx_errors;
/* Send template message. */
set_hdr = (struct ipfix_set_header*)
- ((uint8_t*)ofpbuf_data(msg) + set_hdr_offset);
- set_hdr->length = htons(ofpbuf_size(msg) - set_hdr_offset);
+ ((uint8_t*)dp_packet_data(msg) + set_hdr_offset);
+ set_hdr->length = htons(dp_packet_size(msg) - set_hdr_offset);
+
+ tx_errors = ipfix_send_msg(collectors, msg);
- ipfix_send_msg(collectors, msg);
+ dp_packet_uninit(msg);
- ofpbuf_uninit(msg);
+ return tx_errors;
}
static void
uint32_t export_time_sec, uint32_t obs_domain_id)
{
uint64_t msg_stub[DIV_ROUND_UP(MAX_MESSAGE_LEN, 8)];
- struct ofpbuf msg;
- size_t set_hdr_offset, tmpl_hdr_offset;
+ struct dp_packet msg;
+ size_t set_hdr_offset, tmpl_hdr_offset, error_pkts;
struct ipfix_template_record_header *tmpl_hdr;
uint16_t field_count;
+ size_t tx_packets = 0;
+ size_t tx_errors = 0;
enum ipfix_proto_l2 l2;
enum ipfix_proto_l3 l3;
enum ipfix_proto_l4 l4;
* And then reinitialize the msg to construct a new
* packet for the following templates.
*/
- if (ofpbuf_size(&msg) >= MAX_MESSAGE_LEN) {
+ if (dp_packet_size(&msg) >= MAX_MESSAGE_LEN) {
/* Send template message. */
- ipfix_send_template_msg(exporter->collectors,
- &msg, set_hdr_offset);
+ error_pkts = ipfix_send_template_msg(exporter->collectors,
+ &msg, set_hdr_offset);
+ tx_errors += error_pkts;
+ tx_packets += collectors_count(exporter->collectors) - error_pkts;
/* Reinitialize the template msg. */
ipfix_init_template_msg(msg_stub, export_time_sec,
&set_hdr_offset);
}
- tmpl_hdr_offset = ofpbuf_size(&msg);
- tmpl_hdr = ofpbuf_put_zeros(&msg, sizeof *tmpl_hdr);
+ tmpl_hdr_offset = dp_packet_size(&msg);
+ tmpl_hdr = dp_packet_put_zeros(&msg, sizeof *tmpl_hdr);
tmpl_hdr->template_id = htons(
ipfix_get_template_id(l2, l3, l4, tunnel));
- field_count =
- ipfix_define_template_fields(l2, l3, l4, tunnel, &msg);
+ field_count = ipfix_define_template_fields(
+ l2, l3, l4, tunnel, exporter->virtual_obs_id != NULL,
+ &msg);
tmpl_hdr = (struct ipfix_template_record_header*)
- ((uint8_t*)ofpbuf_data(&msg) + tmpl_hdr_offset);
+ ((uint8_t*)dp_packet_data(&msg) + tmpl_hdr_offset);
tmpl_hdr->field_count = htons(field_count);
}
}
}
/* Send template message. */
- ipfix_send_template_msg(exporter->collectors, &msg, set_hdr_offset);
+ error_pkts = ipfix_send_template_msg(exporter->collectors, &msg, set_hdr_offset);
+ tx_errors += error_pkts;
+ tx_packets += collectors_count(exporter->collectors) - error_pkts;
+
+ exporter->stats.tx_pkts += tx_packets;
+ exporter->stats.tx_errors += tx_errors;
/* XXX: Add Options Template Sets, at least to define a Flow Keys
* Option Template. */
}
}
+/* Copies 'exporter''s statistics into 'stats'.  When 'exporter' is NULL,
+ * 'stats' is left filled with all-ones (from the memset below) so the
+ * caller can tell that no statistics were available. */
+static void
+ipfix_get_stats__(const struct dpif_ipfix_exporter *exporter,
+ ofproto_ipfix_stats *stats)
+{
+ memset(stats, 0xff, sizeof *stats);
+
+ if (!exporter) {
+ return;
+ }
+
+ *stats = exporter->stats;
+}
+
+/* Fills 'stats' from the bridge exporter's underlying generic exporter. */
+static void
+ipfix_get_bridge_stats(const struct dpif_ipfix_bridge_exporter *exporter,
+ ofproto_ipfix_stats *stats)
+{
+ ipfix_get_stats__(&exporter->exporter, stats);
+}
+
+/* Fills 'stats' from a flow exporter, and records the collector set ID
+ * from the exporter's options so the reply identifies its origin. */
+static void
+ipfix_get_flow_stats(const struct dpif_ipfix_flow_exporter *exporter,
+ ofproto_ipfix_stats *stats)
+{
+ ipfix_get_stats__(&exporter->exporter, stats);
+ stats->collector_set_id = exporter->options->collector_set_id;
+}
+
+/* Appends IPFIX statistics replies to 'replies': one for the bridge
+ * exporter when 'bridge_ipfix' is true, otherwise one per flow exporter
+ * in 'di'.  Returns 0 on success, or OFPERR_NXST_NOT_CONFIGURED when the
+ * requested kind of exporter is not configured. */
+int
+dpif_ipfix_get_stats(const struct dpif_ipfix *di,
+ bool bridge_ipfix,
+ struct ovs_list *replies)
+ OVS_EXCLUDED(mutex)
+{
+ struct dpif_ipfix_flow_exporter_map_node *flow_exporter_node;
+ struct ofputil_ipfix_stats ois;
+
+ ovs_mutex_lock(&mutex);
+ if (bridge_ipfix) {
+ if (!di->bridge_exporter.options) {
+ ovs_mutex_unlock(&mutex);
+ return OFPERR_NXST_NOT_CONFIGURED;
+ }
+
+ ipfix_get_bridge_stats(&di->bridge_exporter, &ois);
+ ofputil_append_ipfix_stat(replies, &ois);
+ } else {
+ if (hmap_count(&di->flow_exporter_map) == 0) {
+ ovs_mutex_unlock(&mutex);
+ return OFPERR_NXST_NOT_CONFIGURED;
+ }
+
+ HMAP_FOR_EACH (flow_exporter_node, node,
+ &di->flow_exporter_map) {
+ ipfix_get_flow_stats(&flow_exporter_node->exporter, &ois);
+ ofputil_append_ipfix_stat(replies, &ois);
+ }
+ }
+ ovs_mutex_unlock(&mutex);
+
+ return 0;
+}
+
+/* Updates partial IPFIX statistics for one sampled packet: the flow
+ * counters when 'new_flow' is true, the total sampled-packet count, and
+ * the per-L3-type (and error) counters chosen by 'sampled_pkt_type'. */
+static void
+ipfix_update_stats(struct dpif_ipfix_exporter *exporter,
+ bool new_flow,
+ size_t current_flows,
+ enum ipfix_sampled_packet_type sampled_pkt_type)
+{
+ if (new_flow) {
+ exporter->stats.total_flows++;
+ exporter->stats.current_flows = current_flows;
+ }
+ exporter->stats.pkts++;
+
+ switch (sampled_pkt_type) {
+ case IPFIX_SAMPLED_PKT_IPV4_OK:
+ exporter->stats.ipv4_pkts++;
+ break;
+ case IPFIX_SAMPLED_PKT_IPV6_OK:
+ exporter->stats.ipv6_pkts++;
+ break;
+ case IPFIX_SAMPLED_PKT_IPV4_ERROR:
+ exporter->stats.ipv4_error_pkts++;
+ exporter->stats.error_pkts++;
+ break;
+ case IPFIX_SAMPLED_PKT_IPV6_ERROR:
+ exporter->stats.ipv6_error_pkts++;
+ exporter->stats.error_pkts++;
+ break;
+ case IPFIX_SAMPLED_PKT_UNKNOWN:
+ exporter->stats.error_pkts++;
+ break;
+ case IPFIX_SAMPLED_PKT_OTHERS:
+ default:
+ break;
+ }
+}
+
/* Add an entry into a flow cache. The entry is either aggregated into
* an existing entry with the same flow key and free()d, or it is
- * inserted into the cache. */
+ * inserted into the cache. And IPFIX stats will be updated */
static void
ipfix_cache_update(struct dpif_ipfix_exporter *exporter,
- struct ipfix_flow_cache_entry *entry)
+ struct ipfix_flow_cache_entry *entry,
+ enum ipfix_sampled_packet_type sampled_pkt_type)
{
struct ipfix_flow_cache_entry *old_entry;
+ size_t current_flows = 0;
old_entry = ipfix_cache_find_entry(exporter, &entry->flow_key);
/* As the latest entry added into the cache, it should
* logically have the highest flow_start_timestamp_usec, so
* append it at the tail. */
- list_push_back(&exporter->cache_flow_start_timestamp_list,
+ ovs_list_push_back(&exporter->cache_flow_start_timestamp_list,
&entry->cache_flow_start_timestamp_list_node);
/* Enforce exporter->cache_max_flows limit. */
- if (hmap_count(&exporter->cache_flow_key_map)
- > exporter->cache_max_flows) {
+ current_flows = hmap_count(&exporter->cache_flow_key_map);
+ ipfix_update_stats(exporter, true, current_flows, sampled_pkt_type);
+ if (current_flows > exporter->cache_max_flows) {
dpif_ipfix_cache_expire_now(exporter, false);
}
} else {
ipfix_cache_aggregate_entries(entry, old_entry);
free(entry);
+ ipfix_update_stats(exporter, false, current_flows, sampled_pkt_type);
}
}
-static void
+static enum ipfix_sampled_packet_type
ipfix_cache_entry_init(struct ipfix_flow_cache_entry *entry,
- const struct ofpbuf *packet, const struct flow *flow,
+ const struct dp_packet *packet, const struct flow *flow,
uint64_t packet_delta_count, uint32_t obs_domain_id,
uint32_t obs_point_id, odp_port_t output_odp_port,
const struct dpif_ipfix_port *tunnel_port,
const struct flow_tnl *tunnel_key)
{
struct ipfix_flow_key *flow_key;
- struct ofpbuf msg;
+ struct dp_packet msg;
enum ipfix_proto_l2 l2;
enum ipfix_proto_l3 l3;
enum ipfix_proto_l4 l4;
enum ipfix_proto_tunnel tunnel = IPFIX_PROTO_NOT_TUNNELED;
+ enum ipfix_sampled_packet_type sampled_pkt_type = IPFIX_SAMPLED_PKT_UNKNOWN;
uint8_t ethernet_header_length;
uint16_t ethernet_total_length;
flow_key = &entry->flow_key;
- ofpbuf_use_stack(&msg, flow_key->flow_key_msg_part,
- sizeof flow_key->flow_key_msg_part);
+ dp_packet_use_stub(&msg, flow_key->flow_key_msg_part,
+ sizeof flow_key->flow_key_msg_part);
/* Choose the right template ID matching the protocols in the
* sampled packet. */
case IPPROTO_UDP:
case IPPROTO_SCTP:
l4 = IPFIX_PROTO_L4_TCP_UDP_SCTP;
+ sampled_pkt_type = IPFIX_SAMPLED_PKT_IPV4_OK;
break;
case IPPROTO_ICMP:
l4 = IPFIX_PROTO_L4_ICMP;
+ sampled_pkt_type = IPFIX_SAMPLED_PKT_IPV4_OK;
break;
default:
l4 = IPFIX_PROTO_L4_UNKNOWN;
+ sampled_pkt_type = IPFIX_SAMPLED_PKT_IPV4_ERROR;
}
break;
case ETH_TYPE_IPV6:
case IPPROTO_UDP:
case IPPROTO_SCTP:
l4 = IPFIX_PROTO_L4_TCP_UDP_SCTP;
+ sampled_pkt_type = IPFIX_SAMPLED_PKT_IPV6_OK;
break;
case IPPROTO_ICMPV6:
l4 = IPFIX_PROTO_L4_ICMP;
+ sampled_pkt_type = IPFIX_SAMPLED_PKT_IPV6_OK;
break;
default:
l4 = IPFIX_PROTO_L4_UNKNOWN;
+ sampled_pkt_type = IPFIX_SAMPLED_PKT_IPV6_ERROR;
}
break;
default:
l3 = IPFIX_PROTO_L3_UNKNOWN;
l4 = IPFIX_PROTO_L4_UNKNOWN;
+ sampled_pkt_type = IPFIX_SAMPLED_PKT_OTHERS;
}
if (tunnel_port && tunnel_key) {
ethernet_header_length = (l2 == IPFIX_PROTO_L2_VLAN)
? VLAN_ETH_HEADER_LEN : ETH_HEADER_LEN;
- ethernet_total_length = ofpbuf_size(packet);
+ ethernet_total_length = dp_packet_size(packet);
/* Common Ethernet entities. */
{
struct ipfix_data_record_flow_key_common *data_common;
- data_common = ofpbuf_put_zeros(&msg, sizeof *data_common);
+ data_common = dp_packet_put_zeros(&msg, sizeof *data_common);
data_common->observation_point_id = htonl(obs_point_id);
data_common->flow_direction =
(output_odp_port == ODPP_NONE) ? INGRESS_FLOW : EGRESS_FLOW;
- memcpy(data_common->source_mac_address, flow->dl_src,
- sizeof flow->dl_src);
- memcpy(data_common->destination_mac_address, flow->dl_dst,
- sizeof flow->dl_dst);
+ data_common->source_mac_address = flow->dl_src;
+ data_common->destination_mac_address = flow->dl_dst;
data_common->ethernet_type = flow->dl_type;
data_common->ethernet_header_length = ethernet_header_length;
}
uint16_t vlan_id = vlan_tci_to_vid(flow->vlan_tci);
uint8_t priority = vlan_tci_to_pcp(flow->vlan_tci);
- data_vlan = ofpbuf_put_zeros(&msg, sizeof *data_vlan);
+ data_vlan = dp_packet_put_zeros(&msg, sizeof *data_vlan);
data_vlan->vlan_id = htons(vlan_id);
data_vlan->dot1q_vlan_id = htons(vlan_id);
data_vlan->dot1q_priority = priority;
if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
struct ipfix_data_record_flow_key_ip *data_ip;
- data_ip = ofpbuf_put_zeros(&msg, sizeof *data_ip);
+ data_ip = dp_packet_put_zeros(&msg, sizeof *data_ip);
data_ip->ip_version = (l3 == IPFIX_PROTO_L3_IPV4) ? 4 : 6;
data_ip->ip_ttl = flow->nw_ttl;
data_ip->protocol_identifier = flow->nw_proto;
if (l3 == IPFIX_PROTO_L3_IPV4) {
struct ipfix_data_record_flow_key_ipv4 *data_ipv4;
- data_ipv4 = ofpbuf_put_zeros(&msg, sizeof *data_ipv4);
+ data_ipv4 = dp_packet_put_zeros(&msg, sizeof *data_ipv4);
data_ipv4->source_ipv4_address = flow->nw_src;
data_ipv4->destination_ipv4_address = flow->nw_dst;
} else { /* l3 == IPFIX_PROTO_L3_IPV6 */
struct ipfix_data_record_flow_key_ipv6 *data_ipv6;
- data_ipv6 = ofpbuf_put_zeros(&msg, sizeof *data_ipv6);
+ data_ipv6 = dp_packet_put_zeros(&msg, sizeof *data_ipv6);
memcpy(data_ipv6->source_ipv6_address, &flow->ipv6_src,
sizeof flow->ipv6_src);
memcpy(data_ipv6->destination_ipv6_address, &flow->ipv6_dst,
if (l4 == IPFIX_PROTO_L4_TCP_UDP_SCTP) {
struct ipfix_data_record_flow_key_transport *data_transport;
- data_transport = ofpbuf_put_zeros(&msg, sizeof *data_transport);
+ data_transport = dp_packet_put_zeros(&msg, sizeof *data_transport);
data_transport->source_transport_port = flow->tp_src;
data_transport->destination_transport_port = flow->tp_dst;
} else if (l4 == IPFIX_PROTO_L4_ICMP) {
struct ipfix_data_record_flow_key_icmp *data_icmp;
- data_icmp = ofpbuf_put_zeros(&msg, sizeof *data_icmp);
+ data_icmp = dp_packet_put_zeros(&msg, sizeof *data_icmp);
data_icmp->icmp_type = ntohs(flow->tp_src) & 0xff;
data_icmp->icmp_code = ntohs(flow->tp_dst) & 0xff;
}
struct ipfix_data_record_flow_key_tunnel *data_tunnel;
const uint8_t *tun_id;
- data_tunnel = ofpbuf_put_zeros(&msg, sizeof *data_tunnel +
+ data_tunnel = dp_packet_put_zeros(&msg, sizeof *data_tunnel +
tunnel_port->tunnel_key_length);
data_tunnel->tunnel_source_ipv4_address = tunnel_key->ip_src;
data_tunnel->tunnel_destination_ipv4_address = tunnel_key->ip_dst;
tunnel_port->tunnel_key_length);
}
- flow_key->flow_key_msg_part_size = ofpbuf_size(&msg);
+ flow_key->flow_key_msg_part_size = dp_packet_size(&msg);
{
struct timeval now;
entry->minimum_ip_total_length = 0;
entry->maximum_ip_total_length = 0;
}
+
+ return sampled_pkt_type;
}
/* Send each single data record in its own data set, to simplify the
ipfix_put_data_set(uint32_t export_time_sec,
struct ipfix_flow_cache_entry *entry,
enum ipfix_flow_end_reason flow_end_reason,
- struct ofpbuf *msg)
+ const char *virtual_obs_id,
+ uint8_t virtual_obs_len,
+ struct dp_packet *msg)
{
size_t set_hdr_offset;
struct ipfix_set_header *set_hdr;
- set_hdr_offset = ofpbuf_size(msg);
+ set_hdr_offset = dp_packet_size(msg);
/* Put a Data Set. */
- set_hdr = ofpbuf_put_zeros(msg, sizeof *set_hdr);
+ set_hdr = dp_packet_put_zeros(msg, sizeof *set_hdr);
set_hdr->set_id = htons(entry->flow_key.template_id);
/* Copy the flow key part of the data record. */
- ofpbuf_put(msg, entry->flow_key.flow_key_msg_part,
+ dp_packet_put(msg, entry->flow_key.flow_key_msg_part,
entry->flow_key.flow_key_msg_part_size);
+ /* Export virtual observation ID. */
+ if (virtual_obs_id) {
+ dp_packet_put(msg, &virtual_obs_len, sizeof(virtual_obs_len));
+ dp_packet_put(msg, virtual_obs_id, virtual_obs_len);
+ }
+
/* Put the non-key part of the data record. */
{
flow_end_delta_usec = export_time_usec
- entry->flow_end_timestamp_usec;
- data_aggregated_common = ofpbuf_put_zeros(
+ data_aggregated_common = dp_packet_put_zeros(
msg, sizeof *data_aggregated_common);
data_aggregated_common->flow_start_delta_microseconds = htonl(
flow_start_delta_usec);
if (entry->octet_delta_sum_of_squares) { /* IP packet. */
struct ipfix_data_record_aggregated_ip *data_aggregated_ip;
- data_aggregated_ip = ofpbuf_put_zeros(
+ data_aggregated_ip = dp_packet_put_zeros(
msg, sizeof *data_aggregated_ip);
data_aggregated_ip->octet_delta_count = htonll(
entry->octet_delta_count);
entry->maximum_ip_total_length);
}
- set_hdr = (struct ipfix_set_header*)((uint8_t*)ofpbuf_data(msg) + set_hdr_offset);
- set_hdr->length = htons(ofpbuf_size(msg) - set_hdr_offset);
+ set_hdr = (struct ipfix_set_header*)((uint8_t*)dp_packet_data(msg) + set_hdr_offset);
+ set_hdr->length = htons(dp_packet_size(msg) - set_hdr_offset);
}
/* Send an IPFIX message with a single data record. */
enum ipfix_flow_end_reason flow_end_reason)
{
uint64_t msg_stub[DIV_ROUND_UP(MAX_MESSAGE_LEN, 8)];
- struct ofpbuf msg;
- ofpbuf_use_stub(&msg, msg_stub, sizeof msg_stub);
+ struct dp_packet msg;
+ size_t tx_errors;
+
+ dp_packet_use_stub(&msg, msg_stub, sizeof msg_stub);
ipfix_init_header(export_time_sec, exporter->seq_number++,
entry->flow_key.obs_domain_id, &msg);
- ipfix_put_data_set(export_time_sec, entry, flow_end_reason, &msg);
- ipfix_send_msg(exporter->collectors, &msg);
+ ipfix_put_data_set(export_time_sec, entry, flow_end_reason,
+ exporter->virtual_obs_id, exporter->virtual_obs_len,
+ &msg);
+ tx_errors = ipfix_send_msg(exporter->collectors, &msg);
- ofpbuf_uninit(&msg);
+ dp_packet_uninit(&msg);
+
+ exporter->stats.current_flows--;
+ exporter->stats.tx_pkts += collectors_count(exporter->collectors) - tx_errors;
+ exporter->stats.tx_errors += tx_errors;
}
static void
dpif_ipfix_sample(struct dpif_ipfix_exporter *exporter,
- const struct ofpbuf *packet, const struct flow *flow,
+ const struct dp_packet *packet, const struct flow *flow,
uint64_t packet_delta_count, uint32_t obs_domain_id,
uint32_t obs_point_id, odp_port_t output_odp_port,
const struct dpif_ipfix_port *tunnel_port,
const struct flow_tnl *tunnel_key)
{
struct ipfix_flow_cache_entry *entry;
+ enum ipfix_sampled_packet_type sampled_packet_type;
/* Create a flow cache entry from the sample. */
entry = xmalloc(sizeof *entry);
- ipfix_cache_entry_init(entry, packet, flow, packet_delta_count,
- obs_domain_id, obs_point_id,
- output_odp_port, tunnel_port, tunnel_key);
- ipfix_cache_update(exporter, entry);
+ sampled_packet_type = ipfix_cache_entry_init(entry, packet,
+ flow, packet_delta_count,
+ obs_domain_id, obs_point_id,
+ output_odp_port, tunnel_port,
+ tunnel_key);
+ ipfix_cache_update(exporter, entry, sampled_packet_type);
+}
+
+/* Returns true if 'di''s bridge exporter is enabled, i.e. configured with
+ * a nonzero sampling probability. */
+static bool
+bridge_exporter_enabled(struct dpif_ipfix *di)
+{
+ return di->bridge_exporter.probability > 0;
+}
void
-dpif_ipfix_bridge_sample(struct dpif_ipfix *di, const struct ofpbuf *packet,
+dpif_ipfix_bridge_sample(struct dpif_ipfix *di, const struct dp_packet *packet,
const struct flow *flow,
odp_port_t input_odp_port, odp_port_t output_odp_port,
const struct flow_tnl *output_tunnel_key)
struct dpif_ipfix_port * tunnel_port = NULL;
ovs_mutex_lock(&mutex);
+ if (!bridge_exporter_enabled(di)) {
+ ovs_mutex_unlock(&mutex);
+ return;
+ }
+
+    /* Skip BFD packets:
+     * Bidirectional Forwarding Detection (BFD) packets are used for
+     * monitoring the tunnel link status and are consumed by OVS itself.
+     * There is no need to sample them.
+     * Cf. IETF RFC 5881: a BFD control packet is a UDP packet with
+     * destination port 3784, and a BFD echo packet is a UDP packet with
+     * destination port 3785.
+     */
+ if (is_ip_any(flow) &&
+ flow->nw_proto == IPPROTO_UDP &&
+ (flow->tp_dst == htons(BFD_CONTROL_DEST_PORT) ||
+ flow->tp_dst == htons(BFD_ECHO_DEST_PORT))) {
+ ovs_mutex_unlock(&mutex);
+ return;
+ }
+
/* Use the sampling probability as an approximation of the number
* of matched packets. */
packet_delta_count = UINT32_MAX / di->bridge_exporter.probability;
tunnel_port = dpif_ipfix_find_port(di, output_odp_port);
}
}
+
dpif_ipfix_sample(&di->bridge_exporter.exporter, packet, flow,
packet_delta_count,
di->bridge_exporter.options->obs_domain_id,
}
void
-dpif_ipfix_flow_sample(struct dpif_ipfix *di, const struct ofpbuf *packet,
- const struct flow *flow, uint32_t collector_set_id,
- uint16_t probability, uint32_t obs_domain_id,
- uint32_t obs_point_id) OVS_EXCLUDED(mutex)
+dpif_ipfix_flow_sample(struct dpif_ipfix *di, const struct dp_packet *packet,
+ const struct flow *flow,
+ const union user_action_cookie *cookie,
+ odp_port_t input_odp_port,
+ const struct flow_tnl *output_tunnel_key)
+ OVS_EXCLUDED(mutex)
{
struct dpif_ipfix_flow_exporter_map_node *node;
+ const struct flow_tnl *tunnel_key = NULL;
+ struct dpif_ipfix_port * tunnel_port = NULL;
+ odp_port_t output_odp_port = cookie->flow_sample.output_odp_port;
+ uint32_t collector_set_id = cookie->flow_sample.collector_set_id;
+ uint16_t probability = cookie->flow_sample.probability;
+
/* Use the sampling probability as an approximation of the number
* of matched packets. */
uint64_t packet_delta_count = USHRT_MAX / probability;
ovs_mutex_lock(&mutex);
node = dpif_ipfix_find_flow_exporter_map_node(di, collector_set_id);
if (node) {
+ if (node->exporter.options->enable_tunnel_sampling) {
+ if (output_odp_port == ODPP_NONE && flow->tunnel.ip_dst) {
+ /* Input tunnel. */
+ tunnel_key = &flow->tunnel;
+ tunnel_port = dpif_ipfix_find_port(di, input_odp_port);
+ }
+ if (output_odp_port != ODPP_NONE && output_tunnel_key) {
+ /* Output tunnel, output_tunnel_key must be valid. */
+ tunnel_key = output_tunnel_key;
+ tunnel_port = dpif_ipfix_find_port(di, output_odp_port);
+ }
+ }
+
dpif_ipfix_sample(&node->exporter.exporter, packet, flow,
- packet_delta_count, obs_domain_id, obs_point_id,
- ODPP_NONE, NULL, NULL);
+ packet_delta_count,
+ cookie->flow_sample.obs_domain_id,
+ cookie->flow_sample.obs_point_id,
+ output_odp_port, tunnel_port, tunnel_key);
}
ovs_mutex_unlock(&mutex);
}
bool template_msg_sent = false;
enum ipfix_flow_end_reason flow_end_reason;
- if (list_is_empty(&exporter->cache_flow_start_timestamp_list)) {
+ if (ovs_list_is_empty(&exporter->cache_flow_start_timestamp_list)) {
return;
}
break;
}
- list_remove(&entry->cache_flow_start_timestamp_list_node);
+ ovs_list_remove(&entry->cache_flow_start_timestamp_list_node);
hmap_remove(&exporter->cache_flow_key_map,
&entry->flow_key_map_node);
ovs_mutex_lock(&mutex);
get_export_time_now(&export_time_usec, &export_time_sec);
- if (di->bridge_exporter.probability > 0) { /* Bridge exporter enabled. */
+ if (bridge_exporter_enabled(di)) {
dpif_ipfix_cache_expire(
&di->bridge_exporter.exporter, false, export_time_usec,
export_time_sec);
struct dpif_ipfix_flow_exporter_map_node *flow_exporter_node;
ovs_mutex_lock(&mutex);
- if (di->bridge_exporter.probability > 0) { /* Bridge exporter enabled. */
+ if (bridge_exporter_enabled(di)) {
if (ipfix_cache_next_timeout_msec(
&di->bridge_exporter.exporter, &next_timeout_msec)) {
poll_timer_wait_until(next_timeout_msec);