/*
 * Copyright (c) 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "ofproto-dpif-ipfix.h"
20 #include "byte-order.h"
21 #include "collectors.h"
28 #include "ofproto-dpif.h"
29 #include "dp-packet.h"
31 #include "poll-loop.h"
36 #include "openvswitch/vlog.h"
38 VLOG_DEFINE_THIS_MODULE(ipfix);
40 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
41 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
43 /* Cf. IETF RFC 5101 Section 10.3.4. */
44 #define IPFIX_DEFAULT_COLLECTOR_PORT 4739
/* The standard layer2SegmentId (ID 351) element is included in vDS to send
 * the VxLAN tunnel's VNI. It is 64-bit long, the most significant byte is
 * used to indicate the type of tunnel (0x01 = VxLAN, 0x02 = GRE) and the three
 * least significant bytes hold the value of the layer 2 overlay network
 * segment identifier: a 24-bit VxLAN tunnel's VNI or a 24-bit GRE tunnel's
 * TNI. This is not compatible with GRE-64, as implemented in OVS, as its
 * tunnel IDs are 64-bit.
 *
 * Two new enterprise information elements are defined which are similar to
 * layer2SegmentId but support 64-bit IDs:
 *     tunnelType (ID 891) and tunnelKey (ID 892).
 *
 * The enum dpif_ipfix_tunnel_type is to declare the types supported in the
 * tunnelType element.
 * The number of IPFIX tunnel types includes two reserved types: 0x04 and 0x06.
 */
/* Tunnel types exported in the tunnelType element; values 0x04 and 0x06
 * are reserved (see comment above). */
enum dpif_ipfix_tunnel_type {
    DPIF_IPFIX_TUNNEL_UNKNOWN = 0x00,
    DPIF_IPFIX_TUNNEL_VXLAN = 0x01,
    DPIF_IPFIX_TUNNEL_GRE = 0x02,
    DPIF_IPFIX_TUNNEL_LISP = 0x03,
    DPIF_IPFIX_TUNNEL_IPSEC_GRE = 0x05,
    DPIF_IPFIX_TUNNEL_GENEVE = 0x07,
    NUM_DPIF_IPFIX_TUNNEL = 0x08,  /* Array size incl. reserved 0x04/0x06. */
};
72 struct dpif_ipfix_port {
73 struct hmap_node hmap_node; /* In struct dpif_ipfix's "tunnel_ports" hmap. */
74 struct ofport *ofport; /* To retrieve port stats. */
76 enum dpif_ipfix_tunnel_type tunnel_type;
77 uint8_t tunnel_key_length;
80 struct dpif_ipfix_exporter {
81 struct collectors *collectors;
83 time_t last_template_set_time;
84 struct hmap cache_flow_key_map; /* ipfix_flow_cache_entry. */
85 struct ovs_list cache_flow_start_timestamp_list; /* ipfix_flow_cache_entry. */
86 uint32_t cache_active_timeout; /* In seconds. */
87 uint32_t cache_max_flows;
90 struct dpif_ipfix_bridge_exporter {
91 struct dpif_ipfix_exporter exporter;
92 struct ofproto_ipfix_bridge_exporter_options *options;
96 struct dpif_ipfix_flow_exporter {
97 struct dpif_ipfix_exporter exporter;
98 struct ofproto_ipfix_flow_exporter_options *options;
101 struct dpif_ipfix_flow_exporter_map_node {
102 struct hmap_node node;
103 struct dpif_ipfix_flow_exporter exporter;
107 struct dpif_ipfix_bridge_exporter bridge_exporter;
108 struct hmap flow_exporter_map; /* dpif_ipfix_flow_exporter_map_node. */
109 struct hmap tunnel_ports; /* Contains "struct dpif_ipfix_port"s.
110 * It makes tunnel port lookups faster in
111 * sampling upcalls. */
112 struct ovs_refcount ref_cnt;
115 #define IPFIX_VERSION 0x000a
117 /* When using UDP, IPFIX Template Records must be re-sent regularly.
118 * The standard default interval is 10 minutes (600 seconds).
119 * Cf. IETF RFC 5101 Section 10.3.6. */
120 #define IPFIX_TEMPLATE_INTERVAL 600
122 /* Cf. IETF RFC 5101 Section 3.1. */
124 struct ipfix_header {
125 ovs_be16 version; /* IPFIX_VERSION. */
126 ovs_be16 length; /* Length in bytes including this header. */
127 ovs_be32 export_time; /* Seconds since the epoch. */
128 ovs_be32 seq_number; /* Message sequence number. */
129 ovs_be32 obs_domain_id; /* Observation Domain ID. */
131 BUILD_ASSERT_DECL(sizeof(struct ipfix_header) == 16);
133 #define IPFIX_SET_ID_TEMPLATE 2
134 #define IPFIX_SET_ID_OPTION_TEMPLATE 3
136 /* Cf. IETF RFC 5101 Section 3.3.2. */
138 struct ipfix_set_header {
139 ovs_be16 set_id; /* IPFIX_SET_ID_* or valid template ID for Data Sets. */
140 ovs_be16 length; /* Length of the set in bytes including header. */
142 BUILD_ASSERT_DECL(sizeof(struct ipfix_set_header) == 4);
144 /* Alternatives for templates at each layer. A template is defined by
145 * a combination of one value for each layer. */
146 enum ipfix_proto_l2 {
147 IPFIX_PROTO_L2_ETH = 0, /* No VLAN. */
151 enum ipfix_proto_l3 {
152 IPFIX_PROTO_L3_UNKNOWN = 0,
157 enum ipfix_proto_l4 {
158 IPFIX_PROTO_L4_UNKNOWN = 0,
159 IPFIX_PROTO_L4_TCP_UDP_SCTP,
163 enum ipfix_proto_tunnel {
164 IPFIX_PROTO_NOT_TUNNELED = 0,
165 IPFIX_PROTO_TUNNELED, /* Support gre, lisp and vxlan. */
166 NUM_IPFIX_PROTO_TUNNEL
169 /* Any Template ID > 255 is usable for Template Records. */
170 #define IPFIX_TEMPLATE_ID_MIN 256
172 /* Cf. IETF RFC 5101 Section 3.4.1. */
174 struct ipfix_template_record_header {
175 ovs_be16 template_id;
176 ovs_be16 field_count;
178 BUILD_ASSERT_DECL(sizeof(struct ipfix_template_record_header) == 4);
180 enum ipfix_entity_id {
181 /* standard IPFIX elements */
182 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_ID_##ENUM = ID,
183 #include "ofproto/ipfix-entities.def"
184 /* non-standard IPFIX elements */
185 #define IPFIX_SET_ENTERPRISE(v) (((v) | 0x8000))
186 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
187 IPFIX_ENTITY_ID_##ENUM = IPFIX_SET_ENTERPRISE(ID),
188 #include "ofproto/ipfix-enterprise-entities.def"
191 enum ipfix_entity_size {
192 /* standard IPFIX elements */
193 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_SIZE_##ENUM = SIZE,
194 #include "ofproto/ipfix-entities.def"
195 /* non-standard IPFIX elements */
196 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
197 IPFIX_ENTITY_SIZE_##ENUM = SIZE,
198 #include "ofproto/ipfix-enterprise-entities.def"
201 enum ipfix_entity_enterprise {
202 /* standard IPFIX elements */
203 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_ENTERPRISE_##ENUM = 0,
204 #include "ofproto/ipfix-entities.def"
205 /* non-standard IPFIX elements */
206 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
207 IPFIX_ENTITY_ENTERPRISE_##ENUM = ENTERPRISE,
208 #include "ofproto/ipfix-enterprise-entities.def"
212 struct ipfix_template_field_specifier {
213 ovs_be16 element_id; /* IPFIX_ENTITY_ID_*. */
214 ovs_be16 field_length; /* Length of the field's value, in bytes.
215 * For Variable-Length element, it should be 65535.
217 ovs_be32 enterprise; /* Enterprise number */
219 BUILD_ASSERT_DECL(sizeof(struct ipfix_template_field_specifier) == 8);
221 /* Cf. IETF RFC 5102 Section 5.11.6. */
222 enum ipfix_flow_direction {
227 /* Part of data record flow key for common metadata and Ethernet entities. */
229 struct ipfix_data_record_flow_key_common {
230 ovs_be32 observation_point_id; /* OBSERVATION_POINT_ID */
231 uint8_t flow_direction; /* FLOW_DIRECTION */
232 uint8_t source_mac_address[ETH_ADDR_LEN]; /* SOURCE_MAC_ADDRESS */
233 uint8_t destination_mac_address[ETH_ADDR_LEN]; /* DESTINATION_MAC_ADDRESS */
234 ovs_be16 ethernet_type; /* ETHERNET_TYPE */
235 uint8_t ethernet_header_length; /* ETHERNET_HEADER_LENGTH */
237 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_common) == 20);
239 /* Part of data record flow key for VLAN entities. */
241 struct ipfix_data_record_flow_key_vlan {
242 ovs_be16 vlan_id; /* VLAN_ID */
243 ovs_be16 dot1q_vlan_id; /* DOT1Q_VLAN_ID */
244 uint8_t dot1q_priority; /* DOT1Q_PRIORITY */
246 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_vlan) == 5);
248 /* Part of data record flow key for IP entities. */
249 /* XXX: Replace IP_TTL with MINIMUM_TTL and MAXIMUM_TTL? */
251 struct ipfix_data_record_flow_key_ip {
252 uint8_t ip_version; /* IP_VERSION */
253 uint8_t ip_ttl; /* IP_TTL */
254 uint8_t protocol_identifier; /* PROTOCOL_IDENTIFIER */
255 uint8_t ip_diff_serv_code_point; /* IP_DIFF_SERV_CODE_POINT */
256 uint8_t ip_precedence; /* IP_PRECEDENCE */
257 uint8_t ip_class_of_service; /* IP_CLASS_OF_SERVICE */
259 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ip) == 6);
261 /* Part of data record flow key for IPv4 entities. */
263 struct ipfix_data_record_flow_key_ipv4 {
264 ovs_be32 source_ipv4_address; /* SOURCE_IPV4_ADDRESS */
265 ovs_be32 destination_ipv4_address; /* DESTINATION_IPV4_ADDRESS */
267 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ipv4) == 8);
269 /* Part of data record flow key for IPv6 entities. */
271 struct ipfix_data_record_flow_key_ipv6 {
272 uint8_t source_ipv6_address[16]; /* SOURCE_IPV6_ADDRESS */
273 uint8_t destination_ipv6_address[16]; /* DESTINATION_IPV6_ADDRESS */
274 ovs_be32 flow_label_ipv6; /* FLOW_LABEL_IPV6 */
276 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ipv6) == 36);
278 /* Part of data record flow key for TCP/UDP/SCTP entities. */
280 struct ipfix_data_record_flow_key_transport {
281 ovs_be16 source_transport_port; /* SOURCE_TRANSPORT_PORT */
282 ovs_be16 destination_transport_port; /* DESTINATION_TRANSPORT_PORT */
284 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_transport) == 4);
286 /* Part of data record flow key for ICMP entities. */
288 struct ipfix_data_record_flow_key_icmp {
289 uint8_t icmp_type; /* ICMP_TYPE_IPV4 / ICMP_TYPE_IPV6 */
290 uint8_t icmp_code; /* ICMP_CODE_IPV4 / ICMP_CODE_IPV6 */
292 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_icmp) == 2);
294 /* For the tunnel type that is on the top of IPSec, the protocol identifier
295 * of the upper tunnel type is used.
297 static uint8_t tunnel_protocol[NUM_DPIF_IPFIX_TUNNEL] = {
299 IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_VXLAN */
300 IPPROTO_GRE, /* DPIF_IPFIX_TUNNEL_GRE */
301 IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_LISP*/
303 IPPROTO_GRE, /* DPIF_IPFIX_TUNNEL_IPSEC_GRE */
305 IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_GENEVE*/
309 struct ipfix_data_record_flow_key_tunnel {
310 ovs_be32 tunnel_source_ipv4_address; /* TUNNEL_SOURCE_IPV4_ADDRESS */
311 ovs_be32 tunnel_destination_ipv4_address; /* TUNNEL_DESTINATION_IPV4_ADDRESS */
312 uint8_t tunnel_protocol_identifier; /* TUNNEL_PROTOCOL_IDENTIFIER */
313 ovs_be16 tunnel_source_transport_port; /* TUNNEL_SOURCE_TRANSPORT_PORT */
314 ovs_be16 tunnel_destination_transport_port; /* TUNNEL_DESTINATION_TRANSPORT_PORT */
315 uint8_t tunnel_type; /* TUNNEL_TYPE */
316 uint8_t tunnel_key_length; /* length of TUNNEL_KEY */
317 uint8_t tunnel_key[]; /* data of TUNNEL_KEY */
319 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_tunnel) == 15);
321 /* Cf. IETF RFC 5102 Section 5.11.3. */
322 enum ipfix_flow_end_reason {
324 ACTIVE_TIMEOUT = 0x02,
325 END_OF_FLOW_DETECTED = 0x03,
327 LACK_OF_RESOURCES = 0x05
330 /* Part of data record for common aggregated elements. */
332 struct ipfix_data_record_aggregated_common {
333 ovs_be32 flow_start_delta_microseconds; /* FLOW_START_DELTA_MICROSECONDS */
334 ovs_be32 flow_end_delta_microseconds; /* FLOW_END_DELTA_MICROSECONDS */
335 ovs_be64 packet_delta_count; /* PACKET_DELTA_COUNT */
336 ovs_be64 layer2_octet_delta_count; /* LAYER2_OCTET_DELTA_COUNT */
337 uint8_t flow_end_reason; /* FLOW_END_REASON */
339 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_common) == 25);
341 /* Part of data record for IP aggregated elements. */
343 struct ipfix_data_record_aggregated_ip {
344 ovs_be64 octet_delta_count; /* OCTET_DELTA_COUNT */
345 ovs_be64 octet_delta_sum_of_squares; /* OCTET_DELTA_SUM_OF_SQUARES */
346 ovs_be64 minimum_ip_total_length; /* MINIMUM_IP_TOTAL_LENGTH */
347 ovs_be64 maximum_ip_total_length; /* MAXIMUM_IP_TOTAL_LENGTH */
349 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_ip) == 32);
352 * support tunnel key for:
354 * GRE: 32- or 64-bit key,
355 * LISP: 24-bit instance ID
357 #define MAX_TUNNEL_KEY_LEN 8
359 #define MAX_FLOW_KEY_LEN \
360 (sizeof(struct ipfix_data_record_flow_key_common) \
361 + sizeof(struct ipfix_data_record_flow_key_vlan) \
362 + sizeof(struct ipfix_data_record_flow_key_ip) \
363 + MAX(sizeof(struct ipfix_data_record_flow_key_ipv4), \
364 sizeof(struct ipfix_data_record_flow_key_ipv6)) \
365 + MAX(sizeof(struct ipfix_data_record_flow_key_icmp), \
366 sizeof(struct ipfix_data_record_flow_key_transport)) \
367 + sizeof(struct ipfix_data_record_flow_key_tunnel) \
368 + MAX_TUNNEL_KEY_LEN)
370 #define MAX_DATA_RECORD_LEN \
372 + sizeof(struct ipfix_data_record_aggregated_common) \
373 + sizeof(struct ipfix_data_record_aggregated_ip))
375 /* Max length of a data set. To simplify the implementation, each
376 * data record is sent in a separate data set, so each data set
377 * contains at most one data record. */
378 #define MAX_DATA_SET_LEN \
379 (sizeof(struct ipfix_set_header) \
380 + MAX_DATA_RECORD_LEN)
382 /* Max length of an IPFIX message. Arbitrarily set to accommodate low
384 #define MAX_MESSAGE_LEN 1024
386 /* Cache structures. */
389 struct ipfix_flow_key {
390 uint32_t obs_domain_id;
391 uint16_t template_id;
392 size_t flow_key_msg_part_size;
393 uint64_t flow_key_msg_part[DIV_ROUND_UP(MAX_FLOW_KEY_LEN, 8)];
396 /* Flow cache entry. */
397 struct ipfix_flow_cache_entry {
398 struct hmap_node flow_key_map_node;
399 struct ovs_list cache_flow_start_timestamp_list_node;
400 struct ipfix_flow_key flow_key;
401 /* Common aggregated elements. */
402 uint64_t flow_start_timestamp_usec;
403 uint64_t flow_end_timestamp_usec;
404 uint64_t packet_delta_count;
405 uint64_t layer2_octet_delta_count;
406 uint64_t octet_delta_count;
407 uint64_t octet_delta_sum_of_squares; /* 0 if not IP. */
408 uint16_t minimum_ip_total_length; /* 0 if not IP. */
409 uint16_t maximum_ip_total_length; /* 0 if not IP. */
412 static void dpif_ipfix_cache_expire(struct dpif_ipfix_exporter *, bool,
413 const uint64_t, const uint32_t);
415 static void get_export_time_now(uint64_t *, uint32_t *);
417 static void dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter *, bool);
420 ofproto_ipfix_bridge_exporter_options_equal(
421 const struct ofproto_ipfix_bridge_exporter_options *a,
422 const struct ofproto_ipfix_bridge_exporter_options *b)
424 return (a->obs_domain_id == b->obs_domain_id
425 && a->obs_point_id == b->obs_point_id
426 && a->sampling_rate == b->sampling_rate
427 && a->cache_active_timeout == b->cache_active_timeout
428 && a->cache_max_flows == b->cache_max_flows
429 && a->enable_tunnel_sampling == b->enable_tunnel_sampling
430 && a->enable_input_sampling == b->enable_input_sampling
431 && a->enable_output_sampling == b->enable_output_sampling
432 && sset_equals(&a->targets, &b->targets));
435 static struct ofproto_ipfix_bridge_exporter_options *
436 ofproto_ipfix_bridge_exporter_options_clone(
437 const struct ofproto_ipfix_bridge_exporter_options *old)
439 struct ofproto_ipfix_bridge_exporter_options *new =
440 xmemdup(old, sizeof *old);
441 sset_clone(&new->targets, &old->targets);
446 ofproto_ipfix_bridge_exporter_options_destroy(
447 struct ofproto_ipfix_bridge_exporter_options *options)
450 sset_destroy(&options->targets);
456 ofproto_ipfix_flow_exporter_options_equal(
457 const struct ofproto_ipfix_flow_exporter_options *a,
458 const struct ofproto_ipfix_flow_exporter_options *b)
460 return (a->collector_set_id == b->collector_set_id
461 && a->cache_active_timeout == b->cache_active_timeout
462 && a->cache_max_flows == b->cache_max_flows
463 && sset_equals(&a->targets, &b->targets));
466 static struct ofproto_ipfix_flow_exporter_options *
467 ofproto_ipfix_flow_exporter_options_clone(
468 const struct ofproto_ipfix_flow_exporter_options *old)
470 struct ofproto_ipfix_flow_exporter_options *new =
471 xmemdup(old, sizeof *old);
472 sset_clone(&new->targets, &old->targets);
477 ofproto_ipfix_flow_exporter_options_destroy(
478 struct ofproto_ipfix_flow_exporter_options *options)
481 sset_destroy(&options->targets);
487 dpif_ipfix_exporter_init(struct dpif_ipfix_exporter *exporter)
489 exporter->collectors = NULL;
490 exporter->seq_number = 1;
491 exporter->last_template_set_time = TIME_MIN;
492 hmap_init(&exporter->cache_flow_key_map);
493 list_init(&exporter->cache_flow_start_timestamp_list);
494 exporter->cache_active_timeout = 0;
495 exporter->cache_max_flows = 0;
499 dpif_ipfix_exporter_clear(struct dpif_ipfix_exporter *exporter)
501 /* Flush the cache with flow end reason "forced end." */
502 dpif_ipfix_cache_expire_now(exporter, true);
504 collectors_destroy(exporter->collectors);
505 exporter->collectors = NULL;
506 exporter->seq_number = 1;
507 exporter->last_template_set_time = TIME_MIN;
508 exporter->cache_active_timeout = 0;
509 exporter->cache_max_flows = 0;
513 dpif_ipfix_exporter_destroy(struct dpif_ipfix_exporter *exporter)
515 dpif_ipfix_exporter_clear(exporter);
516 hmap_destroy(&exporter->cache_flow_key_map);
520 dpif_ipfix_exporter_set_options(struct dpif_ipfix_exporter *exporter,
521 const struct sset *targets,
522 const uint32_t cache_active_timeout,
523 const uint32_t cache_max_flows)
525 collectors_destroy(exporter->collectors);
526 collectors_create(targets, IPFIX_DEFAULT_COLLECTOR_PORT,
527 &exporter->collectors);
528 if (exporter->collectors == NULL) {
529 VLOG_WARN_RL(&rl, "no collectors could be initialized, "
530 "IPFIX exporter disabled");
531 dpif_ipfix_exporter_clear(exporter);
534 exporter->cache_active_timeout = cache_active_timeout;
535 exporter->cache_max_flows = cache_max_flows;
539 static struct dpif_ipfix_port *
540 dpif_ipfix_find_port(const struct dpif_ipfix *di,
541 odp_port_t odp_port) OVS_REQUIRES(mutex)
543 struct dpif_ipfix_port *dip;
545 HMAP_FOR_EACH_IN_BUCKET (dip, hmap_node, hash_odp_port(odp_port),
547 if (dip->odp_port == odp_port) {
555 dpif_ipfix_del_port(struct dpif_ipfix *di,
556 struct dpif_ipfix_port *dip)
559 hmap_remove(&di->tunnel_ports, &dip->hmap_node);
564 dpif_ipfix_add_tunnel_port(struct dpif_ipfix *di, struct ofport *ofport,
565 odp_port_t odp_port) OVS_EXCLUDED(mutex)
567 struct dpif_ipfix_port *dip;
570 ovs_mutex_lock(&mutex);
571 dip = dpif_ipfix_find_port(di, odp_port);
573 dpif_ipfix_del_port(di, dip);
576 type = netdev_get_type(ofport->netdev);
581 /* Add to table of tunnel ports. */
582 dip = xmalloc(sizeof *dip);
583 dip->ofport = ofport;
584 dip->odp_port = odp_port;
585 if (strcmp(type, "gre") == 0) {
587 dip->tunnel_type = DPIF_IPFIX_TUNNEL_GRE;
588 dip->tunnel_key_length = 4;
589 } else if (strcmp(type, "gre64") == 0) {
591 dip->tunnel_type = DPIF_IPFIX_TUNNEL_GRE;
592 dip->tunnel_key_length = 8;
593 } else if (strcmp(type, "ipsec_gre") == 0) {
594 /* 32-bit key ipsec_gre */
595 dip->tunnel_type = DPIF_IPFIX_TUNNEL_IPSEC_GRE;
596 dip->tunnel_key_length = 4;
597 } else if (strcmp(type, "ipsec_gre64") == 0) {
598 /* 64-bit key ipsec_gre */
599 dip->tunnel_type = DPIF_IPFIX_TUNNEL_IPSEC_GRE;
600 dip->tunnel_key_length = 8;
601 } else if (strcmp(type, "vxlan") == 0) {
602 dip->tunnel_type = DPIF_IPFIX_TUNNEL_VXLAN;
603 dip->tunnel_key_length = 3;
604 } else if (strcmp(type, "lisp") == 0) {
605 dip->tunnel_type = DPIF_IPFIX_TUNNEL_LISP;
606 dip->tunnel_key_length = 3;
607 } else if (strcmp(type, "geneve") == 0) {
608 dip->tunnel_type = DPIF_IPFIX_TUNNEL_GENEVE;
609 dip->tunnel_key_length = 3;
614 hmap_insert(&di->tunnel_ports, &dip->hmap_node, hash_odp_port(odp_port));
617 ovs_mutex_unlock(&mutex);
621 dpif_ipfix_del_tunnel_port(struct dpif_ipfix *di, odp_port_t odp_port)
624 struct dpif_ipfix_port *dip;
625 ovs_mutex_lock(&mutex);
626 dip = dpif_ipfix_find_port(di, odp_port);
628 dpif_ipfix_del_port(di, dip);
630 ovs_mutex_unlock(&mutex);
634 dpif_ipfix_get_tunnel_port(const struct dpif_ipfix *di, odp_port_t odp_port)
637 struct dpif_ipfix_port *dip;
638 ovs_mutex_lock(&mutex);
639 dip = dpif_ipfix_find_port(di, odp_port);
640 ovs_mutex_unlock(&mutex);
645 dpif_ipfix_bridge_exporter_init(struct dpif_ipfix_bridge_exporter *exporter)
647 dpif_ipfix_exporter_init(&exporter->exporter);
648 exporter->options = NULL;
649 exporter->probability = 0;
653 dpif_ipfix_bridge_exporter_clear(struct dpif_ipfix_bridge_exporter *exporter)
655 dpif_ipfix_exporter_clear(&exporter->exporter);
656 ofproto_ipfix_bridge_exporter_options_destroy(exporter->options);
657 exporter->options = NULL;
658 exporter->probability = 0;
662 dpif_ipfix_bridge_exporter_destroy(struct dpif_ipfix_bridge_exporter *exporter)
664 dpif_ipfix_bridge_exporter_clear(exporter);
665 dpif_ipfix_exporter_destroy(&exporter->exporter);
669 dpif_ipfix_bridge_exporter_set_options(
670 struct dpif_ipfix_bridge_exporter *exporter,
671 const struct ofproto_ipfix_bridge_exporter_options *options)
673 bool options_changed;
675 if (!options || sset_is_empty(&options->targets)) {
676 /* No point in doing any work if there are no targets. */
677 dpif_ipfix_bridge_exporter_clear(exporter);
683 || !ofproto_ipfix_bridge_exporter_options_equal(
684 options, exporter->options));
686 /* Configure collectors if options have changed or if we're
687 * shortchanged in collectors (which indicates that opening one or
688 * more of the configured collectors failed, so that we should
691 || collectors_count(exporter->exporter.collectors)
692 < sset_count(&options->targets)) {
693 if (!dpif_ipfix_exporter_set_options(
694 &exporter->exporter, &options->targets,
695 options->cache_active_timeout, options->cache_max_flows)) {
700 /* Avoid reconfiguring if options didn't change. */
701 if (!options_changed) {
705 ofproto_ipfix_bridge_exporter_options_destroy(exporter->options);
706 exporter->options = ofproto_ipfix_bridge_exporter_options_clone(options);
707 exporter->probability =
708 MAX(1, UINT32_MAX / exporter->options->sampling_rate);
710 /* Run over the cache as some entries might have expired after
711 * changing the timeouts. */
712 dpif_ipfix_cache_expire_now(&exporter->exporter, false);
715 static struct dpif_ipfix_flow_exporter_map_node*
716 dpif_ipfix_find_flow_exporter_map_node(
717 const struct dpif_ipfix *di, const uint32_t collector_set_id)
720 struct dpif_ipfix_flow_exporter_map_node *exporter_node;
722 HMAP_FOR_EACH_WITH_HASH (exporter_node, node,
723 hash_int(collector_set_id, 0),
724 &di->flow_exporter_map) {
725 if (exporter_node->exporter.options->collector_set_id
726 == collector_set_id) {
727 return exporter_node;
735 dpif_ipfix_flow_exporter_init(struct dpif_ipfix_flow_exporter *exporter)
737 dpif_ipfix_exporter_init(&exporter->exporter);
738 exporter->options = NULL;
742 dpif_ipfix_flow_exporter_clear(struct dpif_ipfix_flow_exporter *exporter)
744 dpif_ipfix_exporter_clear(&exporter->exporter);
745 ofproto_ipfix_flow_exporter_options_destroy(exporter->options);
746 exporter->options = NULL;
750 dpif_ipfix_flow_exporter_destroy(struct dpif_ipfix_flow_exporter *exporter)
752 dpif_ipfix_flow_exporter_clear(exporter);
753 dpif_ipfix_exporter_destroy(&exporter->exporter);
757 dpif_ipfix_flow_exporter_set_options(
758 struct dpif_ipfix_flow_exporter *exporter,
759 const struct ofproto_ipfix_flow_exporter_options *options)
761 bool options_changed;
763 if (sset_is_empty(&options->targets)) {
764 /* No point in doing any work if there are no targets. */
765 dpif_ipfix_flow_exporter_clear(exporter);
771 || !ofproto_ipfix_flow_exporter_options_equal(
772 options, exporter->options));
774 /* Configure collectors if options have changed or if we're
775 * shortchanged in collectors (which indicates that opening one or
776 * more of the configured collectors failed, so that we should
779 || collectors_count(exporter->exporter.collectors)
780 < sset_count(&options->targets)) {
781 if (!dpif_ipfix_exporter_set_options(
782 &exporter->exporter, &options->targets,
783 options->cache_active_timeout, options->cache_max_flows)) {
788 /* Avoid reconfiguring if options didn't change. */
789 if (!options_changed) {
793 ofproto_ipfix_flow_exporter_options_destroy(exporter->options);
794 exporter->options = ofproto_ipfix_flow_exporter_options_clone(options);
796 /* Run over the cache as some entries might have expired after
797 * changing the timeouts. */
798 dpif_ipfix_cache_expire_now(&exporter->exporter, false);
804 dpif_ipfix_set_options(
805 struct dpif_ipfix *di,
806 const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options,
807 const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options,
808 size_t n_flow_exporters_options) OVS_EXCLUDED(mutex)
811 struct ofproto_ipfix_flow_exporter_options *options;
812 struct dpif_ipfix_flow_exporter_map_node *node, *next;
813 size_t n_broken_flow_exporters_options = 0;
815 ovs_mutex_lock(&mutex);
816 dpif_ipfix_bridge_exporter_set_options(&di->bridge_exporter,
817 bridge_exporter_options);
819 /* Add new flow exporters and update current flow exporters. */
820 options = (struct ofproto_ipfix_flow_exporter_options *)
821 flow_exporters_options;
822 for (i = 0; i < n_flow_exporters_options; i++) {
823 node = dpif_ipfix_find_flow_exporter_map_node(
824 di, options->collector_set_id);
826 node = xzalloc(sizeof *node);
827 dpif_ipfix_flow_exporter_init(&node->exporter);
828 hmap_insert(&di->flow_exporter_map, &node->node,
829 hash_int(options->collector_set_id, 0));
831 if (!dpif_ipfix_flow_exporter_set_options(&node->exporter, options)) {
832 n_broken_flow_exporters_options++;
837 ovs_assert(hmap_count(&di->flow_exporter_map) >=
838 (n_flow_exporters_options - n_broken_flow_exporters_options));
840 /* Remove dropped flow exporters, if any needs to be removed. */
841 if (hmap_count(&di->flow_exporter_map) > n_flow_exporters_options) {
842 HMAP_FOR_EACH_SAFE (node, next, node, &di->flow_exporter_map) {
843 /* This is slow but doesn't take any extra memory, and
844 * this table is not supposed to contain many rows anyway. */
845 options = (struct ofproto_ipfix_flow_exporter_options *)
846 flow_exporters_options;
847 for (i = 0; i < n_flow_exporters_options; i++) {
848 if (node->exporter.options->collector_set_id
849 == options->collector_set_id) {
854 if (i == n_flow_exporters_options) { // Not found.
855 hmap_remove(&di->flow_exporter_map, &node->node);
856 dpif_ipfix_flow_exporter_destroy(&node->exporter);
862 ovs_assert(hmap_count(&di->flow_exporter_map) ==
863 (n_flow_exporters_options - n_broken_flow_exporters_options));
864 ovs_mutex_unlock(&mutex);
868 dpif_ipfix_create(void)
870 struct dpif_ipfix *di;
871 di = xzalloc(sizeof *di);
872 dpif_ipfix_bridge_exporter_init(&di->bridge_exporter);
873 hmap_init(&di->flow_exporter_map);
874 hmap_init(&di->tunnel_ports);
875 ovs_refcount_init(&di->ref_cnt);
880 dpif_ipfix_ref(const struct dpif_ipfix *di_)
882 struct dpif_ipfix *di = CONST_CAST(struct dpif_ipfix *, di_);
884 ovs_refcount_ref(&di->ref_cnt);
890 dpif_ipfix_get_bridge_exporter_probability(const struct dpif_ipfix *di)
894 ovs_mutex_lock(&mutex);
895 ret = di->bridge_exporter.probability;
896 ovs_mutex_unlock(&mutex);
901 dpif_ipfix_get_bridge_exporter_input_sampling(const struct dpif_ipfix *di)
905 ovs_mutex_lock(&mutex);
906 if (di->bridge_exporter.options) {
907 ret = di->bridge_exporter.options->enable_input_sampling;
909 ovs_mutex_unlock(&mutex);
914 dpif_ipfix_get_bridge_exporter_output_sampling(const struct dpif_ipfix *di)
918 ovs_mutex_lock(&mutex);
919 if (di->bridge_exporter.options) {
920 ret = di->bridge_exporter.options->enable_output_sampling;
922 ovs_mutex_unlock(&mutex);
927 dpif_ipfix_get_bridge_exporter_tunnel_sampling(const struct dpif_ipfix *di)
931 ovs_mutex_lock(&mutex);
932 if (di->bridge_exporter.options) {
933 ret = di->bridge_exporter.options->enable_tunnel_sampling;
935 ovs_mutex_unlock(&mutex);
940 dpif_ipfix_clear(struct dpif_ipfix *di) OVS_REQUIRES(mutex)
942 struct dpif_ipfix_flow_exporter_map_node *exp_node, *exp_next;
943 struct dpif_ipfix_port *dip, *next;
945 dpif_ipfix_bridge_exporter_clear(&di->bridge_exporter);
947 HMAP_FOR_EACH_SAFE (exp_node, exp_next, node, &di->flow_exporter_map) {
948 hmap_remove(&di->flow_exporter_map, &exp_node->node);
949 dpif_ipfix_flow_exporter_destroy(&exp_node->exporter);
953 HMAP_FOR_EACH_SAFE (dip, next, hmap_node, &di->tunnel_ports) {
954 dpif_ipfix_del_port(di, dip);
959 dpif_ipfix_unref(struct dpif_ipfix *di) OVS_EXCLUDED(mutex)
961 if (di && ovs_refcount_unref_relaxed(&di->ref_cnt) == 1) {
962 ovs_mutex_lock(&mutex);
963 dpif_ipfix_clear(di);
964 dpif_ipfix_bridge_exporter_destroy(&di->bridge_exporter);
965 hmap_destroy(&di->flow_exporter_map);
966 hmap_destroy(&di->tunnel_ports);
968 ovs_mutex_unlock(&mutex);
973 ipfix_init_header(uint32_t export_time_sec, uint32_t seq_number,
974 uint32_t obs_domain_id, struct dp_packet *msg)
976 struct ipfix_header *hdr;
978 hdr = dp_packet_put_zeros(msg, sizeof *hdr);
979 hdr->version = htons(IPFIX_VERSION);
980 hdr->length = htons(sizeof *hdr); /* Updated in ipfix_send_msg. */
981 hdr->export_time = htonl(export_time_sec);
982 hdr->seq_number = htonl(seq_number);
983 hdr->obs_domain_id = htonl(obs_domain_id);
987 ipfix_send_msg(const struct collectors *collectors, struct dp_packet *msg)
989 struct ipfix_header *hdr;
991 /* Adjust the length in the header. */
992 hdr = dp_packet_data(msg);
993 hdr->length = htons(dp_packet_size(msg));
995 collectors_send(collectors, dp_packet_data(msg), dp_packet_size(msg));
996 dp_packet_set_size(msg, 0);
1000 ipfix_get_template_id(enum ipfix_proto_l2 l2, enum ipfix_proto_l3 l3,
1001 enum ipfix_proto_l4 l4, enum ipfix_proto_tunnel tunnel)
1003 uint16_t template_id;
1005 template_id = template_id * NUM_IPFIX_PROTO_L3 + l3;
1006 template_id = template_id * NUM_IPFIX_PROTO_L4 + l4;
1007 template_id = template_id * NUM_IPFIX_PROTO_TUNNEL + tunnel;
1008 return IPFIX_TEMPLATE_ID_MIN + template_id;
1012 ipfix_define_template_entity(enum ipfix_entity_id id,
1013 enum ipfix_entity_size size,
1014 enum ipfix_entity_enterprise enterprise,
1015 struct dp_packet *msg)
1017 struct ipfix_template_field_specifier *field;
1021 field_size = sizeof *field;
1023 /* No enterprise number */
1024 field_size = sizeof *field - sizeof(ovs_be32);
1026 field = dp_packet_put_zeros(msg, field_size);
1027 field->element_id = htons(id);
1029 field->field_length = htons(size);
1031 /* RFC 5101, Section 7. Variable-Length Information Element */
1032 field->field_length = OVS_BE16_MAX;
1035 field->enterprise = htonl(enterprise);
/* Appends to 'msg' one template field specifier per IPFIX entity for the
 * template matching the protocol combination (l2, l3, l4, tunnel).  The set
 * and order of the fields defined here must match exactly the data record
 * parts built in ipfix_cache_entry_init() and ipfix_put_data_set(). */
ipfix_define_template_fields(enum ipfix_proto_l2 l2, enum ipfix_proto_l3 l3,
                             enum ipfix_proto_l4 l4, enum ipfix_proto_tunnel tunnel,
                             struct dp_packet *msg)
        ipfix_define_template_entity(IPFIX_ENTITY_ID_##ID, \
                                     IPFIX_ENTITY_SIZE_##ID, \
                                     IPFIX_ENTITY_ENTERPRISE_##ID, msg); \
    DEF(OBSERVATION_POINT_ID);
    DEF(FLOW_DIRECTION);

    /* Common Ethernet entities. */
    DEF(SOURCE_MAC_ADDRESS);
    DEF(DESTINATION_MAC_ADDRESS);
    DEF(ETHERNET_HEADER_LENGTH);

    /* 802.1Q entities, present only in templates for VLAN-tagged flows. */
    if (l2 == IPFIX_PROTO_L2_VLAN) {
        DEF(DOT1Q_PRIORITY);

    /* Network and transport layer entities, only when L3 is known. */
    if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
        DEF(PROTOCOL_IDENTIFIER);
        DEF(IP_DIFF_SERV_CODE_POINT);
        DEF(IP_CLASS_OF_SERVICE);

        if (l3 == IPFIX_PROTO_L3_IPV4) {
            DEF(SOURCE_IPV4_ADDRESS);
            DEF(DESTINATION_IPV4_ADDRESS);
            if (l4 == IPFIX_PROTO_L4_TCP_UDP_SCTP) {
                DEF(SOURCE_TRANSPORT_PORT);
                DEF(DESTINATION_TRANSPORT_PORT);
            } else if (l4 == IPFIX_PROTO_L4_ICMP) {
                DEF(ICMP_TYPE_IPV4);
                DEF(ICMP_CODE_IPV4);
        } else { /* l3 == IPFIX_PROTO_L3_IPV6 */
            DEF(SOURCE_IPV6_ADDRESS);
            DEF(DESTINATION_IPV6_ADDRESS);
            DEF(FLOW_LABEL_IPV6);
            if (l4 == IPFIX_PROTO_L4_TCP_UDP_SCTP) {
                DEF(SOURCE_TRANSPORT_PORT);
                DEF(DESTINATION_TRANSPORT_PORT);
            } else if (l4 == IPFIX_PROTO_L4_ICMP) {
                DEF(ICMP_TYPE_IPV6);
                DEF(ICMP_CODE_IPV6);

    /* Outer (tunnel) header entities, only for tunneled flows. */
    if (tunnel != IPFIX_PROTO_NOT_TUNNELED) {
        DEF(TUNNEL_SOURCE_IPV4_ADDRESS);
        DEF(TUNNEL_DESTINATION_IPV4_ADDRESS);
        DEF(TUNNEL_PROTOCOL_IDENTIFIER);
        DEF(TUNNEL_SOURCE_TRANSPORT_PORT);
        DEF(TUNNEL_DESTINATION_TRANSPORT_PORT);

    /* 2. Flow aggregated data. */

    DEF(FLOW_START_DELTA_MICROSECONDS);
    DEF(FLOW_END_DELTA_MICROSECONDS);
    DEF(PACKET_DELTA_COUNT);
    DEF(LAYER2_OCTET_DELTA_COUNT);
    DEF(FLOW_END_REASON);

    /* IP octet statistics are only defined when L3 is known. */
    if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
        DEF(OCTET_DELTA_COUNT);
        DEF(OCTET_DELTA_SUM_OF_SQUARES);
        DEF(MINIMUM_IP_TOTAL_LENGTH);
        DEF(MAXIMUM_IP_TOTAL_LENGTH);
1136 ipfix_init_template_msg(void *msg_stub, uint32_t export_time_sec,
1137 uint32_t seq_number, uint32_t obs_domain_id,
1138 struct dp_packet *msg, size_t *set_hdr_offset)
1140 struct ipfix_set_header *set_hdr;
1142 dp_packet_use_stub(msg, msg_stub, sizeof msg_stub);
1144 ipfix_init_header(export_time_sec, seq_number, obs_domain_id, msg);
1145 *set_hdr_offset = dp_packet_size(msg);
1147 /* Add a Template Set. */
1148 set_hdr = dp_packet_put_zeros(msg, sizeof *set_hdr);
1149 set_hdr->set_id = htons(IPFIX_SET_ID_TEMPLATE);
/* Patches the length of the pending Template Set (opened at
 * 'set_hdr_offset' by ipfix_init_template_msg()), sends 'msg' to all
 * 'collectors', and releases the message buffer. */
ipfix_send_template_msg(const struct collectors *collectors,
                        struct dp_packet *msg, size_t set_hdr_offset)
    struct ipfix_set_header *set_hdr;

    /* Send template message. */
    set_hdr = (struct ipfix_set_header*)
        ((uint8_t*)dp_packet_data(msg) + set_hdr_offset);
    /* The set length covers the set header plus every template record added
     * since the set was opened. */
    set_hdr->length = htons(dp_packet_size(msg) - set_hdr_offset);

    ipfix_send_msg(collectors, msg);

    dp_packet_uninit(msg);
/* Sends to 'exporter''s collectors one or more template messages for
 * observation domain 'obs_domain_id', defining a template record for every
 * (l2, l3, l4, tunnel) protocol combination and splitting into a new
 * message whenever the current one reaches MAX_MESSAGE_LEN. */
ipfix_send_template_msgs(struct dpif_ipfix_exporter *exporter,
                         uint32_t export_time_sec, uint32_t obs_domain_id)
    uint64_t msg_stub[DIV_ROUND_UP(MAX_MESSAGE_LEN, 8)];
    struct dp_packet msg;
    size_t set_hdr_offset, tmpl_hdr_offset;
    struct ipfix_template_record_header *tmpl_hdr;
    uint16_t field_count;
    enum ipfix_proto_l2 l2;
    enum ipfix_proto_l3 l3;
    enum ipfix_proto_l4 l4;
    enum ipfix_proto_tunnel tunnel;

    ipfix_init_template_msg(msg_stub, export_time_sec, exporter->seq_number,
                            obs_domain_id, &msg, &set_hdr_offset);
    /* Define one template for each possible combination of
     * protocol field values. */
    for (l2 = 0; l2 < NUM_IPFIX_PROTO_L2; l2++) {
        for (l3 = 0; l3 < NUM_IPFIX_PROTO_L3; l3++) {
            for (l4 = 0; l4 < NUM_IPFIX_PROTO_L4; l4++) {
                /* A known L4 protocol is impossible without a known L3. */
                if (l3 == IPFIX_PROTO_L3_UNKNOWN &&
                    l4 != IPFIX_PROTO_L4_UNKNOWN) {
                for (tunnel = 0; tunnel < NUM_IPFIX_PROTO_TUNNEL; tunnel++) {
                    /* When the size of the template packet reaches
                     * MAX_MESSAGE_LEN(1024), send it out.
                     * And then reinitialize the msg to construct a new
                     * packet for the following templates. */
                    if (dp_packet_size(&msg) >= MAX_MESSAGE_LEN) {
                        /* Send template message. */
                        ipfix_send_template_msg(exporter->collectors,
                                                &msg, set_hdr_offset);
                        /* Reinitialize the template msg. */
                        ipfix_init_template_msg(msg_stub, export_time_sec,
                                                exporter->seq_number,
                                                obs_domain_id, &msg,

                    /* Write the record header, then patch its field count
                     * once the fields have been defined. */
                    tmpl_hdr_offset = dp_packet_size(&msg);
                    tmpl_hdr = dp_packet_put_zeros(&msg, sizeof *tmpl_hdr);
                    tmpl_hdr->template_id = htons(
                        ipfix_get_template_id(l2, l3, l4, tunnel));
                    /* NOTE(review): 'field_count' appears unassigned in this
                     * rendering; presumably it receives the return value of
                     * ipfix_define_template_fields() -- verify. */
                    ipfix_define_template_fields(l2, l3, l4, tunnel, &msg);
                    /* The buffer may have been reallocated while fields were
                     * appended, so recompute the record header pointer. */
                    tmpl_hdr = (struct ipfix_template_record_header*)
                        ((uint8_t*)dp_packet_data(&msg) + tmpl_hdr_offset);
                    tmpl_hdr->field_count = htons(field_count);

    /* Send template message. */
    ipfix_send_template_msg(exporter->collectors, &msg, set_hdr_offset);

    /* XXX: Add Options Template Sets, at least to define a Flow Keys
     * Option Template. */
/* Hashes 'flow_key' seeded with 'basis': folds in the observation domain
 * ID, the template ID, and the serialized flow key bytes. */
static inline uint32_t
ipfix_hash_flow_key(const struct ipfix_flow_key *flow_key, uint32_t basis)
    hash = hash_int(flow_key->obs_domain_id, basis);
    hash = hash_int(flow_key->template_id, hash);
    hash = hash_bytes(flow_key->flow_key_msg_part,
                      flow_key->flow_key_msg_part_size, hash);
ipfix_flow_key_equal(const struct ipfix_flow_key *a,
                     const struct ipfix_flow_key *b)
    /* The template ID determines the flow key size, so no need to
     * compare the sizes separately. */
    return (a->obs_domain_id == b->obs_domain_id
            && a->template_id == b->template_id
            && memcmp(a->flow_key_msg_part, b->flow_key_msg_part,
                      a->flow_key_msg_part_size) == 0);
/* Looks up the cache entry whose flow key equals 'flow_key' in 'exporter''s
 * flow cache hash map. */
static struct ipfix_flow_cache_entry*
ipfix_cache_find_entry(const struct dpif_ipfix_exporter *exporter,
                       const struct ipfix_flow_key *flow_key)
    struct ipfix_flow_cache_entry *entry;

    HMAP_FOR_EACH_WITH_HASH (entry, flow_key_map_node,
                             ipfix_hash_flow_key(flow_key, 0),
                             &exporter->cache_flow_key_map) {
        if (ipfix_flow_key_equal(&entry->flow_key, flow_key)) {
/* Stores in '*next_timeout_msec' the active-timeout expiration deadline, in
 * milliseconds, computed from a cached entry's flow start timestamp.
 * NOTE(review): iterates the start-timestamp-ordered list, so the first
 * entry visited is the oldest one -- confirm the elided loop body exits
 * after it. */
ipfix_cache_next_timeout_msec(const struct dpif_ipfix_exporter *exporter,
                              long long int *next_timeout_msec)
    struct ipfix_flow_cache_entry *entry;

    LIST_FOR_EACH (entry, cache_flow_start_timestamp_list_node,
                   &exporter->cache_flow_start_timestamp_list) {
        /* Deadline = flow start time (usec -> msec) plus the active
         * timeout (sec -> msec). */
        *next_timeout_msec = entry->flow_start_timestamp_usec / 1000LL
            + 1000LL * exporter->cache_active_timeout;
/* Aggregates the statistics of 'from_entry' into 'to_entry', which shares
 * the same flow key: widens the [start, end] timestamp interval, sums the
 * packet and octet counters, and merges the min/max IP total lengths. */
ipfix_cache_aggregate_entries(struct ipfix_flow_cache_entry *from_entry,
                              struct ipfix_flow_cache_entry *to_entry)
    uint64_t *to_start, *to_end, *from_start, *from_end;
    uint16_t *to_min_len, *to_max_len, *from_min_len, *from_max_len;

    to_start = &to_entry->flow_start_timestamp_usec;
    to_end = &to_entry->flow_end_timestamp_usec;
    from_start = &from_entry->flow_start_timestamp_usec;
    from_end = &from_entry->flow_end_timestamp_usec;

    /* Keep the earliest start and the latest end. */
    if (*to_start > *from_start) {
        *to_start = *from_start;
    if (*to_end < *from_end) {
        *to_end = *from_end;

    to_entry->packet_delta_count += from_entry->packet_delta_count;
    to_entry->layer2_octet_delta_count += from_entry->layer2_octet_delta_count;

    to_entry->octet_delta_count += from_entry->octet_delta_count;
    to_entry->octet_delta_sum_of_squares +=
        from_entry->octet_delta_sum_of_squares;

    to_min_len = &to_entry->minimum_ip_total_length;
    to_max_len = &to_entry->maximum_ip_total_length;
    from_min_len = &from_entry->minimum_ip_total_length;
    from_max_len = &from_entry->maximum_ip_total_length;

    /* A zero minimum means no IP length has been recorded yet, so any
     * nonzero incoming minimum wins over it. */
    if (!*to_min_len || (*from_min_len && *to_min_len > *from_min_len)) {
        *to_min_len = *from_min_len;
    if (*to_max_len < *from_max_len) {
        *to_max_len = *from_max_len;
/* Add an entry into a flow cache.  The entry is either aggregated into
 * an existing entry with the same flow key and free()d, or it is
 * inserted into the cache, which then owns it. */
ipfix_cache_update(struct dpif_ipfix_exporter *exporter,
                   struct ipfix_flow_cache_entry *entry)
    struct ipfix_flow_cache_entry *old_entry;

    old_entry = ipfix_cache_find_entry(exporter, &entry->flow_key);

    if (old_entry == NULL) {
        hmap_insert(&exporter->cache_flow_key_map, &entry->flow_key_map_node,
                    ipfix_hash_flow_key(&entry->flow_key, 0));

        /* As the latest entry added into the cache, it should
         * logically have the highest flow_start_timestamp_usec, so
         * append it at the tail. */
        list_push_back(&exporter->cache_flow_start_timestamp_list,
                       &entry->cache_flow_start_timestamp_list_node);

        /* Enforce exporter->cache_max_flows limit. */
        if (hmap_count(&exporter->cache_flow_key_map)
            > exporter->cache_max_flows) {
            dpif_ipfix_cache_expire_now(exporter, false);

        /* Merge the new sample into the existing entry. */
        ipfix_cache_aggregate_entries(entry, old_entry);
/* Initializes flow cache entry 'entry' from one sampled 'packet' and its
 * extracted 'flow': picks the template matching the packet's protocols,
 * serializes the flow key part of the data record into the entry, and
 * fills in the aggregated statistics for 'packet_delta_count' packets.
 * 'tunnel_port' and 'tunnel_key' are non-NULL only for tunneled samples. */
ipfix_cache_entry_init(struct ipfix_flow_cache_entry *entry,
                       const struct dp_packet *packet, const struct flow *flow,
                       uint64_t packet_delta_count, uint32_t obs_domain_id,
                       uint32_t obs_point_id, odp_port_t output_odp_port,
                       const struct dpif_ipfix_port *tunnel_port,
                       const struct flow_tnl *tunnel_key)
    struct ipfix_flow_key *flow_key;
    struct dp_packet msg;
    enum ipfix_proto_l2 l2;
    enum ipfix_proto_l3 l3;
    enum ipfix_proto_l4 l4;
    enum ipfix_proto_tunnel tunnel = IPFIX_PROTO_NOT_TUNNELED;
    uint8_t ethernet_header_length;
    uint16_t ethernet_total_length;

    /* Serialize the flow key part directly into the entry's own buffer. */
    flow_key = &entry->flow_key;
    dp_packet_use_stub(&msg, flow_key->flow_key_msg_part,
                       sizeof flow_key->flow_key_msg_part);

    /* Choose the right template ID matching the protocols in the
     * sampled packet. */
    l2 = (flow->vlan_tci == 0) ? IPFIX_PROTO_L2_ETH : IPFIX_PROTO_L2_VLAN;

    switch(ntohs(flow->dl_type)) {
        l3 = IPFIX_PROTO_L3_IPV4;
        switch(flow->nw_proto) {
            l4 = IPFIX_PROTO_L4_TCP_UDP_SCTP;
            l4 = IPFIX_PROTO_L4_ICMP;
            l4 = IPFIX_PROTO_L4_UNKNOWN;
        l3 = IPFIX_PROTO_L3_IPV6;
        switch(flow->nw_proto) {
            l4 = IPFIX_PROTO_L4_TCP_UDP_SCTP;
        case IPPROTO_ICMPV6:
            l4 = IPFIX_PROTO_L4_ICMP;
            l4 = IPFIX_PROTO_L4_UNKNOWN;
        l3 = IPFIX_PROTO_L3_UNKNOWN;
        l4 = IPFIX_PROTO_L4_UNKNOWN;

    if (tunnel_port && tunnel_key) {
        tunnel = IPFIX_PROTO_TUNNELED;

    flow_key->obs_domain_id = obs_domain_id;
    flow_key->template_id = ipfix_get_template_id(l2, l3, l4, tunnel);

    /* The fields defined in the ipfix_data_record_* structs and sent
     * below must match exactly the templates defined in
     * ipfix_define_template_fields. */

    ethernet_header_length = (l2 == IPFIX_PROTO_L2_VLAN)
        ? VLAN_ETH_HEADER_LEN : ETH_HEADER_LEN;
    ethernet_total_length = dp_packet_size(packet);

    /* Common Ethernet entities. */
        struct ipfix_data_record_flow_key_common *data_common;

        data_common = dp_packet_put_zeros(&msg, sizeof *data_common);
        data_common->observation_point_id = htonl(obs_point_id);
        /* A sample with no output port was taken on ingress. */
        data_common->flow_direction =
            (output_odp_port == ODPP_NONE) ? INGRESS_FLOW : EGRESS_FLOW;
        memcpy(data_common->source_mac_address, flow->dl_src,
               sizeof flow->dl_src);
        memcpy(data_common->destination_mac_address, flow->dl_dst,
               sizeof flow->dl_dst);
        data_common->ethernet_type = flow->dl_type;
        data_common->ethernet_header_length = ethernet_header_length;

    if (l2 == IPFIX_PROTO_L2_VLAN) {
        struct ipfix_data_record_flow_key_vlan *data_vlan;
        uint16_t vlan_id = vlan_tci_to_vid(flow->vlan_tci);
        uint8_t priority = vlan_tci_to_pcp(flow->vlan_tci);

        data_vlan = dp_packet_put_zeros(&msg, sizeof *data_vlan);
        data_vlan->vlan_id = htons(vlan_id);
        data_vlan->dot1q_vlan_id = htons(vlan_id);
        data_vlan->dot1q_priority = priority;

    if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
        struct ipfix_data_record_flow_key_ip *data_ip;

        data_ip = dp_packet_put_zeros(&msg, sizeof *data_ip);
        data_ip->ip_version = (l3 == IPFIX_PROTO_L3_IPV4) ? 4 : 6;
        data_ip->ip_ttl = flow->nw_ttl;
        data_ip->protocol_identifier = flow->nw_proto;
        /* DSCP is the top 6 bits of the ToS byte, precedence the top 3. */
        data_ip->ip_diff_serv_code_point = flow->nw_tos >> 2;
        data_ip->ip_precedence = flow->nw_tos >> 5;
        data_ip->ip_class_of_service = flow->nw_tos;

        if (l3 == IPFIX_PROTO_L3_IPV4) {
            struct ipfix_data_record_flow_key_ipv4 *data_ipv4;

            data_ipv4 = dp_packet_put_zeros(&msg, sizeof *data_ipv4);
            data_ipv4->source_ipv4_address = flow->nw_src;
            data_ipv4->destination_ipv4_address = flow->nw_dst;
        } else { /* l3 == IPFIX_PROTO_L3_IPV6 */
            struct ipfix_data_record_flow_key_ipv6 *data_ipv6;

            data_ipv6 = dp_packet_put_zeros(&msg, sizeof *data_ipv6);
            memcpy(data_ipv6->source_ipv6_address, &flow->ipv6_src,
                   sizeof flow->ipv6_src);
            memcpy(data_ipv6->destination_ipv6_address, &flow->ipv6_dst,
                   sizeof flow->ipv6_dst);
            data_ipv6->flow_label_ipv6 = flow->ipv6_label;

    if (l4 == IPFIX_PROTO_L4_TCP_UDP_SCTP) {
        struct ipfix_data_record_flow_key_transport *data_transport;

        data_transport = dp_packet_put_zeros(&msg, sizeof *data_transport);
        data_transport->source_transport_port = flow->tp_src;
        data_transport->destination_transport_port = flow->tp_dst;
    } else if (l4 == IPFIX_PROTO_L4_ICMP) {
        struct ipfix_data_record_flow_key_icmp *data_icmp;

        /* For ICMP flows, tp_src/tp_dst carry the ICMP type/code in their
         * low byte. */
        data_icmp = dp_packet_put_zeros(&msg, sizeof *data_icmp);
        data_icmp->icmp_type = ntohs(flow->tp_src) & 0xff;
        data_icmp->icmp_code = ntohs(flow->tp_dst) & 0xff;

    if (tunnel == IPFIX_PROTO_TUNNELED) {
        struct ipfix_data_record_flow_key_tunnel *data_tunnel;
        const uint8_t *tun_id;

        /* The tunnel key is variable-length, so reserve the fixed part
         * plus tunnel_key_length extra bytes. */
        data_tunnel = dp_packet_put_zeros(&msg, sizeof *data_tunnel +
                                          tunnel_port->tunnel_key_length);
        data_tunnel->tunnel_source_ipv4_address = tunnel_key->ip_src;
        data_tunnel->tunnel_destination_ipv4_address = tunnel_key->ip_dst;
        /* The tunnel_protocol_identifier comes from the tunnel_protocol
         * array, which maps each tunnel type to its IP protocol identifier.
         * A tunnel type layered on top of IPsec reuses the protocol
         * identifier of the inner tunnel type, and tp_src/tp_dst depend on
         * that identifier: e.g. DPIF_IPFIX_TUNNEL_IPSEC_GRE uses
         * IPPROTO_GRE, and both tp_src and tp_dst are zero. */
        data_tunnel->tunnel_protocol_identifier =
            tunnel_protocol[tunnel_port->tunnel_type];
        data_tunnel->tunnel_source_transport_port = tunnel_key->tp_src;
        data_tunnel->tunnel_destination_transport_port = tunnel_key->tp_dst;
        data_tunnel->tunnel_type = tunnel_port->tunnel_type;
        data_tunnel->tunnel_key_length = tunnel_port->tunnel_key_length;
        /* tun_id is in network order, and tunnel key is in low bits. */
        tun_id = (const uint8_t *) &tunnel_key->tun_id;
        memcpy(data_tunnel->tunnel_key,
               &tun_id[8 - tunnel_port->tunnel_key_length],
               tunnel_port->tunnel_key_length);

    flow_key->flow_key_msg_part_size = dp_packet_size(&msg);

        uint64_t layer2_octet_delta_count;

        /* Calculate the total matched octet count by considering as
         * an approximation that all matched packets have the same
         * Ethernet length as the sampled packet. */
        layer2_octet_delta_count = packet_delta_count * ethernet_total_length;

        xgettimeofday(&now);
        entry->flow_end_timestamp_usec = now.tv_usec + 1000000LL * now.tv_sec;
        /* A fresh entry covers a single point in time. */
        entry->flow_start_timestamp_usec = entry->flow_end_timestamp_usec;
        entry->packet_delta_count = packet_delta_count;
        entry->layer2_octet_delta_count = layer2_octet_delta_count;

    if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
        uint16_t ip_total_length =
            ethernet_total_length - ethernet_header_length;
        uint64_t octet_delta_count;

        /* Calculate the total matched octet count by considering as
         * an approximation that all matched packets have the same
         * IP length as the sampled packet. */
        octet_delta_count = packet_delta_count * ip_total_length;

        entry->octet_delta_count = octet_delta_count;
        entry->octet_delta_sum_of_squares = octet_delta_count * ip_total_length;
        entry->minimum_ip_total_length = ip_total_length;
        entry->maximum_ip_total_length = ip_total_length;
        /* Non-IP flow: zero means "no IP statistics recorded". */
        entry->octet_delta_sum_of_squares = 0;
        entry->minimum_ip_total_length = 0;
        entry->maximum_ip_total_length = 0;
/* Send each single data record in its own data set, to simplify the
 * implementation by avoiding having to group record by template ID
 * before sending. */
ipfix_put_data_set(uint32_t export_time_sec,
                   struct ipfix_flow_cache_entry *entry,
                   enum ipfix_flow_end_reason flow_end_reason,
                   struct dp_packet *msg)
    size_t set_hdr_offset;
    struct ipfix_set_header *set_hdr;

    set_hdr_offset = dp_packet_size(msg);

    /* Put a Data Set.  For Data Sets the set ID is the template ID of the
     * records it carries. */
    set_hdr = dp_packet_put_zeros(msg, sizeof *set_hdr);
    set_hdr->set_id = htons(entry->flow_key.template_id);

    /* Copy the flow key part of the data record. */

    dp_packet_put(msg, entry->flow_key.flow_key_msg_part,
                  entry->flow_key.flow_key_msg_part_size);

    /* Put the non-key part of the data record. */

        struct ipfix_data_record_aggregated_common *data_aggregated_common;
        uint64_t export_time_usec, flow_start_delta_usec, flow_end_delta_usec;

        /* Calculate the negative deltas relative to the export time
         * in seconds sent in the header, not the exact export
         * time. */
        export_time_usec = 1000000LL * export_time_sec;
        flow_start_delta_usec = export_time_usec
            - entry->flow_start_timestamp_usec;
        flow_end_delta_usec = export_time_usec
            - entry->flow_end_timestamp_usec;

        data_aggregated_common = dp_packet_put_zeros(
            msg, sizeof *data_aggregated_common);
        data_aggregated_common->flow_start_delta_microseconds = htonl(
            flow_start_delta_usec);
        data_aggregated_common->flow_end_delta_microseconds = htonl(
            flow_end_delta_usec);
        data_aggregated_common->packet_delta_count = htonll(
            entry->packet_delta_count);
        data_aggregated_common->layer2_octet_delta_count = htonll(
            entry->layer2_octet_delta_count);
        data_aggregated_common->flow_end_reason = flow_end_reason;

    /* A nonzero sum of squares means the entry recorded IP statistics,
     * cf. ipfix_cache_entry_init(). */
    if (entry->octet_delta_sum_of_squares) { /* IP packet. */
        struct ipfix_data_record_aggregated_ip *data_aggregated_ip;

        data_aggregated_ip = dp_packet_put_zeros(
            msg, sizeof *data_aggregated_ip);
        data_aggregated_ip->octet_delta_count = htonll(
            entry->octet_delta_count);
        data_aggregated_ip->octet_delta_sum_of_squares = htonll(
            entry->octet_delta_sum_of_squares);
        data_aggregated_ip->minimum_ip_total_length = htonll(
            entry->minimum_ip_total_length);
        data_aggregated_ip->maximum_ip_total_length = htonll(
            entry->maximum_ip_total_length);

    /* Patch the set length now that the record size is known; the buffer
     * may have been reallocated, so recompute the header pointer. */
    set_hdr = (struct ipfix_set_header*)((uint8_t*)dp_packet_data(msg) + set_hdr_offset);
    set_hdr->length = htons(dp_packet_size(msg) - set_hdr_offset);
/* Send an IPFIX message with a single data record. */
ipfix_send_data_msg(struct dpif_ipfix_exporter *exporter,
                    uint32_t export_time_sec,
                    struct ipfix_flow_cache_entry *entry,
                    enum ipfix_flow_end_reason flow_end_reason)
    uint64_t msg_stub[DIV_ROUND_UP(MAX_MESSAGE_LEN, 8)];
    struct dp_packet msg;
    dp_packet_use_stub(&msg, msg_stub, sizeof msg_stub);

    /* Each data message consumes one sequence number. */
    ipfix_init_header(export_time_sec, exporter->seq_number++,
                      entry->flow_key.obs_domain_id, &msg);
    ipfix_put_data_set(export_time_sec, entry, flow_end_reason, &msg);
    ipfix_send_msg(exporter->collectors, &msg);

    dp_packet_uninit(&msg);
/* Creates a flow cache entry from one sampled packet and hands it to
 * 'exporter''s cache, which either aggregates it into an existing entry
 * with the same flow key or takes ownership of it
 * (cf. ipfix_cache_update()). */
dpif_ipfix_sample(struct dpif_ipfix_exporter *exporter,
                  const struct dp_packet *packet, const struct flow *flow,
                  uint64_t packet_delta_count, uint32_t obs_domain_id,
                  uint32_t obs_point_id, odp_port_t output_odp_port,
                  const struct dpif_ipfix_port *tunnel_port,
                  const struct flow_tnl *tunnel_key)
    struct ipfix_flow_cache_entry *entry;

    /* Create a flow cache entry from the sample. */
    entry = xmalloc(sizeof *entry);
    ipfix_cache_entry_init(entry, packet, flow, packet_delta_count,
                           obs_domain_id, obs_point_id,
                           output_odp_port, tunnel_port, tunnel_key);
    ipfix_cache_update(exporter, entry);
/* Samples one packet for the per-bridge exporter.  When tunnel sampling is
 * enabled, an ingress sample (no output port) takes the tunnel from
 * 'flow->tunnel' and 'input_odp_port', while an egress sample takes it from
 * 'output_tunnel_key' and 'output_odp_port'. */
dpif_ipfix_bridge_sample(struct dpif_ipfix *di, const struct dp_packet *packet,
                         const struct flow *flow,
                         odp_port_t input_odp_port, odp_port_t output_odp_port,
                         const struct flow_tnl *output_tunnel_key)
    uint64_t packet_delta_count;
    const struct flow_tnl *tunnel_key = NULL;
    struct dpif_ipfix_port * tunnel_port = NULL;

    ovs_mutex_lock(&mutex);
    /* Use the sampling probability as an approximation of the number
     * of matched packets.
     * NOTE(review): assumes bridge_exporter.probability > 0, i.e. the
     * bridge exporter is enabled -- confirm against callers. */
    packet_delta_count = UINT32_MAX / di->bridge_exporter.probability;
    if (di->bridge_exporter.options->enable_tunnel_sampling) {
        /* Input tunnel: no output port and a tunnel destination present. */
        if (output_odp_port == ODPP_NONE && flow->tunnel.ip_dst) {
            tunnel_key = &flow->tunnel;
            tunnel_port = dpif_ipfix_find_port(di, input_odp_port);
        if (output_odp_port != ODPP_NONE && output_tunnel_key) {
            /* Output tunnel, output_tunnel_key must be valid. */
            tunnel_key = output_tunnel_key;
            tunnel_port = dpif_ipfix_find_port(di, output_odp_port);

    dpif_ipfix_sample(&di->bridge_exporter.exporter, packet, flow,
                      di->bridge_exporter.options->obs_domain_id,
                      di->bridge_exporter.options->obs_point_id,
                      output_odp_port, tunnel_port, tunnel_key);
    ovs_mutex_unlock(&mutex);
/* Samples one packet for the per-flow exporter selected by
 * 'collector_set_id'.  No tunnel information is exported on this path. */
dpif_ipfix_flow_sample(struct dpif_ipfix *di, const struct dp_packet *packet,
                       const struct flow *flow, uint32_t collector_set_id,
                       uint16_t probability, uint32_t obs_domain_id,
                       uint32_t obs_point_id) OVS_EXCLUDED(mutex)
    struct dpif_ipfix_flow_exporter_map_node *node;
    /* Use the sampling probability as an approximation of the number
     * of matched packets.
     * NOTE(review): assumes probability > 0 -- confirm against callers. */
    uint64_t packet_delta_count = USHRT_MAX / probability;

    ovs_mutex_lock(&mutex);
    node = dpif_ipfix_find_flow_exporter_map_node(di, collector_set_id);
    /* NOTE(review): presumably guarded by a NULL check on 'node' on an
     * elided line -- verify. */
    dpif_ipfix_sample(&node->exporter.exporter, packet, flow,
                      packet_delta_count, obs_domain_id, obs_point_id,
                      ODPP_NONE, NULL, NULL);

    ovs_mutex_unlock(&mutex);
/* Expires entries of 'exporter''s flow cache and exports them as data
 * records: with 'forced_end' every entry is flushed (FORCED_END); otherwise
 * entries older than the active timeout (ACTIVE_TIMEOUT) or in excess of
 * cache_max_flows (LACK_OF_RESOURCES) are sent and removed.  Also refreshes
 * the templates at most once per IPFIX_TEMPLATE_INTERVAL, before the data
 * records that reference them. */
dpif_ipfix_cache_expire(struct dpif_ipfix_exporter *exporter,
                        bool forced_end, const uint64_t export_time_usec,
                        const uint32_t export_time_sec)
    struct ipfix_flow_cache_entry *entry, *next_entry;
    uint64_t max_flow_start_timestamp_usec;
    bool template_msg_sent = false;
    enum ipfix_flow_end_reason flow_end_reason;

    if (list_is_empty(&exporter->cache_flow_start_timestamp_list)) {

    /* Entries that started before this timestamp have exceeded the
     * configured active timeout. */
    max_flow_start_timestamp_usec = export_time_usec -
        1000000LL * exporter->cache_active_timeout;

    LIST_FOR_EACH_SAFE (entry, next_entry, cache_flow_start_timestamp_list_node,
                        &exporter->cache_flow_start_timestamp_list) {
            flow_end_reason = FORCED_END;
        } else if (entry->flow_start_timestamp_usec
                   <= max_flow_start_timestamp_usec) {
            flow_end_reason = ACTIVE_TIMEOUT;
        } else if (hmap_count(&exporter->cache_flow_key_map)
                   > exporter->cache_max_flows) {
            /* Enforce exporter->cache_max_flows. */
            flow_end_reason = LACK_OF_RESOURCES;
            /* Remaining flows haven't expired yet. */

        list_remove(&entry->cache_flow_start_timestamp_list_node);
        hmap_remove(&exporter->cache_flow_key_map,
                    &entry->flow_key_map_node);

        /* Refresh templates at most once per expiration pass. */
        if (!template_msg_sent
            && (exporter->last_template_set_time + IPFIX_TEMPLATE_INTERVAL)
            <= export_time_sec) {
            ipfix_send_template_msgs(exporter, export_time_sec,
                                     entry->flow_key.obs_domain_id);
            exporter->last_template_set_time = export_time_sec;
            template_msg_sent = true;

        /* XXX: Group multiple data records for the same obs domain id
         * into the same message. */
        ipfix_send_data_msg(exporter, export_time_sec, entry, flow_end_reason);
/* Returns the current wall-clock time both in microseconds
 * ('*export_time_usec') and as the whole second ('*export_time_sec') to put
 * in the IPFIX message header, rounded up so that the per-record negative
 * time deltas relative to it stay non-negative. */
get_export_time_now(uint64_t *export_time_usec, uint32_t *export_time_sec)
    struct timeval export_time;
    xgettimeofday(&export_time);

    *export_time_usec = export_time.tv_usec + 1000000LL * export_time.tv_sec;

    /* The IPFIX start and end deltas are negative deltas relative to
     * the export time, so set the export time 1 second off to
     * calculate those deltas. */
    if (export_time.tv_usec == 0) {
        *export_time_sec = export_time.tv_sec;
        *export_time_sec = export_time.tv_sec + 1;
/* Expires 'exporter''s flow cache immediately, at the current time. */
dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter *exporter,
    uint64_t export_time_usec;
    uint32_t export_time_sec;

    get_export_time_now(&export_time_usec, &export_time_sec);
    dpif_ipfix_cache_expire(exporter, forced_end, export_time_usec,
/* Periodic maintenance: expires flow cache entries of the bridge exporter
 * (when enabled) and of every per-flow exporter, all using a single export
 * time snapshot. */
dpif_ipfix_run(struct dpif_ipfix *di) OVS_EXCLUDED(mutex)
    uint64_t export_time_usec;
    uint32_t export_time_sec;
    struct dpif_ipfix_flow_exporter_map_node *flow_exporter_node;

    ovs_mutex_lock(&mutex);
    get_export_time_now(&export_time_usec, &export_time_sec);
    if (di->bridge_exporter.probability > 0) { /* Bridge exporter enabled. */
        dpif_ipfix_cache_expire(
            &di->bridge_exporter.exporter, false, export_time_usec,
    HMAP_FOR_EACH (flow_exporter_node, node, &di->flow_exporter_map) {
        dpif_ipfix_cache_expire(
            &flow_exporter_node->exporter.exporter, false, export_time_usec,
    ovs_mutex_unlock(&mutex);
1844 dpif_ipfix_wait(struct dpif_ipfix *di) OVS_EXCLUDED(mutex)
1846 long long int next_timeout_msec = LLONG_MAX;
1847 struct dpif_ipfix_flow_exporter_map_node *flow_exporter_node;
1849 ovs_mutex_lock(&mutex);
1850 if (di->bridge_exporter.probability > 0) { /* Bridge exporter enabled. */
1851 if (ipfix_cache_next_timeout_msec(
1852 &di->bridge_exporter.exporter, &next_timeout_msec)) {
1853 poll_timer_wait_until(next_timeout_msec);
1856 HMAP_FOR_EACH (flow_exporter_node, node, &di->flow_exporter_map) {
1857 if (ipfix_cache_next_timeout_msec(
1858 &flow_exporter_node->exporter.exporter, &next_timeout_msec)) {
1859 poll_timer_wait_until(next_timeout_msec);
1862 ovs_mutex_unlock(&mutex);