/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 * Copyright (c) 2009 InMon Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 #include "ofproto-dpif-sflow.h"
21 #include <sys/resource.h>
22 #include <sys/socket.h>
25 #include "collectors.h"
35 #include "poll-loop.h"
36 #include "ovs-router.h"
37 #include "route-table.h"
38 #include "sflow_api.h"
39 #include "socket-util.h"
41 #include "openvswitch/vlog.h"
42 #include "lib/odp-util.h"
43 #include "lib/unaligned.h"
44 #include "ofproto-provider.h"
47 VLOG_DEFINE_THIS_MODULE(sflow);
49 static struct ovs_mutex mutex;
51 /* This global var is used to determine which sFlow
52 sub-agent should send the datapath counters. */
53 #define SFLOW_GC_SUBID_UNCLAIMED (uint32_t)-1
54 static uint32_t sflow_global_counters_subid = SFLOW_GC_SUBID_UNCLAIMED;
/*
 * The enum dpif_sflow_tunnel_type is to declare the types supported.
 */
enum dpif_sflow_tunnel_type {
    DPIF_SFLOW_TUNNEL_UNKNOWN = 0,
    DPIF_SFLOW_TUNNEL_VXLAN,
    DPIF_SFLOW_TUNNEL_GRE,
    DPIF_SFLOW_TUNNEL_GRE64,
    DPIF_SFLOW_TUNNEL_LISP,
    DPIF_SFLOW_TUNNEL_IPSEC_GRE,
    DPIF_SFLOW_TUNNEL_IPSEC_GRE64,
    DPIF_SFLOW_TUNNEL_GENEVE
};
70 struct dpif_sflow_port {
71 struct hmap_node hmap_node; /* In struct dpif_sflow's "ports" hmap. */
72 SFLDataSource_instance dsi; /* sFlow library's notion of port number. */
73 struct ofport *ofport; /* To retrive port stats. */
75 enum dpif_sflow_tunnel_type tunnel_type;
79 struct collectors *collectors;
80 SFLAgent *sflow_agent;
81 struct ofproto_sflow_options *options;
83 size_t n_flood, n_all;
84 struct hmap ports; /* Contains "struct dpif_sflow_port"s. */
86 struct ovs_refcount ref_cnt;
89 static void dpif_sflow_del_port__(struct dpif_sflow *,
90 struct dpif_sflow_port *);
92 #define RECEIVER_INDEX 1
94 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Returns true if 'a' and 'b' are equal, treating NULL as equal only to
 * NULL. */
static bool
nullable_string_is_equal(const char *a, const char *b)
{
    if (!a || !b) {
        return a == b;
    }
    return strcmp(a, b) == 0;
}
103 ofproto_sflow_options_equal(const struct ofproto_sflow_options *a,
104 const struct ofproto_sflow_options *b)
106 return (sset_equals(&a->targets, &b->targets)
107 && a->sampling_rate == b->sampling_rate
108 && a->polling_interval == b->polling_interval
109 && a->header_len == b->header_len
110 && a->sub_id == b->sub_id
111 && nullable_string_is_equal(a->agent_device, b->agent_device)
112 && nullable_string_is_equal(a->control_ip, b->control_ip));
115 static struct ofproto_sflow_options *
116 ofproto_sflow_options_clone(const struct ofproto_sflow_options *old)
118 struct ofproto_sflow_options *new = xmemdup(old, sizeof *old);
119 sset_clone(&new->targets, &old->targets);
120 new->agent_device = old->agent_device ? xstrdup(old->agent_device) : NULL;
121 new->control_ip = old->control_ip ? xstrdup(old->control_ip) : NULL;
126 ofproto_sflow_options_destroy(struct ofproto_sflow_options *options)
129 sset_destroy(&options->targets);
130 free(options->agent_device);
131 free(options->control_ip);
136 /* sFlow library callback to allocate memory. */
138 sflow_agent_alloc_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
141 return calloc(1, bytes);
144 /* sFlow library callback to free memory. */
146 sflow_agent_free_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
153 /* sFlow library callback to report error. */
155 sflow_agent_error_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
158 VLOG_WARN("sFlow agent error: %s", msg);
161 /* sFlow library callback to send datagram. */
163 sflow_agent_send_packet_cb(void *ds_, SFLAgent *agent OVS_UNUSED,
164 SFLReceiver *receiver OVS_UNUSED, u_char *pkt,
167 struct dpif_sflow *ds = ds_;
168 collectors_send(ds->collectors, pkt, pktLen);
171 static struct dpif_sflow_port *
172 dpif_sflow_find_port(const struct dpif_sflow *ds, odp_port_t odp_port)
175 struct dpif_sflow_port *dsp;
177 HMAP_FOR_EACH_IN_BUCKET (dsp, hmap_node, hash_odp_port(odp_port),
179 if (dsp->odp_port == odp_port) {
186 /* Call to get the datapath stats. Modeled after the dpctl utility.
188 * It might be more efficient for this module to be given a handle it can use
189 * to get these stats more efficiently, but this is only going to be called
190 * once every 20-30 seconds. Return number of datapaths found (normally expect
193 sflow_get_dp_stats(struct dpif_sflow *ds OVS_UNUSED,
194 struct dpif_dp_stats *dp_totals)
200 memset(dp_totals, 0, sizeof *dp_totals);
202 dp_enumerate_types(&types);
203 SSET_FOR_EACH (type, &types) {
207 if (dp_enumerate_names(type, &names) == 0) {
208 SSET_FOR_EACH (name, &names) {
210 if (dpif_open(name, type, &dpif) == 0) {
211 struct dpif_dp_stats dp_stats;
212 if (dpif_get_dp_stats(dpif, &dp_stats) == 0) {
214 dp_totals->n_hit += dp_stats.n_hit;
215 dp_totals->n_missed += dp_stats.n_missed;
216 dp_totals->n_lost += dp_stats.n_lost;
217 dp_totals->n_flows += dp_stats.n_flows;
218 dp_totals->n_mask_hit += dp_stats.n_mask_hit;
219 dp_totals->n_masks += dp_stats.n_masks;
224 sset_destroy(&names);
227 sset_destroy(&types);
231 /* If there are multiple bridges defined then we need some
232 minimal artibration to decide which one should send the
233 global counters. This function allows each sub-agent to
234 ask if he should do it or not. */
236 sflow_global_counters_subid_test(uint32_t subid)
239 if (sflow_global_counters_subid == SFLOW_GC_SUBID_UNCLAIMED) {
240 /* The role is up for grabs. */
241 sflow_global_counters_subid = subid;
243 return (sflow_global_counters_subid == subid);
247 sflow_global_counters_subid_clear(uint32_t subid)
250 if (sflow_global_counters_subid == subid) {
251 /* The sub-agent that was sending global counters
252 is going away, so reset to allow another
254 sflow_global_counters_subid = SFLOW_GC_SUBID_UNCLAIMED;
259 sflow_agent_get_global_counters(void *ds_, SFLPoller *poller,
260 SFL_COUNTERS_SAMPLE_TYPE *cs)
263 struct dpif_sflow *ds = ds_;
264 SFLCounters_sample_element dp_elem, res_elem;
265 struct dpif_dp_stats dp_totals;
268 if (!sflow_global_counters_subid_test(poller->agent->subId)) {
269 /* Another sub-agent is currently responsible for this. */
274 if (sflow_get_dp_stats(ds, &dp_totals)) {
275 dp_elem.tag = SFLCOUNTERS_OVSDP;
276 dp_elem.counterBlock.ovsdp.n_hit = dp_totals.n_hit;
277 dp_elem.counterBlock.ovsdp.n_missed = dp_totals.n_missed;
278 dp_elem.counterBlock.ovsdp.n_lost = dp_totals.n_lost;
279 dp_elem.counterBlock.ovsdp.n_mask_hit = dp_totals.n_mask_hit;
280 dp_elem.counterBlock.ovsdp.n_flows = dp_totals.n_flows;
281 dp_elem.counterBlock.ovsdp.n_masks = dp_totals.n_masks;
282 SFLADD_ELEMENT(cs, &dp_elem);
286 getrusage(RUSAGE_SELF, &usage);
287 res_elem.tag = SFLCOUNTERS_APP_RESOURCES;
288 res_elem.counterBlock.appResources.user_time
289 = timeval_to_msec(&usage.ru_utime);
290 res_elem.counterBlock.appResources.system_time
291 = timeval_to_msec(&usage.ru_stime);
292 res_elem.counterBlock.appResources.mem_used = (usage.ru_maxrss * 1024);
293 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.mem_max);
294 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.fd_open);
295 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.fd_max);
296 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.conn_open);
297 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.conn_max);
299 SFLADD_ELEMENT(cs, &res_elem);
300 sfl_poller_writeCountersSample(poller, cs);
304 sflow_agent_get_counters(void *ds_, SFLPoller *poller,
305 SFL_COUNTERS_SAMPLE_TYPE *cs)
308 struct dpif_sflow *ds = ds_;
309 SFLCounters_sample_element elem, lacp_elem, of_elem, name_elem;
310 enum netdev_features current;
311 struct dpif_sflow_port *dsp;
312 SFLIf_counters *counters;
313 struct netdev_stats stats;
314 enum netdev_flags flags;
315 struct lacp_slave_stats lacp_stats;
318 dsp = dpif_sflow_find_port(ds, u32_to_odp(poller->bridgePort));
323 elem.tag = SFLCOUNTERS_GENERIC;
324 counters = &elem.counterBlock.generic;
325 counters->ifIndex = SFL_DS_INDEX(poller->dsi);
326 counters->ifType = 6;
327 if (!netdev_get_features(dsp->ofport->netdev, ¤t, NULL, NULL, NULL)) {
328 /* The values of ifDirection come from MAU MIB (RFC 2668): 0 = unknown,
329 1 = full-duplex, 2 = half-duplex, 3 = in, 4=out */
330 counters->ifSpeed = netdev_features_to_bps(current, 0);
331 counters->ifDirection = (netdev_features_is_full_duplex(current)
334 counters->ifSpeed = 100000000;
335 counters->ifDirection = 0;
337 if (!netdev_get_flags(dsp->ofport->netdev, &flags) && flags & NETDEV_UP) {
338 counters->ifStatus = 1; /* ifAdminStatus up. */
339 if (netdev_get_carrier(dsp->ofport->netdev)) {
340 counters->ifStatus |= 2; /* ifOperStatus us. */
343 counters->ifStatus = 0; /* Down. */
347 1. Is the multicast counter filled in?
348 2. Does the multicast counter include broadcasts?
349 3. Does the rx_packets counter include multicasts/broadcasts?
351 ofproto_port_get_stats(dsp->ofport, &stats);
352 counters->ifInOctets = stats.rx_bytes;
353 counters->ifInUcastPkts = stats.rx_packets;
354 counters->ifInMulticastPkts = stats.multicast;
355 counters->ifInBroadcastPkts = -1;
356 counters->ifInDiscards = stats.rx_dropped;
357 counters->ifInErrors = stats.rx_errors;
358 counters->ifInUnknownProtos = -1;
359 counters->ifOutOctets = stats.tx_bytes;
360 counters->ifOutUcastPkts = stats.tx_packets;
361 counters->ifOutMulticastPkts = -1;
362 counters->ifOutBroadcastPkts = -1;
363 counters->ifOutDiscards = stats.tx_dropped;
364 counters->ifOutErrors = stats.tx_errors;
365 counters->ifPromiscuousMode = 0;
367 SFLADD_ELEMENT(cs, &elem);
369 /* Include LACP counters and identifiers if this port is part of a LAG. */
370 if (ofproto_port_get_lacp_stats(dsp->ofport, &lacp_stats) == 0) {
371 memset(&lacp_elem, 0, sizeof lacp_elem);
372 lacp_elem.tag = SFLCOUNTERS_LACP;
373 memcpy(&lacp_elem.counterBlock.lacp.actorSystemID,
374 lacp_stats.dot3adAggPortActorSystemID,
376 memcpy(&lacp_elem.counterBlock.lacp.partnerSystemID,
377 lacp_stats.dot3adAggPortPartnerOperSystemID,
379 lacp_elem.counterBlock.lacp.attachedAggID =
380 lacp_stats.dot3adAggPortAttachedAggID;
381 lacp_elem.counterBlock.lacp.portState.v.actorAdmin =
382 lacp_stats.dot3adAggPortActorAdminState;
383 lacp_elem.counterBlock.lacp.portState.v.actorOper =
384 lacp_stats.dot3adAggPortActorOperState;
385 lacp_elem.counterBlock.lacp.portState.v.partnerAdmin =
386 lacp_stats.dot3adAggPortPartnerAdminState;
387 lacp_elem.counterBlock.lacp.portState.v.partnerOper =
388 lacp_stats.dot3adAggPortPartnerOperState;
389 lacp_elem.counterBlock.lacp.LACPDUsRx =
390 lacp_stats.dot3adAggPortStatsLACPDUsRx;
391 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerPDUsRx);
392 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerResponsePDUsRx);
393 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.unknownRx);
394 lacp_elem.counterBlock.lacp.illegalRx =
395 lacp_stats.dot3adAggPortStatsIllegalRx;
396 lacp_elem.counterBlock.lacp.LACPDUsTx =
397 lacp_stats.dot3adAggPortStatsLACPDUsTx;
398 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerPDUsTx);
399 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerResponsePDUsTx);
400 SFLADD_ELEMENT(cs, &lacp_elem);
403 /* Include Port name. */
404 if ((ifName = netdev_get_name(dsp->ofport->netdev)) != NULL) {
405 memset(&name_elem, 0, sizeof name_elem);
406 name_elem.tag = SFLCOUNTERS_PORTNAME;
407 name_elem.counterBlock.portName.portName.str = (char *)ifName;
408 name_elem.counterBlock.portName.portName.len = strlen(ifName);
409 SFLADD_ELEMENT(cs, &name_elem);
412 /* Include OpenFlow DPID and openflow port number. */
413 memset(&of_elem, 0, sizeof of_elem);
414 of_elem.tag = SFLCOUNTERS_OPENFLOWPORT;
415 of_elem.counterBlock.ofPort.datapath_id =
416 ofproto_get_datapath_id(dsp->ofport->ofproto);
417 of_elem.counterBlock.ofPort.port_no =
418 (OVS_FORCE uint32_t)dsp->ofport->ofp_port;
419 SFLADD_ELEMENT(cs, &of_elem);
421 sfl_poller_writeCountersSample(poller, cs);
424 /* Obtains an address to use for the local sFlow agent and stores it into
425 * '*agent_addr'. Returns true if successful, false on failure.
427 * The sFlow agent address should be a local IP address that is persistent and
428 * reachable over the network, if possible. The IP address associated with
429 * 'agent_device' is used if it has one, and otherwise 'control_ip', the IP
430 * address used to talk to the controller. If the agent device is not
431 * specified then it is figured out by taking a look at the routing table based
434 sflow_choose_agent_address(const char *agent_device,
435 const struct sset *targets,
436 const char *control_ip,
437 SFLAddress *agent_addr)
442 memset(agent_addr, 0, sizeof *agent_addr);
443 agent_addr->type = SFLADDRESSTYPE_IP_V4;
446 if (!netdev_get_in4_by_name(agent_device, &in4)) {
451 SSET_FOR_EACH (target, targets) {
453 struct sockaddr_storage ss;
454 struct sockaddr_in sin;
458 if (inet_parse_active(target, SFL_DEFAULT_COLLECTOR_PORT, &sa.ss)
459 && sa.ss.ss_family == AF_INET) {
462 if (ovs_router_lookup(sa.sin.sin_addr.s_addr, name, &gw)
463 && !netdev_get_in4_by_name(name, &in4)) {
469 if (control_ip && !lookup_ip(control_ip, &in4)) {
473 VLOG_ERR("could not determine IP address for sFlow agent");
477 agent_addr->address.ip_v4.addr = (OVS_FORCE uint32_t) in4.s_addr;
482 dpif_sflow_clear__(struct dpif_sflow *ds) OVS_REQUIRES(mutex)
484 if (ds->sflow_agent) {
485 sflow_global_counters_subid_clear(ds->sflow_agent->subId);
486 sfl_agent_release(ds->sflow_agent);
487 free(ds->sflow_agent);
488 ds->sflow_agent = NULL;
490 collectors_destroy(ds->collectors);
491 ds->collectors = NULL;
492 ofproto_sflow_options_destroy(ds->options);
495 /* Turn off sampling to save CPU cycles. */
500 dpif_sflow_clear(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
502 ovs_mutex_lock(&mutex);
503 dpif_sflow_clear__(ds);
504 ovs_mutex_unlock(&mutex);
508 dpif_sflow_is_enabled(const struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
512 ovs_mutex_lock(&mutex);
513 enabled = ds->collectors != NULL;
514 ovs_mutex_unlock(&mutex);
519 dpif_sflow_create(void)
521 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
522 struct dpif_sflow *ds;
524 if (ovsthread_once_start(&once)) {
525 ovs_mutex_init_recursive(&mutex);
526 ovsthread_once_done(&once);
529 ds = xcalloc(1, sizeof *ds);
530 ds->next_tick = time_now() + 1;
531 hmap_init(&ds->ports);
533 ovs_refcount_init(&ds->ref_cnt);
539 dpif_sflow_ref(const struct dpif_sflow *ds_)
541 struct dpif_sflow *ds = CONST_CAST(struct dpif_sflow *, ds_);
543 ovs_refcount_ref(&ds->ref_cnt);
548 /* 32-bit fraction of packets to sample with. A value of 0 samples no packets,
549 * a value of %UINT32_MAX samples all packets and intermediate values sample
550 * intermediate fractions of packets. */
552 dpif_sflow_get_probability(const struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
554 uint32_t probability;
555 ovs_mutex_lock(&mutex);
556 probability = ds->probability;
557 ovs_mutex_unlock(&mutex);
562 dpif_sflow_unref(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
564 if (ds && ovs_refcount_unref_relaxed(&ds->ref_cnt) == 1) {
565 struct dpif_sflow_port *dsp, *next;
567 dpif_sflow_clear(ds);
568 HMAP_FOR_EACH_SAFE (dsp, next, hmap_node, &ds->ports) {
569 dpif_sflow_del_port__(ds, dsp);
571 hmap_destroy(&ds->ports);
577 dpif_sflow_add_poller(struct dpif_sflow *ds, struct dpif_sflow_port *dsp)
580 SFLPoller *poller = sfl_agent_addPoller(ds->sflow_agent, &dsp->dsi, ds,
581 sflow_agent_get_counters);
582 sfl_poller_set_sFlowCpInterval(poller, ds->options->polling_interval);
583 sfl_poller_set_sFlowCpReceiver(poller, RECEIVER_INDEX);
584 sfl_poller_set_bridgePort(poller, odp_to_u32(dsp->odp_port));
587 static enum dpif_sflow_tunnel_type
588 dpif_sflow_tunnel_type(struct ofport *ofport) {
589 const char *type = netdev_get_type(ofport->netdev);
591 if (strcmp(type, "gre") == 0) {
592 return DPIF_SFLOW_TUNNEL_GRE;
593 } else if (strcmp(type, "gre64") == 0) {
594 return DPIF_SFLOW_TUNNEL_GRE64;
595 } else if (strcmp(type, "ipsec_gre") == 0) {
596 return DPIF_SFLOW_TUNNEL_IPSEC_GRE;
597 } else if (strcmp(type, "ipsec_gre64") == 0) {
598 return DPIF_SFLOW_TUNNEL_IPSEC_GRE64;
599 } else if (strcmp(type, "vxlan") == 0) {
600 return DPIF_SFLOW_TUNNEL_VXLAN;
601 } else if (strcmp(type, "lisp") == 0) {
602 return DPIF_SFLOW_TUNNEL_LISP;
603 } else if (strcmp(type, "geneve") == 0) {
604 return DPIF_SFLOW_TUNNEL_GENEVE;
607 return DPIF_SFLOW_TUNNEL_UNKNOWN;
611 dpif_sflow_tunnel_proto(enum dpif_sflow_tunnel_type tunnel_type)
613 /* Default to 0 (IPPROTO_IP), meaning "unknown". */
615 switch(tunnel_type) {
617 case DPIF_SFLOW_TUNNEL_GRE:
618 case DPIF_SFLOW_TUNNEL_GRE64:
619 ipproto = IPPROTO_GRE;
622 case DPIF_SFLOW_TUNNEL_IPSEC_GRE:
623 case DPIF_SFLOW_TUNNEL_IPSEC_GRE64:
624 ipproto = IPPROTO_ESP;
627 case DPIF_SFLOW_TUNNEL_VXLAN:
628 case DPIF_SFLOW_TUNNEL_LISP:
629 case DPIF_SFLOW_TUNNEL_GENEVE:
630 ipproto = IPPROTO_UDP;
632 case DPIF_SFLOW_TUNNEL_UNKNOWN:
639 dpif_sflow_add_port(struct dpif_sflow *ds, struct ofport *ofport,
640 odp_port_t odp_port) OVS_EXCLUDED(mutex)
642 struct dpif_sflow_port *dsp;
644 enum dpif_sflow_tunnel_type tunnel_type;
646 ovs_mutex_lock(&mutex);
647 dpif_sflow_del_port(ds, odp_port);
649 tunnel_type = dpif_sflow_tunnel_type(ofport);
650 ifindex = netdev_get_ifindex(ofport->netdev);
653 && tunnel_type == DPIF_SFLOW_TUNNEL_UNKNOWN) {
654 /* Not an ifindex port, and not a tunnel port either
655 * so do not add a cross-reference to it here.
660 /* Add to table of ports. */
661 dsp = xmalloc(sizeof *dsp);
662 dsp->ofport = ofport;
663 dsp->odp_port = odp_port;
664 dsp->tunnel_type = tunnel_type;
665 hmap_insert(&ds->ports, &dsp->hmap_node, hash_odp_port(odp_port));
668 /* Add poller for ports that have ifindex. */
669 SFL_DS_SET(dsp->dsi, SFL_DSCLASS_IFINDEX, ifindex, 0);
670 if (ds->sflow_agent) {
671 dpif_sflow_add_poller(ds, dsp);
674 /* Record "ifindex unknown" for the others */
675 SFL_DS_SET(dsp->dsi, SFL_DSCLASS_IFINDEX, 0, 0);
679 ovs_mutex_unlock(&mutex);
683 dpif_sflow_del_port__(struct dpif_sflow *ds, struct dpif_sflow_port *dsp)
687 && SFL_DS_INDEX(dsp->dsi)) {
688 sfl_agent_removePoller(ds->sflow_agent, &dsp->dsi);
689 sfl_agent_removeSampler(ds->sflow_agent, &dsp->dsi);
691 hmap_remove(&ds->ports, &dsp->hmap_node);
696 dpif_sflow_del_port(struct dpif_sflow *ds, odp_port_t odp_port)
699 struct dpif_sflow_port *dsp;
701 ovs_mutex_lock(&mutex);
702 dsp = dpif_sflow_find_port(ds, odp_port);
704 dpif_sflow_del_port__(ds, dsp);
706 ovs_mutex_unlock(&mutex);
710 dpif_sflow_set_options(struct dpif_sflow *ds,
711 const struct ofproto_sflow_options *options)
714 struct dpif_sflow_port *dsp;
715 bool options_changed;
716 SFLReceiver *receiver;
719 SFLDataSource_instance dsi;
724 ovs_mutex_lock(&mutex);
725 if (sset_is_empty(&options->targets) || !options->sampling_rate) {
726 /* No point in doing any work if there are no targets or nothing to
728 dpif_sflow_clear__(ds);
732 options_changed = (!ds->options
733 || !ofproto_sflow_options_equal(options, ds->options));
735 /* Configure collectors if options have changed or if we're shortchanged in
736 * collectors (which indicates that opening one or more of the configured
737 * collectors failed, so that we should retry). */
739 || collectors_count(ds->collectors) < sset_count(&options->targets)) {
740 collectors_destroy(ds->collectors);
741 collectors_create(&options->targets, SFL_DEFAULT_COLLECTOR_PORT,
743 if (ds->collectors == NULL) {
744 VLOG_WARN_RL(&rl, "no collectors could be initialized, "
746 dpif_sflow_clear__(ds);
751 /* Choose agent IP address and agent device (if not yet setup) */
752 if (!sflow_choose_agent_address(options->agent_device,
754 options->control_ip, &agentIP)) {
755 dpif_sflow_clear__(ds);
759 /* Avoid reconfiguring if options didn't change. */
760 if (!options_changed) {
763 ofproto_sflow_options_destroy(ds->options);
764 ds->options = ofproto_sflow_options_clone(options);
767 VLOG_INFO("creating sFlow agent %d", options->sub_id);
768 if (ds->sflow_agent) {
769 sflow_global_counters_subid_clear(ds->sflow_agent->subId);
770 sfl_agent_release(ds->sflow_agent);
772 ds->sflow_agent = xcalloc(1, sizeof *ds->sflow_agent);
774 sfl_agent_init(ds->sflow_agent,
777 now, /* Boot time. */
778 now, /* Current time. */
779 ds, /* Pointer supplied to callbacks. */
780 sflow_agent_alloc_cb,
782 sflow_agent_error_cb,
783 sflow_agent_send_packet_cb);
785 receiver = sfl_agent_addReceiver(ds->sflow_agent);
786 sfl_receiver_set_sFlowRcvrOwner(receiver, "Open vSwitch sFlow");
787 sfl_receiver_set_sFlowRcvrTimeout(receiver, 0xffffffff);
789 /* Set the sampling_rate down in the datapath. */
790 ds->probability = MAX(1, UINT32_MAX / ds->options->sampling_rate);
792 /* Add a single sampler for the bridge. This appears as a PHYSICAL_ENTITY
793 because it is associated with the hypervisor, and interacts with the server
794 hardware directly. The sub_id is used to distinguish this sampler from
795 others on other bridges within the same agent. */
796 dsIndex = 1000 + options->sub_id;
797 SFL_DS_SET(dsi, SFL_DSCLASS_PHYSICAL_ENTITY, dsIndex, 0);
798 sampler = sfl_agent_addSampler(ds->sflow_agent, &dsi);
799 sfl_sampler_set_sFlowFsPacketSamplingRate(sampler, ds->options->sampling_rate);
800 sfl_sampler_set_sFlowFsMaximumHeaderSize(sampler, ds->options->header_len);
801 sfl_sampler_set_sFlowFsReceiver(sampler, RECEIVER_INDEX);
803 /* Add a counter poller for the bridge so we can use it to send
804 global counters such as datapath cache hit/miss stats. */
805 poller = sfl_agent_addPoller(ds->sflow_agent, &dsi, ds,
806 sflow_agent_get_global_counters);
807 sfl_poller_set_sFlowCpInterval(poller, ds->options->polling_interval);
808 sfl_poller_set_sFlowCpReceiver(poller, RECEIVER_INDEX);
810 /* Add pollers for the currently known ifindex-ports */
811 HMAP_FOR_EACH (dsp, hmap_node, &ds->ports) {
812 if (SFL_DS_INDEX(dsp->dsi)) {
813 dpif_sflow_add_poller(ds, dsp);
819 ovs_mutex_unlock(&mutex);
823 dpif_sflow_odp_port_to_ifindex(const struct dpif_sflow *ds,
824 odp_port_t odp_port) OVS_EXCLUDED(mutex)
826 struct dpif_sflow_port *dsp;
829 ovs_mutex_lock(&mutex);
830 dsp = dpif_sflow_find_port(ds, odp_port);
831 ret = dsp ? SFL_DS_INDEX(dsp->dsi) : 0;
832 ovs_mutex_unlock(&mutex);
837 dpif_sflow_tunnel_v4(uint8_t tunnel_ipproto,
838 const struct flow_tnl *tunnel,
839 SFLSampled_ipv4 *ipv4)
842 ipv4->protocol = tunnel_ipproto;
843 ipv4->tos = tunnel->ip_tos;
844 ipv4->src_ip.addr = (OVS_FORCE uint32_t) tunnel->ip_src;
845 ipv4->dst_ip.addr = (OVS_FORCE uint32_t) tunnel->ip_dst;
846 ipv4->src_port = (OVS_FORCE uint16_t) tunnel->tp_src;
847 ipv4->dst_port = (OVS_FORCE uint16_t) tunnel->tp_dst;
851 dpif_sflow_push_mpls_lse(struct dpif_sflow_actions *sflow_actions,
854 if (sflow_actions->mpls_stack_depth >= FLOW_MAX_MPLS_LABELS) {
855 sflow_actions->mpls_err = true;
859 /* Record the new lse in host-byte-order. */
860 /* BOS flag will be fixed later when we send stack to sFlow library. */
861 sflow_actions->mpls_lse[sflow_actions->mpls_stack_depth++] = ntohl(lse);
865 dpif_sflow_pop_mpls_lse(struct dpif_sflow_actions *sflow_actions)
867 if (sflow_actions->mpls_stack_depth == 0) {
868 sflow_actions->mpls_err = true;
871 sflow_actions->mpls_stack_depth--;
875 dpif_sflow_set_mpls(struct dpif_sflow_actions *sflow_actions,
876 const struct ovs_key_mpls *mpls_key, int n)
879 if (n > FLOW_MAX_MPLS_LABELS) {
880 sflow_actions->mpls_err = true;
884 for (ii = 0; ii < n; ii++) {
885 /* Reverse stack order, and use host-byte-order for each lse. */
886 sflow_actions->mpls_lse[n - ii - 1] = ntohl(mpls_key[ii].mpls_lse);
888 sflow_actions->mpls_stack_depth = n;
892 sflow_read_tnl_push_action(const struct nlattr *attr,
893 struct dpif_sflow_actions *sflow_actions)
895 /* Modeled on lib/odp-util.c: format_odp_tnl_push_header */
896 const struct ovs_action_push_tnl *data = nl_attr_get(attr);
897 const struct eth_header *eth = (const struct eth_header *) data->header;
898 const struct ip_header *ip
899 = ALIGNED_CAST(const struct ip_header *, eth + 1);
901 sflow_actions->out_port = u32_to_odp(data->out_port);
904 /* TODO: SFlow does not currently define a MAC-in-MAC
905 * encapsulation structure. We could use an extension
906 * structure to report this.
910 /* Cannot assume alignment so just use memcpy. */
911 sflow_actions->tunnel.ip_src = get_16aligned_be32(&ip->ip_src);
912 sflow_actions->tunnel.ip_dst = get_16aligned_be32(&ip->ip_dst);
913 sflow_actions->tunnel.ip_tos = ip->ip_tos;
914 sflow_actions->tunnel.ip_ttl = ip->ip_ttl;
915 /* The tnl_push action can supply the ip_protocol too. */
916 sflow_actions->tunnel_ipproto = ip->ip_proto;
919 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN
920 || data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
921 const struct udp_header *udp = (const struct udp_header *) (ip + 1);
922 sflow_actions->tunnel.tp_src = udp->udp_src;
923 sflow_actions->tunnel.tp_dst = udp->udp_dst;
925 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
926 const struct vxlanhdr *vxh = (const struct vxlanhdr *) (udp + 1);
927 uint64_t tun_id = ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8;
928 sflow_actions->tunnel.tun_id = htonll(tun_id);
930 const struct genevehdr *gnh = (const struct genevehdr *) (udp + 1);
931 uint64_t tun_id = ntohl(get_16aligned_be32(&gnh->vni)) >> 8;
932 sflow_actions->tunnel.tun_id = htonll(tun_id);
934 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE) {
935 const void *l4 = ip + 1;
936 const struct gre_base_hdr *greh = (const struct gre_base_hdr *) l4;
937 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *)(greh + 1);
938 if (greh->flags & htons(GRE_CSUM)) {
941 if (greh->flags & htons(GRE_KEY)) {
942 uint64_t tun_id = ntohl(get_16aligned_be32(options));
943 sflow_actions->tunnel.tun_id = htonll(tun_id);
949 sflow_read_set_action(const struct nlattr *attr,
950 struct dpif_sflow_actions *sflow_actions)
952 enum ovs_key_attr type = nl_attr_type(attr);
954 case OVS_KEY_ATTR_ENCAP:
955 if (++sflow_actions->encap_depth > 1) {
956 /* Do not handle multi-encap for now. */
957 sflow_actions->tunnel_err = true;
959 dpif_sflow_read_actions(NULL,
960 nl_attr_get(attr), nl_attr_get_size(attr),
964 case OVS_KEY_ATTR_PRIORITY:
965 case OVS_KEY_ATTR_SKB_MARK:
966 case OVS_KEY_ATTR_DP_HASH:
967 case OVS_KEY_ATTR_RECIRC_ID:
970 case OVS_KEY_ATTR_TUNNEL: {
971 if (++sflow_actions->encap_depth > 1) {
972 /* Do not handle multi-encap for now. */
973 sflow_actions->tunnel_err = true;
975 if (odp_tun_key_from_attr(attr, &sflow_actions->tunnel)
977 /* Tunnel parsing error. */
978 sflow_actions->tunnel_err = true;
984 case OVS_KEY_ATTR_IN_PORT:
985 case OVS_KEY_ATTR_ETHERNET:
986 case OVS_KEY_ATTR_VLAN:
989 case OVS_KEY_ATTR_MPLS: {
990 const struct ovs_key_mpls *mpls_key = nl_attr_get(attr);
991 size_t size = nl_attr_get_size(attr);
992 dpif_sflow_set_mpls(sflow_actions, mpls_key, size / sizeof *mpls_key);
996 case OVS_KEY_ATTR_ETHERTYPE:
997 case OVS_KEY_ATTR_IPV4:
998 if (sflow_actions->encap_depth == 1) {
999 const struct ovs_key_ipv4 *key = nl_attr_get(attr);
1000 if (key->ipv4_src) {
1001 sflow_actions->tunnel.ip_src = key->ipv4_src;
1003 if (key->ipv4_dst) {
1004 sflow_actions->tunnel.ip_dst = key->ipv4_dst;
1006 if (key->ipv4_proto) {
1007 sflow_actions->tunnel_ipproto = key->ipv4_proto;
1009 if (key->ipv4_tos) {
1010 sflow_actions->tunnel.ip_tos = key->ipv4_tos;
1012 if (key->ipv4_ttl) {
1013 sflow_actions->tunnel.ip_tos = key->ipv4_ttl;
1018 case OVS_KEY_ATTR_IPV6:
1019 /* TODO: parse IPv6 encap. */
1022 /* These have the same structure and format. */
1023 case OVS_KEY_ATTR_TCP:
1024 case OVS_KEY_ATTR_UDP:
1025 case OVS_KEY_ATTR_SCTP:
1026 if (sflow_actions->encap_depth == 1) {
1027 const struct ovs_key_tcp *key = nl_attr_get(attr);
1029 sflow_actions->tunnel.tp_src = key->tcp_src;
1032 sflow_actions->tunnel.tp_dst = key->tcp_dst;
1037 case OVS_KEY_ATTR_TCP_FLAGS:
1038 case OVS_KEY_ATTR_ICMP:
1039 case OVS_KEY_ATTR_ICMPV6:
1040 case OVS_KEY_ATTR_ARP:
1041 case OVS_KEY_ATTR_ND:
1042 case OVS_KEY_ATTR_UNSPEC:
1043 case __OVS_KEY_ATTR_MAX:
1050 dpif_sflow_capture_input_mpls(const struct flow *flow,
1051 struct dpif_sflow_actions *sflow_actions)
1053 if (eth_type_mpls(flow->dl_type)) {
1057 /* Calculate depth by detecting BOS. */
1058 for (ii = 0; ii < FLOW_MAX_MPLS_LABELS; ii++) {
1059 lse = flow->mpls_lse[ii];
1061 if (lse & htonl(MPLS_BOS_MASK)) {
1065 /* Capture stack, reversing stack order, and
1066 * using host-byte-order for each lse. BOS flag
1067 * is ignored for now. It is set later when
1068 * the output stack is encoded.
1070 for (ii = 0; ii < depth; ii++) {
1071 lse = flow->mpls_lse[ii];
1072 sflow_actions->mpls_lse[depth - ii - 1] = ntohl(lse);
1074 sflow_actions->mpls_stack_depth = depth;
1079 dpif_sflow_read_actions(const struct flow *flow,
1080 const struct nlattr *actions, size_t actions_len,
1081 struct dpif_sflow_actions *sflow_actions)
1083 const struct nlattr *a;
1086 if (actions_len == 0) {
1087 /* Packet dropped.*/
1092 /* Make sure the MPLS output stack
1093 * is seeded with the input stack.
1095 dpif_sflow_capture_input_mpls(flow, sflow_actions);
1097 /* XXX when 802.1AD(QinQ) is supported then
1098 * we can do the same with VLAN stacks here
1102 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
1103 enum ovs_action_attr type = nl_attr_type(a);
1105 case OVS_ACTION_ATTR_OUTPUT:
1106 /* Capture the output port in case we need it
1107 * to get the output tunnel type.
1109 sflow_actions->out_port = u32_to_odp(nl_attr_get_u32(a));
1112 case OVS_ACTION_ATTR_TUNNEL_POP:
1113 /* XXX: Do not handle this for now. It's not clear
1114 * if we should start with encap_depth == 1 when we
1115 * see an input tunnel, or if we should assume
1116 * that the input tunnel was always "popped" if it
1117 * was presented to us decoded in flow->tunnel?
1119 * If we do handle this it might look like this,
1120 * as we clear the captured tunnel info and decrement
1123 * memset(&sflow_actions->tunnel, 0, sizeof struct flow_tnl);
1124 * sflow_actions->tunnel_ipproto = 0;
1125 * --sflow_actions->encap_depth;
1127 * but for now just disable the tunnel annotation:
1129 sflow_actions->tunnel_err = true;
1132 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1133 /* XXX: This actions appears to come with it's own
1134 * OUTPUT action, so should it be regarded as having
1135 * an implicit "pop" following it too? Put another
1136 * way, would two tnl_push() actions in succession
1137 * result in a packet with two layers of encap?
1139 if (++sflow_actions->encap_depth > 1) {
1140 /* Do not handle multi-encap for now. */
1141 sflow_actions->tunnel_err = true;
1143 sflow_read_tnl_push_action(a, sflow_actions);
1147 case OVS_ACTION_ATTR_USERSPACE:
1148 case OVS_ACTION_ATTR_RECIRC:
1149 case OVS_ACTION_ATTR_HASH:
1152 case OVS_ACTION_ATTR_SET_MASKED:
1153 /* TODO: apply mask. XXX: Are we likely to see this? */
1156 case OVS_ACTION_ATTR_SET:
1157 sflow_read_set_action(nl_attr_get(a), sflow_actions);
1160 case OVS_ACTION_ATTR_PUSH_VLAN:
1161 case OVS_ACTION_ATTR_POP_VLAN:
1162 /* TODO: 802.1AD(QinQ) is not supported by OVS (yet), so do not
1163 * construct a VLAN-stack. The sFlow user-action cookie already
1164 * captures the egress VLAN ID so there is nothing more to do here.
1168 case OVS_ACTION_ATTR_PUSH_MPLS: {
1169 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
1171 dpif_sflow_push_mpls_lse(sflow_actions, mpls->mpls_lse);
1175 case OVS_ACTION_ATTR_POP_MPLS: {
1176 dpif_sflow_pop_mpls_lse(sflow_actions);
1179 case OVS_ACTION_ATTR_SAMPLE:
1180 case OVS_ACTION_ATTR_UNSPEC:
1181 case __OVS_ACTION_ATTR_MAX:
1189 dpif_sflow_encode_mpls_stack(SFLLabelStack *stack,
1190 uint32_t *mpls_lse_buf,
1191 const struct dpif_sflow_actions *sflow_actions)
1193 /* Put the MPLS stack back into "packet header" order,
1194 * and make sure the BOS flag is set correctly on the last
1195 * one. Each lse is still in host-byte-order.
1199 stack->depth = sflow_actions->mpls_stack_depth;
1200 stack->stack = mpls_lse_buf;
1201 for (ii = 0; ii < stack->depth; ii++) {
1202 lse = sflow_actions->mpls_lse[stack->depth - ii - 1];
1203 stack->stack[ii] = (lse & ~MPLS_BOS_MASK);
1205 stack->stack[stack->depth - 1] |= MPLS_BOS_MASK;
1208 /* Extract the output port count from the user action cookie.
1209 * See http://sflow.org/sflow_version_5.txt "Input/Output port information"
1212 dpif_sflow_cookie_num_outputs(const union user_action_cookie *cookie)
1214 uint32_t format = cookie->sflow.output & 0xC0000000;
1215 uint32_t port_n = cookie->sflow.output & 0x3FFFFFFF;
1217 return port_n ? 1 : 0;
1219 else if (format == 0x80000000) {
1226 dpif_sflow_received(struct dpif_sflow *ds, const struct dp_packet *packet,
1227 const struct flow *flow, odp_port_t odp_in_port,
1228 const union user_action_cookie *cookie,
1229 const struct dpif_sflow_actions *sflow_actions)
1232 SFL_FLOW_SAMPLE_TYPE fs;
1233 SFLFlow_sample_element hdrElem;
1234 SFLSampled_header *header;
1235 SFLFlow_sample_element switchElem;
1236 uint8_t tnlInProto, tnlOutProto;
1237 SFLFlow_sample_element tnlInElem, tnlOutElem;
1238 SFLFlow_sample_element vniInElem, vniOutElem;
1239 SFLFlow_sample_element mplsElem;
1240 uint32_t mpls_lse_buf[FLOW_MAX_MPLS_LABELS];
1241 SFLSampler *sampler;
1242 struct dpif_sflow_port *in_dsp;
1243 struct dpif_sflow_port *out_dsp;
1246 ovs_mutex_lock(&mutex);
1247 sampler = ds->sflow_agent->samplers;
1252 /* Build a flow sample. */
1253 memset(&fs, 0, sizeof fs);
1255 /* Look up the input ifIndex if this port has one. Otherwise just
1256 * leave it as 0 (meaning 'unknown') and continue. */
1257 in_dsp = dpif_sflow_find_port(ds, odp_in_port);
1259 fs.input = SFL_DS_INDEX(in_dsp->dsi);
1262 /* Make the assumption that the random number generator in the datapath converges
1263 * to the configured mean, and just increment the samplePool by the configured
1264 * sampling rate every time. */
1265 sampler->samplePool += sfl_sampler_get_sFlowFsPacketSamplingRate(sampler);
1267 /* Sampled header. */
1268 memset(&hdrElem, 0, sizeof hdrElem);
1269 hdrElem.tag = SFLFLOW_HEADER;
1270 header = &hdrElem.flowType.header;
1271 header->header_protocol = SFLHEADER_ETHERNET_ISO8023;
1272 /* The frame_length should include the Ethernet FCS (4 bytes),
1273 * but it has already been stripped, so we need to add 4 here. */
1274 header->frame_length = dp_packet_size(packet) + 4;
1275 /* Ethernet FCS stripped off. */
1276 header->stripped = 4;
1277 header->header_length = MIN(dp_packet_size(packet),
1278 sampler->sFlowFsMaximumHeaderSize);
1279 header->header_bytes = dp_packet_data(packet);
1281 /* Add extended switch element. */
1282 memset(&switchElem, 0, sizeof(switchElem));
1283 switchElem.tag = SFLFLOW_EX_SWITCH;
1284 switchElem.flowType.sw.src_vlan = vlan_tci_to_vid(flow->vlan_tci);
1285 switchElem.flowType.sw.src_priority = vlan_tci_to_pcp(flow->vlan_tci);
1287 /* Retrieve data from user_action_cookie. */
1288 vlan_tci = cookie->sflow.vlan_tci;
1289 switchElem.flowType.sw.dst_vlan = vlan_tci_to_vid(vlan_tci);
1290 switchElem.flowType.sw.dst_priority = vlan_tci_to_pcp(vlan_tci);
1292 fs.output = cookie->sflow.output;
1295 if (flow->tunnel.ip_dst) {
1296 memset(&tnlInElem, 0, sizeof(tnlInElem));
1297 tnlInElem.tag = SFLFLOW_EX_IPV4_TUNNEL_INGRESS;
1298 tnlInProto = dpif_sflow_tunnel_proto(in_dsp->tunnel_type);
1299 dpif_sflow_tunnel_v4(tnlInProto,
1301 &tnlInElem.flowType.ipv4);
1302 SFLADD_ELEMENT(&fs, &tnlInElem);
1303 if (flow->tunnel.tun_id) {
1304 memset(&vniInElem, 0, sizeof(vniInElem));
1305 vniInElem.tag = SFLFLOW_EX_VNI_INGRESS;
1306 vniInElem.flowType.tunnel_vni.vni
1307 = ntohll(flow->tunnel.tun_id);
1308 SFLADD_ELEMENT(&fs, &vniInElem);
1312 /* Output tunnel. */
1314 && sflow_actions->encap_depth == 1
1315 && !sflow_actions->tunnel_err
1316 && dpif_sflow_cookie_num_outputs(cookie) == 1) {
1317 tnlOutProto = sflow_actions->tunnel_ipproto;
1318 if (tnlOutProto == 0) {
1319 /* Try to infer the ip-protocol from the output port. */
1320 if (sflow_actions->out_port != ODPP_NONE) {
1321 out_dsp = dpif_sflow_find_port(ds, sflow_actions->out_port);
1323 tnlOutProto = dpif_sflow_tunnel_proto(out_dsp->tunnel_type);
1327 memset(&tnlOutElem, 0, sizeof(tnlOutElem));
1328 tnlOutElem.tag = SFLFLOW_EX_IPV4_TUNNEL_EGRESS;
1329 dpif_sflow_tunnel_v4(tnlOutProto,
1330 &sflow_actions->tunnel,
1331 &tnlOutElem.flowType.ipv4);
1332 SFLADD_ELEMENT(&fs, &tnlOutElem);
1333 if (sflow_actions->tunnel.tun_id) {
1334 memset(&vniOutElem, 0, sizeof(vniOutElem));
1335 vniOutElem.tag = SFLFLOW_EX_VNI_EGRESS;
1336 vniOutElem.flowType.tunnel_vni.vni
1337 = ntohll(sflow_actions->tunnel.tun_id);
1338 SFLADD_ELEMENT(&fs, &vniOutElem);
1342 /* MPLS output label stack. */
1344 && sflow_actions->mpls_stack_depth > 0
1345 && !sflow_actions->mpls_err
1346 && dpif_sflow_cookie_num_outputs(cookie) == 1) {
1347 memset(&mplsElem, 0, sizeof(mplsElem));
1348 mplsElem.tag = SFLFLOW_EX_MPLS;
1349 dpif_sflow_encode_mpls_stack(&mplsElem.flowType.mpls.out_stack,
1352 SFLADD_ELEMENT(&fs, &mplsElem);
1355 /* Submit the flow sample to be encoded into the next datagram. */
1356 SFLADD_ELEMENT(&fs, &hdrElem);
1357 SFLADD_ELEMENT(&fs, &switchElem);
1358 sfl_sampler_writeFlowSample(sampler, &fs);
1361 ovs_mutex_unlock(&mutex);
1365 dpif_sflow_run(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
1367 ovs_mutex_lock(&mutex);
1368 if (ds->collectors != NULL) {
1369 time_t now = time_now();
1371 if (now >= ds->next_tick) {
1372 sfl_agent_tick(ds->sflow_agent, time_wall());
1373 ds->next_tick = now + 1;
1376 ovs_mutex_unlock(&mutex);
1380 dpif_sflow_wait(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
1382 ovs_mutex_lock(&mutex);
1383 if (ds->collectors != NULL) {
1384 poll_timer_wait_until(ds->next_tick * 1000LL);
1386 ovs_mutex_unlock(&mutex);