+ uint64_t pkts, bytes;
+
+ pkts = nf_flow->packet_count;
+ bytes = nf_flow->byte_count;
+
+ nf_flow->last_expired += nf->active_timeout;
+
+ if (pkts == 0) {
+ return;
+ }
+
+ if ((bytes >> 32) <= 175) {
+ /* NetFlow v5 records are limited to 32-bit counters. If we've wrapped
+ * a counter, send as multiple records so we don't lose track of any
+ * traffic. We try to evenly distribute the packet and byte counters,
+ * so that the bytes-per-packet lengths don't look wonky across the
+ * records. */
+ while (bytes) {
+ int n_recs = (bytes + UINT32_MAX - 1) / UINT32_MAX;
+ uint32_t pkt_count = pkts / n_recs;
+ uint32_t byte_count = bytes / n_recs;
+
+ gen_netflow_rec(nf, nf_flow, pkt_count, byte_count);
+
+ pkts -= pkt_count;
+ bytes -= byte_count;
+ }
+ } else {
+ /* In 600 seconds, a 10GbE link can theoretically transmit 75 * 10**10
+ * == 175 * 2**32 bytes. The byte counter is bigger than that, so it's
+ * probably a bug--for example, the netdev code uses UINT64_MAX to
+ * report "unknown value", and perhaps that has leaked through to here.
+ *
+ * We wouldn't want to hit the loop above in this case, because it
+ * would try to send up to UINT32_MAX netflow records, which would take
+ * a long time.
+ */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_WARN_RL(&rl, "impossible byte counter %"PRIu64, bytes);
+ }
+
+ /* Update flow tracking data. */
+ nf_flow->packet_count = 0;
+ nf_flow->byte_count = 0;
+ nf_flow->tcp_flags = 0;
+}
+
+void
+netflow_flow_clear(struct netflow *nf, const struct flow *flow)
+ OVS_EXCLUDED(mutex)
+{
+ struct netflow_flow *nf_flow;
+
+ ovs_mutex_lock(&mutex);
+ nf_flow = netflow_flow_lookup(nf, flow);
+ if (nf_flow) {
+ netflow_expire__(nf, nf_flow);
+ hmap_remove(&nf->flows, &nf_flow->hmap_node);
+ free(nf_flow);
+ }
+ ovs_mutex_unlock(&mutex);
+}
+
/* Sends out a round of NetFlow active timeouts, expiring statistics for
 * flows that have been active longer than 'nf->active_timeout'.  Caller
 * must hold 'mutex'. */
+static void
+netflow_run__(struct netflow *nf) OVS_REQUIRES(mutex)
+{
+ long long int now = time_msec();
+ struct netflow_flow *nf_flow, *next;
+