+    /* Use the route's gateway as the next hop if it is set and is not a
+     * v4-mapped all-zeros address; otherwise the tunnel destination is
+     * directly reachable on the output device. */
+    if (ipv6_addr_is_set(&gw) &&
+        (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
+        *ip = gw;
+    } else {
+        *ip = dst;
+    }
+
+    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
+    ovs_assert(xcfg);
+
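+    /* Find the xport that corresponds to the route's output device. */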
+    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
+        if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
+            struct xport *port;
+
+            HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
+                if (!strncmp(netdev_get_name(port->netdev),
+                             out_dev, IFNAMSIZ)) {
+                    *out_port = port;
+                    return 0;
+                }
+            }
+        }
+    }
+    return -ENOENT;
+}
+
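+/* Submits 'packet' to 'out_dev''s bridge for OpenFlow processing, as if it
+ * had been received on 'out_dev', by outputting it to OFPP_TABLE. */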
+static int
+compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
+                    struct dp_packet *packet)
+{
+    struct xbridge *xbridge = out_dev->xbridge;
+    struct ofpact_output output;
+    struct flow flow;
+
+    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
+    flow_extract(packet, &flow);
+    flow.in_port.ofp_port = out_dev->ofp_port;
+    output.port = OFPP_TABLE;
+    output.max_len = 0;
+
+    return ofproto_dpif_execute_actions__(xbridge->ofproto, &flow, NULL,
+                                          &output.ofpact, sizeof output,
+                                          ctx->recurse, ctx->resubmits,
+                                          packet);
+}
+
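+/* Composes an IPv6 Neighbor Solicitation for 'ipv6_dst' and submits it to
+ * the flow tables of 'out_dev''s bridge. */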
+static void
+tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
+                    const struct eth_addr eth_src,
+                    struct in6_addr *ipv6_src, struct in6_addr *ipv6_dst)
+{
+    struct dp_packet packet;
+
+    dp_packet_init(&packet, 0);
+    compose_nd(&packet, eth_src, ipv6_src, ipv6_dst);
+    compose_table_xlate(ctx, out_dev, &packet);
+    dp_packet_uninit(&packet);
+}
+
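+/* Composes a broadcast ARP request for 'ip_dst' and submits it to the flow
+ * tables of 'out_dev''s bridge. */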
+static void
+tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
+                     const struct eth_addr eth_src,
+                     ovs_be32 ip_src, ovs_be32 ip_dst)
+{
+    struct dp_packet packet;
+
+    dp_packet_init(&packet, 0);
+    compose_arp(&packet, ARP_OP_REQUEST,
+                eth_src, eth_addr_zero, true, ip_src, ip_dst);
+
+    compose_table_xlate(ctx, out_dev, &packet);
+    dp_packet_uninit(&packet);
+}
+
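+/* Builds the datapath actions for sending a packet out of the native tunnel
+ * 'xport': looks up a route and a next-hop Ethernet address for the tunnel
+ * destination, builds the tunnel header, and appends a tunnel push action
+ * to 'ctx->odp_actions'.  On a neighbor cache miss, sends an ARP or ND
+ * request instead and returns an error.  Returns 0 on success. */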
+static int
+build_tunnel_send(struct xlate_ctx *ctx, const struct xport *xport,
+                  const struct flow *flow, odp_port_t tunnel_odp_port)
+{
+    struct ovs_action_push_tnl tnl_push_data;
+    struct xport *out_dev = NULL;
+    ovs_be32 s_ip = 0, d_ip = 0;
+    struct in6_addr s_ip6 = in6addr_any;
+    struct in6_addr d_ip6 = in6addr_any;
+    struct eth_addr smac;
+    struct eth_addr dmac;
+    int err;
+    char buf_sip6[INET6_ADDRSTRLEN];
+    char buf_dip6[INET6_ADDRSTRLEN];
+
+    err = tnl_route_lookup_flow(flow, &d_ip6, &out_dev);
+    if (err) {
+        xlate_report(ctx, "native tunnel routing failed");
+        return err;
+    }
+
+    xlate_report(ctx, "tunneling to %s via %s",
+                 ipv6_string_mapped(buf_dip6, &d_ip6),
+                 netdev_get_name(out_dev->netdev));
+
+    /* Use the MAC address of the output device as the source Ethernet
+     * address of the tunneled packet. */
+    err = netdev_get_etheraddr(out_dev->netdev, &smac);
+    if (err) {
+        xlate_report(ctx, "tunnel output device lacks Ethernet address");
+        return err;
+    }
+
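+    /* Pick a local source address of the same family as the (possibly
+     * v4-mapped) tunnel destination. */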
+    d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
+    if (d_ip) {
+        err = netdev_get_in4(out_dev->netdev, (struct in_addr *) &s_ip, NULL);
+        if (err) {
+            xlate_report(ctx, "tunnel output device lacks IPv4 address");
+            return err;
+        }
+        in6_addr_set_mapped_ipv4(&s_ip6, s_ip);
+    } else {
+        err = netdev_get_in6(out_dev->netdev, &s_ip6);
+        if (err) {
+            xlate_report(ctx, "tunnel output device lacks IPv6 address");
+            return err;
+        }
+    }
+
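+    /* Resolve the next hop's Ethernet address, kicking off ARP or ND
+     * resolution on a cache miss. */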
+    err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
+    if (err) {
+        xlate_report(ctx, "neighbor cache miss for %s on bridge %s, "
+                     "sending %s request",
+                     buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
+        if (d_ip) {
+            tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
+        } else {
+            tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
+        }
+        return err;
+    }
+
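+    /* Record the neighbor cache entry in the xlate cache so that it is
+     * refreshed, and thus kept from expiring, for as long as the resulting
+     * datapath flow remains in use. */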
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
+        ovs_strlcpy(entry->u.tnl_neigh_cache.br_name, out_dev->xbridge->name,
+                    sizeof entry->u.tnl_neigh_cache.br_name);
+        entry->u.tnl_neigh_cache.d_ipv6 = d_ip6;
+    }
+
+    xlate_report(ctx, "tunneling from "ETH_ADDR_FMT" %s"
+                 " to "ETH_ADDR_FMT" %s",
+                 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
+                 ETH_ADDR_ARGS(dmac), buf_dip6);
+
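+    /* Build the tunnel header and append a push action for it, along with
+     * the output port, to the datapath actions. */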
+    err = tnl_port_build_header(xport->ofport, flow,
+                                dmac, smac, &s_ip6, &tnl_push_data);
+    if (err) {
+        return err;
+    }
+    tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
+    tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
+    odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
+    return 0;
+}
+
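+/* Emits datapath actions that bring the packet being translated in line
+ * with any changes made to the flow since the last commit, possibly marking
+ * the flow for slow-path processing. */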
+static void
+xlate_commit_actions(struct xlate_ctx *ctx)
+{
+    bool use_masked = ctx->xbridge->support.masked_set_action;
+
+    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+                                          ctx->odp_actions, ctx->wc,
+                                          use_masked);
+}
+
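+/* Zeroes the connection-tracking state, zone, mark, and label in 'flow'. */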
+static void
+clear_conntrack(struct flow *flow)
+{
+    flow->ct_state = 0;
+    flow->ct_zone = 0;
+    flow->ct_mark = 0;
+    memset(&flow->ct_label, 0, sizeof flow->ct_label);
+}
+
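+/* Composes the datapath actions for outputting to OpenFlow port 'ofp_port',
+ * taking patch ports, native tunneling, and (when 'check_stp' is true) the
+ * port's STP state into account. */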
+static void
+compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
+                        const struct xlate_bond_recirc *xr, bool check_stp)
+{
+    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
+    struct flow_wildcards *wc = ctx->wc;
+    struct flow *flow = &ctx->xin->flow;
+    struct flow_tnl flow_tnl;
+    ovs_be16 flow_vlan_tci;
+    uint32_t flow_pkt_mark;
+    uint8_t flow_nw_tos;
+    odp_port_t out_port, odp_port;
+    bool tnl_push_pop_send = false;
+    uint8_t dscp;
+
+    /* If 'struct flow' gets additional metadata, we'll need to zero it out
+     * before traversing a patch port. */
+    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
+    memset(&flow_tnl, 0, sizeof flow_tnl);
+
+    if (!xport) {
+        xlate_report(ctx, "Nonexistent output port");
+        return;