2 * Copyright (c) 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "tnl-ports.h"
25 #include "classifier.h"
26 #include "dynamic-string.h"
31 #include "ovs-thread.h"
33 #include "ovs-thread.h"
/* Serializes all modifications to the global state below; classifier
 * lookups themselves are RCU-safe and taken without this mutex (see
 * tnl_port_map_lookup()). */
37 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
38 static struct classifier cls; /* Tunnel ports. */

/* NOTE(review): the field lines below belong to struct definitions whose
 * opening "struct ... {" lines are not visible in this chunk (apparently a
 * per-device address record, a listening-port record, and the classifier
 * entry with its refcount) -- confirm against the full file. */
44 struct in6_addr addr6;
47 char dev_name[IFNAMSIZ];

/* Local devices whose addresses are mirrored into 'cls'. */
50 static struct ovs_list addr_list;

55 char dev_name[IFNAMSIZ];

/* Tunnel listening ports (one entry per registered UDP port). */
59 static struct ovs_list port_list;

64 struct ovs_refcount ref_cnt;
65 char dev_name[IFNAMSIZ];
/* Returns the tnl_port_in that contains classifier rule 'cr'.  The
 * build-time assert pins 'cr' as the first member of struct tnl_port_in,
 * so the CONTAINER_OF conversion is effectively a plain cast. */
68 static struct tnl_port_in *
69 tnl_port_cast(const struct cls_rule *cr)
71 BUILD_ASSERT_DECL(offsetof(struct tnl_port_in, cr) == 0);
73 return CONTAINER_OF(cr, struct tnl_port_in, cr);
/* RCU-deferred destructor for a tunnel-port classifier entry, scheduled
 * via ovsrcu_postpone() in tnl_port_unref().
 * NOTE(review): the final free of 'p' is not visible in this chunk --
 * confirm it follows cls_rule_destroy() in the full file. */
77 tnl_port_free(struct tnl_port_in *p)
79 cls_rule_destroy(&p->cr);
/* Initializes '*flow' to describe packets addressed to this tunnel
 * endpoint: L3 destination 'addr' (IPv4-mapped in6_addrs produce an IPv4
 * flow, anything else IPv6) and, for UDP tunnels, destination port
 * 'udp_port'.  A zero 'udp_port' denotes a non-UDP tunnel and selects
 * IPPROTO_GRE instead of IPPROTO_UDP.
 * NOTE(review): 'mac' presumably fills flow->dl_dst in lines elided from
 * this chunk -- confirm. */
84 tnl_port_init_flow(struct flow *flow, struct eth_addr mac,
85 struct in6_addr *addr, ovs_be16 udp_port)
87 memset(flow, 0, sizeof *flow);
90 if (IN6_IS_ADDR_V4MAPPED(addr)) {
91 flow->dl_type = htons(ETH_TYPE_IP);
92 flow->nw_dst = in6_addr_get_mapped_ipv4(addr);
/* else branch (brace elided): native IPv6 destination. */
94 flow->dl_type = htons(ETH_TYPE_IPV6);
95 flow->ipv6_dst = *addr;
99 flow->nw_proto = IPPROTO_UDP;
101 flow->nw_proto = IPPROTO_GRE;
103 flow->tp_dst = udp_port;
/* Inserts (or takes another reference to) a classifier entry mapping
 * packets destined to 'addr'/'udp_port' to datapath port 'port', tagged
 * with device name 'dev_name'.  All visible callers hold 'mutex'. */
107 map_insert(odp_port_t port, struct eth_addr mac, struct in6_addr *addr,
108 ovs_be16 udp_port, const char dev_name[])
110 const struct cls_rule *cr;
111 struct tnl_port_in *p;
114 memset(&match, 0, sizeof match);
115 tnl_port_init_flow(&match.flow, mac, addr, udp_port);
/* RCU lookup loop: retry if a matching entry is being torn down
 * concurrently and its refcount can no longer be taken. */
118 cr = classifier_lookup(&cls, CLS_MAX_VERSION, &match.flow, NULL);
119 p = tnl_port_cast(cr);
120 /* Try again if the rule was released before we get the reference. */
121 } while (p && !ovs_refcount_try_ref_rcu(&p->ref_cnt));
/* No existing entry: build a new one with an exact-match mask on
 * dl_type/nw_proto/destination address (and tp_dst for UDP tunnels). */
124 p = xzalloc(sizeof *p);
127 match.wc.masks.dl_type = OVS_BE16_MAX;
128 match.wc.masks.nw_proto = 0xff;
129 /* XXX: No fragments support. */
130 match.wc.masks.nw_frag = FLOW_NW_FRAG_MASK;
132 /* 'udp_port' is zero for non-UDP tunnels (e.g. GRE). In this case it
133 * doesn't make sense to match on UDP port numbers. */
135 match.wc.masks.tp_dst = OVS_BE16_MAX;
137 if (IN6_IS_ADDR_V4MAPPED(addr)) {
138 match.wc.masks.nw_dst = OVS_BE32_MAX;
/* else (brace elided): exact-match the full IPv6 destination. */
140 match.wc.masks.ipv6_dst = in6addr_exact;
142 match.wc.masks.vlan_tci = OVS_BE16_MAX;
143 memset(&match.wc.masks.dl_dst, 0xff, sizeof (struct eth_addr));
145 cls_rule_init(&p->cr, &match, 0); /* Priority == 0. */
146 ovs_refcount_init(&p->ref_cnt);
147 ovs_strlcpy(p->dev_name, dev_name, sizeof p->dev_name);
149 classifier_insert(&cls, &p->cr, CLS_MIN_VERSION, NULL, 0);
/* Registers tunnel listening port 'udp_port' for datapath port 'port' on
 * 'dev_name', then mirrors a classifier entry for every local address in
 * 'addr_list' (IPv4 and/or IPv6).
 * NOTE(review): the body of the "already registered" branch and the
 * assignment of p->port are elided from this chunk -- confirm against
 * the full file. */
154 tnl_port_map_insert(odp_port_t port,
155 ovs_be16 udp_port, const char dev_name[])
158 struct ip_device *ip_dev;
160 ovs_mutex_lock(&mutex);
161 LIST_FOR_EACH(p, node, &port_list) {
162 if (udp_port == p->udp_port) {
/* New listening port: record it and add classifier entries for each
 * tracked device address. */
167 p = xzalloc(sizeof *p);
169 p->udp_port = udp_port;
170 ovs_strlcpy(p->dev_name, dev_name, sizeof p->dev_name);
171 list_insert(&port_list, &p->node);
173 LIST_FOR_EACH(ip_dev, node, &addr_list) {
174 if (ip_dev->addr4 != INADDR_ANY) {
175 struct in6_addr addr4 = in6_addr_mapped_ipv4(ip_dev->addr4);
176 map_insert(p->port, ip_dev->mac, &addr4,
177 p->udp_port, p->dev_name);
179 if (ipv6_addr_is_set(&ip_dev->addr6)) {
180 map_insert(p->port, ip_dev->mac, &ip_dev->addr6,
181 p->udp_port, p->dev_name);
186 ovs_mutex_unlock(&mutex);
/* Drops one reference to the entry owning 'cr'; on the last reference the
 * rule is removed from 'cls' and freed after an RCU grace period. */
190 tnl_port_unref(const struct cls_rule *cr)
/* NOTE(review): tnl_port_cast(cr) runs before the null check below.  This
 * is safe only because CONTAINER_OF does pointer arithmetic without
 * dereferencing, but it is fragile -- consider guarding on 'cr' first. */
192 struct tnl_port_in *p = tnl_port_cast(cr);
194 if (cr && ovs_refcount_unref_relaxed(&p->ref_cnt) == 1) {
195 if (classifier_remove(&cls, cr)) {
196 ovsrcu_postpone(tnl_port_free, p);
/* Looks up the classifier entry for 'addr'/'udp_port' and, in lines
 * elided from this chunk, presumably releases it via tnl_port_unref() --
 * confirm against the full file. */
202 map_delete(struct eth_addr mac, struct in6_addr *addr, ovs_be16 udp_port)
204 const struct cls_rule *cr;
207 tnl_port_init_flow(&flow, mac, addr, udp_port);
209 cr = classifier_lookup(&cls, CLS_MAX_VERSION, &flow, NULL);
/* Unregisters listening port 'udp_port': removes its record from
 * 'port_list' and deletes the matching classifier entries for every
 * tracked local address.  Counterpart of tnl_port_map_insert(). */
214 tnl_port_map_delete(ovs_be16 udp_port)
216 struct tnl_port *p, *next;
217 struct ip_device *ip_dev;
220 ovs_mutex_lock(&mutex);
221 LIST_FOR_EACH_SAFE(p, next, node, &port_list) {
222 if (p->udp_port == udp_port) {
223 list_remove(&p->node);
/* NOTE(review): free(p) is not visible in this chunk -- confirm the
 * removed entry is released in the elided lines. */
232 LIST_FOR_EACH(ip_dev, node, &addr_list) {
233 if (ip_dev->addr4 != INADDR_ANY) {
234 struct in6_addr addr4 = in6_addr_mapped_ipv4(ip_dev->addr4);
235 map_delete(ip_dev->mac, &addr4, udp_port);
237 if (ipv6_addr_is_set(&ip_dev->addr6)) {
238 map_delete(ip_dev->mac, &ip_dev->addr6, udp_port);
244 ovs_mutex_unlock(&mutex);
247 /* 'flow' is non-const to allow for temporary modifications during the lookup.
248 * Any changes are restored before returning. */
/* Returns the datapath port number listening for 'flow', or ODPP_NONE if
 * no tunnel endpoint matches.  Called without 'mutex'; the classifier
 * lookup is RCU-safe. */
250 tnl_port_map_lookup(struct flow *flow, struct flow_wildcards *wc)
252 const struct cls_rule *cr = classifier_lookup(&cls, CLS_MAX_VERSION, flow,
255 return (cr) ? tnl_port_cast(cr)->portno : ODPP_NONE;
/* Appends to 'ds' a verbose dump of every classifier entry: device name,
 * datapath port number, and the ODP-formatted flow key and mask. */
259 tnl_port_show_v(struct ds *ds)
261 const struct tnl_port_in *p;
263 CLS_FOR_EACH(p, cr, &cls) {
264 struct odputil_keybuf keybuf;
265 struct odputil_keybuf maskbuf;
267 const struct nlattr *key, *mask;
268 size_t key_len, mask_len;
269 struct flow_wildcards wc;
271 struct odp_flow_key_parms odp_parms = {
276 ds_put_format(ds, "%s (%"PRIu32") : ", p->dev_name, p->portno);
/* Expand the rule's compressed mask and flow into full-size structs. */
277 minimask_expand(p->cr.match.mask, &wc);
278 miniflow_expand(p->cr.match.flow, &flow);
/* Serialize the flow key into 'keybuf'... */
281 odp_parms.odp_in_port = flow.in_port.odp_port;
282 odp_parms.support.recirc = true;
283 ofpbuf_use_stack(&buf, &keybuf, sizeof keybuf);
284 odp_flow_key_from_flow(&odp_parms, &buf);
/* ...and the mask into 'maskbuf' (recirc disabled for the mask). */
289 odp_parms.odp_in_port = wc.masks.in_port.odp_port;
290 odp_parms.support.recirc = false;
291 ofpbuf_use_stack(&buf, &maskbuf, sizeof maskbuf);
292 odp_flow_key_from_mask(&odp_parms, &buf);
297 odp_flow_format(key, key_len, mask, mask_len, NULL, ds, false);
298 ds_put_format(ds, "\n");
/* unixctl handler for "tnl/ports/show" [-v]: lists the listening tunnel
 * ports; with "-v" also dumps each classifier entry verbosely. */
303 tnl_port_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
304 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
306 struct ds ds = DS_EMPTY_INITIALIZER;
309 ds_put_format(&ds, "Listening ports:\n")
310 ovs_mutex_lock(&mutex);
/* NOTE(review): argv[1] is read although the command registers 0..1
 * arguments; the elided lines presumably guard on 'argc' first --
 * confirm against the full file. */
312 if (!strcasecmp(argv[1], "-v")) {
313 tnl_port_show_v(&ds);
318 LIST_FOR_EACH(p, node, &port_list) {
319 ds_put_format(&ds, "%s (%"PRIu32")\n", p->dev_name, p->port);
323 ovs_mutex_unlock(&mutex);
324 unixctl_command_reply(conn, ds_cstr(&ds));
/* Adds classifier entries mapping every registered listening port in
 * 'port_list' to 'ip_dev''s IPv4 and/or IPv6 address.  All visible
 * callers hold 'mutex'. */
329 map_insert_ipdev(struct ip_device *ip_dev)
333 LIST_FOR_EACH(p, node, &port_list) {
334 if (ip_dev->addr4 != INADDR_ANY) {
335 struct in6_addr addr4 = in6_addr_mapped_ipv4(ip_dev->addr4);
336 map_insert(p->port, ip_dev->mac, &addr4,
337 p->udp_port, p->dev_name);
339 if (ipv6_addr_is_set(&ip_dev->addr6)) {
340 map_insert(p->port, ip_dev->mac, &ip_dev->addr6,
341 p->udp_port, p->dev_name);
/* Opens netdev 'dev_name', records its MAC and IPv4/IPv6 addresses in a
 * new 'addr_list' entry, and mirrors the entry into the classifier.
 * Skips loopback devices and devices with neither address family set.
 * NOTE(review): the error paths (netdev_get_flags / get_etheraddr /
 * address-query failures) must close 'dev' and free 'ip_dev'; that
 * cleanup is elided from this chunk -- confirm no netdev reference or
 * allocation is leaked. */
347 insert_ipdev(const char dev_name[])
349 struct ip_device *ip_dev;
350 enum netdev_flags flags;
355 error = netdev_open(dev_name, NULL, &dev);
360 error = netdev_get_flags(dev, &flags);
361 if (error || (flags & NETDEV_LOOPBACK)) {
366 ip_dev = xzalloc(sizeof *ip_dev);
/* 'change_seq' lets tnl_port_map_run() detect later address changes. */
368 ip_dev->change_seq = netdev_get_change_seq(dev);
369 error = netdev_get_etheraddr(ip_dev->dev, &ip_dev->mac);
374 error4 = netdev_get_in4(ip_dev->dev, (struct in_addr *)&ip_dev->addr4, NULL);
375 error6 = netdev_get_in6(ip_dev->dev, &ip_dev->addr6);
376 if (error4 && error6) {
380 ovs_strlcpy(ip_dev->dev_name, netdev_get_name(dev), sizeof ip_dev->dev_name);
382 list_insert(&addr_list, &ip_dev->node);
383 map_insert_ipdev(ip_dev);
/* Removes 'ip_dev' from 'addr_list', deletes its classifier entries for
 * every listening port, and closes its netdev.  All visible callers hold
 * 'mutex'.
 * NOTE(review): free(ip_dev) is not visible in this chunk -- confirm it
 * follows netdev_close() in the full file. */
387 delete_ipdev(struct ip_device *ip_dev)
391 LIST_FOR_EACH(p, node, &port_list) {
392 if (ip_dev->addr4 != INADDR_ANY) {
393 struct in6_addr addr4 = in6_addr_mapped_ipv4(ip_dev->addr4);
394 map_delete(ip_dev->mac, &addr4, p->udp_port);
396 if (ipv6_addr_is_set(&ip_dev->addr6)) {
397 map_delete(ip_dev->mac, &ip_dev->addr6, p->udp_port);
401 list_remove(&ip_dev->node);
402 netdev_close(ip_dev->dev);
/* Public entry: (re)registers device 'dev_name' as a local tunnel
 * endpoint.  If the device is already tracked and its change_seq is
 * unchanged this is a no-op (early return elided from this chunk);
 * otherwise the stale entry is deleted and the device re-inserted. */
407 tnl_port_map_insert_ipdev(const char dev_name[])
409 struct ip_device *ip_dev, *next;
411 ovs_mutex_lock(&mutex);
413 LIST_FOR_EACH_SAFE(ip_dev, next, node, &addr_list) {
414 if (!strcmp(netdev_get_name(ip_dev->dev), dev_name)) {
415 if (ip_dev->change_seq == netdev_get_change_seq(ip_dev->dev)) {
418 /* Address changed. */
419 delete_ipdev(ip_dev);
423 insert_ipdev(dev_name);
426 ovs_mutex_unlock(&mutex);
/* Public entry: stops tracking device 'dev_name' and removes its
 * classifier entries via delete_ipdev(). */
430 tnl_port_map_delete_ipdev(const char dev_name[])
432 struct ip_device *ip_dev, *next;
434 ovs_mutex_lock(&mutex);
435 LIST_FOR_EACH_SAFE(ip_dev, next, node, &addr_list) {
436 if (!strcmp(netdev_get_name(ip_dev->dev), dev_name)) {
437 delete_ipdev(ip_dev);
440 ovs_mutex_unlock(&mutex);
/* Periodic maintenance: for each tracked device whose netdev change_seq
 * moved, re-resolves its addresses by deleting and re-inserting it.
 * 'dev_name' must be copied to a local buffer first because
 * delete_ipdev() invalidates 'ip_dev'. */
444 tnl_port_map_run(void)
446 struct ip_device *ip_dev, *next;
448 ovs_mutex_lock(&mutex);
449 LIST_FOR_EACH_SAFE(ip_dev, next, node, &addr_list) {
450 char dev_name[IFNAMSIZ];
/* Unchanged device: skip (the 'continue' is elided from this chunk). */
452 if (ip_dev->change_seq == netdev_get_change_seq(ip_dev->dev)) {
456 /* Address changed. */
457 ovs_strlcpy(dev_name, ip_dev->dev_name, sizeof dev_name);
458 delete_ipdev(ip_dev);
459 insert_ipdev(dev_name);
461 ovs_mutex_unlock(&mutex);
/* One-time module initialization: sets up the classifier and the two
 * global lists, and registers the "tnl/ports/show" unixctl command.
 * NOTE(review): no once-guard is visible in this chunk -- callers must
 * ensure this runs exactly once. */
465 tnl_port_map_init(void)
467 classifier_init(&cls, flow_segment_u64s);
468 list_init(&addr_list);
469 list_init(&port_list);
470 unixctl_command_register("tnl/ports/show", "-v", 0, 1, tnl_port_show, NULL);