/*
 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
23 #include "dp-packet.h"
24 #include "dpif-netdev.h"
26 #include "netdev-provider.h"
27 #include "netdev-vport.h"
29 #include "openvswitch/dynamic-string.h"
30 #include "openvswitch/list.h"
31 #include "openvswitch/ofp-print.h"
32 #include "openvswitch/ofpbuf.h"
33 #include "openvswitch/vlog.h"
34 #include "ovs-atomic.h"
36 #include "pcap-file.h"
37 #include "poll-loop.h"
41 #include "unaligned.h"
44 #include "reconnect.h"
46 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
50 struct dummy_packet_stream {
51 struct stream *stream;
52 struct dp_packet rxbuf;
56 enum dummy_packet_conn_type {
57 NONE, /* No connection is configured. */
58 PASSIVE, /* Listener. */
59 ACTIVE /* Connect to listener. */
62 enum dummy_netdev_conn_state {
63 CONN_STATE_CONNECTED, /* Listener connected. */
64 CONN_STATE_NOT_CONNECTED, /* Listener not connected. */
65 CONN_STATE_UNKNOWN, /* No relavent information. */
68 struct dummy_packet_pconn {
69 struct pstream *pstream;
70 struct dummy_packet_stream *streams;
74 struct dummy_packet_rconn {
75 struct dummy_packet_stream *rstream;
76 struct reconnect *reconnect;
79 struct dummy_packet_conn {
80 enum dummy_packet_conn_type type;
82 struct dummy_packet_pconn pconn;
83 struct dummy_packet_rconn rconn;
87 struct pkt_list_node {
88 struct dp_packet *pkt;
89 struct ovs_list list_node;
92 /* Protects 'dummy_list'. */
93 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
95 /* Contains all 'struct dummy_dev's. */
96 static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
97 = OVS_LIST_INITIALIZER(&dummy_list);
103 struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);
105 /* Protects all members below. */
106 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
108 struct eth_addr hwaddr OVS_GUARDED;
110 struct netdev_stats stats OVS_GUARDED;
111 enum netdev_flags flags OVS_GUARDED;
112 int ifindex OVS_GUARDED;
114 struct dummy_packet_conn conn OVS_GUARDED;
116 FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
118 struct in_addr address, netmask;
119 struct in6_addr ipv6, ipv6_mask;
120 struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
122 /* The following properties are for dummy-pmd and they cannot be changed
123 * when a device is running, so we remember the request and update them
124 * next time netdev_dummy_reconfigure() is called. */
129 /* Max 'recv_queue_len' in struct netdev_dummy. */
130 #define NETDEV_DUMMY_MAX_QUEUE 100
132 struct netdev_rxq_dummy {
133 struct netdev_rxq up;
134 struct ovs_list node; /* In netdev_dummy's "rxes" list. */
135 struct ovs_list recv_queue;
136 int recv_queue_len; /* ovs_list_size(&recv_queue). */
137 struct seq *seq; /* Reports newly queued packets. */
140 static unixctl_cb_func netdev_dummy_set_admin_state;
141 static int netdev_dummy_construct(struct netdev *);
142 static void netdev_dummy_queue_packet(struct netdev_dummy *,
143 struct dp_packet *, int);
145 static void dummy_packet_stream_close(struct dummy_packet_stream *);
147 static void pkt_list_delete(struct ovs_list *);
150 is_dummy_class(const struct netdev_class *class)
152 return class->construct == netdev_dummy_construct;
155 static struct netdev_dummy *
156 netdev_dummy_cast(const struct netdev *netdev)
158 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
159 return CONTAINER_OF(netdev, struct netdev_dummy, up);
162 static struct netdev_rxq_dummy *
163 netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
165 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
166 return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
170 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
172 int rxbuf_size = stream ? 2048 : 0;
174 dp_packet_init(&s->rxbuf, rxbuf_size);
175 ovs_list_init(&s->txq);
178 static struct dummy_packet_stream *
179 dummy_packet_stream_create(struct stream *stream)
181 struct dummy_packet_stream *s;
183 s = xzalloc(sizeof *s);
184 dummy_packet_stream_init(s, stream);
190 dummy_packet_stream_wait(struct dummy_packet_stream *s)
192 stream_run_wait(s->stream);
193 if (!ovs_list_is_empty(&s->txq)) {
194 stream_send_wait(s->stream);
196 stream_recv_wait(s->stream);
200 dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
202 if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
204 struct pkt_list_node *node;
206 b = dp_packet_clone_data_with_headroom(buffer, size, 2);
207 put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));
209 node = xmalloc(sizeof *node);
211 ovs_list_push_back(&s->txq, &node->list_node);
216 dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
221 stream_run(s->stream);
223 if (!ovs_list_is_empty(&s->txq)) {
224 struct pkt_list_node *txbuf_node;
225 struct dp_packet *txbuf;
228 ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
229 txbuf = txbuf_node->pkt;
230 retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
233 dp_packet_pull(txbuf, retval);
234 if (!dp_packet_size(txbuf)) {
235 ovs_list_remove(&txbuf_node->list_node);
237 dp_packet_delete(txbuf);
239 } else if (retval != -EAGAIN) {
245 if (dp_packet_size(&s->rxbuf) < 2) {
246 n = 2 - dp_packet_size(&s->rxbuf);
250 frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
251 if (frame_len < ETH_HEADER_LEN) {
255 n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
262 dp_packet_prealloc_tailroom(&s->rxbuf, n);
263 retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);
266 dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
267 if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
268 dp_packet_pull(&s->rxbuf, 2);
269 netdev_dummy_queue_packet(dev,
270 dp_packet_clone(&s->rxbuf), 0);
271 dp_packet_clear(&s->rxbuf);
273 } else if (retval != -EAGAIN) {
274 error = (retval < 0 ? -retval
275 : dp_packet_size(&s->rxbuf) ? EPROTO
284 dummy_packet_stream_close(struct dummy_packet_stream *s)
286 stream_close(s->stream);
287 dp_packet_uninit(&s->rxbuf);
288 pkt_list_delete(&s->txq);
292 dummy_packet_conn_init(struct dummy_packet_conn *conn)
294 memset(conn, 0, sizeof *conn);
299 dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
302 switch (conn->type) {
304 smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
308 smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
318 dummy_packet_conn_close(struct dummy_packet_conn *conn)
321 struct dummy_packet_pconn *pconn = &conn->u.pconn;
322 struct dummy_packet_rconn *rconn = &conn->u.rconn;
324 switch (conn->type) {
326 pstream_close(pconn->pstream);
327 for (i = 0; i < pconn->n_streams; i++) {
328 dummy_packet_stream_close(&pconn->streams[i]);
330 free(pconn->streams);
331 pconn->pstream = NULL;
332 pconn->streams = NULL;
336 dummy_packet_stream_close(rconn->rstream);
337 free(rconn->rstream);
338 rconn->rstream = NULL;
339 reconnect_destroy(rconn->reconnect);
340 rconn->reconnect = NULL;
349 memset(conn, 0, sizeof *conn);
353 dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
354 const struct smap *args)
356 const char *pstream = smap_get(args, "pstream");
357 const char *stream = smap_get(args, "stream");
359 if (pstream && stream) {
360 VLOG_WARN("Open failed: both %s and %s are configured",
365 switch (conn->type) {
368 !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
371 dummy_packet_conn_close(conn);
375 !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
378 dummy_packet_conn_close(conn);
388 error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
390 VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
392 conn->type = PASSIVE;
398 struct stream *active_stream;
399 struct reconnect *reconnect;
401 reconnect = reconnect_create(time_msec());
402 reconnect_set_name(reconnect, stream);
403 reconnect_set_passive(reconnect, false, time_msec());
404 reconnect_enable(reconnect, time_msec());
405 reconnect_set_backoff(reconnect, 100, INT_MAX);
406 reconnect_set_probe_interval(reconnect, 0);
407 conn->u.rconn.reconnect = reconnect;
410 error = stream_open(stream, &active_stream, DSCP_DEFAULT);
411 conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);
415 reconnect_connected(reconnect, time_msec());
419 reconnect_connecting(reconnect, time_msec());
423 reconnect_connect_failed(reconnect, time_msec(), error);
424 stream_close(active_stream);
425 conn->u.rconn.rstream->stream = NULL;
432 dummy_pconn_run(struct netdev_dummy *dev)
433 OVS_REQUIRES(dev->mutex)
435 struct stream *new_stream;
436 struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
440 error = pstream_accept(pconn->pstream, &new_stream);
442 struct dummy_packet_stream *s;
444 pconn->streams = xrealloc(pconn->streams,
445 ((pconn->n_streams + 1)
447 s = &pconn->streams[pconn->n_streams++];
448 dummy_packet_stream_init(s, new_stream);
449 } else if (error != EAGAIN) {
450 VLOG_WARN("%s: accept failed (%s)",
451 pstream_get_name(pconn->pstream), ovs_strerror(error));
452 pstream_close(pconn->pstream);
453 pconn->pstream = NULL;
454 dev->conn.type = NONE;
457 for (i = 0; i < pconn->n_streams; i++) {
458 struct dummy_packet_stream *s = &pconn->streams[i];
460 error = dummy_packet_stream_run(dev, s);
462 VLOG_DBG("%s: closing connection (%s)",
463 stream_get_name(s->stream),
464 ovs_retval_to_string(error));
465 dummy_packet_stream_close(s);
466 pconn->streams[i] = pconn->streams[--pconn->n_streams];
472 dummy_rconn_run(struct netdev_dummy *dev)
473 OVS_REQUIRES(dev->mutex)
475 struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;
477 switch (reconnect_run(rconn->reconnect, time_msec())) {
478 case RECONNECT_CONNECT:
482 if (rconn->rstream->stream) {
483 error = stream_connect(rconn->rstream->stream);
485 error = stream_open(reconnect_get_name(rconn->reconnect),
486 &rconn->rstream->stream, DSCP_DEFAULT);
491 reconnect_connected(rconn->reconnect, time_msec());
495 reconnect_connecting(rconn->reconnect, time_msec());
499 reconnect_connect_failed(rconn->reconnect, time_msec(), error);
500 stream_close(rconn->rstream->stream);
501 rconn->rstream->stream = NULL;
507 case RECONNECT_DISCONNECT:
508 case RECONNECT_PROBE:
513 if (reconnect_is_connected(rconn->reconnect)) {
516 err = dummy_packet_stream_run(dev, rconn->rstream);
519 reconnect_disconnected(rconn->reconnect, time_msec(), err);
520 stream_close(rconn->rstream->stream);
521 rconn->rstream->stream = NULL;
527 dummy_packet_conn_run(struct netdev_dummy *dev)
528 OVS_REQUIRES(dev->mutex)
530 switch (dev->conn.type) {
532 dummy_pconn_run(dev);
536 dummy_rconn_run(dev);
546 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
549 switch (conn->type) {
551 pstream_wait(conn->u.pconn.pstream);
552 for (i = 0; i < conn->u.pconn.n_streams; i++) {
553 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
554 dummy_packet_stream_wait(s);
558 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
559 dummy_packet_stream_wait(conn->u.rconn.rstream);
570 dummy_packet_conn_send(struct dummy_packet_conn *conn,
571 const void *buffer, size_t size)
575 switch (conn->type) {
577 for (i = 0; i < conn->u.pconn.n_streams; i++) {
578 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
580 dummy_packet_stream_send(s, buffer, size);
581 pstream_wait(conn->u.pconn.pstream);
586 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
587 dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
588 dummy_packet_stream_wait(conn->u.rconn.rstream);
598 static enum dummy_netdev_conn_state
599 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
601 enum dummy_netdev_conn_state state;
603 if (conn->type == ACTIVE) {
604 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
605 state = CONN_STATE_CONNECTED;
607 state = CONN_STATE_NOT_CONNECTED;
610 state = CONN_STATE_UNKNOWN;
617 netdev_dummy_run(void)
619 struct netdev_dummy *dev;
621 ovs_mutex_lock(&dummy_list_mutex);
622 LIST_FOR_EACH (dev, list_node, &dummy_list) {
623 ovs_mutex_lock(&dev->mutex);
624 dummy_packet_conn_run(dev);
625 ovs_mutex_unlock(&dev->mutex);
627 ovs_mutex_unlock(&dummy_list_mutex);
631 netdev_dummy_wait(void)
633 struct netdev_dummy *dev;
635 ovs_mutex_lock(&dummy_list_mutex);
636 LIST_FOR_EACH (dev, list_node, &dummy_list) {
637 ovs_mutex_lock(&dev->mutex);
638 dummy_packet_conn_wait(&dev->conn);
639 ovs_mutex_unlock(&dev->mutex);
641 ovs_mutex_unlock(&dummy_list_mutex);
644 static struct netdev *
645 netdev_dummy_alloc(void)
647 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
652 netdev_dummy_construct(struct netdev *netdev_)
654 static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
655 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
658 n = atomic_count_inc(&next_n);
660 ovs_mutex_init(&netdev->mutex);
661 ovs_mutex_lock(&netdev->mutex);
662 netdev->hwaddr.ea[0] = 0xaa;
663 netdev->hwaddr.ea[1] = 0x55;
664 netdev->hwaddr.ea[2] = n >> 24;
665 netdev->hwaddr.ea[3] = n >> 16;
666 netdev->hwaddr.ea[4] = n >> 8;
667 netdev->hwaddr.ea[5] = n;
670 netdev->ifindex = -EOPNOTSUPP;
671 netdev->requested_n_rxq = netdev_->n_rxq;
672 netdev->requested_n_txq = netdev_->n_txq;
674 dummy_packet_conn_init(&netdev->conn);
676 ovs_list_init(&netdev->rxes);
677 ovs_mutex_unlock(&netdev->mutex);
679 ovs_mutex_lock(&dummy_list_mutex);
680 ovs_list_push_back(&dummy_list, &netdev->list_node);
681 ovs_mutex_unlock(&dummy_list_mutex);
687 netdev_dummy_destruct(struct netdev *netdev_)
689 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
691 ovs_mutex_lock(&dummy_list_mutex);
692 ovs_list_remove(&netdev->list_node);
693 ovs_mutex_unlock(&dummy_list_mutex);
695 ovs_mutex_lock(&netdev->mutex);
696 dummy_packet_conn_close(&netdev->conn);
697 netdev->conn.type = NONE;
699 ovs_mutex_unlock(&netdev->mutex);
700 ovs_mutex_destroy(&netdev->mutex);
704 netdev_dummy_dealloc(struct netdev *netdev_)
706 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
712 netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
714 struct netdev_dummy *netdev = netdev_dummy_cast(dev);
716 ovs_mutex_lock(&netdev->mutex);
718 if (netdev->ifindex >= 0) {
719 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
722 dummy_packet_conn_get_config(&netdev->conn, args);
724 /* 'dummy-pmd' specific config. */
725 if (!netdev_is_pmd(dev)) {
728 smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
729 smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
730 smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
731 smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);
734 ovs_mutex_unlock(&netdev->mutex);
739 netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
740 struct in6_addr **pmask, int *n_addr)
742 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
743 int cnt = 0, i = 0, err = 0;
744 struct in6_addr *addr, *mask;
746 ovs_mutex_lock(&netdev->mutex);
747 if (netdev->address.s_addr != INADDR_ANY) {
751 if (ipv6_addr_is_set(&netdev->ipv6)) {
758 addr = xmalloc(sizeof *addr * cnt);
759 mask = xmalloc(sizeof *mask * cnt);
760 if (netdev->address.s_addr != INADDR_ANY) {
761 in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
762 in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
766 if (ipv6_addr_is_set(&netdev->ipv6)) {
767 memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
768 memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
780 ovs_mutex_unlock(&netdev->mutex);
786 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
787 struct in_addr netmask)
789 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
791 ovs_mutex_lock(&netdev->mutex);
792 netdev->address = address;
793 netdev->netmask = netmask;
794 netdev_change_seq_changed(netdev_);
795 ovs_mutex_unlock(&netdev->mutex);
801 netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
802 struct in6_addr *mask)
804 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
806 ovs_mutex_lock(&netdev->mutex);
808 netdev->ipv6_mask = *mask;
809 netdev_change_seq_changed(netdev_);
810 ovs_mutex_unlock(&netdev->mutex);
816 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
818 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
822 ovs_mutex_lock(&netdev->mutex);
823 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
825 dummy_packet_conn_set_config(&netdev->conn, args);
827 if (netdev->rxq_pcap) {
828 fclose(netdev->rxq_pcap);
830 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
831 fclose(netdev->tx_pcap);
833 netdev->rxq_pcap = netdev->tx_pcap = NULL;
834 pcap = smap_get(args, "pcap");
836 netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
838 const char *rxq_pcap = smap_get(args, "rxq_pcap");
839 const char *tx_pcap = smap_get(args, "tx_pcap");
842 netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
845 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
849 netdev_change_seq_changed(netdev_);
851 /* 'dummy-pmd' specific config. */
852 if (!netdev_->netdev_class->is_pmd) {
856 new_n_rxq = MAX(smap_get_int(args, "n_rxq", netdev->requested_n_rxq), 1);
857 if (new_n_rxq != netdev->requested_n_rxq) {
858 netdev->requested_n_rxq = new_n_rxq;
859 netdev_request_reconfigure(netdev_);
863 ovs_mutex_unlock(&netdev->mutex);
868 netdev_dummy_get_numa_id(const struct netdev *netdev_ OVS_UNUSED)
873 /* Requests the number of tx queues for the dummy PMD interface. */
875 netdev_dummy_set_tx_multiq(struct netdev *netdev_, unsigned int n_txq)
877 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
879 ovs_mutex_lock(&netdev->mutex);
881 if (netdev_->n_txq == n_txq) {
885 netdev->requested_n_txq = n_txq;
886 netdev_request_reconfigure(netdev_);
889 ovs_mutex_unlock(&netdev->mutex);
893 /* Sets the number of tx queues and rx queues for the dummy PMD interface. */
895 netdev_dummy_reconfigure(struct netdev *netdev_)
897 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
899 ovs_mutex_lock(&netdev->mutex);
901 netdev_->n_txq = netdev->requested_n_txq;
902 netdev_->n_rxq = netdev->requested_n_rxq;
904 ovs_mutex_unlock(&netdev->mutex);
908 static struct netdev_rxq *
909 netdev_dummy_rxq_alloc(void)
911 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
916 netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
918 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
919 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
921 ovs_mutex_lock(&netdev->mutex);
922 ovs_list_push_back(&netdev->rxes, &rx->node);
923 ovs_list_init(&rx->recv_queue);
924 rx->recv_queue_len = 0;
925 rx->seq = seq_create();
926 ovs_mutex_unlock(&netdev->mutex);
932 netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
934 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
935 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
937 ovs_mutex_lock(&netdev->mutex);
938 ovs_list_remove(&rx->node);
939 pkt_list_delete(&rx->recv_queue);
940 ovs_mutex_unlock(&netdev->mutex);
941 seq_destroy(rx->seq);
945 netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
947 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
953 netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
956 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
957 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
958 struct dp_packet *packet;
960 ovs_mutex_lock(&netdev->mutex);
961 if (!ovs_list_is_empty(&rx->recv_queue)) {
962 struct pkt_list_node *pkt_node;
964 ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
965 packet = pkt_node->pkt;
967 rx->recv_queue_len--;
971 ovs_mutex_unlock(&netdev->mutex);
976 ovs_mutex_lock(&netdev->mutex);
977 netdev->stats.rx_packets++;
978 netdev->stats.rx_bytes += dp_packet_size(packet);
979 ovs_mutex_unlock(&netdev->mutex);
981 dp_packet_pad(packet);
989 netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
991 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
992 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
993 uint64_t seq = seq_read(rx->seq);
995 ovs_mutex_lock(&netdev->mutex);
996 if (!ovs_list_is_empty(&rx->recv_queue)) {
997 poll_immediate_wake();
999 seq_wait(rx->seq, seq);
1001 ovs_mutex_unlock(&netdev->mutex);
1005 netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
1007 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
1008 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
1010 ovs_mutex_lock(&netdev->mutex);
1011 pkt_list_delete(&rx->recv_queue);
1012 rx->recv_queue_len = 0;
1013 ovs_mutex_unlock(&netdev->mutex);
1015 seq_change(rx->seq);
1021 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
1022 struct dp_packet **pkts, int cnt, bool may_steal)
1024 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1028 for (i = 0; i < cnt; i++) {
1029 const void *buffer = dp_packet_data(pkts[i]);
1030 size_t size = dp_packet_size(pkts[i]);
1032 if (size < ETH_HEADER_LEN) {
1036 const struct eth_header *eth = buffer;
1039 ovs_mutex_lock(&dev->mutex);
1040 max_size = dev->mtu + ETH_HEADER_LEN;
1041 ovs_mutex_unlock(&dev->mutex);
1043 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
1044 max_size += VLAN_HEADER_LEN;
1046 if (size > max_size) {
1052 ovs_mutex_lock(&dev->mutex);
1053 dev->stats.tx_packets++;
1054 dev->stats.tx_bytes += size;
1056 dummy_packet_conn_send(&dev->conn, buffer, size);
1058 /* Reply to ARP requests for 'dev''s assigned IP address. */
1059 if (dev->address.s_addr) {
1060 struct dp_packet packet;
1063 dp_packet_use_const(&packet, buffer, size);
1064 flow_extract(&packet, &flow);
1065 if (flow.dl_type == htons(ETH_TYPE_ARP)
1066 && flow.nw_proto == ARP_OP_REQUEST
1067 && flow.nw_dst == dev->address.s_addr) {
1068 struct dp_packet *reply = dp_packet_new(0);
1069 compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
1070 false, flow.nw_dst, flow.nw_src);
1071 netdev_dummy_queue_packet(dev, reply, 0);
1076 struct dp_packet packet;
1078 dp_packet_use_const(&packet, buffer, size);
1079 ovs_pcap_write(dev->tx_pcap, &packet);
1080 fflush(dev->tx_pcap);
1083 ovs_mutex_unlock(&dev->mutex);
1087 for (i = 0; i < cnt; i++) {
1088 dp_packet_delete(pkts[i]);
1096 netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1098 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1100 ovs_mutex_lock(&dev->mutex);
1101 if (!eth_addr_equals(dev->hwaddr, mac)) {
1103 netdev_change_seq_changed(netdev);
1105 ovs_mutex_unlock(&dev->mutex);
1111 netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1113 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1115 ovs_mutex_lock(&dev->mutex);
1117 ovs_mutex_unlock(&dev->mutex);
1123 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
1125 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1127 ovs_mutex_lock(&dev->mutex);
1129 ovs_mutex_unlock(&dev->mutex);
1135 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
1137 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1139 ovs_mutex_lock(&dev->mutex);
1141 ovs_mutex_unlock(&dev->mutex);
1147 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1149 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1151 ovs_mutex_lock(&dev->mutex);
1152 /* Passing only collected counters */
1153 stats->tx_packets = dev->stats.tx_packets;
1154 stats->tx_bytes = dev->stats.tx_bytes;
1155 stats->rx_packets = dev->stats.rx_packets;
1156 stats->rx_bytes = dev->stats.rx_bytes;
1157 ovs_mutex_unlock(&dev->mutex);
1163 netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
1164 unsigned int queue_id, struct smap *details OVS_UNUSED)
1166 if (queue_id == 0) {
1174 netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
1176 *stats = (struct netdev_queue_stats) {
1177 .tx_bytes = UINT64_MAX,
1178 .tx_packets = UINT64_MAX,
1179 .tx_errors = UINT64_MAX,
1180 .created = LLONG_MIN,
1185 netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
1186 unsigned int queue_id,
1187 struct netdev_queue_stats *stats)
1189 if (queue_id == 0) {
1190 netdev_dummy_init_queue_stats(stats);
1197 struct netdev_dummy_queue_state {
1198 unsigned int next_queue;
1202 netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
1205 struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
1206 state->next_queue = 0;
1212 netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
1214 unsigned int *queue_id,
1215 struct smap *details OVS_UNUSED)
1217 struct netdev_dummy_queue_state *state = state_;
1218 if (state->next_queue == 0) {
1220 state->next_queue++;
1228 netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
1236 netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
1237 void (*cb)(unsigned int queue_id,
1238 struct netdev_queue_stats *,
1242 struct netdev_queue_stats stats;
1243 netdev_dummy_init_queue_stats(&stats);
1249 netdev_dummy_get_ifindex(const struct netdev *netdev)
1251 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1254 ovs_mutex_lock(&dev->mutex);
1255 ifindex = dev->ifindex;
1256 ovs_mutex_unlock(&dev->mutex);
1262 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1263 enum netdev_flags off, enum netdev_flags on,
1264 enum netdev_flags *old_flagsp)
1265 OVS_REQUIRES(netdev->mutex)
1267 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1271 *old_flagsp = netdev->flags;
1272 netdev->flags |= on;
1273 netdev->flags &= ~off;
1274 if (*old_flagsp != netdev->flags) {
1275 netdev_change_seq_changed(&netdev->up);
1282 netdev_dummy_update_flags(struct netdev *netdev_,
1283 enum netdev_flags off, enum netdev_flags on,
1284 enum netdev_flags *old_flagsp)
1286 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1289 ovs_mutex_lock(&netdev->mutex);
1290 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1291 ovs_mutex_unlock(&netdev->mutex);
1296 /* Helper functions. */
1298 #define NETDEV_DUMMY_CLASS(NAME, PMD, TX_MULTIQ, RECOFIGURE) \
1304 netdev_dummy_wait, \
1306 netdev_dummy_alloc, \
1307 netdev_dummy_construct, \
1308 netdev_dummy_destruct, \
1309 netdev_dummy_dealloc, \
1310 netdev_dummy_get_config, \
1311 netdev_dummy_set_config, \
1312 NULL, /* get_tunnel_config */ \
1313 NULL, /* build header */ \
1314 NULL, /* push header */ \
1315 NULL, /* pop header */ \
1316 netdev_dummy_get_numa_id, \
1319 netdev_dummy_send, /* send */ \
1320 NULL, /* send_wait */ \
1322 netdev_dummy_set_etheraddr, \
1323 netdev_dummy_get_etheraddr, \
1324 netdev_dummy_get_mtu, \
1325 netdev_dummy_set_mtu, \
1326 netdev_dummy_get_ifindex, \
1327 NULL, /* get_carrier */ \
1328 NULL, /* get_carrier_resets */ \
1329 NULL, /* get_miimon */ \
1330 netdev_dummy_get_stats, \
1332 NULL, /* get_features */ \
1333 NULL, /* set_advertisements */ \
1335 NULL, /* set_policing */ \
1336 NULL, /* get_qos_types */ \
1337 NULL, /* get_qos_capabilities */ \
1338 NULL, /* get_qos */ \
1339 NULL, /* set_qos */ \
1340 netdev_dummy_get_queue, \
1341 NULL, /* set_queue */ \
1342 NULL, /* delete_queue */ \
1343 netdev_dummy_get_queue_stats, \
1344 netdev_dummy_queue_dump_start, \
1345 netdev_dummy_queue_dump_next, \
1346 netdev_dummy_queue_dump_done, \
1347 netdev_dummy_dump_queue_stats, \
1349 NULL, /* set_in4 */ \
1350 netdev_dummy_get_addr_list, \
1351 NULL, /* add_router */ \
1352 NULL, /* get_next_hop */ \
1353 NULL, /* get_status */ \
1354 NULL, /* arp_lookup */ \
1356 netdev_dummy_update_flags, \
1359 netdev_dummy_rxq_alloc, \
1360 netdev_dummy_rxq_construct, \
1361 netdev_dummy_rxq_destruct, \
1362 netdev_dummy_rxq_dealloc, \
1363 netdev_dummy_rxq_recv, \
1364 netdev_dummy_rxq_wait, \
1365 netdev_dummy_rxq_drain, \
1368 static const struct netdev_class dummy_class =
1369 NETDEV_DUMMY_CLASS("dummy", false, NULL, NULL);
1371 static const struct netdev_class dummy_pmd_class =
1372 NETDEV_DUMMY_CLASS("dummy-pmd", true,
1373 netdev_dummy_set_tx_multiq,
1374 netdev_dummy_reconfigure);
1377 pkt_list_delete(struct ovs_list *l)
1379 struct pkt_list_node *pkt;
1381 LIST_FOR_EACH_POP(pkt, list_node, l) {
1382 dp_packet_delete(pkt->pkt);
1387 static struct dp_packet *
1388 eth_from_packet_or_flow(const char *s)
1390 enum odp_key_fitness fitness;
1391 struct dp_packet *packet;
1392 struct ofpbuf odp_key;
1396 if (!eth_from_hex(s, &packet)) {
1400 /* Convert string to datapath key.
1402 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1403 * the code for that currently calls exit() on parse error. We have to
1404 * settle for parsing a datapath key for now.
1406 ofpbuf_init(&odp_key, 0);
1407 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1409 ofpbuf_uninit(&odp_key);
1413 /* Convert odp_key to flow. */
1414 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1415 if (fitness == ODP_FIT_ERROR) {
1416 ofpbuf_uninit(&odp_key);
1420 packet = dp_packet_new(0);
1421 flow_compose(packet, &flow);
1423 ofpbuf_uninit(&odp_key);
1428 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1430 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1432 pkt_node->pkt = packet;
1433 ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
1434 rx->recv_queue_len++;
1435 seq_change(rx->seq);
1439 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
1441 OVS_REQUIRES(dummy->mutex)
1443 struct netdev_rxq_dummy *rx, *prev;
1445 if (dummy->rxq_pcap) {
1446 ovs_pcap_write(dummy->rxq_pcap, packet);
1447 fflush(dummy->rxq_pcap);
1450 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1451 if (rx->up.queue_id == queue_id &&
1452 rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1454 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1460 netdev_dummy_queue_packet__(prev, packet);
1462 dp_packet_delete(packet);
1467 netdev_dummy_receive(struct unixctl_conn *conn,
1468 int argc, const char *argv[], void *aux OVS_UNUSED)
1470 struct netdev_dummy *dummy_dev;
1471 struct netdev *netdev;
1472 int i, k = 1, rx_qid = 0;
1474 netdev = netdev_from_name(argv[k++]);
1475 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1476 unixctl_command_reply_error(conn, "no such dummy netdev");
1479 dummy_dev = netdev_dummy_cast(netdev);
1481 ovs_mutex_lock(&dummy_dev->mutex);
1483 if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
1484 rx_qid = strtol(argv[k + 1], NULL, 10);
1485 if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
1486 unixctl_command_reply_error(conn, "bad rx queue id.");
1492 for (i = k; i < argc; i++) {
1493 struct dp_packet *packet;
1495 packet = eth_from_packet_or_flow(argv[i]);
1497 unixctl_command_reply_error(conn, "bad packet syntax");
1501 netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
1504 unixctl_command_reply(conn, NULL);
1507 ovs_mutex_unlock(&dummy_dev->mutex);
1509 netdev_close(netdev);
1513 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1514 OVS_REQUIRES(dev->mutex)
1516 enum netdev_flags old_flags;
1519 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1521 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
/* unixctl handler for "netdev-dummy/set-admin-state [netdev] up|down":
 * brings one named dummy netdev — or, when no name is given, every dummy
 * netdev on the global 'dummy_list' — administratively up or down.
 *
 * NOTE(review): interior lines (the 'bool up' declaration, the early return
 * after the error reply, and the argc test selecting single-vs-all) are
 * missing from this chunk. */
1526 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1527                              const char *argv[], void *aux OVS_UNUSED)
         /* The state keyword is always the last argument; comparison is
          * case-insensitive. */
1531     if (!strcasecmp(argv[argc - 1], "up")) {
1533     } else if ( !strcasecmp(argv[argc - 1], "down")) {
1536         unixctl_command_reply_error(conn, "Invalid Admin State");
         /* Single-device form: argv[1] names the netdev. */
1541     struct netdev *netdev = netdev_from_name(argv[1]);
1542     if (netdev && is_dummy_class(netdev->netdev_class)) {
1543         struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1545         ovs_mutex_lock(&dummy_dev->mutex);
1546         netdev_dummy_set_admin_state__(dummy_dev, up);
1547         ovs_mutex_unlock(&dummy_dev->mutex);
1549         netdev_close(netdev);
1551         unixctl_command_reply_error(conn, "Unknown Dummy Interface");
             /* netdev may be NULL or a non-dummy here; netdev_close()
              * presumably tolerates NULL — confirm against netdev.c. */
1552         netdev_close(netdev);
         /* All-devices form: walk the global list under dummy_list_mutex,
          * taking each device's own mutex in turn. */
1556     struct netdev_dummy *netdev;
1558     ovs_mutex_lock(&dummy_list_mutex);
1559     LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1560         ovs_mutex_lock(&netdev->mutex);
1561         netdev_dummy_set_admin_state__(netdev, up);
1562         ovs_mutex_unlock(&netdev->mutex);
1564     ovs_mutex_unlock(&dummy_list_mutex);
1566     unixctl_command_reply(conn, "OK");
/* Appends a human-readable line "<name>: <state>\n" to 's' describing the
 * given connection state.  Helper for netdev_dummy_conn_state().
 * (The 'switch' keyword and per-case 'break's are on lines missing from
 * this chunk.) */
1570 display_conn_state__(struct ds *s, const char *name,
1571                      enum dummy_netdev_conn_state state)
1573     ds_put_format(s, "%s: ", name);
1576     case CONN_STATE_CONNECTED:
1577         ds_put_cstr(s, "connected\n");
1580     case CONN_STATE_NOT_CONNECTED:
1581         ds_put_cstr(s, "disconnected\n");
         /* CONN_STATE_UNKNOWN doubles as the default/fallback case. */
1584     case CONN_STATE_UNKNOWN:
1586         ds_put_cstr(s, "unknown\n");
/* unixctl handler for "netdev-dummy/conn-state [netdev]": reports the
 * stream-connection state of one named dummy netdev, or of every dummy
 * netdev whose state is known when no name is given.
 *
 * NOTE(review): the 'struct ds s' initialization and the argc test selecting
 * single-vs-all mode are on lines missing from this chunk. */
1592 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1593                         const char *argv[], void *aux OVS_UNUSED)
1595     enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
         /* Single-device form: query just argv[1], reporting "unknown" for
          * nonexistent or non-dummy devices (state keeps its initial value). */
1601     const char *dev_name = argv[1];
1602     struct netdev *netdev = netdev_from_name(dev_name);
1604     if (netdev && is_dummy_class(netdev->netdev_class)) {
1605         struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1607         ovs_mutex_lock(&dummy_dev->mutex);
1608         state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1609         ovs_mutex_unlock(&dummy_dev->mutex);
1611     netdev_close(netdev);
1613     display_conn_state__(&s, dev_name, state);
         /* All-devices form: walk the global list; devices with no relevant
          * connection (CONN_STATE_UNKNOWN) are omitted from the report. */
1615     struct netdev_dummy *netdev;
1617     ovs_mutex_lock(&dummy_list_mutex);
1618     LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1619         ovs_mutex_lock(&netdev->mutex);
1620         state = dummy_netdev_get_conn_state(&netdev->conn);
1621         ovs_mutex_unlock(&netdev->mutex);
1622         if (state != CONN_STATE_UNKNOWN) {
1623             display_conn_state__(&s, netdev->up.name, state);
1626     ovs_mutex_unlock(&dummy_list_mutex);
         /* Reply with the accumulated report; ds_destroy presumably follows
          * on a line missing from this chunk. */
1629     unixctl_command_reply(conn, ds_cstr(&s));
/* unixctl handler for "netdev-dummy/ip4addr <netdev> <ip/mask>": assigns an
 * IPv4 address and netmask to the named dummy netdev. */
1634 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1635                      const char *argv[], void *aux OVS_UNUSED)
1637     struct netdev *netdev = netdev_from_name(argv[1]);
1639     if (netdev && is_dummy_class(netdev->netdev_class)) {
1640         struct in_addr ip, mask;
         /* ip_parse_masked() returns NULL on success, else an error string.
          * NOTE(review): if the error string is heap-allocated, the free()
          * would be on a line missing from this chunk — confirm it exists. */
1643         error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
1645             netdev_dummy_set_in4(netdev, ip, mask);
1646             unixctl_command_reply(conn, "OK");
1648             unixctl_command_reply_error(conn, error);
1652         unixctl_command_reply_error(conn, "Unknown Dummy Interface");
         /* Release the reference taken by netdev_from_name() (NULL-safe,
          * presumably). */
1655     netdev_close(netdev);
/* unixctl handler for "netdev-dummy/ip6addr <netdev> <ip6/plen>": assigns an
 * IPv6 address (given in CIDR form) to the named dummy netdev. */
1659 netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1660                      const char *argv[], void *aux OVS_UNUSED)
1662     struct netdev *netdev = netdev_from_name(argv[1]);
1664     if (netdev && is_dummy_class(netdev->netdev_class)) {
1665         struct in6_addr ip6;
         /* ipv6_parse_cidr() returns NULL on success, else an error string;
          * on success the prefix length is expanded into a full mask. */
1669         error = ipv6_parse_cidr(argv[2], &ip6, &plen);
1671             struct in6_addr mask;
1673             mask = ipv6_create_mask(plen);
1674             netdev_dummy_set_in6(netdev, &ip6, &mask);
1675             unixctl_command_reply(conn, "OK");
1677             unixctl_command_reply_error(conn, error);
         /* NOTE(review): netdev_close() appears both here (line 1680, inside
          * the dummy-class branch) and again at line 1685 below — on this
          * path the reference looks closed twice, a potential refcount
          * underflow.  Interior lines are missing from this chunk, so
          * confirm against the full file before changing. */
1680         netdev_close(netdev);
1682         unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1685     netdev_close(netdev);
/* Replaces the existing netdev provider named 'type' with a clone of the
 * dummy class registered under that same type name.  No-op (silently) when
 * no provider of that name can be unregistered.
 *
 * NOTE(review): on registration failure only class->type is freed here; the
 * free of 'class' itself would be on a line missing from this chunk —
 * confirm it exists. */
1690 netdev_dummy_override(const char *type)
1692     if (!netdev_unregister_provider(type)) {
1693         struct netdev_class *class;
         /* Duplicate the template dummy_class, then point its type at a
          * heap copy of the requested name. */
1696         class = xmemdup(&dummy_class, sizeof dummy_class);
1697         class->type = xstrdup(type);
1698         error = netdev_register_provider(class);
1700             VLOG_ERR("%s: failed to register netdev provider (%s)",
1701                      type, ovs_strerror(error));
             /* CONST_CAST needed because netdev_class declares type as
              * const char *. */
1702             free(CONST_CAST(char *, class->type));
/* One-time setup for the dummy netdev subsystem: registers the unixctl
 * debug/test commands, optionally overrides real providers with dummies
 * according to 'level', registers the dummy provider classes, and enables
 * dummy tunnel vports. */
1709 netdev_dummy_register(enum dummy_level level)
1711     unixctl_command_register("netdev-dummy/receive",
1712                              "name [--qid queue_id] packet|flow...",
1713                              2, INT_MAX, netdev_dummy_receive, NULL);
1714     unixctl_command_register("netdev-dummy/set-admin-state",
1715                              "[netdev] up|down", 1, 2,
1716                              netdev_dummy_set_admin_state, NULL);
1717     unixctl_command_register("netdev-dummy/conn-state",
1719                              netdev_dummy_conn_state, NULL);
1720     unixctl_command_register("netdev-dummy/ip4addr",
1721                              "[netdev] ipaddr/mask-prefix-len", 2, 2,
1722                              netdev_dummy_ip4addr, NULL);
1723     unixctl_command_register("netdev-dummy/ip6addr",
1724                              "[netdev] ip6addr", 2, 2,
1725                              netdev_dummy_ip6addr, NULL);
         /* DUMMY_OVERRIDE_ALL: replace every registered netdev type with a
          * dummy, except "patch" (which must keep its real implementation). */
1727     if (level == DUMMY_OVERRIDE_ALL) {
1732         netdev_enumerate_types(&types);
1733         SSET_FOR_EACH (type, &types) {
1734             if (strcmp(type, "patch")) {
1735                 netdev_dummy_override(type);
1738         sset_destroy(&types);
         /* DUMMY_OVERRIDE_SYSTEM: only the "system" type is dummied. */
1739     } else if (level == DUMMY_OVERRIDE_SYSTEM) {
1740         netdev_dummy_override("system");
         /* Always register the plain "dummy" and "dummy-pmd" classes. */
1742     netdev_register_provider(&dummy_class);
1743     netdev_register_provider(&dummy_pmd_class);
1745     netdev_vport_tunnel_register();