2 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
23 #include "dp-packet.h"
24 #include "dpif-netdev.h"
25 #include "dynamic-string.h"
28 #include "netdev-provider.h"
29 #include "netdev-vport.h"
31 #include "ofp-print.h"
33 #include "ovs-atomic.h"
35 #include "pcap-file.h"
36 #include "poll-loop.h"
40 #include "unaligned.h"
43 #include "reconnect.h"
44 #include "openvswitch/vlog.h"
46 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
50 struct dummy_packet_stream {
51 struct stream *stream;
52 struct dp_packet rxbuf;
/* Kind of packet connection configured on a dummy netdev. */
enum dummy_packet_conn_type {
    NONE,       /* No connection is configured. */
    PASSIVE,    /* Listener. */
    ACTIVE      /* Connect to listener. */
};
/* Reported state of a dummy netdev's packet connection. */
enum dummy_netdev_conn_state {
    CONN_STATE_CONNECTED,       /* Listener connected. */
    CONN_STATE_NOT_CONNECTED,   /* Listener not connected. */
    CONN_STATE_UNKNOWN,         /* No relevant information. */
};
68 struct dummy_packet_pconn {
69 struct pstream *pstream;
70 struct dummy_packet_stream *streams;
/* Active (connecting) side: a single stream managed by a reconnect FSM. */
struct dummy_packet_rconn {
    struct dummy_packet_stream *rstream;
    struct reconnect *reconnect;
};
79 struct dummy_packet_conn {
80 enum dummy_packet_conn_type type;
82 struct dummy_packet_pconn pconn;
83 struct dummy_packet_rconn rconn;
87 struct pkt_list_node {
88 struct dp_packet *pkt;
89 struct ovs_list list_node;
92 /* Protects 'dummy_list'. */
93 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
95 /* Contains all 'struct dummy_dev's. */
96 static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
97 = OVS_LIST_INITIALIZER(&dummy_list);
103 struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);
105 /* Protects all members below. */
106 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
108 struct eth_addr hwaddr OVS_GUARDED;
110 struct netdev_stats stats OVS_GUARDED;
111 enum netdev_flags flags OVS_GUARDED;
112 int ifindex OVS_GUARDED;
114 struct dummy_packet_conn conn OVS_GUARDED;
116 FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
118 struct in_addr address, netmask;
119 struct in6_addr ipv6;
120 struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
123 /* Max 'recv_queue_len' in struct netdev_dummy. */
124 #define NETDEV_DUMMY_MAX_QUEUE 100
126 struct netdev_rxq_dummy {
127 struct netdev_rxq up;
128 struct ovs_list node; /* In netdev_dummy's "rxes" list. */
129 struct ovs_list recv_queue;
130 int recv_queue_len; /* list_size(&recv_queue). */
131 struct seq *seq; /* Reports newly queued packets. */
134 static unixctl_cb_func netdev_dummy_set_admin_state;
135 static int netdev_dummy_construct(struct netdev *);
136 static void netdev_dummy_queue_packet(struct netdev_dummy *, struct dp_packet *);
138 static void dummy_packet_stream_close(struct dummy_packet_stream *);
140 static void pkt_list_delete(struct ovs_list *);
143 is_dummy_class(const struct netdev_class *class)
145 return class->construct == netdev_dummy_construct;
148 static struct netdev_dummy *
149 netdev_dummy_cast(const struct netdev *netdev)
151 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
152 return CONTAINER_OF(netdev, struct netdev_dummy, up);
155 static struct netdev_rxq_dummy *
156 netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
158 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
159 return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
163 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
165 int rxbuf_size = stream ? 2048 : 0;
167 dp_packet_init(&s->rxbuf, rxbuf_size);
171 static struct dummy_packet_stream *
172 dummy_packet_stream_create(struct stream *stream)
174 struct dummy_packet_stream *s;
176 s = xzalloc(sizeof *s);
177 dummy_packet_stream_init(s, stream);
183 dummy_packet_stream_wait(struct dummy_packet_stream *s)
185 stream_run_wait(s->stream);
186 if (!list_is_empty(&s->txq)) {
187 stream_send_wait(s->stream);
189 stream_recv_wait(s->stream);
193 dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
195 if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
197 struct pkt_list_node *node;
199 b = dp_packet_clone_data_with_headroom(buffer, size, 2);
200 put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));
202 node = xmalloc(sizeof *node);
204 list_push_back(&s->txq, &node->list_node);
209 dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
214 stream_run(s->stream);
216 if (!list_is_empty(&s->txq)) {
217 struct pkt_list_node *txbuf_node;
218 struct dp_packet *txbuf;
221 ASSIGN_CONTAINER(txbuf_node, list_front(&s->txq), list_node);
222 txbuf = txbuf_node->pkt;
223 retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
226 dp_packet_pull(txbuf, retval);
227 if (!dp_packet_size(txbuf)) {
228 list_remove(&txbuf_node->list_node);
230 dp_packet_delete(txbuf);
232 } else if (retval != -EAGAIN) {
238 if (dp_packet_size(&s->rxbuf) < 2) {
239 n = 2 - dp_packet_size(&s->rxbuf);
243 frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
244 if (frame_len < ETH_HEADER_LEN) {
248 n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
255 dp_packet_prealloc_tailroom(&s->rxbuf, n);
256 retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);
259 dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
260 if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
261 dp_packet_pull(&s->rxbuf, 2);
262 netdev_dummy_queue_packet(dev,
263 dp_packet_clone(&s->rxbuf));
264 dp_packet_clear(&s->rxbuf);
266 } else if (retval != -EAGAIN) {
267 error = (retval < 0 ? -retval
268 : dp_packet_size(&s->rxbuf) ? EPROTO
277 dummy_packet_stream_close(struct dummy_packet_stream *s)
279 stream_close(s->stream);
280 dp_packet_uninit(&s->rxbuf);
281 pkt_list_delete(&s->txq);
285 dummy_packet_conn_init(struct dummy_packet_conn *conn)
287 memset(conn, 0, sizeof *conn);
292 dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
295 switch (conn->type) {
297 smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
301 smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
311 dummy_packet_conn_close(struct dummy_packet_conn *conn)
314 struct dummy_packet_pconn *pconn = &conn->u.pconn;
315 struct dummy_packet_rconn *rconn = &conn->u.rconn;
317 switch (conn->type) {
319 pstream_close(pconn->pstream);
320 for (i = 0; i < pconn->n_streams; i++) {
321 dummy_packet_stream_close(&pconn->streams[i]);
323 free(pconn->streams);
324 pconn->pstream = NULL;
325 pconn->streams = NULL;
329 dummy_packet_stream_close(rconn->rstream);
330 free(rconn->rstream);
331 rconn->rstream = NULL;
332 reconnect_destroy(rconn->reconnect);
333 rconn->reconnect = NULL;
342 memset(conn, 0, sizeof *conn);
346 dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
347 const struct smap *args)
349 const char *pstream = smap_get(args, "pstream");
350 const char *stream = smap_get(args, "stream");
352 if (pstream && stream) {
353 VLOG_WARN("Open failed: both %s and %s are configured",
358 switch (conn->type) {
361 !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
364 dummy_packet_conn_close(conn);
368 !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
371 dummy_packet_conn_close(conn);
381 error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
383 VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
385 conn->type = PASSIVE;
391 struct stream *active_stream;
392 struct reconnect *reconnect;
394 reconnect = reconnect_create(time_msec());
395 reconnect_set_name(reconnect, stream);
396 reconnect_set_passive(reconnect, false, time_msec());
397 reconnect_enable(reconnect, time_msec());
398 reconnect_set_backoff(reconnect, 100, INT_MAX);
399 reconnect_set_probe_interval(reconnect, 0);
400 conn->u.rconn.reconnect = reconnect;
403 error = stream_open(stream, &active_stream, DSCP_DEFAULT);
404 conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);
408 reconnect_connected(reconnect, time_msec());
412 reconnect_connecting(reconnect, time_msec());
416 reconnect_connect_failed(reconnect, time_msec(), error);
417 stream_close(active_stream);
418 conn->u.rconn.rstream->stream = NULL;
425 dummy_pconn_run(struct netdev_dummy *dev)
426 OVS_REQUIRES(dev->mutex)
428 struct stream *new_stream;
429 struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
433 error = pstream_accept(pconn->pstream, &new_stream);
435 struct dummy_packet_stream *s;
437 pconn->streams = xrealloc(pconn->streams,
438 ((pconn->n_streams + 1)
440 s = &pconn->streams[pconn->n_streams++];
441 dummy_packet_stream_init(s, new_stream);
442 } else if (error != EAGAIN) {
443 VLOG_WARN("%s: accept failed (%s)",
444 pstream_get_name(pconn->pstream), ovs_strerror(error));
445 pstream_close(pconn->pstream);
446 pconn->pstream = NULL;
447 dev->conn.type = NONE;
450 for (i = 0; i < pconn->n_streams; i++) {
451 struct dummy_packet_stream *s = &pconn->streams[i];
453 error = dummy_packet_stream_run(dev, s);
455 VLOG_DBG("%s: closing connection (%s)",
456 stream_get_name(s->stream),
457 ovs_retval_to_string(error));
458 dummy_packet_stream_close(s);
459 pconn->streams[i] = pconn->streams[--pconn->n_streams];
465 dummy_rconn_run(struct netdev_dummy *dev)
466 OVS_REQUIRES(dev->mutex)
468 struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;
470 switch (reconnect_run(rconn->reconnect, time_msec())) {
471 case RECONNECT_CONNECT:
475 if (rconn->rstream->stream) {
476 error = stream_connect(rconn->rstream->stream);
478 error = stream_open(reconnect_get_name(rconn->reconnect),
479 &rconn->rstream->stream, DSCP_DEFAULT);
484 reconnect_connected(rconn->reconnect, time_msec());
488 reconnect_connecting(rconn->reconnect, time_msec());
492 reconnect_connect_failed(rconn->reconnect, time_msec(), error);
493 stream_close(rconn->rstream->stream);
494 rconn->rstream->stream = NULL;
500 case RECONNECT_DISCONNECT:
501 case RECONNECT_PROBE:
506 if (reconnect_is_connected(rconn->reconnect)) {
509 err = dummy_packet_stream_run(dev, rconn->rstream);
512 reconnect_disconnected(rconn->reconnect, time_msec(), err);
513 stream_close(rconn->rstream->stream);
514 rconn->rstream->stream = NULL;
520 dummy_packet_conn_run(struct netdev_dummy *dev)
521 OVS_REQUIRES(dev->mutex)
523 switch (dev->conn.type) {
525 dummy_pconn_run(dev);
529 dummy_rconn_run(dev);
539 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
542 switch (conn->type) {
544 pstream_wait(conn->u.pconn.pstream);
545 for (i = 0; i < conn->u.pconn.n_streams; i++) {
546 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
547 dummy_packet_stream_wait(s);
551 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
552 dummy_packet_stream_wait(conn->u.rconn.rstream);
563 dummy_packet_conn_send(struct dummy_packet_conn *conn,
564 const void *buffer, size_t size)
568 switch (conn->type) {
570 for (i = 0; i < conn->u.pconn.n_streams; i++) {
571 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
573 dummy_packet_stream_send(s, buffer, size);
574 pstream_wait(conn->u.pconn.pstream);
579 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
580 dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
581 dummy_packet_stream_wait(conn->u.rconn.rstream);
591 static enum dummy_netdev_conn_state
592 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
594 enum dummy_netdev_conn_state state;
596 if (conn->type == ACTIVE) {
597 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
598 state = CONN_STATE_CONNECTED;
600 state = CONN_STATE_NOT_CONNECTED;
603 state = CONN_STATE_UNKNOWN;
610 netdev_dummy_run(void)
612 struct netdev_dummy *dev;
614 ovs_mutex_lock(&dummy_list_mutex);
615 LIST_FOR_EACH (dev, list_node, &dummy_list) {
616 ovs_mutex_lock(&dev->mutex);
617 dummy_packet_conn_run(dev);
618 ovs_mutex_unlock(&dev->mutex);
620 ovs_mutex_unlock(&dummy_list_mutex);
624 netdev_dummy_wait(void)
626 struct netdev_dummy *dev;
628 ovs_mutex_lock(&dummy_list_mutex);
629 LIST_FOR_EACH (dev, list_node, &dummy_list) {
630 ovs_mutex_lock(&dev->mutex);
631 dummy_packet_conn_wait(&dev->conn);
632 ovs_mutex_unlock(&dev->mutex);
634 ovs_mutex_unlock(&dummy_list_mutex);
637 static struct netdev *
638 netdev_dummy_alloc(void)
640 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
645 netdev_dummy_construct(struct netdev *netdev_)
647 static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
648 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
651 n = atomic_count_inc(&next_n);
653 ovs_mutex_init(&netdev->mutex);
654 ovs_mutex_lock(&netdev->mutex);
655 netdev->hwaddr.ea[0] = 0xaa;
656 netdev->hwaddr.ea[1] = 0x55;
657 netdev->hwaddr.ea[2] = n >> 24;
658 netdev->hwaddr.ea[3] = n >> 16;
659 netdev->hwaddr.ea[4] = n >> 8;
660 netdev->hwaddr.ea[5] = n;
663 netdev->ifindex = -EOPNOTSUPP;
665 dummy_packet_conn_init(&netdev->conn);
667 list_init(&netdev->rxes);
668 ovs_mutex_unlock(&netdev->mutex);
670 ovs_mutex_lock(&dummy_list_mutex);
671 list_push_back(&dummy_list, &netdev->list_node);
672 ovs_mutex_unlock(&dummy_list_mutex);
678 netdev_dummy_destruct(struct netdev *netdev_)
680 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
682 ovs_mutex_lock(&dummy_list_mutex);
683 list_remove(&netdev->list_node);
684 ovs_mutex_unlock(&dummy_list_mutex);
686 ovs_mutex_lock(&netdev->mutex);
687 dummy_packet_conn_close(&netdev->conn);
688 netdev->conn.type = NONE;
690 ovs_mutex_unlock(&netdev->mutex);
691 ovs_mutex_destroy(&netdev->mutex);
/* Frees the memory allocated by netdev_dummy_alloc(). */
static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}
703 netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
705 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
707 ovs_mutex_lock(&netdev->mutex);
709 if (netdev->ifindex >= 0) {
710 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
713 dummy_packet_conn_get_config(&netdev->conn, args);
715 ovs_mutex_unlock(&netdev->mutex);
720 netdev_dummy_get_in4(const struct netdev *netdev_,
721 struct in_addr *address, struct in_addr *netmask)
723 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
725 ovs_mutex_lock(&netdev->mutex);
726 *address = netdev->address;
727 *netmask = netdev->netmask;
728 ovs_mutex_unlock(&netdev->mutex);
730 return address->s_addr ? 0 : EADDRNOTAVAIL;
734 netdev_dummy_get_in6(const struct netdev *netdev_, struct in6_addr *in6)
736 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
738 ovs_mutex_lock(&netdev->mutex);
740 ovs_mutex_unlock(&netdev->mutex);
742 return ipv6_addr_is_set(in6) ? 0 : EADDRNOTAVAIL;
746 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
747 struct in_addr netmask)
749 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
751 ovs_mutex_lock(&netdev->mutex);
752 netdev->address = address;
753 netdev->netmask = netmask;
754 ovs_mutex_unlock(&netdev->mutex);
760 netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6)
762 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
764 ovs_mutex_lock(&netdev->mutex);
766 ovs_mutex_unlock(&netdev->mutex);
772 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
774 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
777 ovs_mutex_lock(&netdev->mutex);
778 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
780 dummy_packet_conn_set_config(&netdev->conn, args);
782 if (netdev->rxq_pcap) {
783 fclose(netdev->rxq_pcap);
785 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
786 fclose(netdev->tx_pcap);
788 netdev->rxq_pcap = netdev->tx_pcap = NULL;
789 pcap = smap_get(args, "pcap");
791 netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
793 const char *rxq_pcap = smap_get(args, "rxq_pcap");
794 const char *tx_pcap = smap_get(args, "tx_pcap");
797 netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
800 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
804 ovs_mutex_unlock(&netdev->mutex);
809 static struct netdev_rxq *
810 netdev_dummy_rxq_alloc(void)
812 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
817 netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
819 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
820 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
822 ovs_mutex_lock(&netdev->mutex);
823 list_push_back(&netdev->rxes, &rx->node);
824 list_init(&rx->recv_queue);
825 rx->recv_queue_len = 0;
826 rx->seq = seq_create();
827 ovs_mutex_unlock(&netdev->mutex);
833 netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
835 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
836 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
838 ovs_mutex_lock(&netdev->mutex);
839 list_remove(&rx->node);
840 pkt_list_delete(&rx->recv_queue);
841 ovs_mutex_unlock(&netdev->mutex);
842 seq_destroy(rx->seq);
/* Frees the memory allocated by netdev_dummy_rxq_alloc(). */
static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);

    free(rx);
}
854 netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
857 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
858 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
859 struct dp_packet *packet;
861 ovs_mutex_lock(&netdev->mutex);
862 if (!list_is_empty(&rx->recv_queue)) {
863 struct pkt_list_node *pkt_node;
865 ASSIGN_CONTAINER(pkt_node, list_pop_front(&rx->recv_queue), list_node);
866 packet = pkt_node->pkt;
868 rx->recv_queue_len--;
872 ovs_mutex_unlock(&netdev->mutex);
877 ovs_mutex_lock(&netdev->mutex);
878 netdev->stats.rx_packets++;
879 netdev->stats.rx_bytes += dp_packet_size(packet);
880 ovs_mutex_unlock(&netdev->mutex);
882 dp_packet_pad(packet);
883 dp_packet_rss_invalidate(packet);
891 netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
893 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
894 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
895 uint64_t seq = seq_read(rx->seq);
897 ovs_mutex_lock(&netdev->mutex);
898 if (!list_is_empty(&rx->recv_queue)) {
899 poll_immediate_wake();
901 seq_wait(rx->seq, seq);
903 ovs_mutex_unlock(&netdev->mutex);
907 netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
909 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
910 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
912 ovs_mutex_lock(&netdev->mutex);
913 pkt_list_delete(&rx->recv_queue);
914 rx->recv_queue_len = 0;
915 ovs_mutex_unlock(&netdev->mutex);
923 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
924 struct dp_packet **pkts, int cnt, bool may_steal)
926 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
930 for (i = 0; i < cnt; i++) {
931 const void *buffer = dp_packet_data(pkts[i]);
932 size_t size = dp_packet_size(pkts[i]);
934 if (size < ETH_HEADER_LEN) {
938 const struct eth_header *eth = buffer;
941 ovs_mutex_lock(&dev->mutex);
942 max_size = dev->mtu + ETH_HEADER_LEN;
943 ovs_mutex_unlock(&dev->mutex);
945 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
946 max_size += VLAN_HEADER_LEN;
948 if (size > max_size) {
954 ovs_mutex_lock(&dev->mutex);
955 dev->stats.tx_packets++;
956 dev->stats.tx_bytes += size;
958 dummy_packet_conn_send(&dev->conn, buffer, size);
960 /* Reply to ARP requests for 'dev''s assigned IP address. */
961 if (dev->address.s_addr) {
962 struct dp_packet packet;
965 dp_packet_use_const(&packet, buffer, size);
966 flow_extract(&packet, &flow);
967 if (flow.dl_type == htons(ETH_TYPE_ARP)
968 && flow.nw_proto == ARP_OP_REQUEST
969 && flow.nw_dst == dev->address.s_addr) {
970 struct dp_packet *reply = dp_packet_new(0);
971 compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
972 false, flow.nw_dst, flow.nw_src);
973 netdev_dummy_queue_packet(dev, reply);
978 struct dp_packet packet;
980 dp_packet_use_const(&packet, buffer, size);
981 ovs_pcap_write(dev->tx_pcap, &packet);
982 fflush(dev->tx_pcap);
985 ovs_mutex_unlock(&dev->mutex);
989 for (i = 0; i < cnt; i++) {
990 dp_packet_delete(pkts[i]);
998 netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1000 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1002 ovs_mutex_lock(&dev->mutex);
1003 if (!eth_addr_equals(dev->hwaddr, mac)) {
1005 netdev_change_seq_changed(netdev);
1007 ovs_mutex_unlock(&dev->mutex);
1013 netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1015 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1017 ovs_mutex_lock(&dev->mutex);
1019 ovs_mutex_unlock(&dev->mutex);
1025 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
1027 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1029 ovs_mutex_lock(&dev->mutex);
1031 ovs_mutex_unlock(&dev->mutex);
1037 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
1039 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1041 ovs_mutex_lock(&dev->mutex);
1043 ovs_mutex_unlock(&dev->mutex);
1049 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1051 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1053 ovs_mutex_lock(&dev->mutex);
1054 *stats = dev->stats;
1055 ovs_mutex_unlock(&dev->mutex);
1061 netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
1062 unsigned int queue_id, struct smap *details OVS_UNUSED)
1064 if (queue_id == 0) {
1072 netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
1074 *stats = (struct netdev_queue_stats) {
1075 .tx_bytes = UINT64_MAX,
1076 .tx_packets = UINT64_MAX,
1077 .tx_errors = UINT64_MAX,
1078 .created = LLONG_MIN,
1083 netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
1084 unsigned int queue_id,
1085 struct netdev_queue_stats *stats)
1087 if (queue_id == 0) {
1088 netdev_dummy_init_queue_stats(stats);
/* Iterator state for the single-queue dump below. */
struct netdev_dummy_queue_state {
    unsigned int next_queue;
};
1100 netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
1103 struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
1104 state->next_queue = 0;
1110 netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
1112 unsigned int *queue_id,
1113 struct smap *details OVS_UNUSED)
1115 struct netdev_dummy_queue_state *state = state_;
1116 if (state->next_queue == 0) {
1118 state->next_queue++;
1126 netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
1134 netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
1135 void (*cb)(unsigned int queue_id,
1136 struct netdev_queue_stats *,
1140 struct netdev_queue_stats stats;
1141 netdev_dummy_init_queue_stats(&stats);
1147 netdev_dummy_get_ifindex(const struct netdev *netdev)
1149 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1152 ovs_mutex_lock(&dev->mutex);
1153 ifindex = dev->ifindex;
1154 ovs_mutex_unlock(&dev->mutex);
1160 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1161 enum netdev_flags off, enum netdev_flags on,
1162 enum netdev_flags *old_flagsp)
1163 OVS_REQUIRES(netdev->mutex)
1165 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1169 *old_flagsp = netdev->flags;
1170 netdev->flags |= on;
1171 netdev->flags &= ~off;
1172 if (*old_flagsp != netdev->flags) {
1173 netdev_change_seq_changed(&netdev->up);
1180 netdev_dummy_update_flags(struct netdev *netdev_,
1181 enum netdev_flags off, enum netdev_flags on,
1182 enum netdev_flags *old_flagsp)
1184 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1187 ovs_mutex_lock(&netdev->mutex);
1188 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1189 ovs_mutex_unlock(&netdev->mutex);
1194 /* Helper functions. */
1196 static const struct netdev_class dummy_class = {
1203 netdev_dummy_construct,
1204 netdev_dummy_destruct,
1205 netdev_dummy_dealloc,
1206 netdev_dummy_get_config,
1207 netdev_dummy_set_config,
1208 NULL, /* get_tunnel_config */
1209 NULL, /* build header */
1210 NULL, /* push header */
1211 NULL, /* pop header */
1212 NULL, /* get_numa_id */
1213 NULL, /* set_multiq */
1215 netdev_dummy_send, /* send */
1216 NULL, /* send_wait */
1218 netdev_dummy_set_etheraddr,
1219 netdev_dummy_get_etheraddr,
1220 netdev_dummy_get_mtu,
1221 netdev_dummy_set_mtu,
1222 netdev_dummy_get_ifindex,
1223 NULL, /* get_carrier */
1224 NULL, /* get_carrier_resets */
1225 NULL, /* get_miimon */
1226 netdev_dummy_get_stats,
1228 NULL, /* get_features */
1229 NULL, /* set_advertisements */
1231 NULL, /* set_policing */
1232 NULL, /* get_qos_types */
1233 NULL, /* get_qos_capabilities */
1236 netdev_dummy_get_queue,
1237 NULL, /* set_queue */
1238 NULL, /* delete_queue */
1239 netdev_dummy_get_queue_stats,
1240 netdev_dummy_queue_dump_start,
1241 netdev_dummy_queue_dump_next,
1242 netdev_dummy_queue_dump_done,
1243 netdev_dummy_dump_queue_stats,
1245 netdev_dummy_get_in4, /* get_in4 */
1247 netdev_dummy_get_in6, /* get_in6 */
1248 NULL, /* add_router */
1249 NULL, /* get_next_hop */
1250 NULL, /* get_status */
1251 NULL, /* arp_lookup */
1253 netdev_dummy_update_flags,
1255 netdev_dummy_rxq_alloc,
1256 netdev_dummy_rxq_construct,
1257 netdev_dummy_rxq_destruct,
1258 netdev_dummy_rxq_dealloc,
1259 netdev_dummy_rxq_recv,
1260 netdev_dummy_rxq_wait,
1261 netdev_dummy_rxq_drain,
1265 pkt_list_delete(struct ovs_list *l)
1267 struct pkt_list_node *pkt;
1269 LIST_FOR_EACH_POP(pkt, list_node, l) {
1270 dp_packet_delete(pkt->pkt);
1275 static struct dp_packet *
1276 eth_from_packet_or_flow(const char *s)
1278 enum odp_key_fitness fitness;
1279 struct dp_packet *packet;
1280 struct ofpbuf odp_key;
1284 if (!eth_from_hex(s, &packet)) {
1288 /* Convert string to datapath key.
1290 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1291 * the code for that currently calls exit() on parse error. We have to
1292 * settle for parsing a datapath key for now.
1294 ofpbuf_init(&odp_key, 0);
1295 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1297 ofpbuf_uninit(&odp_key);
1301 /* Convert odp_key to flow. */
1302 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1303 if (fitness == ODP_FIT_ERROR) {
1304 ofpbuf_uninit(&odp_key);
1308 packet = dp_packet_new(0);
1309 flow_compose(packet, &flow);
1311 ofpbuf_uninit(&odp_key);
1316 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1318 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1320 pkt_node->pkt = packet;
1321 list_push_back(&rx->recv_queue, &pkt_node->list_node);
1322 rx->recv_queue_len++;
1323 seq_change(rx->seq);
1327 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet)
1328 OVS_REQUIRES(dummy->mutex)
1330 struct netdev_rxq_dummy *rx, *prev;
1332 if (dummy->rxq_pcap) {
1333 ovs_pcap_write(dummy->rxq_pcap, packet);
1334 fflush(dummy->rxq_pcap);
1337 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1338 if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1340 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1346 netdev_dummy_queue_packet__(prev, packet);
1348 dp_packet_delete(packet);
1353 netdev_dummy_receive(struct unixctl_conn *conn,
1354 int argc, const char *argv[], void *aux OVS_UNUSED)
1356 struct netdev_dummy *dummy_dev;
1357 struct netdev *netdev;
1360 netdev = netdev_from_name(argv[1]);
1361 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1362 unixctl_command_reply_error(conn, "no such dummy netdev");
1365 dummy_dev = netdev_dummy_cast(netdev);
1367 for (i = 2; i < argc; i++) {
1368 struct dp_packet *packet;
1370 packet = eth_from_packet_or_flow(argv[i]);
1372 unixctl_command_reply_error(conn, "bad packet syntax");
1376 ovs_mutex_lock(&dummy_dev->mutex);
1377 netdev_dummy_queue_packet(dummy_dev, packet);
1378 ovs_mutex_unlock(&dummy_dev->mutex);
1381 unixctl_command_reply(conn, NULL);
1384 netdev_close(netdev);
1388 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1389 OVS_REQUIRES(dev->mutex)
1391 enum netdev_flags old_flags;
1394 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1396 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1401 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1402 const char *argv[], void *aux OVS_UNUSED)
1406 if (!strcasecmp(argv[argc - 1], "up")) {
1408 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1411 unixctl_command_reply_error(conn, "Invalid Admin State");
1416 struct netdev *netdev = netdev_from_name(argv[1]);
1417 if (netdev && is_dummy_class(netdev->netdev_class)) {
1418 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1420 ovs_mutex_lock(&dummy_dev->mutex);
1421 netdev_dummy_set_admin_state__(dummy_dev, up);
1422 ovs_mutex_unlock(&dummy_dev->mutex);
1424 netdev_close(netdev);
1426 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1427 netdev_close(netdev);
1431 struct netdev_dummy *netdev;
1433 ovs_mutex_lock(&dummy_list_mutex);
1434 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1435 ovs_mutex_lock(&netdev->mutex);
1436 netdev_dummy_set_admin_state__(netdev, up);
1437 ovs_mutex_unlock(&netdev->mutex);
1439 ovs_mutex_unlock(&dummy_list_mutex);
1441 unixctl_command_reply(conn, "OK");
1445 display_conn_state__(struct ds *s, const char *name,
1446 enum dummy_netdev_conn_state state)
1448 ds_put_format(s, "%s: ", name);
1451 case CONN_STATE_CONNECTED:
1452 ds_put_cstr(s, "connected\n");
1455 case CONN_STATE_NOT_CONNECTED:
1456 ds_put_cstr(s, "disconnected\n");
1459 case CONN_STATE_UNKNOWN:
1461 ds_put_cstr(s, "unknown\n");
1467 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1468 const char *argv[], void *aux OVS_UNUSED)
1470 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1476 const char *dev_name = argv[1];
1477 struct netdev *netdev = netdev_from_name(dev_name);
1479 if (netdev && is_dummy_class(netdev->netdev_class)) {
1480 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1482 ovs_mutex_lock(&dummy_dev->mutex);
1483 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1484 ovs_mutex_unlock(&dummy_dev->mutex);
1486 netdev_close(netdev);
1488 display_conn_state__(&s, dev_name, state);
1490 struct netdev_dummy *netdev;
1492 ovs_mutex_lock(&dummy_list_mutex);
1493 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1494 ovs_mutex_lock(&netdev->mutex);
1495 state = dummy_netdev_get_conn_state(&netdev->conn);
1496 ovs_mutex_unlock(&netdev->mutex);
1497 if (state != CONN_STATE_UNKNOWN) {
1498 display_conn_state__(&s, netdev->up.name, state);
1501 ovs_mutex_unlock(&dummy_list_mutex);
1504 unixctl_command_reply(conn, ds_cstr(&s));
1509 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1510 const char *argv[], void *aux OVS_UNUSED)
1512 struct netdev *netdev = netdev_from_name(argv[1]);
1514 if (netdev && is_dummy_class(netdev->netdev_class)) {
1515 struct in_addr ip, mask;
1518 error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
1520 netdev_dummy_set_in4(netdev, ip, mask);
1521 unixctl_command_reply(conn, "OK");
1523 unixctl_command_reply_error(conn, error);
1527 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1530 netdev_close(netdev);
1534 netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1535 const char *argv[], void *aux OVS_UNUSED)
1537 struct netdev *netdev = netdev_from_name(argv[1]);
1539 if (netdev && is_dummy_class(netdev->netdev_class)) {
1540 char ip6_s[IPV6_SCAN_LEN + 1];
1541 struct in6_addr ip6;
1543 if (ovs_scan(argv[2], IPV6_SCAN_FMT, ip6_s) &&
1544 inet_pton(AF_INET6, ip6_s, &ip6) == 1) {
1545 netdev_dummy_set_in6(netdev, &ip6);
1546 unixctl_command_reply(conn, "OK");
1548 unixctl_command_reply_error(conn, "Invalid parameters");
1550 netdev_close(netdev);
1552 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1555 netdev_close(netdev);
1560 netdev_dummy_override(const char *type)
1562 if (!netdev_unregister_provider(type)) {
1563 struct netdev_class *class;
1566 class = xmemdup(&dummy_class, sizeof dummy_class);
1567 class->type = xstrdup(type);
1568 error = netdev_register_provider(class);
1570 VLOG_ERR("%s: failed to register netdev provider (%s)",
1571 type, ovs_strerror(error));
1572 free(CONST_CAST(char *, class->type));
1579 netdev_dummy_register(enum dummy_level level)
1581 unixctl_command_register("netdev-dummy/receive", "name packet|flow...",
1582 2, INT_MAX, netdev_dummy_receive, NULL);
1583 unixctl_command_register("netdev-dummy/set-admin-state",
1584 "[netdev] up|down", 1, 2,
1585 netdev_dummy_set_admin_state, NULL);
1586 unixctl_command_register("netdev-dummy/conn-state",
1588 netdev_dummy_conn_state, NULL);
1589 unixctl_command_register("netdev-dummy/ip4addr",
1590 "[netdev] ipaddr/mask-prefix-len", 2, 2,
1591 netdev_dummy_ip4addr, NULL);
1592 unixctl_command_register("netdev-dummy/ip6addr",
1593 "[netdev] ip6addr", 2, 2,
1594 netdev_dummy_ip6addr, NULL);
1596 if (level == DUMMY_OVERRIDE_ALL) {
1601 netdev_enumerate_types(&types);
1602 SSET_FOR_EACH (type, &types) {
1603 if (strcmp(type, "patch")) {
1604 netdev_dummy_override(type);
1607 sset_destroy(&types);
1608 } else if (level == DUMMY_OVERRIDE_SYSTEM) {
1609 netdev_dummy_override("system");
1611 netdev_register_provider(&dummy_class);
1613 netdev_vport_tunnel_register();