2 * Copyright (c) 2010, 2011, 2012, 2013, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
23 #include "dp-packet.h"
24 #include "dpif-netdev.h"
25 #include "dynamic-string.h"
28 #include "netdev-provider.h"
29 #include "netdev-vport.h"
31 #include "ofp-print.h"
33 #include "ovs-atomic.h"
35 #include "pcap-file.h"
36 #include "poll-loop.h"
40 #include "unaligned.h"
43 #include "reconnect.h"
44 #include "openvswitch/vlog.h"
46 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
/* NOTE(review): this chunk appears to be an extraction of OVS netdev-dummy.c
 * with many lines missing (closing braces, some fields such as the txq list
 * in dummy_packet_stream and the union wrapper in dummy_packet_conn) and
 * original line numbers fused into the text.  Comments below describe only
 * what is visible. */
/* One unidirectional byte stream carrying length-prefixed packets. */
50 struct dummy_packet_stream {
51 struct stream *stream;
52 struct dp_packet rxbuf;
/* How a dummy netdev's packet connection is configured. */
56 enum dummy_packet_conn_type {
57 NONE, /* No connection is configured. */
58 PASSIVE, /* Listener. */
59 ACTIVE /* Connect to listener. */
/* Reported connection state for the "conn-state" unixctl command. */
62 enum dummy_netdev_conn_state {
63 CONN_STATE_CONNECTED, /* Listener connected. */
64 CONN_STATE_NOT_CONNECTED, /* Listener not connected. */
65 CONN_STATE_UNKNOWN, /* No relevant information. */
/* Passive side: one pstream listener plus its accepted streams. */
68 struct dummy_packet_pconn {
69 struct pstream *pstream;
70 struct dummy_packet_stream *streams;
/* Active side: one stream managed by a reconnect FSM. */
74 struct dummy_packet_rconn {
75 struct dummy_packet_stream *rstream;
76 struct reconnect *reconnect;
/* Tagged connection: 'type' selects pconn or rconn (union 'u' elsewhere —
 * TODO confirm; later code accesses conn->u.pconn / conn->u.rconn). */
79 struct dummy_packet_conn {
80 enum dummy_packet_conn_type type;
82 struct dummy_packet_pconn pconn;
83 struct dummy_packet_rconn rconn;
/* List node that owns one queued dp_packet. */
87 struct pkt_list_node {
88 struct dp_packet *pkt;
89 struct ovs_list list_node;
92 /* Protects 'dummy_list'. */
93 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
95 /* Contains all 'struct dummy_dev's. */
96 static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
97 = OVS_LIST_INITIALIZER(&dummy_list);
/* NOTE(review): the fields below look like the interior of
 * 'struct netdev_dummy' (its opening line, the embedded 'struct netdev up'
 * member, and 'mtu' are not visible in this extraction — confirm against the
 * full file; later code reads dev->mtu and dev->up). */
103 struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);
105 /* Protects all members below. */
106 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
108 uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED;
110 struct netdev_stats stats OVS_GUARDED;
111 enum netdev_flags flags OVS_GUARDED;
112 int ifindex OVS_GUARDED;
114 struct dummy_packet_conn conn OVS_GUARDED;
116 FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
118 struct in_addr address, netmask;
119 struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
122 /* Max 'recv_queue_len' in struct netdev_dummy. */
123 #define NETDEV_DUMMY_MAX_QUEUE 100
/* One receive queue attached to a dummy netdev. */
125 struct netdev_rxq_dummy {
126 struct netdev_rxq up;
127 struct ovs_list node; /* In netdev_dummy's "rxes" list. */
128 struct ovs_list recv_queue;
129 int recv_queue_len; /* list_size(&recv_queue). */
130 struct seq *seq; /* Reports newly queued packets. */
/* Forward declarations for functions used before their definitions. */
133 static unixctl_cb_func netdev_dummy_set_admin_state;
134 static int netdev_dummy_construct(struct netdev *);
135 static void netdev_dummy_queue_packet(struct netdev_dummy *, struct dp_packet *);
137 static void dummy_packet_stream_close(struct dummy_packet_stream *);
139 static void pkt_list_delete(struct ovs_list *);
/* Returns true if 'class' is the dummy class, identified by its construct
 * callback (avoids string comparison on the type name). */
142 is_dummy_class(const struct netdev_class *class)
144 return class->construct == netdev_dummy_construct;
/* Downcasts a generic netdev to the dummy implementation; asserts on
 * class mismatch. */
147 static struct netdev_dummy *
148 netdev_dummy_cast(const struct netdev *netdev)
150 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
151 return CONTAINER_OF(netdev, struct netdev_dummy, up);
/* Same downcast for a receive queue. */
154 static struct netdev_rxq_dummy *
155 netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
157 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
158 return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
/* Initializes 's' around 'stream'; a NULL stream gets a zero-size rx buffer.
 * NOTE(review): the txq list_init and the 's->stream = stream' assignment are
 * not visible in this extraction — confirm against the full file. */
162 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
164 int rxbuf_size = stream ? 2048 : 0;
166 dp_packet_init(&s->rxbuf, rxbuf_size);
/* Heap-allocates and initializes a stream wrapper (caller frees). */
170 static struct dummy_packet_stream *
171 dummy_packet_stream_create(struct stream *stream)
173 struct dummy_packet_stream *s;
175 s = xzalloc(sizeof *s);
176 dummy_packet_stream_init(s, stream);
/* Registers poll-loop wakeups: send readiness only when txq is non-empty,
 * receive readiness always. */
182 dummy_packet_stream_wait(struct dummy_packet_stream *s)
184 stream_run_wait(s->stream);
185 if (!list_is_empty(&s->txq)) {
186 stream_send_wait(s->stream);
188 stream_recv_wait(s->stream);
/* Queues a copy of 'buffer' for transmission, prefixed with a 2-byte
 * big-endian length, unless the txq is already at NETDEV_DUMMY_MAX_QUEUE. */
192 dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
194 if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
196 struct pkt_list_node *node;
198 b = dp_packet_clone_data_with_headroom(buffer, size, 2);
199 put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));
201 node = xmalloc(sizeof *node);
203 list_push_back(&s->txq, &node->list_node);
/* Pumps one stream: flushes the front of the txq, then reads the 2-byte
 * length header followed by the frame body into rxbuf; a complete frame is
 * cloned and queued onto 'dev'.  NOTE(review): several lines (error
 * assignments, the final return) are missing from this extraction, so the
 * exact error-propagation contract cannot be confirmed here. */
208 dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
213 stream_run(s->stream);
215 if (!list_is_empty(&s->txq)) {
216 struct pkt_list_node *txbuf_node;
217 struct dp_packet *txbuf;
220 ASSIGN_CONTAINER(txbuf_node, list_front(&s->txq), list_node);
221 txbuf = txbuf_node->pkt;
222 retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
/* Partial sends pull only the bytes written; the buffer is freed once empty. */
225 dp_packet_pull(txbuf, retval);
226 if (!dp_packet_size(txbuf)) {
227 list_remove(&txbuf_node->list_node);
229 dp_packet_delete(txbuf);
231 } else if (retval != -EAGAIN) {
/* Receive side: first complete the 2-byte length header... */
237 if (dp_packet_size(&s->rxbuf) < 2) {
238 n = 2 - dp_packet_size(&s->rxbuf);
/* ...then read the body; frames shorter than an Ethernet header are a
 * protocol error. */
242 frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
243 if (frame_len < ETH_HEADER_LEN) {
247 n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
254 dp_packet_prealloc_tailroom(&s->rxbuf, n);
255 retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);
258 dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
259 if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
/* Strip the length prefix before handing the frame to the netdev. */
260 dp_packet_pull(&s->rxbuf, 2);
261 netdev_dummy_queue_packet(dev,
262 dp_packet_clone(&s->rxbuf));
263 dp_packet_clear(&s->rxbuf);
265 } else if (retval != -EAGAIN) {
266 error = (retval < 0 ? -retval
267 : dp_packet_size(&s->rxbuf) ? EPROTO
/* Releases the stream and all buffered rx/tx state. */
276 dummy_packet_stream_close(struct dummy_packet_stream *s)
278 stream_close(s->stream);
279 dp_packet_uninit(&s->rxbuf);
280 pkt_list_delete(&s->txq);
/* Zeroes the connection; with NONE == 0 this leaves it unconfigured —
 * TODO confirm NONE is the first enumerator (it is, per the enum above). */
284 dummy_packet_conn_init(struct dummy_packet_conn *conn)
286 memset(conn, 0, sizeof *conn);
/* Reports the configured "pstream" or "stream" key back into 'args'
 * (switch cases for PASSIVE/ACTIVE; case labels missing from this
 * extraction). */
291 dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
294 switch (conn->type) {
296 smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
300 smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
/* Tears down whichever side is active and resets the struct to NONE. */
310 dummy_packet_conn_close(struct dummy_packet_conn *conn)
313 struct dummy_packet_pconn *pconn = &conn->u.pconn;
314 struct dummy_packet_rconn *rconn = &conn->u.rconn;
316 switch (conn->type) {
/* PASSIVE: close the listener and every accepted stream. */
318 pstream_close(pconn->pstream);
319 for (i = 0; i < pconn->n_streams; i++) {
320 dummy_packet_stream_close(&pconn->streams[i]);
322 free(pconn->streams);
323 pconn->pstream = NULL;
324 pconn->streams = NULL;
/* ACTIVE: close the single stream and destroy the reconnect FSM. */
328 dummy_packet_stream_close(rconn->rstream);
329 free(rconn->rstream);
330 rconn->rstream = NULL;
331 reconnect_destroy(rconn->reconnect);
332 rconn->reconnect = NULL;
341 memset(conn, 0, sizeof *conn);
/* Applies "pstream"/"stream" smap keys: at most one may be set; an unchanged
 * name is a no-op, otherwise the old connection is closed and a new passive
 * listener or active reconnect-managed stream is opened. */
345 dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
346 const struct smap *args)
348 const char *pstream = smap_get(args, "pstream");
349 const char *stream = smap_get(args, "stream");
351 if (pstream && stream) {
352 VLOG_WARN("Open failed: both %s and %s are configured",
/* Short-circuit when the requested name matches the current config. */
357 switch (conn->type) {
360 !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
363 dummy_packet_conn_close(conn);
367 !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
370 dummy_packet_conn_close(conn);
/* Passive side: open a listener; on failure the type stays NONE. */
380 error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
382 VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
384 conn->type = PASSIVE;
/* Active side: configure an aggressive reconnect FSM (100 ms min backoff,
 * probing disabled) and attempt the initial connect. */
390 struct stream *active_stream;
391 struct reconnect *reconnect;;
393 reconnect = reconnect_create(time_msec());
394 reconnect_set_name(reconnect, stream);
395 reconnect_set_passive(reconnect, false, time_msec());
396 reconnect_enable(reconnect, time_msec());
397 reconnect_set_backoff(reconnect, 100, INT_MAX);
398 reconnect_set_probe_interval(reconnect, 0);
399 conn->u.rconn.reconnect = reconnect;
402 error = stream_open(stream, &active_stream, DSCP_DEFAULT);
403 conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);
/* Feed the open result into the FSM (cases for 0 / EAGAIN / error;
 * the case labels are missing from this extraction). */
407 reconnect_connected(reconnect, time_msec());
411 reconnect_connecting(reconnect, time_msec());
415 reconnect_connect_failed(reconnect, time_msec(), error);
416 stream_close(active_stream);
417 conn->u.rconn.rstream->stream = NULL;
/* Passive side per-iteration work: accept any pending connection, then pump
 * every accepted stream, dropping streams that report an error. */
424 dummy_pconn_run(struct netdev_dummy *dev)
425 OVS_REQUIRES(dev->mutex)
427 struct stream *new_stream;
428 struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
432 error = pstream_accept(pconn->pstream, &new_stream);
434 struct dummy_packet_stream *s;
/* Grow the streams array by one for the newly accepted connection. */
436 pconn->streams = xrealloc(pconn->streams,
437 ((pconn->n_streams + 1)
439 s = &pconn->streams[pconn->n_streams];
440 dummy_packet_stream_init(s, new_stream);
441 } else if (error != EAGAIN) {
/* A real accept error kills the whole listener. */
442 VLOG_WARN("%s: accept failed (%s)",
443 pstream_get_name(pconn->pstream), ovs_strerror(error));
444 pstream_close(pconn->pstream);
445 pconn->pstream = NULL;
446 dev->conn.type = NONE;
449 for (i = 0; i < pconn->n_streams; i++) {
450 struct dummy_packet_stream *s = &pconn->streams[i];
452 error = dummy_packet_stream_run(dev, s);
454 VLOG_DBG("%s: closing connection (%s)",
455 stream_get_name(s->stream),
456 ovs_retval_to_string(error));
457 dummy_packet_stream_close(s);
/* Swap-remove: last stream fills the freed slot (order not preserved).
 * NOTE(review): the matching i-- to re-examine the swapped-in entry is not
 * visible in this extraction — confirm against the full file. */
458 pconn->streams[i] = pconn->streams[--pconn->n_streams];
/* Active side per-iteration work: drive the reconnect FSM and pump the
 * stream while connected. */
464 dummy_rconn_run(struct netdev_dummy *dev)
465 OVS_REQUIRES(dev->mutex)
467 struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;
469 switch (reconnect_run(rconn->reconnect, time_msec())) {
470 case RECONNECT_CONNECT:
/* Continue an in-progress connect, or start a fresh one. */
474 if (rconn->rstream->stream) {
475 error = stream_connect(rconn->rstream->stream);
477 error = stream_open(reconnect_get_name(rconn->reconnect),
478 &rconn->rstream->stream, DSCP_DEFAULT);
483 reconnect_connected(rconn->reconnect, time_msec());
487 reconnect_connecting(rconn->reconnect, time_msec());
491 reconnect_connect_failed(rconn->reconnect, time_msec(), error);
492 stream_close(rconn->rstream->stream);
493 rconn->rstream->stream = NULL;
499 case RECONNECT_DISCONNECT:
500 case RECONNECT_PROBE:
/* Pump data only while the FSM says we are connected; stream errors feed
 * back into the FSM as a disconnect. */
505 if (reconnect_is_connected(rconn->reconnect)) {
508 err = dummy_packet_stream_run(dev, rconn->rstream);
511 reconnect_disconnected(rconn->reconnect, time_msec(), err);
512 stream_close(rconn->rstream->stream);
513 rconn->rstream->stream = NULL;
/* Dispatches periodic work to whichever connection type is active. */
519 dummy_packet_conn_run(struct netdev_dummy *dev)
520 OVS_REQUIRES(dev->mutex)
522 switch (dev->conn.type) {
524 dummy_pconn_run(dev);
528 dummy_rconn_run(dev);
/* Registers poll-loop wakeups for the active connection type. */
538 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
541 switch (conn->type) {
543 pstream_wait(conn->u.pconn.pstream);
544 for (i = 0; i < conn->u.pconn.n_streams; i++) {
545 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
546 dummy_packet_stream_wait(s);
550 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
551 dummy_packet_stream_wait(conn->u.rconn.rstream);
/* Broadcasts 'buffer' to every peer (all accepted streams for PASSIVE, the
 * single stream for ACTIVE if connected). */
562 dummy_packet_conn_send(struct dummy_packet_conn *conn,
563 const void *buffer, size_t size)
567 switch (conn->type) {
569 for (i = 0; i < conn->u.pconn.n_streams; i++) {
570 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
572 dummy_packet_stream_send(s, buffer, size);
573 pstream_wait(conn->u.pconn.pstream);
578 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
579 dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
580 dummy_packet_stream_wait(conn->u.rconn.rstream);
/* Maps the connection into the three-valued state reported by the
 * "conn-state" command; only ACTIVE connections have a known state. */
590 static enum dummy_netdev_conn_state
591 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
593 enum dummy_netdev_conn_state state;
595 if (conn->type == ACTIVE) {
596 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
597 state = CONN_STATE_CONNECTED;
599 state = CONN_STATE_NOT_CONNECTED;
602 state = CONN_STATE_UNKNOWN;
/* Global run hook: pumps every dummy device under its own mutex. */
609 netdev_dummy_run(void)
611 struct netdev_dummy *dev;
613 ovs_mutex_lock(&dummy_list_mutex);
614 LIST_FOR_EACH (dev, list_node, &dummy_list) {
615 ovs_mutex_lock(&dev->mutex);
616 dummy_packet_conn_run(dev);
617 ovs_mutex_unlock(&dev->mutex);
619 ovs_mutex_unlock(&dummy_list_mutex);
/* Global wait hook, mirroring netdev_dummy_run(). */
623 netdev_dummy_wait(void)
625 struct netdev_dummy *dev;
627 ovs_mutex_lock(&dummy_list_mutex);
628 LIST_FOR_EACH (dev, list_node, &dummy_list) {
629 ovs_mutex_lock(&dev->mutex);
630 dummy_packet_conn_wait(&dev->conn);
631 ovs_mutex_unlock(&dev->mutex);
633 ovs_mutex_unlock(&dummy_list_mutex);
/* netdev_class alloc hook: zeroed allocation, returns &netdev->up
 * (return statement not visible in this extraction). */
636 static struct netdev *
637 netdev_dummy_alloc(void)
639 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
/* netdev_class construct hook: assigns a unique aa:55:xx:xx:xx:xx MAC from a
 * shared atomic counter and links the device into dummy_list. */
644 netdev_dummy_construct(struct netdev *netdev_)
646 static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
647 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
650 n = atomic_count_inc(&next_n);
652 ovs_mutex_init(&netdev->mutex);
653 ovs_mutex_lock(&netdev->mutex);
654 netdev->hwaddr[0] = 0xaa;
655 netdev->hwaddr[1] = 0x55;
656 netdev->hwaddr[2] = n >> 24;
657 netdev->hwaddr[3] = n >> 16;
658 netdev->hwaddr[4] = n >> 8;
659 netdev->hwaddr[5] = n;
/* Negative errno sentinel: get_ifindex reports EOPNOTSUPP until configured. */
662 netdev->ifindex = -EOPNOTSUPP;
664 dummy_packet_conn_init(&netdev->conn);
666 list_init(&netdev->rxes);
667 ovs_mutex_unlock(&netdev->mutex);
669 ovs_mutex_lock(&dummy_list_mutex);
670 list_push_back(&dummy_list, &netdev->list_node);
671 ovs_mutex_unlock(&dummy_list_mutex);
/* Destruct hook: unlink from the global list first, then tear down the
 * connection and the per-device mutex. */
677 netdev_dummy_destruct(struct netdev *netdev_)
679 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
681 ovs_mutex_lock(&dummy_list_mutex);
682 list_remove(&netdev->list_node);
683 ovs_mutex_unlock(&dummy_list_mutex);
685 ovs_mutex_lock(&netdev->mutex);
686 dummy_packet_conn_close(&netdev->conn);
687 netdev->conn.type = NONE;
689 ovs_mutex_unlock(&netdev->mutex);
690 ovs_mutex_destroy(&netdev->mutex);
/* Dealloc hook: frees the structure (free() call not visible here). */
694 netdev_dummy_dealloc(struct netdev *netdev_)
696 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
/* get_config hook: reports ifindex (when set) and connection config. */
702 netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
704 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
706 ovs_mutex_lock(&netdev->mutex);
708 if (netdev->ifindex >= 0) {
709 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
712 dummy_packet_conn_get_config(&netdev->conn, args);
714 ovs_mutex_unlock(&netdev->mutex);
/* Reads the device's IPv4 address/netmask under the device mutex. */
719 netdev_dummy_get_in4(const struct netdev *netdev_,
720 struct in_addr *address, struct in_addr *netmask)
722 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
724 ovs_mutex_lock(&netdev->mutex);
725 *address = netdev->address;
726 *netmask = netdev->netmask;
727 ovs_mutex_unlock(&netdev->mutex);
/* Stores a new IPv4 address/netmask under the device mutex. */
732 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
733 struct in_addr netmask)
735 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
737 ovs_mutex_lock(&netdev->mutex);
738 netdev->address = address;
739 netdev->netmask = netmask;
740 ovs_mutex_unlock(&netdev->mutex);
/* set_config hook: applies ifindex, connection config, and pcap capture
 * files ("pcap" shares one file for both directions; otherwise "rxq_pcap"
 * and "tx_pcap" open separate files).  Old files are closed first, taking
 * care not to fclose a shared FILE twice. */
746 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
748 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
751 ovs_mutex_lock(&netdev->mutex);
752 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
754 dummy_packet_conn_set_config(&netdev->conn, args);
756 if (netdev->rxq_pcap) {
757 fclose(netdev->rxq_pcap);
759 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
760 fclose(netdev->tx_pcap);
762 netdev->rxq_pcap = netdev->tx_pcap = NULL;
763 pcap = smap_get(args, "pcap");
765 netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
767 const char *rxq_pcap = smap_get(args, "rxq_pcap");
768 const char *tx_pcap = smap_get(args, "tx_pcap");
771 netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
774 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
778 ovs_mutex_unlock(&netdev->mutex);
/* rxq alloc hook: zeroed allocation (return of &rx->up not visible here). */
783 static struct netdev_rxq *
784 netdev_dummy_rxq_alloc(void)
786 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
/* rxq construct hook: links the queue onto the parent device's rxes list
 * and creates the seq used to wake receivers. */
791 netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
793 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
794 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
796 ovs_mutex_lock(&netdev->mutex);
797 list_push_back(&netdev->rxes, &rx->node);
798 list_init(&rx->recv_queue);
799 rx->recv_queue_len = 0;
800 rx->seq = seq_create();
801 ovs_mutex_unlock(&netdev->mutex);
/* rxq destruct hook: unlinks the queue and frees its pending packets. */
807 netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
809 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
810 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
812 ovs_mutex_lock(&netdev->mutex);
813 list_remove(&rx->node);
814 pkt_list_delete(&rx->recv_queue);
815 ovs_mutex_unlock(&netdev->mutex);
816 seq_destroy(rx->seq);
/* rxq dealloc hook: frees the structure (free() call not visible here). */
820 netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
822 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
/* rxq recv hook: pops one queued packet, updates rx stats, pads the frame
 * to minimum length and zeroes its RSS hash before returning it in 'arr'.
 * NOTE(review): the EAGAIN path for an empty queue is not visible in this
 * extraction. */
828 netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
831 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
832 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
833 struct dp_packet *packet;
835 ovs_mutex_lock(&netdev->mutex);
836 if (!list_is_empty(&rx->recv_queue)) {
837 struct pkt_list_node *pkt_node;
839 ASSIGN_CONTAINER(pkt_node, list_pop_front(&rx->recv_queue), list_node);
840 packet = pkt_node->pkt;
842 rx->recv_queue_len--;
846 ovs_mutex_unlock(&netdev->mutex);
851 ovs_mutex_lock(&netdev->mutex);
852 netdev->stats.rx_packets++;
853 netdev->stats.rx_bytes += dp_packet_size(packet);
854 ovs_mutex_unlock(&netdev->mutex);
856 dp_packet_pad(packet);
857 dp_packet_set_rss_hash(packet, 0);
/* rxq wait hook: wakes immediately if packets are queued, else waits on the
 * seq read before the check (seq read at function entry avoids a lost
 * wakeup). */
865 netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
867 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
868 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
869 uint64_t seq = seq_read(rx->seq);
871 ovs_mutex_lock(&netdev->mutex);
872 if (!list_is_empty(&rx->recv_queue)) {
873 poll_immediate_wake();
875 seq_wait(rx->seq, seq);
877 ovs_mutex_unlock(&netdev->mutex);
/* rxq drain hook: discards all queued packets. */
881 netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
883 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
884 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
886 ovs_mutex_lock(&netdev->mutex);
887 pkt_list_delete(&rx->recv_queue);
888 rx->recv_queue_len = 0;
889 ovs_mutex_unlock(&netdev->mutex);
/* send hook: validates each frame (minimum Ethernet header, MTU + optional
 * VLAN tag ceiling), updates tx stats, forwards the bytes to any configured
 * connection, and appends to the tx pcap file if open.  Packets are freed at
 * the end when 'may_steal' permits (the guard is not visible in this
 * extraction — confirm against the full file). */
897 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
898 struct dp_packet **pkts, int cnt, bool may_steal)
900 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
904 for (i = 0; i < cnt; i++) {
905 const void *buffer = dp_packet_data(pkts[i]);
906 size_t size = dp_packet_size(pkts[i]);
908 if (size < ETH_HEADER_LEN) {
912 const struct eth_header *eth = buffer;
915 ovs_mutex_lock(&dev->mutex);
916 max_size = dev->mtu + ETH_HEADER_LEN;
917 ovs_mutex_unlock(&dev->mutex);
/* Allow one VLAN tag's worth of extra bytes over the MTU. */
919 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
920 max_size += VLAN_HEADER_LEN;
922 if (size > max_size) {
928 ovs_mutex_lock(&dev->mutex);
929 dev->stats.tx_packets++;
930 dev->stats.tx_bytes += size;
932 dummy_packet_conn_send(&dev->conn, buffer, size);
/* Wrap the raw bytes in a const dp_packet for the pcap writer. */
935 struct dp_packet packet;
937 dp_packet_use_const(&packet, buffer, size);
938 ovs_pcap_write(dev->tx_pcap, &packet);
939 fflush(dev->tx_pcap);
942 ovs_mutex_unlock(&dev->mutex);
946 for (i = 0; i < cnt; i++) {
947 dp_packet_delete(pkts[i]);
/* Sets the MAC address; bumps the change seq only on an actual change. */
955 netdev_dummy_set_etheraddr(struct netdev *netdev,
956 const uint8_t mac[ETH_ADDR_LEN])
958 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
960 ovs_mutex_lock(&dev->mutex);
961 if (!eth_addr_equals(dev->hwaddr, mac)) {
962 memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
963 netdev_change_seq_changed(netdev);
965 ovs_mutex_unlock(&dev->mutex);
/* Copies the MAC address out under the device mutex. */
971 netdev_dummy_get_etheraddr(const struct netdev *netdev,
972 uint8_t mac[ETH_ADDR_LEN])
974 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
976 ovs_mutex_lock(&dev->mutex);
977 memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
978 ovs_mutex_unlock(&dev->mutex);
/* Reads the MTU (the *mtup assignment is not visible in this extraction). */
984 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
986 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
988 ovs_mutex_lock(&dev->mutex);
990 ovs_mutex_unlock(&dev->mutex);
/* Sets the MTU (the dev->mtu assignment is not visible in this extraction). */
996 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
998 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1000 ovs_mutex_lock(&dev->mutex);
1002 ovs_mutex_unlock(&dev->mutex);
/* Copies the stats struct out by value under the device mutex. */
1008 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1010 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1012 ovs_mutex_lock(&dev->mutex);
1013 *stats = dev->stats;
1014 ovs_mutex_unlock(&dev->mutex);
/* Returns the ifindex (negative -errno sentinel when unset). */
1020 netdev_dummy_get_ifindex(const struct netdev *netdev)
1022 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1025 ovs_mutex_lock(&dev->mutex);
1026 ifindex = dev->ifindex;
1027 ovs_mutex_unlock(&dev->mutex);
/* Core flag update, caller holds the mutex: only NETDEV_UP and
 * NETDEV_PROMISC are supported; any other bit is rejected (error return not
 * visible in this extraction, presumably EINVAL — TODO confirm).  Bumps the
 * change seq when the flags actually change. */
1033 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1034 enum netdev_flags off, enum netdev_flags on,
1035 enum netdev_flags *old_flagsp)
1036 OVS_REQUIRES(netdev->mutex)
1038 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1042 *old_flagsp = netdev->flags;
1043 netdev->flags |= on;
1044 netdev->flags &= ~off;
1045 if (*old_flagsp != netdev->flags) {
1046 netdev_change_seq_changed(&netdev->up);
/* Public update_flags hook: locks the mutex around the core helper. */
1053 netdev_dummy_update_flags(struct netdev *netdev_,
1054 enum netdev_flags off, enum netdev_flags on,
1055 enum netdev_flags *old_flagsp)
1057 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1060 ovs_mutex_lock(&netdev->mutex);
1061 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1062 ovs_mutex_unlock(&netdev->mutex);
1067 /* Helper functions. */
/* netdev_class vtable for the dummy provider.  This is positional
 * initialization, so the /* ... */ names must stay aligned with the struct
 * declared in netdev-provider.h; several entries (type string, run/wait,
 * etc.) are not visible in this extraction. */
1069 static const struct netdev_class dummy_class = {
1076 netdev_dummy_construct,
1077 netdev_dummy_destruct,
1078 netdev_dummy_dealloc,
1079 netdev_dummy_get_config,
1080 netdev_dummy_set_config,
1081 NULL, /* get_tunnel_config */
1082 NULL, /* build header */
1083 NULL, /* push header */
1084 NULL, /* pop header */
1085 NULL, /* get_numa_id */
1086 NULL, /* set_multiq */
1088 netdev_dummy_send, /* send */
1089 NULL, /* send_wait */
1091 netdev_dummy_set_etheraddr,
1092 netdev_dummy_get_etheraddr,
1093 netdev_dummy_get_mtu,
1094 netdev_dummy_set_mtu,
1095 netdev_dummy_get_ifindex,
1096 NULL, /* get_carrier */
1097 NULL, /* get_carrier_resets */
1098 NULL, /* get_miimon */
1099 netdev_dummy_get_stats,
1101 NULL, /* get_features */
1102 NULL, /* set_advertisements */
1104 NULL, /* set_policing */
1105 NULL, /* get_qos_types */
1106 NULL, /* get_qos_capabilities */
1109 NULL, /* get_queue */
1110 NULL, /* set_queue */
1111 NULL, /* delete_queue */
1112 NULL, /* get_queue_stats */
1113 NULL, /* queue_dump_start */
1114 NULL, /* queue_dump_next */
1115 NULL, /* queue_dump_done */
1116 NULL, /* dump_queue_stats */
1118 netdev_dummy_get_in4, /* get_in4 */
1121 NULL, /* add_router */
1122 NULL, /* get_next_hop */
1123 NULL, /* get_status */
1124 NULL, /* arp_lookup */
1126 netdev_dummy_update_flags,
1128 netdev_dummy_rxq_alloc,
1129 netdev_dummy_rxq_construct,
1130 netdev_dummy_rxq_destruct,
1131 netdev_dummy_rxq_dealloc,
1132 netdev_dummy_rxq_recv,
1133 netdev_dummy_rxq_wait,
1134 netdev_dummy_rxq_drain,
/* Frees every pkt_list_node on 'l' along with its owned dp_packet. */
1138 pkt_list_delete(struct ovs_list *l)
1140 struct pkt_list_node *pkt;
1142 LIST_FOR_EACH_POP(pkt, list_node, l) {
1143 dp_packet_delete(pkt->pkt);
/* Parses 's' either as a hex-encoded Ethernet frame or, failing that, as an
 * ODP flow key from which a packet is synthesized with flow_compose().
 * Returns NULL on parse error (those return statements are not visible in
 * this extraction). */
1148 static struct dp_packet *
1149 eth_from_packet_or_flow(const char *s)
1151 enum odp_key_fitness fitness;
1152 struct dp_packet *packet;
1153 struct ofpbuf odp_key;
1157 if (!eth_from_hex(s, &packet)) {
1161 /* Convert string to datapath key.
1163 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1164 * the code for that currently calls exit() on parse error. We have to
1165 * settle for parsing a datapath key for now.
1167 ofpbuf_init(&odp_key, 0);
1168 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1170 ofpbuf_uninit(&odp_key);
1174 /* Convert odp_key to flow. */
1175 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1176 if (fitness == ODP_FIT_ERROR) {
1177 ofpbuf_uninit(&odp_key);
1181 packet = dp_packet_new(0);
1182 flow_compose(packet, &flow);
1184 ofpbuf_uninit(&odp_key);
/* Appends 'packet' (ownership transferred) to one rx queue and wakes its
 * waiter via the seq. */
1189 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1191 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1193 pkt_node->pkt = packet;
1194 list_push_back(&rx->recv_queue, &pkt_node->list_node);
1195 rx->recv_queue_len++;
1196 seq_change(rx->seq);
/* Fans 'packet' out to the device's rx queues (clones for all but the last
 * recipient; 'prev' tracks the deferred recipient) and mirrors it into the
 * rx pcap file.  Takes ownership: the packet is deleted if no queue has
 * room. */
1200 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet)
1201 OVS_REQUIRES(dummy->mutex)
1203 struct netdev_rxq_dummy *rx, *prev;
1205 if (dummy->rxq_pcap) {
1206 ovs_pcap_write(dummy->rxq_pcap, packet);
1207 fflush(dummy->rxq_pcap);
1210 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1211 if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1213 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1219 netdev_dummy_queue_packet__(prev, packet);
1221 dp_packet_delete(packet);
/* unixctl "netdev-dummy/receive" handler: injects each argv[2..] packet
 * (hex frame or flow spec) into the named dummy device's rx queues. */
1226 netdev_dummy_receive(struct unixctl_conn *conn,
1227 int argc, const char *argv[], void *aux OVS_UNUSED)
1229 struct netdev_dummy *dummy_dev;
1230 struct netdev *netdev;
1233 netdev = netdev_from_name(argv[1]);
1234 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1235 unixctl_command_reply_error(conn, "no such dummy netdev");
1238 dummy_dev = netdev_dummy_cast(netdev);
1240 for (i = 2; i < argc; i++) {
1241 struct dp_packet *packet;
1243 packet = eth_from_packet_or_flow(argv[i]);
1245 unixctl_command_reply_error(conn, "bad packet syntax");
1249 ovs_mutex_lock(&dummy_dev->mutex);
1250 netdev_dummy_queue_packet(dummy_dev, packet);
1251 ovs_mutex_unlock(&dummy_dev->mutex);
1254 unixctl_command_reply(conn, NULL);
/* 'netdev' is a reference from netdev_from_name(); release it on all paths. */
1257 netdev_close(netdev);
/* Brings the device administratively up or down by toggling NETDEV_UP. */
1261 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1262 OVS_REQUIRES(dev->mutex)
1264 enum netdev_flags old_flags;
1267 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1269 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
/* unixctl "netdev-dummy/set-admin-state" handler: "up"/"down" for one named
 * device (argc == 3 case — TODO confirm, the argc test is not visible) or
 * for every dummy device when no name is given. */
1274 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1275 const char *argv[], void *aux OVS_UNUSED)
1279 if (!strcasecmp(argv[argc - 1], "up")) {
1281 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1284 unixctl_command_reply_error(conn, "Invalid Admin State");
1289 struct netdev *netdev = netdev_from_name(argv[1]);
1290 if (netdev && is_dummy_class(netdev->netdev_class)) {
1291 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1293 ovs_mutex_lock(&dummy_dev->mutex);
1294 netdev_dummy_set_admin_state__(dummy_dev, up);
1295 ovs_mutex_unlock(&dummy_dev->mutex);
1297 netdev_close(netdev);
1299 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1300 netdev_close(netdev);
/* No name given: apply to every registered dummy device. */
1304 struct netdev_dummy *netdev;
1306 ovs_mutex_lock(&dummy_list_mutex);
1307 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1308 ovs_mutex_lock(&netdev->mutex);
1309 netdev_dummy_set_admin_state__(netdev, up);
1310 ovs_mutex_unlock(&netdev->mutex);
1312 ovs_mutex_unlock(&dummy_list_mutex);
1314 unixctl_command_reply(conn, "OK");
/* Appends "name: connected|disconnected|unknown\n" to 's'. */
1318 display_conn_state__(struct ds *s, const char *name,
1319 enum dummy_netdev_conn_state state)
1321 ds_put_format(s, "%s: ", name);
1324 case CONN_STATE_CONNECTED:
1325 ds_put_cstr(s, "connected\n");
1328 case CONN_STATE_NOT_CONNECTED:
1329 ds_put_cstr(s, "disconnected\n");
1332 case CONN_STATE_UNKNOWN:
1334 ds_put_cstr(s, "unknown\n");
/* unixctl "netdev-dummy/conn-state" handler: reports connection state for
 * one named device, or for every dummy device with a known state. */
1340 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1341 const char *argv[], void *aux OVS_UNUSED)
1343 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1349 const char *dev_name = argv[1];
1350 struct netdev *netdev = netdev_from_name(dev_name);
1352 if (netdev && is_dummy_class(netdev->netdev_class)) {
1353 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1355 ovs_mutex_lock(&dummy_dev->mutex);
1356 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1357 ovs_mutex_unlock(&dummy_dev->mutex);
1359 netdev_close(netdev);
1361 display_conn_state__(&s, dev_name, state);
/* No name given: report every device with a non-UNKNOWN state. */
1363 struct netdev_dummy *netdev;
1365 ovs_mutex_lock(&dummy_list_mutex);
1366 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1367 ovs_mutex_lock(&netdev->mutex);
1368 state = dummy_netdev_get_conn_state(&netdev->conn);
1369 ovs_mutex_unlock(&netdev->mutex);
1370 if (state != CONN_STATE_UNKNOWN) {
1371 display_conn_state__(&s, netdev->up.name, state);
1374 ovs_mutex_unlock(&dummy_list_mutex);
1377 unixctl_command_reply(conn, ds_cstr(&s));
/* unixctl "netdev-dummy/ip4addr" handler: parses "a.b.c.d/plen" from
 * argv[2] and stores the address/mask on the named dummy device. */
1382 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1383 const char *argv[], void *aux OVS_UNUSED)
1385 struct netdev *netdev = netdev_from_name(argv[1]);
1387 if (netdev && is_dummy_class(netdev->netdev_class)) {
1391 if (ovs_scan(argv[2], IP_SCAN_FMT"/%"SCNi16,
1392 IP_SCAN_ARGS(&ip.s_addr), &plen)) {
1393 struct in_addr mask;
1395 mask.s_addr = be32_prefix_mask(plen);
1396 netdev_dummy_set_in4(netdev, ip, mask);
1397 unixctl_command_reply(conn, "OK");
1399 unixctl_command_reply(conn, "Invalid parameters");
1402 netdev_close(netdev);
1404 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1405 netdev_close(netdev);
/* Registers the dummy provider and its unixctl commands.  With 'override'
 * (the branch structure around L676-L687 is partially missing from this
 * extraction), every existing netdev type except "patch" is unregistered and
 * replaced by a dummy clone carrying the same type name; otherwise the plain
 * dummy class is registered alongside the vport tunnel classes. */
1412 netdev_dummy_register(bool override)
1414 unixctl_command_register("netdev-dummy/receive", "name packet|flow...",
1415 2, INT_MAX, netdev_dummy_receive, NULL);
1416 unixctl_command_register("netdev-dummy/set-admin-state",
1417 "[netdev] up|down", 1, 2,
1418 netdev_dummy_set_admin_state, NULL);
1419 unixctl_command_register("netdev-dummy/conn-state",
1421 netdev_dummy_conn_state, NULL);
1422 unixctl_command_register("netdev-dummy/ip4addr",
1423 "[netdev] ipaddr/mask-prefix-len", 2, 2,
1424 netdev_dummy_ip4addr, NULL);
1432 netdev_enumerate_types(&types);
1433 SSET_FOR_EACH (type, &types) {
/* "patch" netdevs keep their real implementation even under override. */
1434 if (!strcmp(type, "patch")) {
1437 if (!netdev_unregister_provider(type)) {
1438 struct netdev_class *class;
/* Clone the dummy class under the displaced type's name; on failure the
 * duplicated type string is freed (the free of 'class' itself is not
 * visible in this extraction). */
1441 class = xmemdup(&dummy_class, sizeof dummy_class);
1442 class->type = xstrdup(type);
1443 error = netdev_register_provider(class);
1445 VLOG_ERR("%s: failed to register netdev provider (%s)",
1446 type, ovs_strerror(error));
1447 free(CONST_CAST(char *, class->type));
1452 sset_destroy(&types);
1454 netdev_register_provider(&dummy_class);
1456 netdev_vport_tunnel_register();