/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = (*format)(ndev, buf);
        read_unlock(&dev_base_lock);

        return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *dev, char *buf)  \
{                                                                       \
        return sprintf(buf, format_string, dev->field);                 \
}                                                                       \
static ssize_t field##_show(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}

#define NETDEVICE_SHOW_RO(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RW(field)
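
/*
 * As an illustration, NETDEVICE_SHOW_RO(dev_id, fmt_hex) below expands
 * to roughly (editor's sketch of the preprocessor output):
 *
 *      static ssize_t format_dev_id(const struct net_device *dev, char *buf)
 *      {
 *              return sprintf(buf, fmt_hex, dev->dev_id);
 *      }
 *      static ssize_t dev_id_show(struct device *dev,
 *                                 struct device_attribute *attr, char *buf)
 *      {
 *              return netdev_show(dev, attr, buf, format_dev_id);
 *      }
 *      static DEVICE_ATTR_RO(dev_id);
 *
 * which yields the read-only file /sys/class/net/<iface>/dev_id.
 */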

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        unsigned long new;
        int ret = -EINVAL;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtoul(buf, 0, &new);
        if (ret)
                goto err;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                ret = (*set)(netdev, new);
                if (ret == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}
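
/*
 * A sketch of the resulting control flow (the interface name "eth0" is
 * illustrative, not taken from this file): writing
 *
 *      echo 1500 > /sys/class/net/eth0/mtu
 *
 * invokes mtu_store() below, which calls netdev_store() with
 * change_mtu() as the 'set' callback; the write is rejected with
 * -EPERM unless the caller has CAP_NET_ADMIN in the device's network
 * namespace.
 */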

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct net_device *ndev = to_net_dev(dev);

        return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
        return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (ndev->name_assign_type != NET_NAME_UNKNOWN)
                ret = netdev_show(dev, attr, buf, format_name_assign_type);

        return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
        read_unlock(&dev_base_lock);
        return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = to_net_dev(dev);

        if (dev_isalive(ndev))
                return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
        return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
        if (!netif_running(dev))
                return -EINVAL;
        return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
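
/*
 * Example (sketch; "eth0" is illustrative): on a running interface,
 *
 *      echo 0 > /sys/class/net/eth0/carrier
 *
 * reaches change_carrier() above and, for drivers that implement
 * ndo_change_carrier (e.g. software devices such as dummy), forces the
 * carrier state off; reading the file reports netif_carrier_ok().
 */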

static ssize_t speed_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_link_ksettings cmd;

                if (!__ethtool_get_link_ksettings(netdev, &cmd))
                        ret = sprintf(buf, fmt_dec, cmd.base.speed);
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_link_ksettings cmd;

                if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
                        const char *duplex;

                        switch (cmd.base.duplex) {
                        case DUPLEX_HALF:
                                duplex = "half";
                                break;
                        case DUPLEX_FULL:
                                duplex = "full";
                                break;
                        default:
                                duplex = "unknown";
                                break;
                        }
                        ret = sprintf(buf, "%s\n", duplex);
                }
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};

static ssize_t operstate_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
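
/*
 * Example (sketch): "cat /sys/class/net/eth0/operstate" prints one of
 * the strings above; an interface that is administratively down reads
 * back as "down" because !netif_running() forces IF_OPER_DOWN.
 */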

static ssize_t carrier_changes_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        return sprintf(buf, fmt_dec,
                       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
        return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
        return dev_change_flags(dev, (unsigned int)new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
        dev->tx_queue_len = new_len;
        return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
        dev->gro_flush_timeout = val;
        return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        size_t count = len;
        ssize_t ret;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();
        ret = dev_set_alias(netdev, buf, count);
        rtnl_unlock();

        return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = 0;

        if (!rtnl_trylock())
                return restart_syscall();
        if (netdev->ifalias)
                ret = sprintf(buf, "%s\n", netdev->ifalias);
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RW(ifalias);
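
/*
 * Example (sketch; the alias text is arbitrary):
 *
 *      echo "uplink to core switch" > /sys/class/net/eth0/ifalias
 *
 * stores a human-readable description via dev_set_alias(); the
 * trailing newline that echo appends is dropped before the alias is
 * stored.
 */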

static int change_group(struct net_device *dev, unsigned long new_group)
{
        dev_set_group(dev, (int)new_group);
        return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
        return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct netdev_phys_item_id ppid;

                ret = dev_get_phys_port_id(netdev, &ppid);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                char name[IFNAMSIZ];

                ret = dev_get_phys_port_name(netdev, name, sizeof(name));
                if (!ret)
                        ret = sprintf(buf, "%s\n", name);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct switchdev_attr attr = {
                        .orig_dev = netdev,
                        .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
                        .flags = SWITCHDEV_F_NO_RECURSE,
                };

                ret = switchdev_port_attr_get(netdev, &attr);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
                                      attr.u.ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] = {
        &dev_attr_netdev_group.attr,
        &dev_attr_type.attr,
        &dev_attr_dev_id.attr,
        &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
        &dev_attr_name_assign_type.attr,
        &dev_attr_addr_assign_type.attr,
        &dev_attr_addr_len.attr,
        &dev_attr_link_mode.attr,
        &dev_attr_address.attr,
        &dev_attr_broadcast.attr,
        &dev_attr_speed.attr,
        &dev_attr_duplex.attr,
        &dev_attr_dormant.attr,
        &dev_attr_operstate.attr,
        &dev_attr_carrier_changes.attr,
        &dev_attr_ifalias.attr,
        &dev_attr_carrier.attr,
        &dev_attr_mtu.attr,
        &dev_attr_flags.attr,
        &dev_attr_tx_queue_len.attr,
        &dev_attr_gro_flush_timeout.attr,
        &dev_attr_phys_port_id.attr,
        &dev_attr_phys_port_name.attr,
        &dev_attr_phys_switch_id.attr,
        &dev_attr_proto_down.attr,
        NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
                offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
        }
        read_unlock(&dev_base_lock);
        return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t name##_show(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR_RO(name)
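
/*
 * Each NETSTAT_ENTRY() below becomes a read-only file under
 * /sys/class/net/<iface>/statistics/.  For example (sketch):
 *
 *      cat /sys/class/net/eth0/statistics/rx_packets
 *
 * returns the u64 found at offsetof(struct rtnl_link_stats64,
 * rx_packets) in the snapshot taken by dev_get_stats().
 */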

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        &dev_attr_rx_nohandler.attr,
        NULL
};

static struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs  = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
        NULL
};

static struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups        NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,             \
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
                            struct rx_queue_attribute *attribute, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        int i, len;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        rcu_read_unlock();
        free_cpumask_var(mask);

        return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                             struct rx_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_MUTEX(rps_map_mutex);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned int,
                            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
                      GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i) {
                map->len = i;
        } else {
                kfree(map);
                map = NULL;
        }

        mutex_lock(&rps_map_mutex);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            mutex_is_locked(&rps_map_mutex));
        rcu_assign_pointer(queue->rps_map, map);

        if (map)
                static_key_slow_inc(&rps_needed);
        if (old_map)
                static_key_slow_dec(&rps_needed);

        mutex_unlock(&rps_map_mutex);

        if (old_map)
                kfree_rcu(old_map, rcu);

        free_cpumask_var(mask);
        return len;
}
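
/*
 * Example (sketch): steering receive packet processing for queue 0 of
 * an interface onto CPUs 0-3 with a hexadecimal cpumask:
 *
 *      echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * bitmap_parse() above converts "f" into a cpumask, which is copied
 * into a freshly allocated rps_map and published to readers with
 * rcu_assign_pointer(); writing 0 tears the map down again.
 */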

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           struct rx_queue_attribute *attr,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);
        vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                            struct rx_queue_attribute *attr,
                                            const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * without overflows...
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64 bit arches, must check mask fits in table->mask (u32),
                 * and on 32bit arches, must check
                 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else {
                table = NULL;
        }

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}
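
/*
 * Example (sketch): sizing the RFS flow table of one receive queue:
 *
 *      echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *
 * The mask arithmetic above rounds a non-power-of-two count up to the
 * next power of two, so reading the file back may report more entries
 * than were requested.
 */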

static struct rx_queue_attribute rps_cpus_attribute =
        __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
        __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
            show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
#endif
        NULL
};

static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type rx_queue_ktype = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
        .namespace = rx_queue_namespace
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_rx_queue *queue = dev->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
            "rx-%u", index);
        if (error)
                goto exit;

        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error)
                        goto exit;
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

#ifndef CONFIG_RPS
        if (!dev->sysfs_rx_queue_group)
                return 0;
#endif
        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                if (dev->sysfs_rx_queue_group)
                        sysfs_remove_group(&dev->_rx[i].kobj,
                                           dev->sysfs_rx_queue_group);
                kobject_put(&dev->_rx[i].kobj);
        }

        return error;
#else
        return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,         \
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        return sprintf(buf, "%lu\n", trans_timeout);
}

#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        unsigned int i;

        i = queue - dev->_tx;
        BUG_ON(i >= dev->num_tx_queues);

        return i;
}

static ssize_t show_tx_maxrate(struct netdev_queue *queue,
                               struct netdev_queue_attribute *attribute,
                               char *buf)
{
        return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t set_tx_maxrate(struct netdev_queue *queue,
                              struct netdev_queue_attribute *attribute,
                              const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        int err, index = get_netdev_queue_index(queue);
        u32 rate = 0;

        err = kstrtou32(buf, 10, &rate);
        if (err < 0)
                return err;

        if (!rtnl_trylock())
                return restart_syscall();

        err = -EOPNOTSUPP;
        if (dev->netdev_ops->ndo_set_tx_maxrate)
                err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

        rtnl_unlock();
        if (!err) {
                queue->tx_maxrate = rate;
                return len;
        }
        return err;
}

static struct netdev_queue_attribute queue_tx_maxrate =
        __ATTR(tx_maxrate, S_IRUGO | S_IWUSR,
               show_tx_maxrate, set_tx_maxrate);
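
/*
 * Example (sketch; per the sysfs-class-net-queues ABI document the
 * value is in Mb/s):
 *
 *      echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * caps transmit queue 0 on drivers that provide ndo_set_tx_maxrate;
 * all others return -EOPNOTSUPP, as set_tx_maxrate() shows.
 */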
#endif

static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
                value = DQL_MAX_LIMIT;
        } else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }

        *pvalue = value;

        return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attr,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attribute,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned int value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
        __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
            bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attr,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)                                           \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 struct netdev_queue_attribute *attr,   \
                                 char *buf)                             \
{                                                                       \
        return bql_show(buf, queue->dql.FIELD);                         \
}                                                                       \
                                                                        \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                struct netdev_queue_attribute *attr,    \
                                const char *buf, size_t len)            \
{                                                                       \
        return bql_set(buf, len, &queue->dql.FIELD);                    \
}                                                                       \
                                                                        \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =       \
        __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
            bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static struct attribute_group dql_group = {
        .name  = "byte_queue_limits",
        .attrs  = dql_attrs,
};
#endif /* CONFIG_BQL */
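
/*
 * The group above appears once per transmit queue.  Example (sketch):
 *
 *      cat /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit
 *      echo max > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 *
 * Values are byte counts; the special string "max" selects
 * DQL_MAX_LIMIT, as handled in bql_set().
 */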

#ifdef CONFIG_XPS
static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
        int i, len;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(i) {
                        struct xps_map *map =
                            rcu_dereference(dev_maps->cpu_map[i]);
                        if (map) {
                                int j;

                                for (j = 0; j < map->len; j++) {
                                        if (map->queues[j] == index) {
                                                cpumask_set_cpu(i, mask);
                                                break;
                                        }
                                }
                        }
                }
        }
        rcu_read_unlock();

        len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        free_cpumask_var(mask);
        return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
                             struct netdev_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        unsigned long index;
        cpumask_var_t mask;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        err = netif_set_xps_queue(dev, mask, index);

        free_cpumask_var(mask);

        return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */
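
/*
 * Example (sketch): restricting transmit queue 1 to CPU 2 only:
 *
 *      echo 4 > /sys/class/net/eth0/queues/tx-1/xps_cpus
 *
 * The mask is parsed exactly like rps_cpus and handed to
 * netif_set_xps_queue().
 */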

static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
        &queue_tx_maxrate.attr,
#endif
        NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
        .namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_queue *queue = dev->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
        if (error)
                goto exit;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error)
                goto exit;
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *dev)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        dev->queues_kset = kset_create_and_add("queues",
            NULL, &dev->dev.kobj);
        if (!dev->queues_kset)
                return -ENOMEM;
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(dev, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(dev, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(dev, txq, 0);
        net_rx_queue_update_kobjects(dev, rxq, 0);
        return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        net_rx_queue_update_kobjects(dev, real_rx, 0);
        netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
        struct net *net = current->nsproxy->net_ns;

        return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                atomic_inc(&ns->passive);
#endif
        return ns;
}

static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .current_may_mount = net_current_may_mount,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (interface name may change)
         * and is what rtnetlink uses natively.
         */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}

/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        kfree(dev->ifalias);
        netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        return dev_net(dev);
}

static struct class net_class = {
        .name = "net",
        .dev_release = netdev_release,
        .dev_groups = net_class_groups,
        .dev_uevent = netdev_uevent,
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};

#ifdef CONFIG_OF_NET
static int of_dev_node_match(struct device *dev, const void *data)
{
        int ret = 0;

        if (dev->parent)
                ret = dev->parent->of_node == data;

        return ret == 0 ? dev->of_node == data : ret;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
        struct device *dev;

        dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
        if (!dev)
                return NULL;

        return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
        struct device *dev = &(ndev->dev);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(ndev);

        pm_runtime_set_memalloc_noio(dev, false);

        device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
        struct device *dev = &(ndev->dev);
        const struct attribute_group **groups = ndev->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = ndev;
        dev->groups = groups;

        dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
        if (ndev->ieee80211_ptr)
                *groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
        else if (ndev->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(ndev);
        if (error) {
                device_del(dev);
                return error;
        }

        pm_runtime_set_memalloc_noio(dev, true);

        return error;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
                                const void *ns)
{
        return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
                                 const void *ns)
{
        class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}