bql: Fix inconsistency between file mode and attr method.
[linux-2.6.git] net/core/net-sysfs.c
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <net/wext.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

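/*
 * A device counts as "alive" while reg_state is NETREG_UNINITIALIZED or
 * NETREG_REGISTERED (the enum values below NETREG_UNREGISTERING); once
 * unregistration starts, attribute accesses fail with -EINVAL.
 */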
static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctls */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *net = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(net))
                ret = (*format)(net, buf);
        read_unlock(&dev_base_lock);

        return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *net, char *buf)  \
{                                                                       \
        return sprintf(buf, format_string, net->field);                 \
}                                                                       \
static ssize_t show_##field(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}

/* use same locking and permission rules as SIF* ioctls */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *net = to_net_dev(dev);
        char *endp;
        unsigned long new;
        int ret = -EINVAL;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        new = simple_strtoul(buf, &endp, 0);
        if (endp == buf)
                goto err;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(net)) {
                if ((ret = (*set)(net, new)) == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}

NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);

/* use same locking rules as the GIFHWADDR ioctl */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *net = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(net))
                ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
        read_unlock(&dev_base_lock);
        return ret;
}

static ssize_t show_broadcast(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *net = to_net_dev(dev);
        if (dev_isalive(net))
                return sysfs_format_mac(buf, net->broadcast, net->addr_len);
        return -EINVAL;
}

static ssize_t show_carrier(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        if (netif_running(netdev)) {
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
        }
        return -EINVAL;
}

static ssize_t show_speed(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
        return ret;
}

static ssize_t show_duplex(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, "%s\n",
                                      cmd.duplex ? "full" : "half");
        }
        rtnl_unlock();
        return ret;
}

static ssize_t show_dormant(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}

static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};
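/*
 * Note: the strings above are indexed by operstate and mirror the order of
 * the IF_OPER_* constants in <linux/if.h> (the RFC 2863 operational states).
 */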

static ssize_t show_operstate(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}

/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
        return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}

NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
        return dev_change_flags(net, (unsigned) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}

NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
        net->tx_queue_len = new_len;
        return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}

static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        size_t count = len;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();
        ret = dev_set_alias(netdev, buf, count);
        rtnl_unlock();

        return ret < 0 ? ret : len;
}

static ssize_t show_ifalias(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = 0;

        if (!rtnl_trylock())
                return restart_syscall();
        if (netdev->ifalias)
                ret = sprintf(buf, "%s\n", netdev->ifalias);
        rtnl_unlock();
        return ret;
}

NETDEVICE_SHOW(group, fmt_dec);

static int change_group(struct net_device *net, unsigned long new_group)
{
        dev_set_group(net, (int) new_group);
        return 0;
}

static ssize_t store_group(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}

static struct device_attribute net_class_attributes[] = {
        __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
        __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
        __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
        __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
        __ATTR(iflink, S_IRUGO, show_iflink, NULL),
        __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
        __ATTR(type, S_IRUGO, show_type, NULL),
        __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
        __ATTR(address, S_IRUGO, show_address, NULL),
        __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
        __ATTR(carrier, S_IRUGO, show_carrier, NULL),
        __ATTR(speed, S_IRUGO, show_speed, NULL),
        __ATTR(duplex, S_IRUGO, show_duplex, NULL),
        __ATTR(dormant, S_IRUGO, show_dormant, NULL),
        __ATTR(operstate, S_IRUGO, show_operstate, NULL),
        __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
        __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
        __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
               store_tx_queue_len),
        __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
        {}
};
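/*
 * Each entry above becomes a file directly under the device's sysfs
 * directory, e.g. /sys/class/net/eth0/mtu ("eth0" being just an example
 * interface name).  Typical use from user space:
 *
 *     cat /sys/class/net/eth0/mtu           # -> show_mtu()
 *     echo 1400 > /sys/class/net/eth0/mtu   # -> store_mtu() -> dev_set_mtu()
 */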

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
                        offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
        }
        read_unlock(&dev_base_lock);
        return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t show_##name(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        NULL
};

static struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs  = netstat_attrs,
};
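/*
 * Registered per device as /sys/class/net/<iface>/statistics/, one
 * read-only file per rtnl_link_stats64 counter (rx_packets, tx_bytes, ...).
 */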

#ifdef CONFIG_WIRELESS_EXT_SYSFS
/* helper function that does all the locking etc for wireless stats */
static ssize_t wireless_show(struct device *d, char *buf,
                             ssize_t (*format)(const struct iw_statistics *,
                                               char *))
{
        struct net_device *dev = to_net_dev(d);
        const struct iw_statistics *iw;
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();
        if (dev_isalive(dev)) {
                iw = get_wireless_stats(dev);
                if (iw)
                        ret = (*format)(iw, buf);
        }
        rtnl_unlock();

        return ret;
}

/* show function template for wireless fields */
#define WIRELESS_SHOW(name, field, format_string)                       \
static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
{                                                                       \
        return sprintf(buf, format_string, iw->field);                  \
}                                                                       \
static ssize_t show_iw_##name(struct device *d,                         \
                              struct device_attribute *attr, char *buf) \
{                                                                       \
        return wireless_show(d, buf, format_iw_##name);                 \
}                                                                       \
static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)

WIRELESS_SHOW(status, status, fmt_hex);
WIRELESS_SHOW(link, qual.qual, fmt_dec);
WIRELESS_SHOW(level, qual.level, fmt_dec);
WIRELESS_SHOW(noise, qual.noise, fmt_dec);
WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
WIRELESS_SHOW(crypt, discard.code, fmt_dec);
WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
WIRELESS_SHOW(misc, discard.misc, fmt_dec);
WIRELESS_SHOW(retries, discard.retries, fmt_dec);
WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);

static struct attribute *wireless_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_link.attr,
        &dev_attr_level.attr,
        &dev_attr_noise.attr,
        &dev_attr_nwid.attr,
        &dev_attr_crypt.attr,
        &dev_attr_fragment.attr,
        &dev_attr_retries.attr,
        &dev_attr_misc.attr,
        &dev_attr_beacon.attr,
        NULL
};

static struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
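/*
 * When present, this group appears as /sys/class/net/<iface>/wireless/ and
 * exposes a read-only snapshot of the device's iw_statistics.
 */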
#endif
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_rx_queue *queue,
            struct rx_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_rx_queue *queue,
            struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,             \
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};

static ssize_t show_rps_map(struct netdev_rx_queue *queue,
                            struct rx_queue_attribute *attribute, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                rcu_read_unlock();
                free_cpumask_var(mask);
                return -EINVAL;
        }
        rcu_read_unlock();

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                      struct rx_queue_attribute *attribute,
                      const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_SPINLOCK(rps_map_lock);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned,
            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
            GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i)
                map->len = i;
        else {
                kfree(map);
                map = NULL;
        }

        spin_lock(&rps_map_lock);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);

        if (map)
                jump_label_inc(&rps_needed);
        if (old_map) {
                kfree_rcu(old_map, rcu);
                jump_label_dec(&rps_needed);
        }
        free_cpumask_var(mask);
        return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           struct rx_queue_attribute *attr,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release_work(struct work_struct *work)
{
        struct rps_dev_flow_table *table = container_of(work,
            struct rps_dev_flow_table, free_work);

        vfree(table);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);

        INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
        schedule_work(&table->free_work);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                     struct rx_queue_attribute *attr,
                                     const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * without overflows...
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
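                /* e.g. count = 200: mask starts as 199 (0xc7) and the
                 * loop settles on 255 (0xff), i.e. 2^8 - 1.
                 */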
                /* On 64-bit arches, must check mask fits in table->mask (u32),
                 * and on 32-bit arches, must check
                 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else
                table = NULL;

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
        __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
        __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
            show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
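/*
 * Both attributes appear per RX queue, e.g. as
 * /sys/class/net/eth0/queues/rx-0/rps_cpus and .../rps_flow_cnt ("eth0"
 * being just an example name).  Writing a hex CPU mask, for example
 * "echo f > rps_cpus", steers receive processing for that queue to CPUs 0-3.
 */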

static struct attribute *rx_queue_default_attrs[] = {
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
        NULL
};

static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static struct kobj_type rx_queue_ktype = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
        struct netdev_rx_queue *queue = net->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
            "rx-%u", index);
        if (error) {
                kobject_put(kobj);
                return error;
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
}
#endif /* CONFIG_RPS */

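/*
 * Grow or shrink the set of rx-<n> kobjects to track a change in the
 * device's RX queue count: queues [old_num, new_num) are added, and on
 * failure (or when shrinking) the surplus kobjects are dropped again.
 */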
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(net, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num)
                kobject_put(&net->_rx[i].kobj);

        return error;
#else
        return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,         \
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        return sprintf(buf, fmt_ulong, trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
                value = DQL_MAX_LIMIT;
        else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }

        *pvalue = value;

        return count;
}
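/*
 * bql_set() accepts either a decimal byte count or the literal string
 * "max" (mapped to DQL_MAX_LIMIT), so for example "echo max > limit_max"
 * lifts the upper bound as far as the DQL code allows.
 */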

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attr,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attribute,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
        __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
            bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attr,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)                                           \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 struct netdev_queue_attribute *attr,   \
                                 char *buf)                             \
{                                                                       \
        return bql_show(buf, queue->dql.FIELD);                         \
}                                                                       \
                                                                        \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                struct netdev_queue_attribute *attr,    \
                                const char *buf, size_t len)            \
{                                                                       \
        return bql_set(buf, len, &queue->dql.FIELD);                    \
}                                                                       \
                                                                        \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =       \
        __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
            bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static struct attribute_group dql_group = {
        .name  = "byte_queue_limits",
        .attrs  = dql_attrs,
};
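/*
 * Registered per TX queue as
 * /sys/class/net/<iface>/queues/tx-<n>/byte_queue_limits/; limit,
 * limit_max, limit_min and hold_time are writable, inflight is read-only.
 */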
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                if (queue == &dev->_tx[i])
                        break;

        BUG_ON(i >= dev->num_tx_queues);

        return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(i) {
                        struct xps_map *map =
                            rcu_dereference(dev_maps->cpu_map[i]);
                        if (map) {
                                int j;
                                for (j = 0; j < map->len; j++) {
                                        if (map->queues[j] == index) {
                                                cpumask_set_cpu(i, mask);
                                                break;
                                        }
                                }
                        }
                }
        }
        rcu_read_unlock();

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                free_cpumask_var(mask);
                return -EINVAL;
        }

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)             \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static void xps_queue_release(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        struct xps_map *map;
        unsigned long index;
        int i, pos, nonempty = 0;

        index = get_netdev_queue_index(queue);

        mutex_lock(&xps_map_mutex);
        dev_maps = xmap_dereference(dev->xps_maps);

        if (dev_maps) {
                for_each_possible_cpu(i) {
                        map = xmap_dereference(dev_maps->cpu_map[i]);
                        if (!map)
                                continue;

                        for (pos = 0; pos < map->len; pos++)
                                if (map->queues[pos] == index)
                                        break;

                        if (pos < map->len) {
                                if (map->len > 1)
                                        map->queues[pos] =
                                            map->queues[--map->len];
                                else {
                                        RCU_INIT_POINTER(dev_maps->cpu_map[i],
                                            NULL);
                                        kfree_rcu(map, rcu);
                                        map = NULL;
                                }
                        }
                        if (map)
                                nonempty = 1;
                }

                if (!nonempty) {
                        RCU_INIT_POINTER(dev->xps_maps, NULL);
                        kfree_rcu(dev_maps, rcu);
                }
        }
        mutex_unlock(&xps_map_mutex);
}

static ssize_t store_xps_map(struct netdev_queue *queue,
                      struct netdev_queue_attribute *attribute,
                      const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        cpumask_var_t mask;
        int err, i, cpu, pos, map_len, alloc_len, need_set;
        unsigned long index;
        struct xps_map *map, *new_map;
        struct xps_dev_maps *dev_maps, *new_dev_maps;
        int nonempty = 0;
        int numa_node_id = -2;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        new_dev_maps = kzalloc(max_t(unsigned,
            XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
        if (!new_dev_maps) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        mutex_lock(&xps_map_mutex);

        dev_maps = xmap_dereference(dev->xps_maps);

        for_each_possible_cpu(cpu) {
                map = dev_maps ?
                        xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
                new_map = map;
                if (map) {
                        for (pos = 0; pos < map->len; pos++)
                                if (map->queues[pos] == index)
                                        break;
                        map_len = map->len;
                        alloc_len = map->alloc_len;
                } else
                        pos = map_len = alloc_len = 0;

                need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
#ifdef CONFIG_NUMA
                if (need_set) {
                        if (numa_node_id == -2)
                                numa_node_id = cpu_to_node(cpu);
                        else if (numa_node_id != cpu_to_node(cpu))
                                numa_node_id = -1;
                }
#endif
                if (need_set && pos >= map_len) {
                        /* Need to add queue to this CPU's map */
                        if (map_len >= alloc_len) {
                                alloc_len = alloc_len ?
                                    2 * alloc_len : XPS_MIN_MAP_ALLOC;
                                new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
                                                       GFP_KERNEL,
                                                       cpu_to_node(cpu));
                                if (!new_map)
                                        goto error;
                                new_map->alloc_len = alloc_len;
                                for (i = 0; i < map_len; i++)
                                        new_map->queues[i] = map->queues[i];
                                new_map->len = map_len;
                        }
                        new_map->queues[new_map->len++] = index;
                } else if (!need_set && pos < map_len) {
                        /* Need to remove queue from this CPU's map */
                        if (map_len > 1)
                                new_map->queues[pos] =
                                    new_map->queues[--new_map->len];
                        else
                                new_map = NULL;
                }
                RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
        }

        /* Cleanup old maps */
        for_each_possible_cpu(cpu) {
                map = dev_maps ?
                        xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
                if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
                        kfree_rcu(map, rcu);
                if (new_dev_maps->cpu_map[cpu])
                        nonempty = 1;
        }

        if (nonempty) {
                rcu_assign_pointer(dev->xps_maps, new_dev_maps);
        } else {
                kfree(new_dev_maps);
                RCU_INIT_POINTER(dev->xps_maps, NULL);
        }

        if (dev_maps)
                kfree_rcu(dev_maps, rcu);

        netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
                                            NUMA_NO_NODE);

        mutex_unlock(&xps_map_mutex);

        free_cpumask_var(mask);
        return len;

error:
        mutex_unlock(&xps_map_mutex);

        if (new_dev_maps)
                for_each_possible_cpu(i)
                        kfree(rcu_dereference_protected(
                                new_dev_maps->cpu_map[i],
                                1));
        kfree(new_dev_maps);
        free_cpumask_var(mask);
        return -ENOMEM;
}

static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
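/*
 * Exposed per TX queue as /sys/class/net/<iface>/queues/tx-<n>/xps_cpus.
 * The value is a hex CPU mask written the same way as rps_cpus, e.g.
 * "echo 3 > xps_cpus" to prefer this queue for transmits from CPUs 0-1.
 */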
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
#endif
        NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

#ifdef CONFIG_XPS
        xps_queue_release(queue);
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
};

static int netdev_queue_add_kobject(struct net_device *net, int index)
{
        struct netdev_queue *queue = net->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
        if (error)
                goto exit;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error)
                goto exit;
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

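/*
 * TX-side counterpart of net_rx_queue_update_kobjects(): adds tx-<n>
 * kobjects (and, with CONFIG_BQL, their byte_queue_limits groups) for
 * queues [old_num, new_num), or tears them down when shrinking.
 */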
int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(net, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *net)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        net->queues_kset = kset_create_and_add("queues",
            NULL, &net->dev.kobj);
        if (!net->queues_kset)
                return -ENOMEM;
#endif

#ifdef CONFIG_RPS
        real_rx = net->real_num_rx_queues;
#endif
        real_tx = net->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(net, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(net, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(net, txq, 0);
        net_rx_queue_update_kobjects(net, rxq, 0);
        return error;
}

static void remove_queue_kobjects(struct net_device *net)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
        real_rx = net->real_num_rx_queues;
#endif
        real_tx = net->real_num_tx_queues;

        net_rx_queue_update_kobjects(net, real_rx, 0);
        netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(net->queues_kset);
#endif
}

static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                atomic_inc(&ns->passive);
#endif
        return ns;
}

static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (the interface name may)
         * and is what rtnetlink uses natively. */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}
#endif

/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when the last reference to the device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        kfree(dev->ifalias);
        kfree((char *)dev - dev->padded);
}

static const void *net_namespace(struct device *d)
{
        struct net_device *dev;
        dev = container_of(d, struct net_device, dev);
        return dev_net(dev);
}

static struct class net_class = {
        .name = "net",
        .dev_release = netdev_release,
#ifdef CONFIG_SYSFS
        .dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_HOTPLUG
        .dev_uevent = netdev_uevent,
#endif
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
        struct device *dev = &(net->dev);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(net);

        device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
        struct device *dev = &(net->dev);
        const struct attribute_group **groups = net->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = net;
        dev->groups = groups;

        dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;
#ifdef CONFIG_WIRELESS_EXT_SYSFS
        if (net->ieee80211_ptr)
                *groups++ = &wireless_group;
#ifdef CONFIG_WIRELESS_EXT
        else if (net->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(net);
        if (error) {
                device_del(dev);
                return error;
        }

        return error;
}

int netdev_class_create_file(struct class_attribute *class_attr)
{
        return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
        class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}