/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain :      Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16? Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *              0800    IP
 *              8100    802.1Q VLAN
 *              0001    802.3
 *              0002    AX.25
 *              0004    802.2
 *              8035    RARP
 *              0005    SNAP
 *              0805    X.25
 *              0806    ARP
 *              8137    IPX
 *              0009    Localtalk
 *              86DD    IPv6
 */

#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;        /* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usage, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

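/*
 * Example (illustrative sketch, not part of the original file): how a pure
 * reader is expected to walk the device list under the rules above.
 * do_something() is a hypothetical placeholder for read-only work; @net is
 * the namespace being inspected.
 *
 *      struct net_device *dev;
 *
 *      read_lock(&dev_base_lock);
 *      for_each_netdev(net, dev)
 *              do_something(dev);
 *      read_unlock(&dev_base_lock);
 *
 * or, equivalently, for readers that can live with RCU semantics:
 *
 *      rcu_read_lock();
 *      for_each_netdev_rcu(net, dev)
 *              do_something(dev);
 *      rcu_read_unlock();
 */
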
seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);

        return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers that mangle input packets
 *      MUST BE last in the hash buckets, and checking protocol handlers
 *      MUST start from the promiscuous ptype_all chain in net_bh.
 *      That is true now; do not change it.
 *      Explanation follows: if a packet-mangling protocol handler were
 *      first on the list, it could not sense that the packet is cloned
 *      and should be copied-on-write, so it would change the packet in
 *      place and subsequent readers would get a broken packet.
 *                                                      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return &ptype_all;
        else
                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep, and therefore it cannot guarantee that
 *      all CPUs that are in the middle of receiving packets will see the
 *      new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
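
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * tap registering for every packet. my_pt_rcv() and my_pt are hypothetical
 * names; a real handler must consume or free the skb it is handed.
 *
 *      static int my_pt_rcv(struct sk_buff *skb, struct net_device *dev,
 *                           struct packet_type *pt,
 *                           struct net_device *orig_dev)
 *      {
 *              // inspect skb here
 *              kfree_skb(skb);
 *              return NET_RX_SUCCESS;
 *      }
 *
 *      static struct packet_type my_pt __read_mostly = {
 *              .type = htons(ETH_P_ALL),
 *              .func = my_pt_rcv,
 *      };
 *
 *      dev_add_pack(&my_pt);           // e.g. from module init
 *      ...
 *      dev_remove_pack(&my_pt);        // from module exit; may sleep
 */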

/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &proto_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep, and therefore it cannot guarantee that
 *      all CPUs that are in the middle of receiving packets will see the
 *      new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;

        spin_lock(&offload_lock);
        list_add_rcu(&po->list, head);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *      __dev_remove_offload - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed &offload_type
 *      is removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *      dev_remove_offload - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &offload_type is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
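
/*
 * Example (illustrative sketch, not part of the original file): how a
 * protocol might register its GSO/GRO callbacks. The my_* callbacks are
 * hypothetical, and the field layout is assumed from the &packet_offload
 * definition of this era; see the IPv4 registration in net/ipv4/af_inet.c
 * for a real user.
 *
 *      static struct packet_offload my_offload __read_mostly = {
 *              .type = htons(ETH_P_IP),
 *              .callbacks = {
 *                      .gso_segment  = my_gso_segment,
 *                      .gro_receive  = my_gro_receive,
 *                      .gro_complete = my_gro_complete,
 *              },
 *      };
 *
 *      dev_add_offload(&my_offload);
 */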

/*******************************************************************************

                Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds a new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine
 *      for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If the device is already registered, then return a base of 1
         * to indicate not to probe for this interface.
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves the boot-time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
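
/*
 * Example (illustrative): with the parser above, a kernel command line of
 *
 *      netdev=9,0x300,0,0,eth0
 *
 * records irq 9 and I/O base 0x300 for the device that will be named eth0.
 * Trailing integers may be omitted; whatever string remains after the
 * integers is taken as the device name.
 */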

/*******************************************************************************

                Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under the RTNL semaphore
 *      or @dev_base_lock. If the name is found, a pointer to the device
 *      is returned. If the name is not found, then %NULL is returned. The
 *      reference counters are not incremented, so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *      dev_get_by_name_rcu - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name.
 *      If the name is found, a pointer to the device is returned.
 *      If the name is not found, then %NULL is returned.
 *      The reference counters are not incremented, so the caller must be
 *      careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
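
/*
 * Example (illustrative): taking and releasing a reference obtained by
 * name; "eth0" is just a sample name.
 *
 *      struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *      if (dev) {
 *              // dev stays valid here even across sleeps
 *              dev_put(dev);
 *      }
 */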

/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found, or a pointer to the device. The device has not
 *      had its reference counter increased, so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found, or a pointer to the device. The device has not
 *      had its reference counter increased, so the caller must be careful
 *      about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found, or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put() to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
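
/*
 * Example (illustrative): a short RCU-protected lookup when no reference
 * needs to outlive the critical section; ifindex is a sample variable.
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_index_rcu(net, ifindex);
 *      if (dev)
 *              netdev_info(dev, "found\n");  // dev valid only inside the lock
 *      rcu_read_unlock();
 */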

/**
 *      dev_getbyhwaddr_rcu - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns %NULL if the device
 *      is not found, or a pointer to the device.
 *      The caller must hold RCU or RTNL.
 *      The returned device has not had its reference count increased,
 *      so the caller must be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
892
893/**
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000894 * dev_get_by_flags_rcu - find any device with given flags
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700895 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896 * @if_flags: IFF_* values
897 * @mask: bitmask of bits in if_flags to check
898 *
899 * Search for any interface with the given flags. Returns NULL if a device
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000900 * is not found or a pointer to the device. Must be called inside
901 * rcu_read_lock(), and result refcount is unchanged.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 */
903
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000904struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700905 unsigned short mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700906{
Pavel Emelianov7562f872007-05-03 15:13:45 -0700907 struct net_device *dev, *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908
Pavel Emelianov7562f872007-05-03 15:13:45 -0700909 ret = NULL;
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800910 for_each_netdev_rcu(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 if (((dev->flags ^ if_flags) & mask) == 0) {
Pavel Emelianov7562f872007-05-03 15:13:45 -0700912 ret = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913 break;
914 }
915 }
Pavel Emelianov7562f872007-05-03 15:13:45 -0700916 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917}
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000918EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strlen(name) >= IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);
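
/*
 * Examples (illustrative): "eth0", "wlan%d" and "br-lan" pass the checks
 * above; "", ".", "..", "a/b", "my dev" and any string of IFNAMSIZ or more
 * characters are rejected.
 */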

/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - eg "lt%d" - it will try to find a suitable
 *      id. It scans the list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be exactly one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" - it will try to find a suitable
 *      id. It scans the list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
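
/*
 * Example (illustrative): a driver letting the kernel pick the unit number.
 * If eth0 and eth1 already exist, dev->name becomes "eth2" and 2 is
 * returned; a plain name without '%' must simply be free.
 *
 *      err = dev_alloc_name(dev, "eth%d");     // under RTNL
 *      if (err < 0)
 *              goto fail;
 */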

static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

static int dev_get_valid_name(struct net *net,
                              struct net_device *dev,
                              const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change the name of a device; a format string such as "eth%d"
 *      may be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        write_seqcount_begin(&devnet_rename_seq);

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
                write_seqcount_end(&devnet_rename_seq);
                return 0;
        }

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
                write_seqcount_end(&devnet_rename_seq);
                return err;
        }

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                write_seqcount_end(&devnet_rename_seq);
                return ret;
        }

        write_seqcount_end(&devnet_rename_seq);

        write_lock_bh(&dev_base_lock);
        hlist_del_rcu(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        char *new_ifalias;

        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                kfree(dev->ifalias);
                dev->ifalias = NULL;
                return 0;
        }

        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!new_ifalias)
                return -ENOMEM;
        dev->ifalias = new_ifalias;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}

/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *      netdev_notify_peers - notify network peers about existence of @dev
 *      @dev: network device
 *
 *      Generate traffic such that interested network peers are aware of
 *      @dev, such as by generating a gratuitous ARP. This may be used when
 *      a device wants to inform the rest of the network about some sort of
 *      reconfiguration such as a failover event or virtual machine
 *      migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001228
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229/**
1230 * dev_load - load a network module
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001231 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 * @name: name of interface
1233 *
1234 * If a network interface is not present and the process has suitable
1235 * privileges this function loads the module. If module loading is not
1236 * available in this kernel then it becomes a nop.
1237 */
1238
Eric W. Biederman881d9662007-09-17 11:56:21 -07001239void dev_load(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240{
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001241 struct net_device *dev;
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001242 int no_module;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243
Eric Dumazet72c95282009-10-30 07:11:27 +00001244 rcu_read_lock();
1245 dev = dev_get_by_name_rcu(net, name);
1246 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001248 no_module = !dev;
1249 if (no_module && capable(CAP_NET_ADMIN))
1250 no_module = request_module("netdev-%s", name);
1251 if (no_module && capable(CAP_SYS_MODULE)) {
1252 if (!request_module("%s", name))
Vinson Lee7cecb522012-06-27 14:32:07 +00001253 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1254 name);
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001255 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001257EXPORT_SYMBOL(dev_load);
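
/*
 * Usage sketch (illustrative only, not part of dev.c): ioctl-style
 * handlers that receive an interface name from userspace typically try
 * dev_load() first, so a not-yet-loaded driver module gets a chance to
 * register the device before the lookup. "example_lookup" is hypothetical.
 *
 *	static int example_lookup(struct net *net, const char *name)
 *	{
 *		struct net_device *dev;
 *
 *		dev_load(net, name);		// may request_module("netdev-<name>")
 *		dev = dev_get_by_name(net, name);	// takes a reference
 *		if (!dev)
 *			return -ENODEV;
 *		// ... use dev ...
 *		dev_put(dev);
 *		return 0;
 *	}
 */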

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	ret = netpoll_rx_disable(dev);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_rx_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
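
/*
 * Usage sketch (illustrative only): bringing a device up from kernel
 * code must be done under RTNL, since __dev_open() asserts it. The
 * caller shown here is hypothetical.
 *
 *	rtnl_lock();
 *	err = dev_open(slave_dev);	// 0 on success, or if already up
 *	rtnl_unlock();
 *	if (err < 0)
 *		pr_err("failed to bring %s up: %d\n", slave_dev->name, err);
 */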

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch the poll
		 * list; it can even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/* Call the device-specific close. This cannot fail and is
		 * only done while the device is UP.
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	/* Temporarily disable netpoll until the interface is down */
	retval = netpoll_rx_disable(dev);
	if (retval)
		return retval;

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	netpoll_rx_enable(dev);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	int ret = 0;

	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		/* Block netpoll rx while the interface is going down */
		ret = netpoll_rx_disable(dev);
		if (ret)
			return ret;

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);

		netpoll_rx_enable(dev);
	}
	return ret;
}
EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446
1447
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001448/**
1449 * dev_disable_lro - disable Large Receive Offload on a device
1450 * @dev: device
1451 *
1452 * Disable Large Receive Offload (LRO) on a net device. Must be
1453 * called under RTNL. This is needed if received packets may be
1454 * forwarded to another interface.
1455 */
1456void dev_disable_lro(struct net_device *dev)
1457{
Neil Hormanf11970e2011-05-24 08:31:09 +00001458 /*
1459 * If we're trying to disable lro on a vlan device
1460 * use the underlying physical device instead
1461 */
1462 if (is_vlan_dev(dev))
1463 dev = vlan_dev_real_dev(dev);
1464
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001465 dev->wanted_features &= ~NETIF_F_LRO;
1466 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001467
Michał Mirosław22d59692011-04-21 12:42:15 +00001468 if (unlikely(dev->features & NETIF_F_LRO))
1469 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001470}
1471EXPORT_SYMBOL(dev_disable_lro);
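
/*
 * Usage sketch (illustrative only): forwarding setups disable LRO on a
 * port before frames received on it can be forwarded, e.g. when
 * attaching it to a bridge-like master. "example_enslave" is a
 * hypothetical helper.
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(port_dev);	// merged super-frames must not be forwarded
 *	err = example_enslave(master_dev, port_dev);
 */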

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed to the
 * new notifier to allow the notifier to have a race-free view of the
 * network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
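
/*
 * Usage sketch (illustrative, not part of dev.c): a subsystem that
 * wants to track device state registers a notifier block once at init
 * time. The names "example_netdev_event", "example_nb" and the
 * init/exit hooks are hypothetical; note that at this point the
 * notifier data pointer is the net_device itself.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_UNREGISTER:
 *			// drop any per-device state kept for dev
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	// module init: register_netdevice_notifier(&example_nb);
 *	// module exit: unregister_netdevice_notifier(&example_nb);
 */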

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
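
/*
 * Usage sketch (illustrative only): these calls are reference counted,
 * so a feature that needs packet timestamps (for instance a socket
 * enabling SO_TIMESTAMP) pairs one enable with exactly one disable.
 *
 *	net_enable_timestamp();		// on first user
 *	// ... timestamps are now taken in net_timestamp_set() ...
 *	net_disable_timestamp();	// when the last user goes away
 */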

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)		\
			__net_timestamp(SKB);			\
	}							\

static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
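
/*
 * Usage sketch (illustrative, modelled on pair devices such as veth):
 * a virtual driver's ndo_start_xmit can hand the skb straight to the
 * peer's receive path. "example_xmit", "struct example_priv" and
 * "priv->peer" are assumptions, not real dev.c symbols.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *
 *		if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS)
 *			dev->stats.tx_packets++;
 *		else
 *			dev->stats.tx_dropped++;	// skb already freed
 *		return NETDEV_TX_OK;
 *	}
 */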

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, TC0
 * is invalid and nothing can be done, so disable priority mappings. It
 * is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif	/* CONFIG_XPS */
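
/*
 * Usage sketch (illustrative only): a multiqueue driver could pin each
 * tx queue to the CPU it expects to transmit from. The one-CPU-per-queue
 * mapping below is an assumption, not a recommendation.
 *
 *	cpumask_var_t mask;
 *	u16 qid;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	for (qid = 0; qid < dev->real_num_tx_queues; qid++) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(qid % num_online_cpus(), mask);
 *		netif_set_xps_queue(dev, mask, qid);	// per-queue CPU affinity
 *	}
 *	free_cpumask_var(mask);
 */
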
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
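
/*
 * Usage sketch (illustrative only): a driver that was granted fewer
 * hardware queues than it advertised at alloc time shrinks the active
 * set under RTNL; queues at or above the new count have their qdiscs
 * and XPS maps flushed above. "hw_queues_granted" is hypothetical.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, hw_queues_granted);
 *	rtnl_unlock();
 */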

#ifdef CONFIG_RPS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
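
/*
 * Usage sketch (illustrative only): drivers typically clamp their queue
 * count with this helper before allocating the netdev. "max_hw_queues"
 * and "struct example_priv" are assumptions.
 *
 *	int nq = min_t(int, max_hw_queues,
 *		       netif_get_num_default_rss_queues());
 *	struct net_device *dev = alloc_etherdev_mq(sizeof(struct example_priv), nq);
 */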

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
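
/*
 * Usage sketch (illustrative only): completion handlers that can run in
 * hardirq context or with irqs disabled must use the _any variant;
 * calling plain dev_kfree_skb() there would be unsafe. The ring
 * structure is hypothetical.
 *
 *	static void example_tx_complete(struct example_ring *ring)
 *	{
 *		struct sk_buff *skb = ring->skb;
 *
 *		ring->skb = NULL;
 *		dev_kfree_skb_any(skb);		// safe in irq and process context
 *	}
 */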

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
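
/*
 * Usage sketch (illustrative only): PCI suspend/resume handlers commonly
 * bracket the power transition with detach/attach so the stack stops
 * queueing packets while the hardware is away. Error handling and the
 * driver names are omitted/hypothetical.
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		// ... stop hardware, save state ...
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		// ... restore state, restart hardware ...
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */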

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
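
/*
 * Usage sketch (illustrative only): a driver whose hardware cannot
 * checksum a given packet falls back to software before handing the
 * frame to DMA, mirroring what dev_hard_start_xmit() does further
 * below. "example_hw_can_csum" is a hypothetical capability test.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb)) {
 *		if (skb_checksum_help(skb))
 *			goto drop;		// could not linearize/expand skb
 *	}
 */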

/* openvswitch calls this on rx path, so we need a different check. */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb_needs_check(skb, tx_path))) {
		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 * @features: device features as applicable to this skb
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002553
John Fastabend6afff0c2010-06-16 14:18:12 +00002554/*
2555 * Returns true if either:
2556 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002557 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002558 */
2559static inline int skb_needs_linearize(struct sk_buff *skb,
Jesse Gross02932ce2011-01-09 06:23:34 +00002560 int features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002561{
Jesse Gross02932ce2011-01-09 06:23:34 +00002562 return skb_is_nonlinear(skb) &&
2563 ((skb_has_frag_list(skb) &&
2564 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002565 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002566 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002567}
2568
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002569int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2570 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002571{
Stephen Hemminger00829822008-11-20 20:14:53 -08002572 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002573 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002574 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002575
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002576 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002577 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002578
Eric Dumazet93f154b2009-05-18 22:19:19 -07002579 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002580	 * If the device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002581	 * it's hot in this CPU's cache
2582 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002583 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2584 skb_dst_drop(skb);
2585
Jesse Grossfc741212011-01-09 06:23:32 +00002586 features = netif_skb_features(skb);
2587
Jesse Gross7b9c6092010-10-20 13:56:04 +00002588 if (vlan_tx_tag_present(skb) &&
Jesse Grossfc741212011-01-09 06:23:32 +00002589 !(features & NETIF_F_HW_VLAN_TX)) {
Jesse Gross7b9c6092010-10-20 13:56:04 +00002590 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2591 if (unlikely(!skb))
2592 goto out;
2593
2594 skb->vlan_tci = 0;
2595 }
2596
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002597		/* If this is an encapsulation offload request, verify we are
 2598		 * testing hardware encapsulation features instead of the
 2599		 * standard features for the netdev
2600 */
2601 if (skb->encapsulation)
2602 features &= dev->hw_enc_features;
2603
Jesse Grossfc741212011-01-09 06:23:32 +00002604 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002605 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002606 goto out_kfree_skb;
2607 if (skb->next)
2608 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002609 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002610 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002611 __skb_linearize(skb))
2612 goto out_kfree_skb;
2613
2614 /* If packet is not checksummed and device does not
2615 * support checksumming for this protocol, complete
2616 * checksumming here.
2617 */
2618 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002619 if (skb->encapsulation)
2620 skb_set_inner_transport_header(skb,
2621 skb_checksum_start_offset(skb));
2622 else
2623 skb_set_transport_header(skb,
2624 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002625 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002626 skb_checksum_help(skb))
2627 goto out_kfree_skb;
2628 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002629 }
2630
Eric Dumazetb40863c2012-09-18 20:44:49 +00002631 if (!list_empty(&ptype_all))
2632 dev_queue_xmit_nit(skb, dev);
2633
Koki Sanagiec764bf2011-05-30 21:48:34 +00002634 skb_len = skb->len;
Patrick Ohlyac45f602009-02-12 05:03:37 +00002635 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002636 trace_net_dev_xmit(skb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002637 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002638 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002639 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002640 }
2641
Herbert Xu576a30e2006-06-27 13:22:38 -07002642gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002643 do {
2644 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002645
2646 skb->next = nskb->next;
2647 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002648
2649 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002650		 * If the device doesn't need nskb->dst, release it right now while
Krishna Kumar068a2de2009-12-09 20:59:58 +00002651		 * it's hot in this CPU's cache
2652 */
2653 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2654 skb_dst_drop(nskb);
2655
Eric Dumazetb40863c2012-09-18 20:44:49 +00002656 if (!list_empty(&ptype_all))
2657 dev_queue_xmit_nit(nskb, dev);
2658
Koki Sanagiec764bf2011-05-30 21:48:34 +00002659 skb_len = nskb->len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002660 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002661 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002662 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002663 if (rc & ~NETDEV_TX_MASK)
2664 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002665 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002666 skb->next = nskb;
2667 return rc;
2668 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002669 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002670 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002671 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002672 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002673
Patrick McHardy572a9d72009-11-10 06:14:14 +00002674out_kfree_gso_skb:
2675 if (likely(skb->next == NULL))
2676 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002677out_kfree_skb:
2678 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002679out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002680 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002681}
2682
Eric Dumazet1def9232013-01-10 12:36:42 +00002683static void qdisc_pkt_len_init(struct sk_buff *skb)
2684{
2685 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2686
2687 qdisc_skb_cb(skb)->pkt_len = skb->len;
2688
 2689	/* To get a more precise estimate of bytes sent on the wire,
 2690	 * we add to pkt_len the header size of all segments
2691 */
2692 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002693 unsigned int hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002694
Eric Dumazet757b8b12013-01-15 21:14:21 -08002695 /* mac layer + network layer */
2696 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2697
2698 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002699 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2700 hdr_len += tcp_hdrlen(skb);
2701 else
2702 hdr_len += sizeof(struct udphdr);
2703 qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
2704 }
2705}
2706
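/*
 * Worked example (illustrative numbers): a TCP GSO skb carrying 3000
 * bytes of payload split into two segments, with a 14-byte Ethernet
 * header, a 20-byte IPv4 header and a 20-byte TCP header, enters with
 * skb->len = 3054 and hdr_len = 14 + 20 + 20 = 54, so:
 *
 *	pkt_len = 3054 + (2 - 1) * 54 = 3108 bytes
 *
 * which matches what the two resulting wire packets actually carry.
 */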
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002707static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2708 struct net_device *dev,
2709 struct netdev_queue *txq)
2710{
2711 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002712 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002713 int rc;
2714
Eric Dumazet1def9232013-01-10 12:36:42 +00002715 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002716 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002717 /*
2718 * Heuristic to force contended enqueues to serialize on a
 2719	 * separate lock before trying to get the qdisc main lock.
 2720	 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2721 * and dequeue packets faster.
2722 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002723 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002724 if (unlikely(contended))
2725 spin_lock(&q->busylock);
2726
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002727 spin_lock(root_lock);
2728 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2729 kfree_skb(skb);
2730 rc = NET_XMIT_DROP;
2731 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002732 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002733 /*
2734 * This is a work-conserving queue; there are no old skbs
2735 * waiting to be sent out; and the qdisc is not running -
2736 * xmit the skb directly.
2737 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002738 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2739 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002740
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002741 qdisc_bstats_update(q, skb);
2742
Eric Dumazet79640a42010-06-02 05:09:29 -07002743 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2744 if (unlikely(contended)) {
2745 spin_unlock(&q->busylock);
2746 contended = false;
2747 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002748 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002749 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002750 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002751
2752 rc = NET_XMIT_SUCCESS;
2753 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002754 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002755 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002756 if (qdisc_run_begin(q)) {
2757 if (unlikely(contended)) {
2758 spin_unlock(&q->busylock);
2759 contended = false;
2760 }
2761 __qdisc_run(q);
2762 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002763 }
2764 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002765 if (unlikely(contended))
2766 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002767 return rc;
2768}
2769
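/*
 * The contention funnel above condenses to this pattern (a sketch of the
 * code in __dev_xmit_skb(), not a separate API): a producer that sees the
 * qdisc already running queues on q->busylock first, so at most one
 * waiter at a time competes for the root lock with the running owner:
 *
 *	contended = qdisc_is_running(q);
 *	if (unlikely(contended))
 *		spin_lock(&q->busylock);
 *	spin_lock(root_lock);
 *	... enqueue, or transmit directly if the qdisc is empty ...
 *	spin_unlock(root_lock);
 *	if (unlikely(contended))
 *		spin_unlock(&q->busylock);
 */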
Neil Horman5bc14212011-11-22 05:10:51 +00002770#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2771static void skb_update_prio(struct sk_buff *skb)
2772{
Igor Maravic6977a792011-11-25 07:44:54 +00002773 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002774
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002775 if (!skb->priority && skb->sk && map) {
2776 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2777
2778 if (prioidx < map->priomap_len)
2779 skb->priority = map->priomap[prioidx];
2780 }
Neil Horman5bc14212011-11-22 05:10:51 +00002781}
2782#else
2783#define skb_update_prio(skb)
2784#endif
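/*
 * Example (hedged; the exact path depends on where the cgroup hierarchy
 * is mounted): with CONFIG_NETPRIO_CGROUP the priomap consulted above is
 * filled from userspace, e.g.:
 *
 *	echo "eth0 5" > /sys/fs/cgroup/net_prio/mygroup/net_prio.ifpriomap
 *
 * after which skbs from sockets in "mygroup" that carry no explicit
 * priority leave eth0 with skb->priority == 5.
 */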
2785
Eric Dumazet745e20f2010-09-29 13:23:09 -07002786static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002787#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002788
Dave Jonesd29f7492008-07-22 14:09:06 -07002789/**
Michel Machado95603e22012-06-12 10:16:35 +00002790 * dev_loopback_xmit - loop back @skb
2791 * @skb: buffer to transmit
2792 */
2793int dev_loopback_xmit(struct sk_buff *skb)
2794{
2795 skb_reset_mac_header(skb);
2796 __skb_pull(skb, skb_network_offset(skb));
2797 skb->pkt_type = PACKET_LOOPBACK;
2798 skb->ip_summed = CHECKSUM_UNNECESSARY;
2799 WARN_ON(!skb_dst(skb));
2800 skb_dst_force(skb);
2801 netif_rx_ni(skb);
2802 return 0;
2803}
2804EXPORT_SYMBOL(dev_loopback_xmit);
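/*
 * Illustrative sketch: a typical caller (e.g. a multicast output path)
 * loops a clone of an outgoing packet back to local listeners.  The
 * caller is expected to have attached a dst (note the WARN_ON above):
 *
 *	struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (skb2 && skb_dst(skb2))
 *		dev_loopback_xmit(skb2);
 */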
2805
2806/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002807 * dev_queue_xmit - transmit a buffer
2808 * @skb: buffer to transmit
2809 *
2810 * Queue a buffer for transmission to a network device. The caller must
2811 * have set the device and priority and built the buffer before calling
2812 * this function. The function can be called from an interrupt.
2813 *
2814 * A negative errno code is returned on a failure. A success does not
2815 * guarantee the frame will be transmitted as it may be dropped due
2816 * to congestion or traffic shaping.
2817 *
2818 * -----------------------------------------------------------------------------------
2819 * I notice this method can also return errors from the queue disciplines,
2820 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2821 * be positive.
2822 *
2823 * Regardless of the return value, the skb is consumed, so it is currently
2824 * difficult to retry a send to this method. (You can bump the ref count
2825 * before sending to hold a reference for retry if you are careful.)
2826 *
2827 * When calling this method, interrupts MUST be enabled. This is because
2828 * the BH enable code must have IRQs enabled so that it will not deadlock.
2829 * --BLG
2830 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831int dev_queue_xmit(struct sk_buff *skb)
2832{
2833 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002834 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 struct Qdisc *q;
2836 int rc = -ENOMEM;
2837
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002838 skb_reset_mac_header(skb);
2839
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002840 /* Disable soft irqs for various locks below. Also
2841 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002843 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844
Neil Horman5bc14212011-11-22 05:10:51 +00002845 skb_update_prio(skb);
2846
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002847 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002848 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002849
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002851 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002853 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002855 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002856 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 }
2858
 2859	/* The device has no queue. Common case for software devices:
 2860	   loopback, all sorts of tunnels...
 2861
Herbert Xu932ff272006-06-09 12:20:56 -07002862	   Really, it is unlikely that netif_tx_lock protection is necessary
 2863	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864	   counters.)
 2865	   However, it is possible that they rely on the protection
 2866	   we provide here.
 2867
 2868	   Check this and take the lock. It is not prone to deadlocks.
 2869	   Either way, the noqueue qdisc case is even simpler 8)
2870 */
2871 if (dev->flags & IFF_UP) {
2872 int cpu = smp_processor_id(); /* ok because BHs are off */
2873
David S. Millerc773e842008-07-08 23:13:53 -07002874 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875
Eric Dumazet745e20f2010-09-29 13:23:09 -07002876 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2877 goto recursion_alert;
2878
David S. Millerc773e842008-07-08 23:13:53 -07002879 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880
Tom Herbert734664982011-11-28 16:32:44 +00002881 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002882 __this_cpu_inc(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002883 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002884 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002885 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002886 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 goto out;
2888 }
2889 }
David S. Millerc773e842008-07-08 23:13:53 -07002890 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002891 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2892 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 } else {
 2894			/* Recursion detected! This can,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002895			 * unfortunately, happen
2896 */
2897recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002898 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2899 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 }
2901 }
2902
2903 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002904 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 kfree_skb(skb);
2907 return rc;
2908out:
Herbert Xud4828d82006-06-22 02:28:18 -07002909 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910 return rc;
2911}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002912EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913
2914
2915/*=======================================================================
2916 Receiver routines
2917 =======================================================================*/
2918
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002919int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002920EXPORT_SYMBOL(netdev_max_backlog);
2921
Eric Dumazet3b098e22010-05-15 23:57:10 -07002922int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002923int netdev_budget __read_mostly = 300;
2924int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002926/* Called with irq disabled */
2927static inline void ____napi_schedule(struct softnet_data *sd,
2928 struct napi_struct *napi)
2929{
2930 list_add_tail(&napi->poll_list, &sd->poll_list);
2931 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2932}
2933
Eric Dumazetdf334542010-03-24 19:13:54 +00002934#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002935
2936/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002937struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002938EXPORT_SYMBOL(rps_sock_flow_table);
2939
Ingo Molnarc5905af2012-02-24 08:31:31 +01002940struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002941
Ben Hutchingsc4454772011-01-19 11:03:53 +00002942static struct rps_dev_flow *
2943set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2944 struct rps_dev_flow *rflow, u16 next_cpu)
2945{
Ben Hutchings09994d12011-10-03 04:42:46 +00002946 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002947#ifdef CONFIG_RFS_ACCEL
2948 struct netdev_rx_queue *rxqueue;
2949 struct rps_dev_flow_table *flow_table;
2950 struct rps_dev_flow *old_rflow;
2951 u32 flow_id;
2952 u16 rxq_index;
2953 int rc;
2954
2955 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00002956 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2957 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00002958 goto out;
2959 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2960 if (rxq_index == skb_get_rx_queue(skb))
2961 goto out;
2962
2963 rxqueue = dev->_rx + rxq_index;
2964 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2965 if (!flow_table)
2966 goto out;
2967 flow_id = skb->rxhash & flow_table->mask;
2968 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2969 rxq_index, flow_id);
2970 if (rc < 0)
2971 goto out;
2972 old_rflow = rflow;
2973 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00002974 rflow->filter = rc;
2975 if (old_rflow->filter == rflow->filter)
2976 old_rflow->filter = RPS_NO_FILTER;
2977 out:
2978#endif
2979 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00002980 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002981 }
2982
Ben Hutchings09994d12011-10-03 04:42:46 +00002983 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002984 return rflow;
2985}
2986
Tom Herbert0a9627f2010-03-16 08:03:29 +00002987/*
2988 * get_rps_cpu is called from netif_receive_skb and returns the target
2989 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002990 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00002991 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002992static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2993 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002994{
Tom Herbert0a9627f2010-03-16 08:03:29 +00002995 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002996 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07002997 struct rps_dev_flow_table *flow_table;
2998 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002999 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07003000 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003001
Tom Herbert0a9627f2010-03-16 08:03:29 +00003002 if (skb_rx_queue_recorded(skb)) {
3003 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003004 if (unlikely(index >= dev->real_num_rx_queues)) {
3005 WARN_ONCE(dev->real_num_rx_queues > 1,
3006 "%s received packet on queue %u, but number "
3007 "of RX queues is %u\n",
3008 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003009 goto done;
3010 }
3011 rxqueue = dev->_rx + index;
3012 } else
3013 rxqueue = dev->_rx;
3014
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003015 map = rcu_dereference(rxqueue->rps_map);
3016 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08003017 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00003018 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00003019 tcpu = map->cpus[0];
3020 if (cpu_online(tcpu))
3021 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003022 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00003023 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00003024 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003025 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003026 }
3027
Changli Gao2d47b452010-08-17 19:00:56 +00003028 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003029 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00003030 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003031
Tom Herbertfec5e652010-04-16 16:01:27 -07003032 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3033 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3034 if (flow_table && sock_flow_table) {
3035 u16 next_cpu;
3036 struct rps_dev_flow *rflow;
3037
3038 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3039 tcpu = rflow->cpu;
3040
3041 next_cpu = sock_flow_table->ents[skb->rxhash &
3042 sock_flow_table->mask];
3043
3044 /*
3045 * If the desired CPU (where last recvmsg was done) is
3046 * different from current CPU (one in the rx-queue flow
3047 * table entry), switch if one of the following holds:
3048 * - Current CPU is unset (equal to RPS_NO_CPU).
3049 * - Current CPU is offline.
3050 * - The current CPU's queue tail has advanced beyond the
3051 * last packet that was enqueued using this table entry.
3052 * This guarantees that all previous packets for the flow
 3053	 * have been dequeued, thus preserving in-order delivery.
3054 */
3055 if (unlikely(tcpu != next_cpu) &&
3056 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3057 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003058 rflow->last_qtail)) >= 0)) {
3059 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003060 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003061 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003062
Tom Herbertfec5e652010-04-16 16:01:27 -07003063 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3064 *rflowp = rflow;
3065 cpu = tcpu;
3066 goto done;
3067 }
3068 }
3069
Tom Herbert0a9627f2010-03-16 08:03:29 +00003070 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003071 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003072
3073 if (cpu_online(tcpu)) {
3074 cpu = tcpu;
3075 goto done;
3076 }
3077 }
3078
3079done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003080 return cpu;
3081}
3082
Ben Hutchingsc4454772011-01-19 11:03:53 +00003083#ifdef CONFIG_RFS_ACCEL
3084
3085/**
3086 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3087 * @dev: Device on which the filter was set
3088 * @rxq_index: RX queue index
3089 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3090 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3091 *
3092 * Drivers that implement ndo_rx_flow_steer() should periodically call
3093 * this function for each installed filter and remove the filters for
3094 * which it returns %true.
3095 */
3096bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3097 u32 flow_id, u16 filter_id)
3098{
3099 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3100 struct rps_dev_flow_table *flow_table;
3101 struct rps_dev_flow *rflow;
3102 bool expire = true;
3103 int cpu;
3104
3105 rcu_read_lock();
3106 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3107 if (flow_table && flow_id <= flow_table->mask) {
3108 rflow = &flow_table->flows[flow_id];
3109 cpu = ACCESS_ONCE(rflow->cpu);
3110 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3111 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3112 rflow->last_qtail) <
3113 (int)(10 * flow_table->mask)))
3114 expire = false;
3115 }
3116 rcu_read_unlock();
3117 return expire;
3118}
3119EXPORT_SYMBOL(rps_may_expire_flow);
3120
3121#endif /* CONFIG_RFS_ACCEL */
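/*
 * Illustrative sketch (the driver state and foo_* helpers are
 * hypothetical): a driver that installed hardware filters from
 * ndo_rx_flow_steer() might expire stale ones from a periodic work item:
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct foo_filter *f = &priv->filters[i];
 *
 *		if (f->active &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			foo_remove_hw_filter(priv, f);
 *	}
 */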
3122
Tom Herbert0a9627f2010-03-16 08:03:29 +00003123/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003124static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003125{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003126 struct softnet_data *sd = data;
3127
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003128 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003129 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003130}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003131
Tom Herbertfec5e652010-04-16 16:01:27 -07003132#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003133
3134/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003135 * Check if this softnet_data structure belongs to another CPU.
 3136 * If yes, queue it to our IPI list and return 1;
 3137 * if no, return 0.
3138 */
3139static int rps_ipi_queued(struct softnet_data *sd)
3140{
3141#ifdef CONFIG_RPS
3142 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3143
3144 if (sd != mysd) {
3145 sd->rps_ipi_next = mysd->rps_ipi_list;
3146 mysd->rps_ipi_list = sd;
3147
3148 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3149 return 1;
3150 }
3151#endif /* CONFIG_RPS */
3152 return 0;
3153}
3154
3155/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003156 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3157 * queue (may be a remote CPU queue).
3158 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003159static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3160 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003161{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003162 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003163 unsigned long flags;
3164
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003165 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003166
3167 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003168
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003169 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003170 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3171 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003172enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003173 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003174 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003175 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003176 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003177 return NET_RX_SUCCESS;
3178 }
3179
Eric Dumazetebda37c22010-05-06 23:51:21 +00003180		/* Schedule NAPI for the backlog device.
 3181		 * We can use a non-atomic operation since we own the queue lock
3182 */
3183 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003184 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003185 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003186 }
3187 goto enqueue;
3188 }
3189
Changli Gaodee42872010-05-02 05:42:16 +00003190 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003191 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003192
Tom Herbert0a9627f2010-03-16 08:03:29 +00003193 local_irq_restore(flags);
3194
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003195 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003196 kfree_skb(skb);
3197 return NET_RX_DROP;
3198}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200/**
3201 * netif_rx - post buffer to the network code
3202 * @skb: buffer to post
3203 *
3204 * This function receives a packet from a device driver and queues it for
3205 * the upper (protocol) levels to process. It always succeeds. The buffer
3206 * may be dropped during processing for congestion control or by the
3207 * protocol layers.
3208 *
3209 * return values:
3210 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 * NET_RX_DROP (packet was dropped)
3212 *
3213 */
3214
3215int netif_rx(struct sk_buff *skb)
3216{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003217 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
3219 /* if netpoll wants it, pretend we never saw it */
3220 if (netpoll_rx(skb))
3221 return NET_RX_DROP;
3222
Eric Dumazet588f0332011-11-15 04:12:55 +00003223 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224
Koki Sanagicf66ba52010-08-23 18:45:02 +09003225 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003226#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003227 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003228 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003229 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230
Changli Gaocece1942010-08-07 20:35:43 -07003231 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003232 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003233
3234 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003235 if (cpu < 0)
3236 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003237
3238 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3239
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003240 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003241 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003242 } else
3243#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003244 {
3245 unsigned int qtail;
3246 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3247 put_cpu();
3248 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003249 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003251EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252
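/*
 * Illustrative sketch (rx_buf and pkt_len stand in for hypothetical
 * hardware state): a non-NAPI driver typically calls netif_rx() from its
 * receive interrupt once the frame has been copied out of hardware:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		... count the drop and bail out
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * From process context, netif_rx_ni() below should be used instead so
 * that pending softirqs get a chance to run.
 */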
3253int netif_rx_ni(struct sk_buff *skb)
3254{
3255 int err;
3256
3257 preempt_disable();
3258 err = netif_rx(skb);
3259 if (local_softirq_pending())
3260 do_softirq();
3261 preempt_enable();
3262
3263 return err;
3264}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265EXPORT_SYMBOL(netif_rx_ni);
3266
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267static void net_tx_action(struct softirq_action *h)
3268{
3269 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3270
3271 if (sd->completion_queue) {
3272 struct sk_buff *clist;
3273
3274 local_irq_disable();
3275 clist = sd->completion_queue;
3276 sd->completion_queue = NULL;
3277 local_irq_enable();
3278
3279 while (clist) {
3280 struct sk_buff *skb = clist;
3281 clist = clist->next;
3282
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003283 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003284 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 __kfree_skb(skb);
3286 }
3287 }
3288
3289 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003290 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291
3292 local_irq_disable();
3293 head = sd->output_queue;
3294 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003295 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 local_irq_enable();
3297
3298 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003299 struct Qdisc *q = head;
3300 spinlock_t *root_lock;
3301
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 head = head->next_sched;
3303
David S. Miller5fb66222008-08-02 20:02:43 -07003304 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003305 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003306 smp_mb__before_clear_bit();
3307 clear_bit(__QDISC_STATE_SCHED,
3308 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003309 qdisc_run(q);
3310 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003312 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003313 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003314 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003315 } else {
3316 smp_mb__before_clear_bit();
3317 clear_bit(__QDISC_STATE_SCHED,
3318 &q->state);
3319 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 }
3321 }
3322 }
3323}
3324
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003325#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3326 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003327/* This hook is defined here for ATM LANE */
3328int (*br_fdb_test_addr_hook)(struct net_device *dev,
3329 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003330EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003331#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333#ifdef CONFIG_NET_CLS_ACT
 3334/* TODO: Maybe we should just force sch_ingress to be compiled in
 3335 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 3336 * instructions (a compare and 2 extra stores) right now if we don't
 3337 * have it on but have CONFIG_NET_CLS_ACT
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003338 * NOTE: This doesn't stop any functionality; if you don't have
 3339 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 *
3341 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003342static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003345 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003346 int result = TC_ACT_OK;
3347 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003348
Stephen Hemmingerde384832010-08-01 00:33:23 -07003349 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003350		net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
3351 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003352 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 }
3354
Herbert Xuf697c3e2007-10-14 00:38:47 -07003355 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3356 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3357
David S. Miller83874002008-07-17 00:53:03 -07003358 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003359 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003360 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003361 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3362 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003363 spin_unlock(qdisc_lock(q));
3364 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003365
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366 return result;
3367}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003368
3369static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3370 struct packet_type **pt_prev,
3371 int *ret, struct net_device *orig_dev)
3372{
Eric Dumazet24824a02010-10-02 06:11:55 +00003373 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3374
3375 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003376 goto out;
3377
3378 if (*pt_prev) {
3379 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3380 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003381 }
3382
Eric Dumazet24824a02010-10-02 06:11:55 +00003383 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003384 case TC_ACT_SHOT:
3385 case TC_ACT_STOLEN:
3386 kfree_skb(skb);
3387 return NULL;
3388 }
3389
3390out:
3391 skb->tc_verd = 0;
3392 return skb;
3393}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394#endif
3395
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003396/**
3397 * netdev_rx_handler_register - register receive handler
3398 * @dev: device to register a handler for
3399 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003400 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003401 *
 3402 *	Register a receive handler for a device. This handler will then be
3403 * called from __netif_receive_skb. A negative errno code is returned
3404 * on a failure.
3405 *
3406 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003407 *
3408 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003409 */
3410int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003411 rx_handler_func_t *rx_handler,
3412 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003413{
3414 ASSERT_RTNL();
3415
3416 if (dev->rx_handler)
3417 return -EBUSY;
3418
Jiri Pirko93e2c322010-06-10 03:34:59 +00003419 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003420 rcu_assign_pointer(dev->rx_handler, rx_handler);
3421
3422 return 0;
3423}
3424EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3425
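/*
 * Illustrative sketch (the handler and "upper" device are hypothetical;
 * bridge, bonding and macvlan implement the real ones): a minimal
 * rx_handler that redirects every frame to an upper device:
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		skb->dev = rcu_dereference(skb->dev->rx_handler_data);
 *		*pskb = skb;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 * RX_HANDLER_ANOTHER makes __netif_receive_skb_core() take another_round
 * with skb->dev now pointing at the upper device.  Registration happens
 * under rtnl:
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, example_handle_frame,
 *					 upper_dev);
 *	rtnl_unlock();
 */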
3426/**
3427 * netdev_rx_handler_unregister - unregister receive handler
3428 * @dev: device to unregister a handler from
3429 *
 3430 *	Unregister a receive handler from a device.
3431 *
3432 * The caller must hold the rtnl_mutex.
3433 */
3434void netdev_rx_handler_unregister(struct net_device *dev)
3435{
3436
3437 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003438 RCU_INIT_POINTER(dev->rx_handler, NULL);
3439 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003440}
3441EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3442
Mel Gormanb4b9e352012-07-31 16:44:26 -07003443/*
3444 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3445 * the special handling of PFMEMALLOC skbs.
3446 */
3447static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3448{
3449 switch (skb->protocol) {
3450 case __constant_htons(ETH_P_ARP):
3451 case __constant_htons(ETH_P_IP):
3452 case __constant_htons(ETH_P_IPV6):
3453 case __constant_htons(ETH_P_8021Q):
3454 return true;
3455 default:
3456 return false;
3457 }
3458}
3459
David S. Miller9754e292013-02-14 15:57:38 -05003460static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461{
3462 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003463 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003464 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003465 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003466 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003468 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469
Eric Dumazet588f0332011-11-15 04:12:55 +00003470 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003471
Koki Sanagicf66ba52010-08-23 18:45:02 +09003472 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003473
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003475 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003476 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003478 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003479
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003480 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003481 if (!skb_transport_header_was_set(skb))
3482 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003483 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484
3485 pt_prev = NULL;
3486
3487 rcu_read_lock();
3488
David S. Miller63d8ea72011-02-28 10:48:59 -08003489another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003490 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003491
3492 __this_cpu_inc(softnet_data.processed);
3493
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003494 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3495 skb = vlan_untag(skb);
3496 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003497 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003498 }
3499
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500#ifdef CONFIG_NET_CLS_ACT
3501 if (skb->tc_verd & TC_NCLS) {
3502 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3503 goto ncls;
3504 }
3505#endif
3506
David S. Miller9754e292013-02-14 15:57:38 -05003507 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003508 goto skip_taps;
3509
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003511 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003512 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003513 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 pt_prev = ptype;
3515 }
3516 }
3517
Mel Gormanb4b9e352012-07-31 16:44:26 -07003518skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003520 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3521 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003522 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523ncls:
3524#endif
3525
David S. Miller9754e292013-02-14 15:57:38 -05003526 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003527 goto drop;
3528
John Fastabend24257172011-10-10 09:16:41 +00003529 if (vlan_tx_tag_present(skb)) {
3530 if (pt_prev) {
3531 ret = deliver_skb(skb, pt_prev, orig_dev);
3532 pt_prev = NULL;
3533 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003534 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003535 goto another_round;
3536 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003537 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003538 }
3539
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003540 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003541 if (rx_handler) {
3542 if (pt_prev) {
3543 ret = deliver_skb(skb, pt_prev, orig_dev);
3544 pt_prev = NULL;
3545 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003546 switch (rx_handler(&skb)) {
3547 case RX_HANDLER_CONSUMED:
Mel Gormanb4b9e352012-07-31 16:44:26 -07003548 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003549 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003550 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003551 case RX_HANDLER_EXACT:
3552 deliver_exact = true;
3553 case RX_HANDLER_PASS:
3554 break;
3555 default:
3556 BUG();
3557 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003558 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003559
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003560 if (vlan_tx_nonzero_tag_present(skb))
3561 skb->pkt_type = PACKET_OTHERHOST;
3562
David S. Miller63d8ea72011-02-28 10:48:59 -08003563 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003564 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003565
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003567 list_for_each_entry_rcu(ptype,
3568 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003569 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003570 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3571 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003572 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003573 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574 pt_prev = ptype;
3575 }
3576 }
3577
3578 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003579 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003580 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003581 else
3582 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003584drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003585 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586 kfree_skb(skb);
 3587		/* Jamal, now you will not be able to escape explaining
 3588		 * to me how you were going to use this. :-)
3589 */
3590 ret = NET_RX_DROP;
3591 }
3592
Mel Gormanb4b9e352012-07-31 16:44:26 -07003593unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003595out:
David S. Miller9754e292013-02-14 15:57:38 -05003596 return ret;
3597}
3598
3599static int __netif_receive_skb(struct sk_buff *skb)
3600{
3601 int ret;
3602
3603 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3604 unsigned long pflags = current->flags;
3605
3606 /*
3607 * PFMEMALLOC skbs are special, they should
3608 * - be delivered to SOCK_MEMALLOC sockets only
3609 * - stay away from userspace
3610 * - have bounded memory usage
3611 *
3612 * Use PF_MEMALLOC as this saves us from propagating the allocation
3613 * context down to all allocation sites.
3614 */
3615 current->flags |= PF_MEMALLOC;
3616 ret = __netif_receive_skb_core(skb, true);
3617 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3618 } else
3619 ret = __netif_receive_skb_core(skb, false);
3620
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621 return ret;
3622}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003623
3624/**
3625 * netif_receive_skb - process receive buffer from network
3626 * @skb: buffer to process
3627 *
3628 * netif_receive_skb() is the main receive data processing function.
3629 * It always succeeds. The buffer may be dropped during processing
3630 * for congestion control or by the protocol layers.
3631 *
3632 * This function may only be called from softirq context and interrupts
3633 * should be enabled.
3634 *
3635 * Return values (usually ignored):
3636 * NET_RX_SUCCESS: no congestion
3637 * NET_RX_DROP: packet was dropped
3638 */
3639int netif_receive_skb(struct sk_buff *skb)
3640{
Eric Dumazet588f0332011-11-15 04:12:55 +00003641 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003642
Richard Cochranc1f19b52010-07-17 08:49:36 +00003643 if (skb_defer_rx_timestamp(skb))
3644 return NET_RX_SUCCESS;
3645
Eric Dumazetdf334542010-03-24 19:13:54 +00003646#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003647 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003648 struct rps_dev_flow voidflow, *rflow = &voidflow;
3649 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003650
Eric Dumazet3b098e22010-05-15 23:57:10 -07003651 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003652
Eric Dumazet3b098e22010-05-15 23:57:10 -07003653 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003654
Eric Dumazet3b098e22010-05-15 23:57:10 -07003655 if (cpu >= 0) {
3656 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3657 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003658 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003659 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003660 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003661 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003662#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003663 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003664}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003665EXPORT_SYMBOL(netif_receive_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666
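/*
 * Illustrative sketch (the foo_* names are hypothetical): the canonical
 * caller is a NAPI poll routine running in softirq context:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget && foo_rx_ready(napi->dev)) {
 *			struct sk_buff *skb = foo_build_skb(napi->dev);
 *
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */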
Eric Dumazet88751272010-04-19 05:07:33 +00003667/* Network device is going away, flush any packets still pending
3668 * Called with irqs disabled.
3669 */
Changli Gao152102c2010-03-30 20:16:22 +00003670static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003671{
Changli Gao152102c2010-03-30 20:16:22 +00003672 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003673 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003674 struct sk_buff *skb, *tmp;
3675
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003676 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003677 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003678 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003679 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003680 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003681 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003682 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003683 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003684 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003685
3686 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3687 if (skb->dev == dev) {
3688 __skb_unlink(skb, &sd->process_queue);
3689 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003690 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003691 }
3692 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003693}
3694
Herbert Xud565b0a2008-12-15 23:38:52 -08003695static int napi_gro_complete(struct sk_buff *skb)
3696{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003697 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003698 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003699 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003700 int err = -ENOENT;
3701
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003702 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3703
Herbert Xufc59f9a2009-04-14 15:11:06 -07003704 if (NAPI_GRO_CB(skb)->count == 1) {
3705 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003706 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003707 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003708
3709 rcu_read_lock();
3710 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003711 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003712 continue;
3713
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003714 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003715 break;
3716 }
3717 rcu_read_unlock();
3718
3719 if (err) {
3720 WARN_ON(&ptype->list == head);
3721 kfree_skb(skb);
3722 return NET_RX_SUCCESS;
3723 }
3724
3725out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003726 return netif_receive_skb(skb);
3727}
3728
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003729/* napi->gro_list contains packets ordered by age.
 3730 * The youngest packets are at its head.
3731 * Complete skbs in reverse order to reduce latencies.
3732 */
3733void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003734{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003735 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003736
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003737 /* scan list and build reverse chain */
3738 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3739 skb->prev = prev;
3740 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003741 }
3742
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003743 for (skb = prev; skb; skb = prev) {
3744 skb->next = NULL;
3745
3746 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3747 return;
3748
3749 prev = skb->prev;
3750 napi_gro_complete(skb);
3751 napi->gro_count--;
3752 }
3753
Herbert Xud565b0a2008-12-15 23:38:52 -08003754 napi->gro_list = NULL;
3755}
Eric Dumazet86cac582010-08-31 18:25:32 +00003756EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003757
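/*
 * Note: napi_complete() already performs a full flush (flush_old ==
 * false); calling napi_gro_flush() directly is only needed when a driver
 * keeps the NAPI instance scheduled but wants held GRO skbs pushed up
 * now:
 *
 *	napi_gro_flush(napi, false);
 *
 * With flush_old == true, skbs merged during the current jiffy are kept
 * back so they still have a chance to grow.
 */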
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003758static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3759{
3760 struct sk_buff *p;
3761 unsigned int maclen = skb->dev->hard_header_len;
3762
3763 for (p = napi->gro_list; p; p = p->next) {
3764 unsigned long diffs;
3765
3766 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3767 diffs |= p->vlan_tci ^ skb->vlan_tci;
3768 if (maclen == ETH_HLEN)
3769 diffs |= compare_ether_header(skb_mac_header(p),
3770 skb_gro_mac_header(skb));
3771 else if (!diffs)
3772 diffs = memcmp(skb_mac_header(p),
3773 skb_gro_mac_header(skb),
3774 maclen);
3775 NAPI_GRO_CB(p)->same_flow = !diffs;
3776 NAPI_GRO_CB(p)->flush = 0;
3777 }
3778}
3779
Rami Rosenbb728822012-11-28 21:55:25 +00003780static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003781{
3782 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003783 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003784 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003785 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003786 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08003787 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003788 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003789
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003790 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003791 goto normal;
3792
David S. Miller21dc3302010-08-23 00:13:46 -07003793 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003794 goto normal;
3795
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003796 gro_list_prepare(napi, skb);
3797
Herbert Xud565b0a2008-12-15 23:38:52 -08003798 rcu_read_lock();
3799 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003800 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003801 continue;
3802
Herbert Xu86911732009-01-29 14:19:50 +00003803 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08003804 mac_len = skb->network_header - skb->mac_header;
3805 skb->mac_len = mac_len;
3806 NAPI_GRO_CB(skb)->same_flow = 0;
3807 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003808 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003809
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003810 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003811 break;
3812 }
3813 rcu_read_unlock();
3814
3815 if (&ptype->list == head)
3816 goto normal;
3817
Herbert Xu0da2afd52008-12-26 14:57:42 -08003818 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003819 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003820
Herbert Xud565b0a2008-12-15 23:38:52 -08003821 if (pp) {
3822 struct sk_buff *nskb = *pp;
3823
3824 *pp = nskb->next;
3825 nskb->next = NULL;
3826 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003827 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003828 }
3829
Herbert Xu0da2afd52008-12-26 14:57:42 -08003830 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003831 goto ok;
3832
Herbert Xu4ae55442009-02-08 18:00:36 +00003833 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003834 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003835
Herbert Xu4ae55442009-02-08 18:00:36 +00003836 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003837 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003838 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003839 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003840 skb->next = napi->gro_list;
3841 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003842 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003843
Herbert Xuad0f9902009-02-01 01:24:55 -08003844pull:
Herbert Xucb189782009-05-26 18:50:31 +00003845 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3846 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3847
3848 BUG_ON(skb->end - skb->tail < grow);
3849
3850 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3851
3852 skb->tail += grow;
3853 skb->data_len -= grow;
3854
3855 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003856 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003857
Eric Dumazet9e903e02011-10-18 21:00:24 +00003858 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003859 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003860 memmove(skb_shinfo(skb)->frags,
3861 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003862 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003863 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003864 }
3865
Herbert Xud565b0a2008-12-15 23:38:52 -08003866ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003867 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003868
3869normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003870 ret = GRO_NORMAL;
3871 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003872}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003873
Herbert Xu96e93ea2009-01-06 10:49:34 -08003874
Rami Rosenbb728822012-11-28 21:55:25 +00003875static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003876{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003877 switch (ret) {
3878 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003879 if (netif_receive_skb(skb))
3880 ret = GRO_DROP;
3881 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003882
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003883 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003884 kfree_skb(skb);
3885 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003886
Eric Dumazetdaa86542012-04-19 07:07:40 +00003887 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003888 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3889 kmem_cache_free(skbuff_head_cache, skb);
3890 else
3891 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003892 break;
3893
Ben Hutchings5b252f02009-10-29 07:17:09 +00003894 case GRO_HELD:
3895 case GRO_MERGED:
3896 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003897 }
3898
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003899 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003900}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003901
Eric Dumazetca07e432012-10-06 22:28:06 +00003902static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003903{
Eric Dumazetca07e432012-10-06 22:28:06 +00003904 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3905 const skb_frag_t *frag0 = &pinfo->frags[0];
3906
Herbert Xu78a478d2009-05-26 18:50:21 +00003907 NAPI_GRO_CB(skb)->data_offset = 0;
3908 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003909 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003910
Herbert Xu78d3fd02009-05-26 18:50:23 +00003911 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003912 pinfo->nr_frags &&
3913 !PageHighMem(skb_frag_page(frag0))) {
3914 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3915 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003916 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003917}
Herbert Xu78a478d2009-05-26 18:50:21 +00003918
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003919gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003920{
Herbert Xu86911732009-01-29 14:19:50 +00003921 skb_gro_reset_offset(skb);
3922
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003923 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003924}
3925EXPORT_SYMBOL(napi_gro_receive);
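/*
 * Usage sketch (illustrative only; the mydrv_* names are hypothetical):
 * a driver's NAPI poll routine hands received skbs to GRO instead of
 * calling netif_receive_skb() directly:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget &&
 *		       (skb = mydrv_rx_next(napi)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */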
3926
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003927static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003928{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003929 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003930 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3931 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003932 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003933 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003934 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003935
3936 napi->skb = skb;
3937}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003938
Herbert Xu76620aa2009-04-16 02:02:07 -07003939struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003940{
Herbert Xu5d38a072009-01-04 16:13:40 -08003941 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003942
3943 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003944 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3945 if (skb)
3946 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003947 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003948 return skb;
3949}
Herbert Xu76620aa2009-04-16 02:02:07 -07003950EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003951
Rami Rosenbb728822012-11-28 21:55:25 +00003952static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003953 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003954{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003955 switch (ret) {
3956 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003957 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003958 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003959
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003960 if (ret == GRO_HELD)
3961 skb_gro_pull(skb, -ETH_HLEN);
3962 else if (netif_receive_skb(skb))
3963 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003964 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003965
3966 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003967 case GRO_MERGED_FREE:
3968 napi_reuse_skb(napi, skb);
3969 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003970
3971 case GRO_MERGED:
3972 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003973 }
3974
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003975 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003976}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003977
Eric Dumazet4adb9c42012-05-18 20:49:06 +00003978static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003979{
Herbert Xu76620aa2009-04-16 02:02:07 -07003980 struct sk_buff *skb = napi->skb;
3981 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00003982 unsigned int hlen;
3983 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07003984
3985 napi->skb = NULL;
3986
3987 skb_reset_mac_header(skb);
3988 skb_gro_reset_offset(skb);
3989
Herbert Xua5b1cf22009-05-26 18:50:28 +00003990 off = skb_gro_offset(skb);
3991 hlen = off + sizeof(*eth);
3992 eth = skb_gro_header_fast(skb, off);
3993 if (skb_gro_header_hard(skb, hlen)) {
3994 eth = skb_gro_header_slow(skb, hlen, off);
3995 if (unlikely(!eth)) {
3996 napi_reuse_skb(napi, skb);
3997 skb = NULL;
3998 goto out;
3999 }
Herbert Xu76620aa2009-04-16 02:02:07 -07004000 }
4001
4002 skb_gro_pull(skb, sizeof(*eth));
4003
4004 /*
4005 * This works because the only protocols we care about don't require
4006 * special handling. We'll fix it up properly at the end.
4007 */
4008 skb->protocol = eth->h_proto;
4009
4010out:
4011 return skb;
4012}
Herbert Xu76620aa2009-04-16 02:02:07 -07004013
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004014gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004015{
4016 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004017
4018 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004019 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004020
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004021 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004022}
4023EXPORT_SYMBOL(napi_gro_frags);
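/*
 * Usage sketch for the frags interface (illustrative only; page, off
 * and len come from a hypothetical driver's RX ring): drivers that
 * receive directly into pages request a shell skb, attach the fragment
 * and let napi_gro_frags() parse the Ethernet header:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;
 *	skb_fill_page_desc(skb, 0, page, off, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 *	napi_gro_frags(napi);
 */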
4024
Eric Dumazete326bed2010-04-22 00:22:45 -07004025/*
 4026 * net_rps_action sends any pending IPIs for RPS.
4027 * Note: called with local irq disabled, but exits with local irq enabled.
4028 */
4029static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4030{
4031#ifdef CONFIG_RPS
4032 struct softnet_data *remsd = sd->rps_ipi_list;
4033
4034 if (remsd) {
4035 sd->rps_ipi_list = NULL;
4036
4037 local_irq_enable();
4038
 4039 /* Send pending IPIs to kick RPS processing on remote cpus. */
4040 while (remsd) {
4041 struct softnet_data *next = remsd->rps_ipi_next;
4042
4043 if (cpu_online(remsd->cpu))
4044 __smp_call_function_single(remsd->cpu,
4045 &remsd->csd, 0);
4046 remsd = next;
4047 }
4048 } else
4049#endif
4050 local_irq_enable();
4051}
4052
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004053static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054{
4055 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004056 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057
Eric Dumazete326bed2010-04-22 00:22:45 -07004058#ifdef CONFIG_RPS
 4059 /* Check if we have pending IPIs; it's better to send them now
 4060 * than to wait for net_rx_action() to end.
4061 */
4062 if (sd->rps_ipi_list) {
4063 local_irq_disable();
4064 net_rps_action_and_irq_enable(sd);
4065 }
4066#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004067 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004068 local_irq_disable();
4069 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004071 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072
Changli Gao6e7676c2010-04-27 15:07:33 -07004073 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004074 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004075 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004076 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004077 input_queue_head_incr(sd);
4078 if (++work >= quota) {
4079 local_irq_enable();
4080 return work;
4081 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004082 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083
Changli Gao6e7676c2010-04-27 15:07:33 -07004084 rps_lock(sd);
4085 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004086 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004087 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4088 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004089
Changli Gao6e7676c2010-04-27 15:07:33 -07004090 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004091 /*
4092 * Inline a custom version of __napi_complete().
 4093 * Only the current cpu owns and manipulates this napi,
 4094 * and NAPI_STATE_SCHED is the only possible flag set on the backlog,
 4095 * so we can use a plain write instead of clear_bit(),
 4096 * and we don't need an smp_mb() memory barrier.
4097 */
4098 list_del(&napi->poll_list);
4099 napi->state = 0;
4100
Changli Gao6e7676c2010-04-27 15:07:33 -07004101 quota = work + qlen;
4102 }
4103 rps_unlock(sd);
4104 }
4105 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004107 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108}
4109
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004110/**
4111 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004112 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004113 *
4114 * The entry's receive function will be scheduled to run
4115 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004116void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004117{
4118 unsigned long flags;
4119
4120 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004121 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004122 local_irq_restore(flags);
4123}
4124EXPORT_SYMBOL(__napi_schedule);
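/*
 * Usage sketch (illustrative only; the mydrv_* names are hypothetical):
 * a device interrupt handler masks further interrupts and defers the
 * real work to NAPI context:
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
 *	{
 *		struct mydrv_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			mydrv_mask_irqs(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */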
4125
Herbert Xud565b0a2008-12-15 23:38:52 -08004126void __napi_complete(struct napi_struct *n)
4127{
4128 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4129 BUG_ON(n->gro_list);
4130
4131 list_del(&n->poll_list);
4132 smp_mb__before_clear_bit();
4133 clear_bit(NAPI_STATE_SCHED, &n->state);
4134}
4135EXPORT_SYMBOL(__napi_complete);
4136
4137void napi_complete(struct napi_struct *n)
4138{
4139 unsigned long flags;
4140
4141 /*
4142 * don't let napi dequeue from the cpu poll list
 4143 * just in case it's running on a different cpu
4144 */
4145 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4146 return;
4147
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004148 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004149 local_irq_save(flags);
4150 __napi_complete(n);
4151 local_irq_restore(flags);
4152}
4153EXPORT_SYMBOL(napi_complete);
4154
4155void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4156 int (*poll)(struct napi_struct *, int), int weight)
4157{
4158 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004159 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004160 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004161 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004162 napi->poll = poll;
4163 napi->weight = weight;
4164 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004165 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004166#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004167 spin_lock_init(&napi->poll_lock);
4168 napi->poll_owner = -1;
4169#endif
4170 set_bit(NAPI_STATE_SCHED, &napi->state);
4171}
4172EXPORT_SYMBOL(netif_napi_add);
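/*
 * Usage sketch (illustrative only; netdev, priv and mydrv_poll are
 * hypothetical): drivers register their poll routine once at probe
 * time, conventionally with a weight of 64, and enable it from
 * ndo_open with napi_enable():
 *
 *	netif_napi_add(netdev, &priv->napi, mydrv_poll, 64);
 */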
4173
4174void netif_napi_del(struct napi_struct *napi)
4175{
4176 struct sk_buff *skb, *next;
4177
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004178 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004179 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004180
4181 for (skb = napi->gro_list; skb; skb = next) {
4182 next = skb->next;
4183 skb->next = NULL;
4184 kfree_skb(skb);
4185 }
4186
4187 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004188 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004189}
4190EXPORT_SYMBOL(netif_napi_del);
4191
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192static void net_rx_action(struct softirq_action *h)
4193{
Eric Dumazete326bed2010-04-22 00:22:45 -07004194 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004195 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004196 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004197 void *have;
4198
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199 local_irq_disable();
4200
Eric Dumazete326bed2010-04-22 00:22:45 -07004201 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004202 struct napi_struct *n;
4203 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004205 /* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004206 * Allow this to run for 2 jiffies, which allows
4207 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004208 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004209 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210 goto softnet_break;
4211
4212 local_irq_enable();
4213
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004214 /* Even though interrupts have been re-enabled, this
4215 * access is safe because interrupts can only add new
4216 * entries to the tail of this list, and only ->poll()
4217 * calls can remove this head entry from the list.
4218 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004219 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004221 have = netpoll_poll_lock(n);
4222
4223 weight = n->weight;
4224
David S. Miller0a7606c2007-10-29 21:28:47 -07004225 /* This NAPI_STATE_SCHED test is for avoiding a race
4226 * with netpoll's poll_napi(). Only the entity which
4227 * obtains the lock and sees NAPI_STATE_SCHED set will
4228 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004229 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004230 */
4231 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004232 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004233 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004234 trace_napi_poll(n);
4235 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004236
4237 WARN_ON_ONCE(work > weight);
4238
4239 budget -= work;
4240
4241 local_irq_disable();
4242
4243 /* Drivers must not modify the NAPI state if they
4244 * consume the entire weight. In such cases this code
4245 * still "owns" the NAPI instance and therefore can
4246 * move the instance around on the list at-will.
4247 */
David S. Millerfed17f32008-01-07 21:00:40 -08004248 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004249 if (unlikely(napi_disable_pending(n))) {
4250 local_irq_enable();
4251 napi_complete(n);
4252 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004253 } else {
4254 if (n->gro_list) {
 4255 /* flush packets that are too old.
4256 * If HZ < 1000, flush all packets.
4257 */
4258 local_irq_enable();
4259 napi_gro_flush(n, HZ >= 1000);
4260 local_irq_disable();
4261 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004262 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004263 }
David S. Millerfed17f32008-01-07 21:00:40 -08004264 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004265
4266 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004267 }
4268out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004269 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004270
Chris Leechdb217332006-06-17 21:24:58 -07004271#ifdef CONFIG_NET_DMA
4272 /*
4273 * There may not be any more sk_buffs coming right now, so push
4274 * any pending DMA copies to hardware
4275 */
Dan Williams2ba05622009-01-06 11:38:14 -07004276 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004277#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004278
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 return;
4280
4281softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004282 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4284 goto out;
4285}
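/*
 * Budget arithmetic (illustrative): the budget check runs before each
 * poll, so with the default netdev_budget of 300 and a per-device
 * weight of 64 one net_rx_action() run can make five full-weight
 * ->poll() calls (300 -> 236 -> 172 -> 108 -> 44 -> -20) before
 * punting the remainder to a rescheduled softirq, unless the 2-jiffy
 * time limit fires first.
 */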
4286
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004287static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288
4289/**
4290 * register_gifconf - register a SIOCGIF handler
4291 * @family: Address family
4292 * @gifconf: Function handler
4293 *
 4294 * Register protocol-dependent address dumping routines. The handler
4295 * that is passed must not be freed or reused until it has been replaced
4296 * by another handler.
4297 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004298int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299{
4300 if (family >= NPROTO)
4301 return -EINVAL;
4302 gifconf_list[family] = gifconf;
4303 return 0;
4304}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004305EXPORT_SYMBOL(register_gifconf);
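/*
 * Usage sketch (illustrative): address families register their dumper
 * at init time; IPv4, for instance, does the equivalent of
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * after which SIOCGIFCONF reports each device's IPv4 addresses through
 * that handler.
 */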
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306
4307
4308/*
4309 * Map an interface index to its name (SIOCGIFNAME)
4310 */
4311
4312/*
4313 * We need this ioctl for efficient implementation of the
4314 * if_indextoname() function required by the IPv6 API. Without
4315 * it, we would have to search all the interfaces to find a
4316 * match. --pb
4317 */
4318
Eric W. Biederman881d9662007-09-17 11:56:21 -07004319static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320{
4321 struct net_device *dev;
4322 struct ifreq ifr;
Brian Haleyc91f6df2012-11-26 05:21:08 +00004323 unsigned seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324
4325 /*
4326 * Fetch the caller's info block.
4327 */
4328
4329 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4330 return -EFAULT;
4331
Brian Haleyc91f6df2012-11-26 05:21:08 +00004332retry:
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004333 seq = read_seqcount_begin(&devnet_rename_seq);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004334 rcu_read_lock();
4335 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004337 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338 return -ENODEV;
4339 }
4340
4341 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004342 rcu_read_unlock();
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004343 if (read_seqcount_retry(&devnet_rename_seq, seq))
Brian Haleyc91f6df2012-11-26 05:21:08 +00004344 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345
4346 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4347 return -EFAULT;
4348 return 0;
4349}
4350
4351/*
4352 * Perform a SIOCGIFCONF call. This structure will change
4353 * size eventually, and there is nothing I can do about it.
4354 * Thus we will need a 'compatibility mode'.
4355 */
4356
Eric W. Biederman881d9662007-09-17 11:56:21 -07004357static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358{
4359 struct ifconf ifc;
4360 struct net_device *dev;
4361 char __user *pos;
4362 int len;
4363 int total;
4364 int i;
4365
4366 /*
4367 * Fetch the caller's info block.
4368 */
4369
4370 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4371 return -EFAULT;
4372
4373 pos = ifc.ifc_buf;
4374 len = ifc.ifc_len;
4375
4376 /*
4377 * Loop over the interfaces, and write an info block for each.
4378 */
4379
4380 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004381 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004382 for (i = 0; i < NPROTO; i++) {
4383 if (gifconf_list[i]) {
4384 int done;
4385 if (!pos)
4386 done = gifconf_list[i](dev, NULL, 0);
4387 else
4388 done = gifconf_list[i](dev, pos + total,
4389 len - total);
4390 if (done < 0)
4391 return -EFAULT;
4392 total += done;
4393 }
4394 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004395 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396
4397 /*
4398 * All done. Write the updated control block back to the caller.
4399 */
4400 ifc.ifc_len = total;
4401
4402 /*
4403 * Both BSD and Solaris return 0 here, so we do too.
4404 */
4405 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4406}
4407
4408#ifdef CONFIG_PROC_FS
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004409
Eric Dumazet2def16a2012-04-02 22:33:02 +00004410#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004411
4412#define get_bucket(x) ((x) >> BUCKET_SPACE)
4413#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4414#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4415
Eric Dumazet2def16a2012-04-02 22:33:02 +00004416static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004417{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004418 struct net *net = seq_file_net(seq);
4419 struct net_device *dev;
4420 struct hlist_node *p;
4421 struct hlist_head *h;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004422 unsigned int count = 0, offset = get_offset(*pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004423
Eric Dumazet2def16a2012-04-02 22:33:02 +00004424 h = &net->dev_name_head[get_bucket(*pos)];
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004425 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004426 if (++count == offset)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004427 return dev;
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004428 }
4429
4430 return NULL;
4431}
4432
Eric Dumazet2def16a2012-04-02 22:33:02 +00004433static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004434{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004435 struct net_device *dev;
4436 unsigned int bucket;
4437
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004438 do {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004439 dev = dev_from_same_bucket(seq, pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004440 if (dev)
4441 return dev;
4442
Eric Dumazet2def16a2012-04-02 22:33:02 +00004443 bucket = get_bucket(*pos) + 1;
4444 *pos = set_bucket_offset(bucket, 1);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004445 } while (bucket < NETDEV_HASHENTRIES);
4446
4447 return NULL;
4448}
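/*
 * Worked example for the *pos encoding above (illustrative): with
 * NETDEV_HASHBITS == 8, BUCKET_SPACE is 32 - 8 - 1 = 23, so
 * *pos == (5 << 23 | 12) means "the 12th device in hash bucket 5".
 * Offsets are 1-based; *pos == 0 is reserved for SEQ_START_TOKEN in
 * dev_seq_start() below.
 */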
4449
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450/*
4451 * This is invoked by the /proc filesystem handler to display a device
4452 * in detail.
4453 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004455 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004457 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07004458 if (!*pos)
4459 return SEQ_START_TOKEN;
4460
Eric Dumazet2def16a2012-04-02 22:33:02 +00004461 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004462 return NULL;
Pavel Emelianov7562f872007-05-03 15:13:45 -07004463
Eric Dumazet2def16a2012-04-02 22:33:02 +00004464 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465}
4466
4467void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4468{
4469 ++*pos;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004470 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471}
4472
4473void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004474 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004476 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477}
4478
4479static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4480{
Eric Dumazet28172732010-07-07 14:58:56 -07004481 struct rtnl_link_stats64 temp;
4482 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00004484 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4485 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
Rusty Russell5a1b5892007-04-28 21:04:03 -07004486 dev->name, stats->rx_bytes, stats->rx_packets,
4487 stats->rx_errors,
4488 stats->rx_dropped + stats->rx_missed_errors,
4489 stats->rx_fifo_errors,
4490 stats->rx_length_errors + stats->rx_over_errors +
4491 stats->rx_crc_errors + stats->rx_frame_errors,
4492 stats->rx_compressed, stats->multicast,
4493 stats->tx_bytes, stats->tx_packets,
4494 stats->tx_errors, stats->tx_dropped,
4495 stats->tx_fifo_errors, stats->collisions,
4496 stats->tx_carrier_errors +
4497 stats->tx_aborted_errors +
4498 stats->tx_window_errors +
4499 stats->tx_heartbeat_errors,
4500 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501}
4502
4503/*
 4504 * Called from the PROCfs module. This now uses the new arbitrary-sized
4505 * /proc/net interface to create /proc/net/dev
4506 */
4507static int dev_seq_show(struct seq_file *seq, void *v)
4508{
4509 if (v == SEQ_START_TOKEN)
4510 seq_puts(seq, "Inter-| Receive "
4511 " | Transmit\n"
4512 " face |bytes packets errs drop fifo frame "
4513 "compressed multicast|bytes packets errs "
4514 "drop fifo colls carrier compressed\n");
4515 else
4516 dev_seq_printf_stats(seq, v);
4517 return 0;
4518}
4519
Changli Gaodee42872010-05-02 05:42:16 +00004520static struct softnet_data *softnet_get_online(loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004521{
Changli Gaodee42872010-05-02 05:42:16 +00004522 struct softnet_data *sd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004524 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004525 if (cpu_online(*pos)) {
Changli Gaodee42872010-05-02 05:42:16 +00004526 sd = &per_cpu(softnet_data, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527 break;
4528 } else
4529 ++*pos;
Changli Gaodee42872010-05-02 05:42:16 +00004530 return sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531}
4532
4533static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4534{
4535 return softnet_get_online(pos);
4536}
4537
4538static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4539{
4540 ++*pos;
4541 return softnet_get_online(pos);
4542}
4543
4544static void softnet_seq_stop(struct seq_file *seq, void *v)
4545{
4546}
4547
4548static int softnet_seq_show(struct seq_file *seq, void *v)
4549{
Changli Gaodee42872010-05-02 05:42:16 +00004550 struct softnet_data *sd = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551
Tom Herbert0a9627f2010-03-16 08:03:29 +00004552 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Changli Gaodee42872010-05-02 05:42:16 +00004553 sd->processed, sd->dropped, sd->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07004554 0, 0, 0, 0, /* was fastroute */
Changli Gaodee42872010-05-02 05:42:16 +00004555 sd->cpu_collision, sd->received_rps);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556 return 0;
4557}
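/*
 * Column map for /proc/net/softnet_stat (one row per online cpu, all
 * fields printed in hex): processed, dropped, time_squeeze, one unused
 * zero, the four zeros kept where the fastroute statistics used to
 * live, cpu_collision and received_rps.
 */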
4558
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004559static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560 .start = dev_seq_start,
4561 .next = dev_seq_next,
4562 .stop = dev_seq_stop,
4563 .show = dev_seq_show,
4564};
4565
4566static int dev_seq_open(struct inode *inode, struct file *file)
4567{
Denis V. Luneve372c412007-11-19 22:31:54 -08004568 return seq_open_net(inode, file, &dev_seq_ops,
Eric Dumazet2def16a2012-04-02 22:33:02 +00004569 sizeof(struct seq_net_private));
Anton Blanchard5cac98d2011-11-27 21:14:46 +00004570}
4571
Arjan van de Ven9a321442007-02-12 00:55:35 -08004572static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573 .owner = THIS_MODULE,
4574 .open = dev_seq_open,
4575 .read = seq_read,
4576 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08004577 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578};
4579
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004580static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581 .start = softnet_seq_start,
4582 .next = softnet_seq_next,
4583 .stop = softnet_seq_stop,
4584 .show = softnet_seq_show,
4585};
4586
4587static int softnet_seq_open(struct inode *inode, struct file *file)
4588{
4589 return seq_open(file, &softnet_seq_ops);
4590}
4591
Arjan van de Ven9a321442007-02-12 00:55:35 -08004592static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593 .owner = THIS_MODULE,
4594 .open = softnet_seq_open,
4595 .read = seq_read,
4596 .llseek = seq_lseek,
4597 .release = seq_release,
4598};
4599
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004600static void *ptype_get_idx(loff_t pos)
4601{
4602 struct packet_type *pt = NULL;
4603 loff_t i = 0;
4604 int t;
4605
4606 list_for_each_entry_rcu(pt, &ptype_all, list) {
4607 if (i == pos)
4608 return pt;
4609 ++i;
4610 }
4611
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004612 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004613 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4614 if (i == pos)
4615 return pt;
4616 ++i;
4617 }
4618 }
4619 return NULL;
4620}
4621
4622static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004623 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004624{
4625 rcu_read_lock();
4626 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4627}
4628
4629static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4630{
4631 struct packet_type *pt;
4632 struct list_head *nxt;
4633 int hash;
4634
4635 ++*pos;
4636 if (v == SEQ_START_TOKEN)
4637 return ptype_get_idx(0);
4638
4639 pt = v;
4640 nxt = pt->list.next;
4641 if (pt->type == htons(ETH_P_ALL)) {
4642 if (nxt != &ptype_all)
4643 goto found;
4644 hash = 0;
4645 nxt = ptype_base[0].next;
4646 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004647 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004648
4649 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004650 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004651 return NULL;
4652 nxt = ptype_base[hash].next;
4653 }
4654found:
4655 return list_entry(nxt, struct packet_type, list);
4656}
4657
4658static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004659 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004660{
4661 rcu_read_unlock();
4662}
4663
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004664static int ptype_seq_show(struct seq_file *seq, void *v)
4665{
4666 struct packet_type *pt = v;
4667
4668 if (v == SEQ_START_TOKEN)
4669 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004670 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004671 if (pt->type == htons(ETH_P_ALL))
4672 seq_puts(seq, "ALL ");
4673 else
4674 seq_printf(seq, "%04x", ntohs(pt->type));
4675
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08004676 seq_printf(seq, " %-8s %pF\n",
4677 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004678 }
4679
4680 return 0;
4681}
4682
4683static const struct seq_operations ptype_seq_ops = {
4684 .start = ptype_seq_start,
4685 .next = ptype_seq_next,
4686 .stop = ptype_seq_stop,
4687 .show = ptype_seq_show,
4688};
4689
4690static int ptype_seq_open(struct inode *inode, struct file *file)
4691{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004692 return seq_open_net(inode, file, &ptype_seq_ops,
4693 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004694}
4695
4696static const struct file_operations ptype_seq_fops = {
4697 .owner = THIS_MODULE,
4698 .open = ptype_seq_open,
4699 .read = seq_read,
4700 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004701 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004702};
4703
4704
Pavel Emelyanov46650792007-10-08 20:38:39 -07004705static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706{
4707 int rc = -ENOMEM;
4708
Eric W. Biederman881d9662007-09-17 11:56:21 -07004709 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004710 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004711 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004713 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004714 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004715
Eric W. Biederman881d9662007-09-17 11:56:21 -07004716 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004717 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004718 rc = 0;
4719out:
4720 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004721out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004722 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004724 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004726 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004727 goto out;
4728}
Eric W. Biederman881d9662007-09-17 11:56:21 -07004729
Pavel Emelyanov46650792007-10-08 20:38:39 -07004730static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004731{
4732 wext_proc_exit(net);
4733
4734 proc_net_remove(net, "ptype");
4735 proc_net_remove(net, "softnet_stat");
4736 proc_net_remove(net, "dev");
4737}
4738
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004739static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004740 .init = dev_proc_net_init,
4741 .exit = dev_proc_net_exit,
4742};
4743
4744static int __init dev_proc_init(void)
4745{
4746 return register_pernet_subsys(&dev_proc_ops);
4747}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748#else
4749#define dev_proc_init() 0
4750#endif /* CONFIG_PROC_FS */
4751
4752
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004753struct netdev_upper {
4754 struct net_device *dev;
4755 bool master;
4756 struct list_head list;
4757 struct rcu_head rcu;
4758 struct list_head search_list;
4759};
4760
4761static void __append_search_uppers(struct list_head *search_list,
4762 struct net_device *dev)
4763{
4764 struct netdev_upper *upper;
4765
4766 list_for_each_entry(upper, &dev->upper_dev_list, list) {
 4767 /* check that this upper is not already in the search list */
4768 if (list_empty(&upper->search_list))
4769 list_add_tail(&upper->search_list, search_list);
4770 }
4771}
4772
4773static bool __netdev_search_upper_dev(struct net_device *dev,
4774 struct net_device *upper_dev)
4775{
4776 LIST_HEAD(search_list);
4777 struct netdev_upper *upper;
4778 struct netdev_upper *tmp;
4779 bool ret = false;
4780
4781 __append_search_uppers(&search_list, dev);
4782 list_for_each_entry(upper, &search_list, search_list) {
4783 if (upper->dev == upper_dev) {
4784 ret = true;
4785 break;
4786 }
4787 __append_search_uppers(&search_list, upper->dev);
4788 }
4789 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4790 INIT_LIST_HEAD(&upper->search_list);
4791 return ret;
4792}
4793
4794static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4795 struct net_device *upper_dev)
4796{
4797 struct netdev_upper *upper;
4798
4799 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4800 if (upper->dev == upper_dev)
4801 return upper;
4802 }
4803 return NULL;
4804}
4805
4806/**
4807 * netdev_has_upper_dev - Check if device is linked to an upper device
4808 * @dev: device
4809 * @upper_dev: upper device to check
4810 *
 4811 * Find out if a device is linked to the specified upper device and return true
 4812 * if it is. Note that this checks only the immediate upper device,
 4813 * not the complete stack of devices. The caller must hold the RTNL lock.
4814 */
4815bool netdev_has_upper_dev(struct net_device *dev,
4816 struct net_device *upper_dev)
4817{
4818 ASSERT_RTNL();
4819
4820 return __netdev_find_upper(dev, upper_dev);
4821}
4822EXPORT_SYMBOL(netdev_has_upper_dev);
4823
4824/**
4825 * netdev_has_any_upper_dev - Check if device is linked to some device
4826 * @dev: device
4827 *
 4828 * Find out if a device is linked to an upper device and return true
 4829 * if it is. The caller must hold the RTNL lock.
4830 */
4831bool netdev_has_any_upper_dev(struct net_device *dev)
4832{
4833 ASSERT_RTNL();
4834
4835 return !list_empty(&dev->upper_dev_list);
4836}
4837EXPORT_SYMBOL(netdev_has_any_upper_dev);
4838
4839/**
4840 * netdev_master_upper_dev_get - Get master upper device
4841 * @dev: device
4842 *
 4843 * Find a master upper device and return a pointer to it, or NULL if
 4844 * there is none. The caller must hold the RTNL lock.
4845 */
4846struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4847{
4848 struct netdev_upper *upper;
4849
4850 ASSERT_RTNL();
4851
4852 if (list_empty(&dev->upper_dev_list))
4853 return NULL;
4854
4855 upper = list_first_entry(&dev->upper_dev_list,
4856 struct netdev_upper, list);
4857 if (likely(upper->master))
4858 return upper->dev;
4859 return NULL;
4860}
4861EXPORT_SYMBOL(netdev_master_upper_dev_get);
4862
4863/**
4864 * netdev_master_upper_dev_get_rcu - Get master upper device
4865 * @dev: device
4866 *
 4867 * Find a master upper device and return a pointer to it, or NULL if
 4868 * there is none. The caller must hold the RCU read lock.
4869 */
4870struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4871{
4872 struct netdev_upper *upper;
4873
4874 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4875 struct netdev_upper, list);
4876 if (upper && likely(upper->master))
4877 return upper->dev;
4878 return NULL;
4879}
4880EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4881
4882static int __netdev_upper_dev_link(struct net_device *dev,
4883 struct net_device *upper_dev, bool master)
4884{
4885 struct netdev_upper *upper;
4886
4887 ASSERT_RTNL();
4888
4889 if (dev == upper_dev)
4890 return -EBUSY;
4891
 4892 /* To prevent loops, check that dev is not an upper device of upper_dev. */
4893 if (__netdev_search_upper_dev(upper_dev, dev))
4894 return -EBUSY;
4895
4896 if (__netdev_find_upper(dev, upper_dev))
4897 return -EEXIST;
4898
4899 if (master && netdev_master_upper_dev_get(dev))
4900 return -EBUSY;
4901
4902 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4903 if (!upper)
4904 return -ENOMEM;
4905
4906 upper->dev = upper_dev;
4907 upper->master = master;
4908 INIT_LIST_HEAD(&upper->search_list);
4909
 4910 /* Ensure that the master upper link is always the first item in the list. */
4911 if (master)
4912 list_add_rcu(&upper->list, &dev->upper_dev_list);
4913 else
4914 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4915 dev_hold(upper_dev);
4916
4917 return 0;
4918}
4919
4920/**
4921 * netdev_upper_dev_link - Add a link to the upper device
4922 * @dev: device
4923 * @upper_dev: new upper device
4924 *
 4925 * Adds a link to a device which is upper to this one. The caller must hold
4926 * the RTNL lock. On a failure a negative errno code is returned.
4927 * On success the reference counts are adjusted and the function
4928 * returns zero.
4929 */
4930int netdev_upper_dev_link(struct net_device *dev,
4931 struct net_device *upper_dev)
4932{
4933 return __netdev_upper_dev_link(dev, upper_dev, false);
4934}
4935EXPORT_SYMBOL(netdev_upper_dev_link);
4936
4937/**
4938 * netdev_master_upper_dev_link - Add a master link to the upper device
4939 * @dev: device
4940 * @upper_dev: new upper device
4941 *
 4942 * Adds a link to a device which is upper to this one. In this case, only
4943 * one master upper device can be linked, although other non-master devices
4944 * might be linked as well. The caller must hold the RTNL lock.
4945 * On a failure a negative errno code is returned. On success the reference
4946 * counts are adjusted and the function returns zero.
4947 */
4948int netdev_master_upper_dev_link(struct net_device *dev,
4949 struct net_device *upper_dev)
4950{
4951 return __netdev_upper_dev_link(dev, upper_dev, true);
4952}
4953EXPORT_SYMBOL(netdev_master_upper_dev_link);
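/*
 * Usage sketch (illustrative; bond_dev and slave_dev are hypothetical):
 * a bonding-style master records its topology when enslaving a port
 * and removes it again on release:
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	if (err)
 *		goto unwind;
 *	...
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);
 */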
4954
4955/**
4956 * netdev_upper_dev_unlink - Removes a link to upper device
4957 * @dev: device
 4958 * @upper_dev: upper device to unlink
 4959 *
 4960 * Removes the link to a device which is upper to this one. The caller must hold
4961 * the RTNL lock.
4962 */
4963void netdev_upper_dev_unlink(struct net_device *dev,
4964 struct net_device *upper_dev)
4965{
4966 struct netdev_upper *upper;
4967
4968 ASSERT_RTNL();
4969
4970 upper = __netdev_find_upper(dev, upper_dev);
4971 if (!upper)
4972 return;
4973 list_del_rcu(&upper->list);
4974 dev_put(upper_dev);
4975 kfree_rcu(upper, rcu);
4976}
4977EXPORT_SYMBOL(netdev_upper_dev_unlink);
4978
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004979static void dev_change_rx_flags(struct net_device *dev, int flags)
4980{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004981 const struct net_device_ops *ops = dev->netdev_ops;
4982
4983 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4984 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004985}
4986
Wang Chendad9b332008-06-18 01:48:28 -07004987static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07004988{
Eric Dumazetb536db92011-11-30 21:42:26 +00004989 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004990 kuid_t uid;
4991 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07004992
Patrick McHardy24023452007-07-14 18:51:31 -07004993 ASSERT_RTNL();
4994
Wang Chendad9b332008-06-18 01:48:28 -07004995 dev->flags |= IFF_PROMISC;
4996 dev->promiscuity += inc;
4997 if (dev->promiscuity == 0) {
4998 /*
4999 * Avoid overflow.
 5000 * If inc causes overflow, leave promiscuity untouched and return an error.
5001 */
5002 if (inc < 0)
5003 dev->flags &= ~IFF_PROMISC;
5004 else {
5005 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005006 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5007 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005008 return -EOVERFLOW;
5009 }
5010 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005011 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005012 pr_info("device %s %s promiscuous mode\n",
5013 dev->name,
5014 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005015 if (audit_enabled) {
5016 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005017 audit_log(current->audit_context, GFP_ATOMIC,
5018 AUDIT_ANOM_PROMISCUOUS,
5019 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5020 dev->name, (dev->flags & IFF_PROMISC),
5021 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005022 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005023 from_kuid(&init_user_ns, uid),
5024 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005025 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005026 }
Patrick McHardy24023452007-07-14 18:51:31 -07005027
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005028 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005029 }
Wang Chendad9b332008-06-18 01:48:28 -07005030 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005031}
5032
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033/**
5034 * dev_set_promiscuity - update promiscuity count on a device
5035 * @dev: device
5036 * @inc: modifier
5037 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005038 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 * remains above zero the interface remains promiscuous. Once it hits zero
 5040 * the device reverts to normal filtering operation. A negative inc
5041 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005042 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043 */
Wang Chendad9b332008-06-18 01:48:28 -07005044int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045{
Eric Dumazetb536db92011-11-30 21:42:26 +00005046 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005047 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048
Wang Chendad9b332008-06-18 01:48:28 -07005049 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005050 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005051 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005052 if (dev->flags != old_flags)
5053 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005054 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005056EXPORT_SYMBOL(dev_set_promiscuity);
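/*
 * Usage sketch (illustrative): a packet-capture style user takes one
 * promiscuity reference while sniffing and drops it when done; the
 * rtnl lock must be held, as __dev_set_promiscuity() asserts:
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */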
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057
5058/**
5059 * dev_set_allmulti - update allmulti count on a device
5060 * @dev: device
5061 * @inc: modifier
5062 *
 5063 * Add or remove reception of all multicast frames on a device. While the
 5064 * count in the device remains above zero the interface remains listening
 5065 * to all multicast frames. Once it hits zero the device reverts to normal
5066 * filtering operation. A negative @inc value is used to drop the counter
5067 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07005068 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069 */
5070
Wang Chendad9b332008-06-18 01:48:28 -07005071int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072{
Eric Dumazetb536db92011-11-30 21:42:26 +00005073 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074
Patrick McHardy24023452007-07-14 18:51:31 -07005075 ASSERT_RTNL();
5076
Linus Torvalds1da177e2005-04-16 15:20:36 -07005077 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005078 dev->allmulti += inc;
5079 if (dev->allmulti == 0) {
5080 /*
5081 * Avoid overflow.
 5082 * If inc causes overflow, leave allmulti untouched and return an error.
5083 */
5084 if (inc < 0)
5085 dev->flags &= ~IFF_ALLMULTI;
5086 else {
5087 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005088 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5089 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005090 return -EOVERFLOW;
5091 }
5092 }
Patrick McHardy24023452007-07-14 18:51:31 -07005093 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005094 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005095 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07005096 }
Wang Chendad9b332008-06-18 01:48:28 -07005097 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005098}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005099EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07005100
5101/*
5102 * Upload unicast and multicast address lists to device and
5103 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005104 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005105 * are present.
5106 */
5107void __dev_set_rx_mode(struct net_device *dev)
5108{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005109 const struct net_device_ops *ops = dev->netdev_ops;
5110
Patrick McHardy4417da62007-06-27 01:28:10 -07005111 /* dev_open will call this function so the list will stay sane. */
5112 if (!(dev->flags&IFF_UP))
5113 return;
5114
5115 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005116 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005117
Jiri Pirko01789342011-08-16 06:29:00 +00005118 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005119 /* Unicast address changes may only happen under the rtnl,
5120 * therefore calling __dev_set_promiscuity here is safe.
5121 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005122 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005123 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07005124 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005125 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005126 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07005127 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005128 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005129 }
Jiri Pirko01789342011-08-16 06:29:00 +00005130
5131 if (ops->ndo_set_rx_mode)
5132 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005133}
5134
5135void dev_set_rx_mode(struct net_device *dev)
5136{
David S. Millerb9e40852008-07-15 00:15:08 -07005137 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005138 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005139 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140}
5141
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005142/**
5143 * dev_get_flags - get flags reported to userspace
5144 * @dev: device
5145 *
5146 * Get the combination of flag bits exported through APIs to userspace.
5147 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005148unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149{
Eric Dumazet95c96172012-04-15 05:58:06 +00005150 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151
5152 flags = (dev->flags & ~(IFF_PROMISC |
5153 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005154 IFF_RUNNING |
5155 IFF_LOWER_UP |
5156 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157 (dev->gflags & (IFF_PROMISC |
5158 IFF_ALLMULTI));
5159
Stefan Rompfb00055a2006-03-20 17:09:11 -08005160 if (netif_running(dev)) {
5161 if (netif_oper_up(dev))
5162 flags |= IFF_RUNNING;
5163 if (netif_carrier_ok(dev))
5164 flags |= IFF_LOWER_UP;
5165 if (netif_dormant(dev))
5166 flags |= IFF_DORMANT;
5167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168
5169 return flags;
5170}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005171EXPORT_SYMBOL(dev_get_flags);
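/*
 * A minimal usage sketch (illustrative only): callers typically hold RTNL
 * or RCU so the device cannot disappear while its flags are read.
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	if (flags & IFF_RUNNING)
 *		netdev_info(dev, "operationally up\n");
 */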
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
Patrick McHardybd380812010-02-26 06:34:53 +00005173int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174{
Eric Dumazetb536db92011-11-30 21:42:26 +00005175 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005176 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177
Patrick McHardy24023452007-07-14 18:51:31 -07005178 ASSERT_RTNL();
5179
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180 /*
5181 * Set the flags on our device.
5182 */
5183
5184 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5185 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5186 IFF_AUTOMEDIA)) |
5187 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5188 IFF_ALLMULTI));
5189
5190 /*
5191 * Load in the correct multicast list now the flags have changed.
5192 */
5193
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005194 if ((old_flags ^ flags) & IFF_MULTICAST)
5195 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005196
Patrick McHardy4417da62007-06-27 01:28:10 -07005197 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198
5199 /*
 5200	 * Have we downed the interface? We handle IFF_UP ourselves
5201 * according to user attempts to set it, rather than blindly
5202 * setting it.
5203 */
5204
5205 ret = 0;
5206 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005207 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208
5209 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005210 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 }
5212
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005214 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5215
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216 dev->gflags ^= IFF_PROMISC;
5217 dev_set_promiscuity(dev, inc);
5218 }
5219
5220 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 5221	   is important. Some (broken) drivers set IFF_PROMISC when
 5222	   IFF_ALLMULTI is requested, without asking us and without reporting.
5223 */
5224 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005225 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5226
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227 dev->gflags ^= IFF_ALLMULTI;
5228 dev_set_allmulti(dev, inc);
5229 }
5230
Patrick McHardybd380812010-02-26 06:34:53 +00005231 return ret;
5232}
5233
5234void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
5235{
5236 unsigned int changes = dev->flags ^ old_flags;
5237
5238 if (changes & IFF_UP) {
5239 if (dev->flags & IFF_UP)
5240 call_netdevice_notifiers(NETDEV_UP, dev);
5241 else
5242 call_netdevice_notifiers(NETDEV_DOWN, dev);
5243 }
5244
5245 if (dev->flags & IFF_UP &&
5246 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
5247 call_netdevice_notifiers(NETDEV_CHANGE, dev);
5248}
5249
5250/**
5251 * dev_change_flags - change device settings
5252 * @dev: device
5253 * @flags: device state flags
5254 *
5255 * Change settings on device based state flags. The flags are
5256 * in the userspace exported format.
5257 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005258int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005259{
Eric Dumazetb536db92011-11-30 21:42:26 +00005260 int ret;
5261 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005262
5263 ret = __dev_change_flags(dev, flags);
5264 if (ret < 0)
5265 return ret;
5266
5267 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07005268 if (changes)
5269 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270
Patrick McHardybd380812010-02-26 06:34:53 +00005271 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272 return ret;
5273}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005274EXPORT_SYMBOL(dev_change_flags);
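/*
 * A hedged in-kernel usage sketch: __dev_change_flags() asserts RTNL, so
 * callers outside the ioctl path take it themselves, e.g. to force
 * promiscuous mode:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_PROMISC);
 *	rtnl_unlock();
 */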
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005276/**
5277 * dev_set_mtu - Change maximum transfer unit
5278 * @dev: device
5279 * @new_mtu: new transfer unit
5280 *
5281 * Change the maximum transfer size of the network device.
5282 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005283int dev_set_mtu(struct net_device *dev, int new_mtu)
5284{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005285 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286 int err;
5287
5288 if (new_mtu == dev->mtu)
5289 return 0;
5290
 5291	/* MTU must not be negative. */
5292 if (new_mtu < 0)
5293 return -EINVAL;
5294
5295 if (!netif_device_present(dev))
5296 return -ENODEV;
5297
5298 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005299 if (ops->ndo_change_mtu)
5300 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005301 else
5302 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005303
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005304 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005305 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306 return err;
5307}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005308EXPORT_SYMBOL(dev_set_mtu);
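/*
 * A minimal usage sketch (illustrative only): as with the other setters
 * here, callers are expected to hold RTNL, which the SIOCSIFMTU path
 * provides.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);	// e.g. enabling jumbo frames
 *	rtnl_unlock();
 */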
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005310/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005311 * dev_set_group - Change group this device belongs to
5312 * @dev: device
5313 * @new_group: group this device should belong to
5314 */
5315void dev_set_group(struct net_device *dev, int new_group)
5316{
5317 dev->group = new_group;
5318}
5319EXPORT_SYMBOL(dev_set_group);
5320
5321/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005322 * dev_set_mac_address - Change Media Access Control Address
5323 * @dev: device
5324 * @sa: new address
5325 *
5326 * Change the hardware (MAC) address of the device
5327 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5329{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005330 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331 int err;
5332
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005333 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334 return -EOPNOTSUPP;
5335 if (sa->sa_family != dev->type)
5336 return -EINVAL;
5337 if (!netif_device_present(dev))
5338 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005339 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005340 if (err)
5341 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005342 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005343 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005344 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005345 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005346}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005347EXPORT_SYMBOL(dev_set_mac_address);
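/*
 * A hedged usage sketch: the sockaddr's family must match dev->type,
 * exactly as this function enforces. new_mac is an assumed pointer to
 * dev->addr_len valid bytes.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;	// e.g. ARPHRD_ETHER
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */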
Linus Torvalds1da177e2005-04-16 15:20:36 -07005348
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005349/**
5350 * dev_change_carrier - Change device carrier
5351 * @dev: device
 5352 * @new_carrier: new value
5353 *
5354 * Change device carrier
5355 */
5356int dev_change_carrier(struct net_device *dev, bool new_carrier)
5357{
5358 const struct net_device_ops *ops = dev->netdev_ops;
5359
5360 if (!ops->ndo_change_carrier)
5361 return -EOPNOTSUPP;
5362 if (!netif_device_present(dev))
5363 return -ENODEV;
5364 return ops->ndo_change_carrier(dev, new_carrier);
5365}
5366EXPORT_SYMBOL(dev_change_carrier);
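/*
 * A hypothetical sketch of the hook this helper dispatches to: a software
 * device that lets userspace drive its carrier could implement it roughly
 * as follows (the example_ name is invented for illustration).
 *
 *	static int example_change_carrier(struct net_device *dev,
 *					  bool new_carrier)
 *	{
 *		if (new_carrier)
 *			netif_carrier_on(dev);
 *		else
 *			netif_carrier_off(dev);
 *		return 0;
 *	}
 */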
5367
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00005369 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07005371static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005372{
5373 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00005374 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375
5376 if (!dev)
5377 return -ENODEV;
5378
5379 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005380 case SIOCGIFFLAGS: /* Get interface flags */
5381 ifr->ifr_flags = (short) dev_get_flags(dev);
5382 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005383
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005384 case SIOCGIFMETRIC: /* Get the metric on the interface
5385 (currently unused) */
5386 ifr->ifr_metric = 0;
5387 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005388
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005389 case SIOCGIFMTU: /* Get the MTU of a device */
5390 ifr->ifr_mtu = dev->mtu;
5391 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005393 case SIOCGIFHWADDR:
5394 if (!dev->addr_len)
5395 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5396 else
5397 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5398 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5399 ifr->ifr_hwaddr.sa_family = dev->type;
5400 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005401
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005402 case SIOCGIFSLAVE:
5403 err = -EINVAL;
5404 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005405
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005406 case SIOCGIFMAP:
5407 ifr->ifr_map.mem_start = dev->mem_start;
5408 ifr->ifr_map.mem_end = dev->mem_end;
5409 ifr->ifr_map.base_addr = dev->base_addr;
5410 ifr->ifr_map.irq = dev->irq;
5411 ifr->ifr_map.dma = dev->dma;
5412 ifr->ifr_map.port = dev->if_port;
5413 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005414
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005415 case SIOCGIFINDEX:
5416 ifr->ifr_ifindex = dev->ifindex;
5417 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005418
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005419 case SIOCGIFTXQLEN:
5420 ifr->ifr_qlen = dev->tx_queue_len;
5421 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005422
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005423 default:
5424 /* dev_ioctl() should ensure this case
5425 * is never reached
5426 */
5427 WARN_ON(1);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005428 err = -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005429 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005430
5431 }
5432 return err;
5433}
5434
5435/*
5436 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5437 */
5438static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5439{
5440 int err;
5441 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005442 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005443
5444 if (!dev)
5445 return -ENODEV;
5446
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005447 ops = dev->netdev_ops;
5448
Jeff Garzik14e3e072007-10-08 00:06:32 -07005449 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005450 case SIOCSIFFLAGS: /* Set interface flags */
5451 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005452
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005453 case SIOCSIFMETRIC: /* Set the metric on the interface
5454 (currently unused) */
5455 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005456
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005457 case SIOCSIFMTU: /* Set the MTU of a device */
5458 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005459
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005460 case SIOCSIFHWADDR:
5461 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005463 case SIOCSIFHWBROADCAST:
5464 if (ifr->ifr_hwaddr.sa_family != dev->type)
5465 return -EINVAL;
5466 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5467 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5468 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5469 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005471 case SIOCSIFMAP:
5472 if (ops->ndo_set_config) {
5473 if (!netif_device_present(dev))
5474 return -ENODEV;
5475 return ops->ndo_set_config(dev, &ifr->ifr_map);
5476 }
5477 return -EOPNOTSUPP;
5478
5479 case SIOCADDMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005480 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005481 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5482 return -EINVAL;
5483 if (!netif_device_present(dev))
5484 return -ENODEV;
Jiri Pirko22bedad2010-04-01 21:22:57 +00005485 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005486
5487 case SIOCDELMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005488 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005489 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5490 return -EINVAL;
5491 if (!netif_device_present(dev))
5492 return -ENODEV;
Jiri Pirko22bedad2010-04-01 21:22:57 +00005493 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005494
5495 case SIOCSIFTXQLEN:
5496 if (ifr->ifr_qlen < 0)
5497 return -EINVAL;
5498 dev->tx_queue_len = ifr->ifr_qlen;
5499 return 0;
5500
5501 case SIOCSIFNAME:
5502 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5503 return dev_change_name(dev, ifr->ifr_newname);
5504
Richard Cochran4dc360c2011-10-19 17:00:35 -04005505 case SIOCSHWTSTAMP:
5506 err = net_hwtstamp_validate(ifr);
5507 if (err)
5508 return err;
5509 /* fall through */
5510
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005511 /*
5512 * Unknown or private ioctl
5513 */
5514 default:
5515 if ((cmd >= SIOCDEVPRIVATE &&
5516 cmd <= SIOCDEVPRIVATE + 15) ||
5517 cmd == SIOCBONDENSLAVE ||
5518 cmd == SIOCBONDRELEASE ||
5519 cmd == SIOCBONDSETHWADDR ||
5520 cmd == SIOCBONDSLAVEINFOQUERY ||
5521 cmd == SIOCBONDINFOQUERY ||
5522 cmd == SIOCBONDCHANGEACTIVE ||
5523 cmd == SIOCGMIIPHY ||
5524 cmd == SIOCGMIIREG ||
5525 cmd == SIOCSMIIREG ||
5526 cmd == SIOCBRADDIF ||
5527 cmd == SIOCBRDELIF ||
5528 cmd == SIOCSHWTSTAMP ||
5529 cmd == SIOCWANDEV) {
5530 err = -EOPNOTSUPP;
5531 if (ops->ndo_do_ioctl) {
5532 if (netif_device_present(dev))
5533 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5534 else
5535 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005537 } else
5538 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005539
5540 }
5541 return err;
5542}
5543
5544/*
5545 * This function handles all "interface"-type I/O control requests. The actual
5546 * 'doing' part of this is dev_ifsioc above.
5547 */
5548
5549/**
5550 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005551 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552 * @cmd: command to issue
5553 * @arg: pointer to a struct ifreq in user space
5554 *
5555 * Issue ioctl functions to devices. This is normally called by the
5556 * user space syscall interfaces but can sometimes be useful for
 5557 *	other purposes. The return value is the value returned to the
 5558 *	syscall if positive, or a negative errno code on error.
5559 */
5560
Eric W. Biederman881d9662007-09-17 11:56:21 -07005561int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005562{
5563 struct ifreq ifr;
5564 int ret;
5565 char *colon;
5566
 5567	/* One special case: SIOCGIFCONF takes an ifconf argument
 5568	   and requires a shared lock, because it sleeps writing
5569 to user space.
5570 */
5571
5572 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005573 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005574 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005575 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576 return ret;
5577 }
5578 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005579 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005580
5581 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5582 return -EFAULT;
5583
5584 ifr.ifr_name[IFNAMSIZ-1] = 0;
5585
5586 colon = strchr(ifr.ifr_name, ':');
5587 if (colon)
5588 *colon = 0;
5589
5590 /*
5591 * See which interface the caller is talking about.
5592 */
5593
5594 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005595 /*
5596 * These ioctl calls:
5597 * - can be done by all.
5598 * - atomic and do not require locking.
5599 * - return a value
5600 */
5601 case SIOCGIFFLAGS:
5602 case SIOCGIFMETRIC:
5603 case SIOCGIFMTU:
5604 case SIOCGIFHWADDR:
5605 case SIOCGIFSLAVE:
5606 case SIOCGIFMAP:
5607 case SIOCGIFINDEX:
5608 case SIOCGIFTXQLEN:
5609 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005610 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005611 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005612 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005613 if (!ret) {
5614 if (colon)
5615 *colon = ':';
5616 if (copy_to_user(arg, &ifr,
5617 sizeof(struct ifreq)))
5618 ret = -EFAULT;
5619 }
5620 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005621
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005622 case SIOCETHTOOL:
5623 dev_load(net, ifr.ifr_name);
5624 rtnl_lock();
5625 ret = dev_ethtool(net, &ifr);
5626 rtnl_unlock();
5627 if (!ret) {
5628 if (colon)
5629 *colon = ':';
5630 if (copy_to_user(arg, &ifr,
5631 sizeof(struct ifreq)))
5632 ret = -EFAULT;
5633 }
5634 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005635
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005636 /*
5637 * These ioctl calls:
5638 * - require superuser power.
5639 * - require strict serialization.
5640 * - return a value
5641 */
5642 case SIOCGMIIPHY:
5643 case SIOCGMIIREG:
5644 case SIOCSIFNAME:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005645 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005646 return -EPERM;
5647 dev_load(net, ifr.ifr_name);
5648 rtnl_lock();
5649 ret = dev_ifsioc(net, &ifr, cmd);
5650 rtnl_unlock();
5651 if (!ret) {
5652 if (colon)
5653 *colon = ':';
5654 if (copy_to_user(arg, &ifr,
5655 sizeof(struct ifreq)))
5656 ret = -EFAULT;
5657 }
5658 return ret;
5659
5660 /*
5661 * These ioctl calls:
5662 * - require superuser power.
5663 * - require strict serialization.
5664 * - do not return a value
5665 */
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005666 case SIOCSIFMAP:
5667 case SIOCSIFTXQLEN:
5668 if (!capable(CAP_NET_ADMIN))
5669 return -EPERM;
5670 /* fall through */
5671 /*
5672 * These ioctl calls:
5673 * - require local superuser power.
5674 * - require strict serialization.
5675 * - do not return a value
5676 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005677 case SIOCSIFFLAGS:
5678 case SIOCSIFMETRIC:
5679 case SIOCSIFMTU:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005680 case SIOCSIFHWADDR:
5681 case SIOCSIFSLAVE:
5682 case SIOCADDMULTI:
5683 case SIOCDELMULTI:
5684 case SIOCSIFHWBROADCAST:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005685 case SIOCSMIIREG:
5686 case SIOCBONDENSLAVE:
5687 case SIOCBONDRELEASE:
5688 case SIOCBONDSETHWADDR:
5689 case SIOCBONDCHANGEACTIVE:
5690 case SIOCBRADDIF:
5691 case SIOCBRDELIF:
5692 case SIOCSHWTSTAMP:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005693 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005694 return -EPERM;
5695 /* fall through */
5696 case SIOCBONDSLAVEINFOQUERY:
5697 case SIOCBONDINFOQUERY:
5698 dev_load(net, ifr.ifr_name);
5699 rtnl_lock();
5700 ret = dev_ifsioc(net, &ifr, cmd);
5701 rtnl_unlock();
5702 return ret;
5703
5704 case SIOCGIFMEM:
5705 /* Get the per device memory space. We can add this but
5706 * currently do not support it */
5707 case SIOCSIFMEM:
5708 /* Set the per device memory buffer space.
5709 * Not applicable in our case */
5710 case SIOCSIFLINK:
Lifeng Sun41c31f32011-04-27 22:04:51 +00005711 return -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005712
5713 /*
5714 * Unknown or private ioctl.
5715 */
5716 default:
5717 if (cmd == SIOCWANDEV ||
5718 (cmd >= SIOCDEVPRIVATE &&
5719 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005720 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005722 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005723 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005724 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005725 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005726 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005727 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005728 }
5729 /* Take care of Wireless Extensions */
5730 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5731 return wext_handle_ioctl(net, &ifr, cmd, arg);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005732 return -ENOTTY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005733 }
5734}
5735
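/*
 * An illustrative userspace view of the dispatch above: any socket fd can
 * carry these requests, e.g. querying an MTU (error handling elided).
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu = %d\n", ifr.ifr_mtu);
 */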
5736
5737/**
5738 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005739 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005740 *
5741 * Returns a suitable unique value for a new device interface
5742 * number. The caller must hold the rtnl semaphore or the
5743 * dev_base_lock to be sure it remains unique.
5744 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005745static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005746{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005747 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005748 for (;;) {
5749 if (++ifindex <= 0)
5750 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005751 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005752 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005753 }
5754}
5755
Linus Torvalds1da177e2005-04-16 15:20:36 -07005756/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005757static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005758
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005759static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005760{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005761 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005762}
5763
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005764static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005765{
Krishna Kumare93737b2009-12-08 22:26:02 +00005766 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005767
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005768 BUG_ON(dev_boot_phase);
5769 ASSERT_RTNL();
5770
Krishna Kumare93737b2009-12-08 22:26:02 +00005771 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005772		/* Some devices call this without ever having been
Krishna Kumare93737b2009-12-08 22:26:02 +00005773		 * registered, to unwind a failed initialization. Remove
 5774		 * those devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005775 */
5776 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005777 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5778 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005779
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005780 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005781 list_del(&dev->unreg_list);
5782 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005783 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005784 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005785 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005786 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005787
Octavian Purdila44345722010-12-13 12:44:07 +00005788 /* If device is running, close it first. */
5789 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005790
Octavian Purdila44345722010-12-13 12:44:07 +00005791 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005792 /* And unlink it from device chain. */
5793 unlist_netdevice(dev);
5794
5795 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005796 }
5797
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005798 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005799
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005800 list_for_each_entry(dev, head, unreg_list) {
5801 /* Shutdown queueing discipline. */
5802 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005803
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005804
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005805		/* Notify protocols that we are about to destroy
 5806		   this device. They should clean up all their state.
5807 */
5808 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5809
Patrick McHardya2835762010-02-26 06:34:51 +00005810 if (!dev->rtnl_link_ops ||
5811 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5812 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5813
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005814 /*
5815 * Flush the unicast and multicast chains
5816 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005817 dev_uc_flush(dev);
Jiri Pirko22bedad2010-04-01 21:22:57 +00005818 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005819
5820 if (dev->netdev_ops->ndo_uninit)
5821 dev->netdev_ops->ndo_uninit(dev);
5822
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005823 /* Notifier chain MUST detach us all upper devices. */
5824 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005825
5826 /* Remove entries from kobject tree */
5827 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00005828#ifdef CONFIG_XPS
5829 /* Remove XPS queueing entries */
5830 netif_reset_xps_queues_gt(dev, 0);
5831#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005832 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005833
Eric W. Biederman850a5452011-10-13 22:25:23 +00005834 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005835
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005836 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005837 dev_put(dev);
5838}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005839
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005840static void rollback_registered(struct net_device *dev)
5841{
5842 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005843
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005844 list_add(&dev->unreg_list, &single);
5845 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005846 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005847}
5848
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005849static netdev_features_t netdev_fix_features(struct net_device *dev,
5850 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005851{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005852 /* Fix illegal checksum combinations */
5853 if ((features & NETIF_F_HW_CSUM) &&
5854 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005855 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005856 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5857 }
5858
Herbert Xub63365a2008-10-23 01:11:29 -07005859 /* Fix illegal SG+CSUM combinations. */
5860 if ((features & NETIF_F_SG) &&
5861 !(features & NETIF_F_ALL_CSUM)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005862 netdev_dbg(dev,
5863 "Dropping NETIF_F_SG since no checksum feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005864 features &= ~NETIF_F_SG;
5865 }
5866
5867 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005868 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005869 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005870 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005871 }
5872
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005873 /* TSO ECN requires that TSO is present as well. */
5874 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5875 features &= ~NETIF_F_TSO_ECN;
5876
Michał Mirosław212b5732011-02-15 16:59:16 +00005877 /* Software GSO depends on SG. */
5878 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005879 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005880 features &= ~NETIF_F_GSO;
5881 }
5882
Michał Mirosławacd11302011-01-24 15:45:15 -08005883 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005884 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005885 /* maybe split UFO into V4 and V6? */
5886 if (!((features & NETIF_F_GEN_CSUM) ||
5887 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5888 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005889 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005890 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005891 features &= ~NETIF_F_UFO;
5892 }
5893
5894 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005895 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005896 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005897 features &= ~NETIF_F_UFO;
5898 }
5899 }
5900
5901 return features;
5902}
Herbert Xub63365a2008-10-23 01:11:29 -07005903
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005904int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005905{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005906 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005907 int err = 0;
5908
Michał Mirosław87267482011-04-12 09:56:38 +00005909 ASSERT_RTNL();
5910
Michał Mirosław5455c692011-02-15 16:59:17 +00005911 features = netdev_get_wanted_features(dev);
5912
5913 if (dev->netdev_ops->ndo_fix_features)
5914 features = dev->netdev_ops->ndo_fix_features(dev, features);
5915
5916 /* driver might be less strict about feature dependencies */
5917 features = netdev_fix_features(dev, features);
5918
5919 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005920 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005921
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005922 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5923 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005924
5925 if (dev->netdev_ops->ndo_set_features)
5926 err = dev->netdev_ops->ndo_set_features(dev, features);
5927
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005928 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005929 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005930 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5931 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005932 return -1;
5933 }
5934
5935 if (!err)
5936 dev->features = features;
5937
5938 return 1;
5939}
5940
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005941/**
5942 * netdev_update_features - recalculate device features
5943 * @dev: the device to check
5944 *
5945 * Recalculate dev->features set and send notifications if it
5946 * has changed. Should be called after driver or hardware dependent
5947 * conditions might have changed that influence the features.
5948 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005949void netdev_update_features(struct net_device *dev)
5950{
5951 if (__netdev_update_features(dev))
5952 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005953}
5954EXPORT_SYMBOL(netdev_update_features);
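/*
 * A hedged driver-side sketch: after learning that a hardware offload has
 * become unusable, a driver would mask it out and recompute under RTNL.
 *
 *	rtnl_lock();
 *	dev->hw_features &= ~NETIF_F_IP_CSUM;
 *	netdev_update_features(dev);	// refilters features, notifies on change
 *	rtnl_unlock();
 */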
5955
Linus Torvalds1da177e2005-04-16 15:20:36 -07005956/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005957 * netdev_change_features - recalculate device features
5958 * @dev: the device to check
5959 *
5960 * Recalculate dev->features set and send notifications even
5961 * if they have not changed. Should be called instead of
5962 * netdev_update_features() if also dev->vlan_features might
5963 * have changed to allow the changes to be propagated to stacked
5964 * VLAN devices.
5965 */
5966void netdev_change_features(struct net_device *dev)
5967{
5968 __netdev_update_features(dev);
5969 netdev_features_change(dev);
5970}
5971EXPORT_SYMBOL(netdev_change_features);
5972
5973/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005974 * netif_stacked_transfer_operstate - transfer operstate
5975 * @rootdev: the root or lower level device to transfer state from
5976 * @dev: the device to transfer operstate to
5977 *
5978 * Transfer operational state from root to device. This is normally
5979 * called when a stacking relationship exists between the root
 5980 * device and the device (a leaf device).
5981 */
5982void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5983 struct net_device *dev)
5984{
5985 if (rootdev->operstate == IF_OPER_DORMANT)
5986 netif_dormant_on(dev);
5987 else
5988 netif_dormant_off(dev);
5989
5990 if (netif_carrier_ok(rootdev)) {
5991 if (!netif_carrier_ok(dev))
5992 netif_carrier_on(dev);
5993 } else {
5994 if (netif_carrier_ok(dev))
5995 netif_carrier_off(dev);
5996 }
5997}
5998EXPORT_SYMBOL(netif_stacked_transfer_operstate);
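/*
 * Illustrative call site: stacked drivers such as VLAN typically invoke
 * this from their notifier handling when the lower device changes state,
 * e.g. (the names are placeholders):
 *
 *	netif_stacked_transfer_operstate(lowerdev, vlandev);
 */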
5999
Tom Herbertbf264142010-11-26 08:36:09 +00006000#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006001static int netif_alloc_rx_queues(struct net_device *dev)
6002{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006003 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006004 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006005
Tom Herbertbd25fa72010-10-18 18:00:16 +00006006 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006007
Tom Herbertbd25fa72010-10-18 18:00:16 +00006008 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00006009 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00006010 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00006011
Tom Herbertbd25fa72010-10-18 18:00:16 +00006012 dev->_rx = rx;
6013
Tom Herbertbd25fa72010-10-18 18:00:16 +00006014 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006015 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006016 return 0;
6017}
Tom Herbertbf264142010-11-26 08:36:09 +00006018#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006019
Changli Gaoaa942102010-12-04 02:31:41 +00006020static void netdev_init_one_queue(struct net_device *dev,
6021 struct netdev_queue *queue, void *_unused)
6022{
6023 /* Initialize queue lock */
6024 spin_lock_init(&queue->_xmit_lock);
6025 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6026 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006027 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006028 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006029#ifdef CONFIG_BQL
6030 dql_init(&queue->dql, HZ);
6031#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006032}
6033
Tom Herberte6484932010-10-18 18:04:39 +00006034static int netif_alloc_netdev_queues(struct net_device *dev)
6035{
6036 unsigned int count = dev->num_tx_queues;
6037 struct netdev_queue *tx;
6038
6039 BUG_ON(count < 1);
6040
6041 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00006042 if (!tx)
Tom Herberte6484932010-10-18 18:04:39 +00006043 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00006044
Tom Herberte6484932010-10-18 18:04:39 +00006045 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006046
Tom Herberte6484932010-10-18 18:04:39 +00006047 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6048 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006049
6050 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006051}
6052
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006053/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006054 * register_netdevice - register a network device
6055 * @dev: device to register
6056 *
6057 * Take a completed network device structure and add it to the kernel
6058 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6059 * chain. 0 is returned on success. A negative errno code is returned
6060 * on a failure to set up the device, or if the name is a duplicate.
6061 *
6062 * Callers must hold the rtnl semaphore. You may want
6063 * register_netdev() instead of this.
6064 *
6065 * BUGS:
6066 * The locking appears insufficient to guarantee two parallel registers
6067 * will not get the same name.
6068 */
6069
6070int register_netdevice(struct net_device *dev)
6071{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006073 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006074
6075 BUG_ON(dev_boot_phase);
6076 ASSERT_RTNL();
6077
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006078 might_sleep();
6079
Linus Torvalds1da177e2005-04-16 15:20:36 -07006080 /* When net_device's are persistent, this will be fatal. */
6081 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006082 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006083
David S. Millerf1f28aa2008-07-15 00:08:33 -07006084 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006085 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006086
Linus Torvalds1da177e2005-04-16 15:20:36 -07006087 dev->iflink = -1;
6088
Gao feng828de4f2012-09-13 20:58:27 +00006089 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006090 if (ret < 0)
6091 goto out;
6092
Linus Torvalds1da177e2005-04-16 15:20:36 -07006093 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006094 if (dev->netdev_ops->ndo_init) {
6095 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006096 if (ret) {
6097 if (ret > 0)
6098 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006099 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006100 }
6101 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006102
Michał Mirosławd2ed2732013-01-29 15:14:16 +00006103 if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) &&
6104 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6105 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6106 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6107 ret = -EINVAL;
6108 goto err_uninit;
6109 }
6110
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006111 ret = -EBUSY;
6112 if (!dev->ifindex)
6113 dev->ifindex = dev_new_index(net);
6114 else if (__dev_get_by_index(net, dev->ifindex))
6115 goto err_uninit;
6116
Linus Torvalds1da177e2005-04-16 15:20:36 -07006117 if (dev->iflink == -1)
6118 dev->iflink = dev->ifindex;
6119
Michał Mirosław5455c692011-02-15 16:59:17 +00006120 /* Transfer changeable features to wanted_features and enable
6121 * software offloads (GSO and GRO).
6122 */
6123 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006124 dev->features |= NETIF_F_SOFT_FEATURES;
6125 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006126
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006127 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00006128 if (!(dev->flags & IFF_LOOPBACK)) {
6129 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6130 if (dev->features & NETIF_F_ALL_CSUM) {
6131 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
6132 dev->features |= NETIF_F_NOCACHE_COPY;
6133 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006134 }
6135
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006136 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006137 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006138 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006139
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006140 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6141 ret = notifier_to_errno(ret);
6142 if (ret)
6143 goto err_uninit;
6144
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006145 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006146 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006147 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006148 dev->reg_state = NETREG_REGISTERED;
6149
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006150 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006151
Linus Torvalds1da177e2005-04-16 15:20:36 -07006152 /*
6153 * Default initial state at registry is that the
6154 * device is present.
6155 */
6156
6157 set_bit(__LINK_STATE_PRESENT, &dev->state);
6158
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006159 linkwatch_init_dev(dev);
6160
Linus Torvalds1da177e2005-04-16 15:20:36 -07006161 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006163 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006164 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165
Jiri Pirko948b3372013-01-08 01:38:25 +00006166 /* If the device has permanent device address, driver should
6167 * set dev_addr and also addr_assign_type should be set to
6168 * NET_ADDR_PERM (default value).
6169 */
6170 if (dev->addr_assign_type == NET_ADDR_PERM)
6171 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6172
Linus Torvalds1da177e2005-04-16 15:20:36 -07006173 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006174 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006175 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006176 if (ret) {
6177 rollback_registered(dev);
6178 dev->reg_state = NETREG_UNREGISTERED;
6179 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006180 /*
6181 * Prevent userspace races by waiting until the network
6182 * device is fully setup before sending notifications.
6183 */
Patrick McHardya2835762010-02-26 06:34:51 +00006184 if (!dev->rtnl_link_ops ||
6185 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6186 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006187
6188out:
6189 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006190
6191err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006192 if (dev->netdev_ops->ndo_uninit)
6193 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006194 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006195}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006196EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006197
6198/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006199 * init_dummy_netdev - init a dummy network device for NAPI
6200 * @dev: device to init
6201 *
 6202 * This takes a network device structure and initializes the minimum
 6203 * number of fields so it can be used to schedule NAPI polls without
 6204 * registering a full-blown interface. This is to be used by drivers
6205 * that need to tie several hardware interfaces to a single NAPI
6206 * poll scheduler due to HW limitations.
6207 */
6208int init_dummy_netdev(struct net_device *dev)
6209{
6210 /* Clear everything. Note we don't initialize spinlocks
 6211	 * as they aren't supposed to be taken by any of the
6212 * NAPI code and this dummy netdev is supposed to be
6213 * only ever used for NAPI polls
6214 */
6215 memset(dev, 0, sizeof(struct net_device));
6216
6217 /* make sure we BUG if trying to hit standard
6218 * register/unregister code path
6219 */
6220 dev->reg_state = NETREG_DUMMY;
6221
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006222 /* NAPI wants this */
6223 INIT_LIST_HEAD(&dev->napi_list);
6224
6225 /* a dummy interface is started by default */
6226 set_bit(__LINK_STATE_PRESENT, &dev->state);
6227 set_bit(__LINK_STATE_START, &dev->state);
6228
Eric Dumazet29b44332010-10-11 10:22:12 +00006229	/* Note: We don't allocate pcpu_refcnt for dummy devices,
 6230	 * because users of this 'device' don't need to change
6231 * its refcount.
6232 */
6233
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006234 return 0;
6235}
6236EXPORT_SYMBOL_GPL(init_dummy_netdev);
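/*
 * A hypothetical sketch of the intended use: embed the dummy netdev in
 * the adapter structure purely to anchor NAPI contexts (the example_
 * names are invented).
 *
 *	struct example_adapter {
 *		struct net_device napi_dev;	// dummy, never registered
 *		struct napi_struct napi;
 *	};
 *
 *	init_dummy_netdev(&adapter->napi_dev);
 *	netif_napi_add(&adapter->napi_dev, &adapter->napi,
 *		       example_poll, 64);
 */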
6237
6238
6239/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006240 * register_netdev - register a network device
6241 * @dev: device to register
6242 *
6243 * Take a completed network device structure and add it to the kernel
6244 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6245 * chain. 0 is returned on success. A negative errno code is returned
6246 * on a failure to set up the device, or if the name is a duplicate.
6247 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006248 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006249 * and expands the device name if you passed a format string to
6250 * alloc_netdev.
6251 */
6252int register_netdev(struct net_device *dev)
6253{
6254 int err;
6255
6256 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006257 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006258 rtnl_unlock();
6259 return err;
6260}
6261EXPORT_SYMBOL(register_netdev);
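/*
 * A hedged sketch of the canonical probe-time sequence built on this
 * helper (example_ names invented for illustration):
 *
 *	dev = alloc_netdev(sizeof(struct example_priv), "example%d",
 *			   example_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);	// takes RTNL, expands "example%d"
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */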
6262
Eric Dumazet29b44332010-10-11 10:22:12 +00006263int netdev_refcnt_read(const struct net_device *dev)
6264{
6265 int i, refcnt = 0;
6266
6267 for_each_possible_cpu(i)
6268 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6269 return refcnt;
6270}
6271EXPORT_SYMBOL(netdev_refcnt_read);
6272
Ben Hutchings2c530402012-07-10 10:55:09 +00006273/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006274 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006275 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006276 *
6277 * This is called when unregistering network devices.
6278 *
6279 * Any protocol or device that holds a reference should register
6280 * for netdevice notification, and cleanup and put back the
6281 * reference if they receive an UNREGISTER event.
6282 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006283 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006284 */
6285static void netdev_wait_allrefs(struct net_device *dev)
6286{
6287 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006288 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006289
Eric Dumazete014deb2009-11-17 05:59:21 +00006290 linkwatch_forget_dev(dev);
6291
Linus Torvalds1da177e2005-04-16 15:20:36 -07006292 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006293 refcnt = netdev_refcnt_read(dev);
6294
6295 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006296 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006297 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006298
6299 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006300 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006301
Eric Dumazet748e2d92012-08-22 21:50:59 +00006302 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006303 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006304 rtnl_lock();
6305
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006306 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006307 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6308 &dev->state)) {
6309 /* We must not have linkwatch events
6310 * pending on unregister. If this
6311 * happens, we simply run the queue
6312 * unscheduled, resulting in a noop
6313 * for this device.
6314 */
6315 linkwatch_run_queue();
6316 }
6317
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006318 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006319
6320 rebroadcast_time = jiffies;
6321 }
6322
6323 msleep(250);
6324
Eric Dumazet29b44332010-10-11 10:22:12 +00006325 refcnt = netdev_refcnt_read(dev);
6326
Linus Torvalds1da177e2005-04-16 15:20:36 -07006327 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006328 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6329 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006330 warning_time = jiffies;
6331 }
6332 }
6333}
6334
6335/* The sequence is:
6336 *
6337 * rtnl_lock();
6338 * ...
6339 * register_netdevice(x1);
6340 * register_netdevice(x2);
6341 * ...
6342 * unregister_netdevice(y1);
6343 * unregister_netdevice(y2);
6344 * ...
6345 * rtnl_unlock();
6346 * free_netdev(y1);
6347 * free_netdev(y2);
6348 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006349 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006350 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006351 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352 * without deadlocking with linkwatch via keventd.
6353 * 2) Since we run with the RTNL semaphore not held, we can sleep
6354 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006355 *
6356 * We must not return until all unregister events added during
6357 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006358 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006359void netdev_run_todo(void)
6360{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006361 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006362
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006364 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006365
6366 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006367
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006368
6369 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006370 if (!list_empty(&list))
6371 rcu_barrier();
6372
Linus Torvalds1da177e2005-04-16 15:20:36 -07006373 while (!list_empty(&list)) {
6374 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006375 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006376 list_del(&dev->todo_list);
6377
Eric Dumazet748e2d92012-08-22 21:50:59 +00006378 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006379 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006380 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006381
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006382 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006383 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006384 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006385 dump_stack();
6386 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006387 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006388
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006389 dev->reg_state = NETREG_UNREGISTERED;
6390
Changli Gao152102c2010-03-30 20:16:22 +00006391 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006392
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006393 netdev_wait_allrefs(dev);
6394
6395 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006396 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006397 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6398 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006399 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006400
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006401 if (dev->destructor)
6402 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006403
6404 /* Free network device */
6405 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006406 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006407}
6408
Ben Hutchings3cfde792010-07-09 09:11:52 +00006409/* Convert net_device_stats to rtnl_link_stats64. They have the same
6410 * fields in the same order, with only the type differing.
6411 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006412void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6413 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006414{
6415#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006416 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6417 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006418#else
6419 size_t i, n = sizeof(*stats64) / sizeof(u64);
6420 const unsigned long *src = (const unsigned long *)netdev_stats;
6421 u64 *dst = (u64 *)stats64;
6422
6423 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6424 sizeof(*stats64) / sizeof(u64));
6425 for (i = 0; i < n; i++)
6426 dst[i] = src[i];
6427#endif
6428}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006429EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00006430
Eric Dumazetd83345a2009-11-16 03:36:51 +00006431/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006432 * dev_get_stats - get network device statistics
6433 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006434 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006435 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006436 *	Get network statistics from the device. Return @storage.
6437 * The device driver may provide its own method by setting
6438 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6439 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006440 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006441struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6442 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006443{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006444 const struct net_device_ops *ops = dev->netdev_ops;
6445
Eric Dumazet28172732010-07-07 14:58:56 -07006446 if (ops->ndo_get_stats64) {
6447 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006448 ops->ndo_get_stats64(dev, storage);
6449 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006450 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006451 } else {
6452 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006453 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006454 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006455 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006456}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006457EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07006458
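/*
 * Illustrative sketch (not part of the original file): callers provide
 * their own rtnl_link_stats64 storage, and dev_get_stats() fills it
 * via whichever of the three paths above the driver implements.  The
 * function name below is hypothetical.
 */
#if 0	/* usage sketch, compiled out */
static void example_log_drops(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx_dropped=%llu tx_dropped=%llu\n",
		    (unsigned long long)stats.rx_dropped,
		    (unsigned long long)stats.tx_dropped);
}
#endif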
Eric Dumazet24824a02010-10-02 06:11:55 +00006459struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006460{
Eric Dumazet24824a02010-10-02 06:11:55 +00006461 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006462
Eric Dumazet24824a02010-10-02 06:11:55 +00006463#ifdef CONFIG_NET_CLS_ACT
6464 if (queue)
6465 return queue;
6466 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6467 if (!queue)
6468 return NULL;
6469 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006470 queue->qdisc = &noop_qdisc;
6471 queue->qdisc_sleeping = &noop_qdisc;
6472 rcu_assign_pointer(dev->ingress_queue, queue);
6473#endif
6474 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006475}
6476
Eric Dumazet2c60db02012-09-16 09:17:26 +00006477static const struct ethtool_ops default_ethtool_ops;
6478
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00006479void netdev_set_default_ethtool_ops(struct net_device *dev,
6480 const struct ethtool_ops *ops)
6481{
6482 if (dev->ethtool_ops == &default_ethtool_ops)
6483 dev->ethtool_ops = ops;
6484}
6485EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6486
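/*
 * Illustrative sketch (not part of the original file): a bus or
 * wrapper layer can install fallback ethtool_ops without clobbering
 * ops a driver has already chosen, since the helper above only
 * replaces the kernel's default.  Names are hypothetical.
 */
#if 0	/* usage sketch, compiled out */
static const struct ethtool_ops example_bus_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
};

static void example_bus_attach(struct net_device *dev)
{
	/* No effect if the driver already set dev->ethtool_ops. */
	netdev_set_default_ethtool_ops(dev, &example_bus_ethtool_ops);
}
#endif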
Linus Torvalds1da177e2005-04-16 15:20:36 -07006487/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006488 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006489 * @sizeof_priv: size of private data to allocate space for
6490 * @name: device name format string
6491 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006492 * @txqs: the number of TX subqueues to allocate
6493 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006494 *
6495 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006496 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006497 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006498 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006499struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6500 void (*setup)(struct net_device *),
6501 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006502{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006503 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006504 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006505 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006506
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006507 BUG_ON(strlen(name) >= sizeof(dev->name));
6508
Tom Herbert36909ea2011-01-09 19:36:31 +00006509 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006510 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006511 return NULL;
6512 }
6513
Tom Herbert36909ea2011-01-09 19:36:31 +00006514#ifdef CONFIG_RPS
6515 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006516 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006517 return NULL;
6518 }
6519#endif
6520
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006521 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006522 if (sizeof_priv) {
6523 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006524 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006525 alloc_size += sizeof_priv;
6526 }
6527 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006528 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006529
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07006530 p = kzalloc(alloc_size, GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00006531 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006532 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006533
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006534 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006535 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006536
Eric Dumazet29b44332010-10-11 10:22:12 +00006537 dev->pcpu_refcnt = alloc_percpu(int);
6538 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00006539 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006540
Linus Torvalds1da177e2005-04-16 15:20:36 -07006541 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006542 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006543
Jiri Pirko22bedad2010-04-01 21:22:57 +00006544 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006545 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006546
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006547 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006548
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006549 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006550 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006551
Herbert Xud565b0a2008-12-15 23:38:52 -08006552 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006553 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006554 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006555 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006556 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006557 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006558
6559 dev->num_tx_queues = txqs;
6560 dev->real_num_tx_queues = txqs;
6561 if (netif_alloc_netdev_queues(dev))
6562 goto free_all;
6563
6564#ifdef CONFIG_RPS
6565 dev->num_rx_queues = rxqs;
6566 dev->real_num_rx_queues = rxqs;
6567 if (netif_alloc_rx_queues(dev))
6568 goto free_all;
6569#endif
6570
Linus Torvalds1da177e2005-04-16 15:20:36 -07006571 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006572 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006573 if (!dev->ethtool_ops)
6574 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006575 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006576
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006577free_all:
6578 free_netdev(dev);
6579 return NULL;
6580
Eric Dumazet29b44332010-10-11 10:22:12 +00006581free_pcpu:
6582 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00006583 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006584#ifdef CONFIG_RPS
6585 kfree(dev->_rx);
6586#endif
6587
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006588free_p:
6589 kfree(p);
6590 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006591}
Tom Herbert36909ea2011-01-09 19:36:31 +00006592EXPORT_SYMBOL(alloc_netdev_mqs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593
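/*
 * Illustrative sketch (not part of the original file): a multiqueue
 * Ethernet driver typically pairs alloc_netdev_mqs() with ether_setup
 * and register_netdev().  The private struct, queue counts and names
 * are hypothetical.
 */
#if 0	/* usage sketch, compiled out */
struct example_priv {
	void __iomem *regs;
};

static struct net_device *example_probe(void)
{
	struct net_device *dev;

	/* 4 TX and 4 RX queues; "ex%d" is expanded at registration. */
	dev = alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
			       ether_setup, 4, 4);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}
#endif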
6594/**
6595 * free_netdev - free network device
6596 * @dev: device
6597 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006598 * This function does the last stage of destroying an allocated device
6599 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006600 * If this is the last reference then it will be freed.
6601 */
6602void free_netdev(struct net_device *dev)
6603{
Herbert Xud565b0a2008-12-15 23:38:52 -08006604 struct napi_struct *p, *n;
6605
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006606 release_net(dev_net(dev));
6607
David S. Millere8a04642008-07-17 00:34:19 -07006608 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006609#ifdef CONFIG_RPS
6610 kfree(dev->_rx);
6611#endif
David S. Millere8a04642008-07-17 00:34:19 -07006612
Eric Dumazet33d480c2011-08-11 19:30:52 +00006613 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006614
Jiri Pirkof001fde2009-05-05 02:48:28 +00006615 /* Flush device addresses */
6616 dev_addr_flush(dev);
6617
Herbert Xud565b0a2008-12-15 23:38:52 -08006618 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6619 netif_napi_del(p);
6620
Eric Dumazet29b44332010-10-11 10:22:12 +00006621 free_percpu(dev->pcpu_refcnt);
6622 dev->pcpu_refcnt = NULL;
6623
Stephen Hemminger3041a062006-05-26 13:25:24 -07006624 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625 if (dev->reg_state == NETREG_UNINITIALIZED) {
6626 kfree((char *)dev - dev->padded);
6627 return;
6628 }
6629
6630 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6631 dev->reg_state = NETREG_RELEASED;
6632
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006633 /* will free via device release */
6634 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006635}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006636EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006637
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006638/**
6639 * synchronize_net - Synchronize with packet receive processing
6640 *
6641 * Wait for packets currently being received to be done.
6642 * Does not block later packets from starting.
6643 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006644void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006645{
6646 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006647 if (rtnl_is_locked())
6648 synchronize_rcu_expedited();
6649 else
6650 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006651}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006652EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006653
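/*
 * Illustrative sketch (not part of the original file): the usual
 * pattern is to unpublish a pointer that the receive path reads under
 * rcu_read_lock(), call synchronize_net(), and only then free the
 * object.  The context struct and names are hypothetical.
 */
#if 0	/* usage sketch, compiled out */
struct example_ctx {
	int id;
};

static void example_teardown(struct example_ctx __rcu **slot)
{
	struct example_ctx *ctx;

	ctx = rtnl_dereference(*slot);	/* caller holds RTNL */
	RCU_INIT_POINTER(*slot, NULL);
	synchronize_net();		/* wait out in-flight receivers */
	kfree(ctx);
}
#endif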
6654/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006655 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006656 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006657 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006658 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006659 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006660 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006661 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006662 *
6663 * Callers must hold the rtnl semaphore. You may want
6664 * unregister_netdev() instead of this.
6665 */
6666
Eric Dumazet44a08732009-10-27 07:03:04 +00006667void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006668{
Herbert Xua6620712007-12-12 19:21:56 -08006669 ASSERT_RTNL();
6670
Eric Dumazet44a08732009-10-27 07:03:04 +00006671 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006672 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006673 } else {
6674 rollback_registered(dev);
6675 /* Finish processing unregister after unlock */
6676 net_set_todo(dev);
6677 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678}
Eric Dumazet44a08732009-10-27 07:03:04 +00006679EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006680
6681/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006682 * unregister_netdevice_many - unregister many devices
6683 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006684 */
6685void unregister_netdevice_many(struct list_head *head)
6686{
6687 struct net_device *dev;
6688
6689 if (!list_empty(head)) {
6690 rollback_registered_many(head);
6691 list_for_each_entry(dev, head, unreg_list)
6692 net_set_todo(dev);
6693 }
6694}
Eric Dumazet63c80992009-10-27 07:06:49 +00006695EXPORT_SYMBOL(unregister_netdevice_many);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006696
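/*
 * Illustrative sketch (not part of the original file): queueing
 * several devices on one list and unregistering them together lets
 * rollback_registered_many() share the expensive RCU grace periods.
 * The function name is hypothetical.
 */
#if 0	/* usage sketch, compiled out */
static void example_remove_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}
#endif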
6697/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006698 * unregister_netdev - remove device from the kernel
6699 * @dev: device
6700 *
6701 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006702 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006703 *
6704 * This is just a wrapper for unregister_netdevice that takes
6705 * the rtnl semaphore. In general you want to use this and not
6706 * unregister_netdevice.
6707 */
6708void unregister_netdev(struct net_device *dev)
6709{
6710 rtnl_lock();
6711 unregister_netdevice(dev);
6712 rtnl_unlock();
6713}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006714EXPORT_SYMBOL(unregister_netdev);
6715
Eric W. Biedermance286d32007-09-12 13:53:49 +02006716/**
 6717 * dev_change_net_namespace - move device to a different network namespace
6718 * @dev: device
6719 * @net: network namespace
 6720 * @pat: If not NULL, name pattern to try if the current device name
6721 * is already taken in the destination network namespace.
6722 *
6723 * This function shuts down a device interface and moves it
6724 * to a new network namespace. On success 0 is returned, on
6725 * a failure a netagive errno code is returned.
6726 *
6727 * Callers must hold the rtnl semaphore.
6728 */
6729
6730int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6731{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006732 int err;
6733
6734 ASSERT_RTNL();
6735
6736 /* Don't allow namespace local devices to be moved. */
6737 err = -EINVAL;
6738 if (dev->features & NETIF_F_NETNS_LOCAL)
6739 goto out;
6740
 6741 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006742 if (dev->reg_state != NETREG_REGISTERED)
6743 goto out;
6744
 6745 /* Get out if there is nothing to do */
6746 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006747 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006748 goto out;
6749
6750 /* Pick the destination device name, and ensure
6751 * we can use it in the destination network namespace.
6752 */
6753 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006754 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006755 /* We get here if we can't use the current device name */
6756 if (!pat)
6757 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006758 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006759 goto out;
6760 }
6761
6762 /*
 6763 * And now a mini version of register_netdevice and unregister_netdevice.
6764 */
6765
 6766 /* If device is running, close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006767 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006768
6769 /* And unlink it from device chain */
6770 err = -ENODEV;
6771 unlist_netdevice(dev);
6772
6773 synchronize_net();
6774
6775 /* Shutdown queueing discipline. */
6776 dev_shutdown(dev);
6777
 6778 /* Notify protocols that we are about to destroy
 6779 this device. They should clean up all of their state.
David Lamparter3b27e102010-09-17 03:22:19 +00006780
6781 Note that dev->reg_state stays at NETREG_REGISTERED.
6782 This is wanted because this way 8021q and macvlan know
6783 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006784 */
6785 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006786 rcu_barrier();
6787 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric W. Biedermand2237d32011-10-21 06:24:20 +00006788 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006789
6790 /*
6791 * Flush the unicast and multicast chains
6792 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006793 dev_uc_flush(dev);
Jiri Pirko22bedad2010-04-01 21:22:57 +00006794 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006795
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006796 /* Send a netdev-removed uevent to the old namespace */
6797 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6798
Eric W. Biedermance286d32007-09-12 13:53:49 +02006799 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006800 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006801
Eric W. Biedermance286d32007-09-12 13:53:49 +02006802 /* If there is an ifindex conflict assign a new one */
6803 if (__dev_get_by_index(net, dev->ifindex)) {
6804 int iflink = (dev->iflink == dev->ifindex);
6805 dev->ifindex = dev_new_index(net);
6806 if (iflink)
6807 dev->iflink = dev->ifindex;
6808 }
6809
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006810 /* Send a netdev-add uevent to the new namespace */
6811 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6812
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006813 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006814 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006815 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006816
6817 /* Add the device back in the hashes */
6818 list_netdevice(dev);
6819
6820 /* Notify protocols, that a new device appeared. */
6821 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6822
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006823 /*
6824 * Prevent userspace races by waiting until the network
6825 * device is fully setup before sending notifications.
6826 */
6827 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6828
Eric W. Biedermance286d32007-09-12 13:53:49 +02006829 synchronize_net();
6830 err = 0;
6831out:
6832 return err;
6833}
Johannes Berg463d0182009-07-14 00:33:35 +02006834EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006835
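/*
 * Illustrative sketch (not part of the original file): moving a
 * device into another namespace under RTNL, falling back to a
 * "dev%d"-style pattern if its current name is already taken there.
 * The function name is hypothetical.
 */
#if 0	/* usage sketch, compiled out */
static int example_move(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();
	return err;
}
#endif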
Linus Torvalds1da177e2005-04-16 15:20:36 -07006836static int dev_cpu_callback(struct notifier_block *nfb,
6837 unsigned long action,
6838 void *ocpu)
6839{
6840 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006841 struct sk_buff *skb;
6842 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6843 struct softnet_data *sd, *oldsd;
6844
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006845 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006846 return NOTIFY_OK;
6847
6848 local_irq_disable();
6849 cpu = smp_processor_id();
6850 sd = &per_cpu(softnet_data, cpu);
6851 oldsd = &per_cpu(softnet_data, oldcpu);
6852
6853 /* Find end of our completion_queue. */
6854 list_skb = &sd->completion_queue;
6855 while (*list_skb)
6856 list_skb = &(*list_skb)->next;
6857 /* Append completion queue from offline CPU. */
6858 *list_skb = oldsd->completion_queue;
6859 oldsd->completion_queue = NULL;
6860
Linus Torvalds1da177e2005-04-16 15:20:36 -07006861 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006862 if (oldsd->output_queue) {
6863 *sd->output_queue_tailp = oldsd->output_queue;
6864 sd->output_queue_tailp = oldsd->output_queue_tailp;
6865 oldsd->output_queue = NULL;
6866 oldsd->output_queue_tailp = &oldsd->output_queue;
6867 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006868 /* Append NAPI poll list from offline CPU. */
6869 if (!list_empty(&oldsd->poll_list)) {
6870 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6871 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6872 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006873
6874 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6875 local_irq_enable();
6876
6877 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006878 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6879 netif_rx(skb);
6880 input_queue_head_incr(oldsd);
6881 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006882 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006883 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006884 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006885 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006886
6887 return NOTIFY_OK;
6888}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006889
6890
Herbert Xu7f353bf2007-08-10 15:47:58 -07006891/**
Herbert Xub63365a2008-10-23 01:11:29 -07006892 * netdev_increment_features - increment feature set by one
6893 * @all: current feature set
6894 * @one: new feature set
6895 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006896 *
6897 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006898 * @one to the master device with current feature set @all. Will not
6899 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006900 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006901netdev_features_t netdev_increment_features(netdev_features_t all,
6902 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006903{
Michał Mirosław1742f182011-04-22 06:31:16 +00006904 if (mask & NETIF_F_GEN_CSUM)
6905 mask |= NETIF_F_ALL_CSUM;
6906 mask |= NETIF_F_VLAN_CHALLENGED;
6907
6908 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6909 all &= one | ~NETIF_F_ALL_FOR_ALL;
6910
Michał Mirosław1742f182011-04-22 06:31:16 +00006911 /* If one device supports hw checksumming, set for all. */
6912 if (all & NETIF_F_GEN_CSUM)
6913 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006914
6915 return all;
6916}
Herbert Xub63365a2008-10-23 01:11:29 -07006917EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006918
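/*
 * Illustrative sketch (not part of the original file): an
 * aggregating driver (bonding-style) recomputes its feature set by
 * folding each slave's features into a running set, starting from the
 * mask.  The array walk and names are hypothetical.
 */
#if 0	/* usage sketch, compiled out */
static netdev_features_t
example_compute_features(struct net_device *slaves[], int n,
			 netdev_features_t mask)
{
	netdev_features_t features = mask;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     mask);
	return features;
}
#endif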
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006919static struct hlist_head *netdev_create_hash(void)
6920{
6921 int i;
6922 struct hlist_head *hash;
6923
6924 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6925 if (hash != NULL)
6926 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6927 INIT_HLIST_HEAD(&hash[i]);
6928
6929 return hash;
6930}
6931
Eric W. Biederman881d9662007-09-17 11:56:21 -07006932/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006933static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006934{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006935 if (net != &init_net)
6936 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006937
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006938 net->dev_name_head = netdev_create_hash();
6939 if (net->dev_name_head == NULL)
6940 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006941
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006942 net->dev_index_head = netdev_create_hash();
6943 if (net->dev_index_head == NULL)
6944 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006945
6946 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006947
6948err_idx:
6949 kfree(net->dev_name_head);
6950err_name:
6951 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006952}
6953
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006954/**
6955 * netdev_drivername - network driver for the device
6956 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006957 *
6958 * Determine network driver for device.
6959 */
David S. Miller3019de12011-06-06 16:41:33 -07006960const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006961{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006962 const struct device_driver *driver;
6963 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006964 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006965
6966 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006967 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006968 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006969
6970 driver = parent->driver;
6971 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006972 return driver->name;
6973 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006974}
6975
Joe Perchesb004ff42012-09-12 20:12:19 -07006976static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006977 struct va_format *vaf)
6978{
6979 int r;
6980
Joe Perchesb004ff42012-09-12 20:12:19 -07006981 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006982 r = dev_printk_emit(level[1] - '0',
6983 dev->dev.parent,
6984 "%s %s %s: %pV",
6985 dev_driver_string(dev->dev.parent),
6986 dev_name(dev->dev.parent),
6987 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006988 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00006989 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006990 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00006991 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006992 }
Joe Perches256df2f2010-06-27 01:02:35 +00006993
6994 return r;
6995}
6996
6997int netdev_printk(const char *level, const struct net_device *dev,
6998 const char *format, ...)
6999{
7000 struct va_format vaf;
7001 va_list args;
7002 int r;
7003
7004 va_start(args, format);
7005
7006 vaf.fmt = format;
7007 vaf.va = &args;
7008
7009 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007010
Joe Perches256df2f2010-06-27 01:02:35 +00007011 va_end(args);
7012
7013 return r;
7014}
7015EXPORT_SYMBOL(netdev_printk);
7016
7017#define define_netdev_printk_level(func, level) \
7018int func(const struct net_device *dev, const char *fmt, ...) \
7019{ \
7020 int r; \
7021 struct va_format vaf; \
7022 va_list args; \
7023 \
7024 va_start(args, fmt); \
7025 \
7026 vaf.fmt = fmt; \
7027 vaf.va = &args; \
7028 \
7029 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007030 \
Joe Perches256df2f2010-06-27 01:02:35 +00007031 va_end(args); \
7032 \
7033 return r; \
7034} \
7035EXPORT_SYMBOL(func);
7036
7037define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7038define_netdev_printk_level(netdev_alert, KERN_ALERT);
7039define_netdev_printk_level(netdev_crit, KERN_CRIT);
7040define_netdev_printk_level(netdev_err, KERN_ERR);
7041define_netdev_printk_level(netdev_warn, KERN_WARNING);
7042define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7043define_netdev_printk_level(netdev_info, KERN_INFO);
7044
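/*
 * Illustrative sketch (not part of the original file): the generated
 * helpers take a net_device first, so each message is prefixed with
 * the driver, bus address and interface name.  Values and the
 * function name are hypothetical.
 */
#if 0	/* usage sketch, compiled out */
static void example_report(struct net_device *dev, int err)
{
	if (err)
		netdev_err(dev, "reset failed, error %d\n", err);
	else
		netdev_info(dev, "link is up\n");
}
#endif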
Pavel Emelyanov46650792007-10-08 20:38:39 -07007045static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007046{
7047 kfree(net->dev_name_head);
7048 kfree(net->dev_index_head);
7049}
7050
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007051static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007052 .init = netdev_init,
7053 .exit = netdev_exit,
7054};
7055
Pavel Emelyanov46650792007-10-08 20:38:39 -07007056static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007057{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007058 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007059 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007060 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007061 * initial network namespace
7062 */
7063 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007064 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007065 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007066 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007067
 7068 /* Ignore unmovable devices (i.e. loopback) */
7069 if (dev->features & NETIF_F_NETNS_LOCAL)
7070 continue;
7071
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007072 /* Leave virtual devices for the generic cleanup */
7073 if (dev->rtnl_link_ops)
7074 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007075
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007076 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007077 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7078 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007079 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007080 pr_emerg("%s: failed to move %s to init_net: %d\n",
7081 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007082 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007083 }
7084 }
7085 rtnl_unlock();
7086}
7087
Eric W. Biederman04dc7f62009-12-03 02:29:04 +00007088static void __net_exit default_device_exit_batch(struct list_head *net_list)
7089{
7090 /* At exit all network devices most be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007091 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f62009-12-03 02:29:04 +00007092 * Do this across as many network namespaces as possible to
7093 * improve batching efficiency.
7094 */
7095 struct net_device *dev;
7096 struct net *net;
7097 LIST_HEAD(dev_kill_list);
7098
7099 rtnl_lock();
7100 list_for_each_entry(net, net_list, exit_list) {
7101 for_each_netdev_reverse(net, dev) {
7102 if (dev->rtnl_link_ops)
7103 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7104 else
7105 unregister_netdevice_queue(dev, &dev_kill_list);
7106 }
7107 }
7108 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007109 list_del(&dev_kill_list);
Eric W. Biederman04dc7f62009-12-03 02:29:04 +00007110 rtnl_unlock();
7111}
7112
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007113static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007114 .exit = default_device_exit,
Eric W. Biederman04dc7f62009-12-03 02:29:04 +00007115 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02007116};
7117
Linus Torvalds1da177e2005-04-16 15:20:36 -07007118/*
7119 * Initialize the DEV module. At boot time this walks the device list and
7120 * unhooks any devices that fail to initialise (normally hardware not
7121 * present) and leaves us with a valid list of present and active devices.
7122 *
7123 */
7124
7125/*
7126 * This is called single threaded during boot, so no need
7127 * to take the rtnl semaphore.
7128 */
7129static int __init net_dev_init(void)
7130{
7131 int i, rc = -ENOMEM;
7132
7133 BUG_ON(!dev_boot_phase);
7134
Linus Torvalds1da177e2005-04-16 15:20:36 -07007135 if (dev_proc_init())
7136 goto out;
7137
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007138 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07007139 goto out;
7140
7141 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08007142 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007143 INIT_LIST_HEAD(&ptype_base[i]);
7144
Vlad Yasevich62532da2012-11-15 08:49:10 +00007145 INIT_LIST_HEAD(&offload_base);
7146
Eric W. Biederman881d9662007-09-17 11:56:21 -07007147 if (register_pernet_subsys(&netdev_net_ops))
7148 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149
7150 /*
7151 * Initialise the packet receive queues.
7152 */
7153
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07007154 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007155 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007156
Changli Gaodee42872010-05-02 05:42:16 +00007157 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007158 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07007159 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007160 sd->completion_queue = NULL;
7161 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00007162 sd->output_queue = NULL;
7163 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00007164#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007165 sd->csd.func = rps_trigger_softirq;
7166 sd->csd.info = sd;
7167 sd->csd.flags = 0;
7168 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07007169#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00007170
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007171 sd->backlog.poll = process_backlog;
7172 sd->backlog.weight = weight_p;
7173 sd->backlog.gro_list = NULL;
7174 sd->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007175 }
7176
Linus Torvalds1da177e2005-04-16 15:20:36 -07007177 dev_boot_phase = 0;
7178
Eric W. Biederman505d4f72008-11-07 22:54:20 -08007179 /* The loopback device is special: if any other network device
 7180 * is present in a network namespace, the loopback device must
 7181 * be present too. Since we now dynamically allocate and free the
 7182 * loopback device, ensure this invariant is maintained by
 7183 * keeping the loopback device as the first device on the
 7184 * list of network devices, so that it is the first device
 7185 * that appears and the last network device
 7186 * that disappears.
7187 */
7188 if (register_pernet_device(&loopback_net_ops))
7189 goto out;
7190
7191 if (register_pernet_device(&default_device_ops))
7192 goto out;
7193
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007194 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7195 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007196
7197 hotcpu_notifier(dev_cpu_callback, 0);
7198 dst_init();
7199 dev_mcast_init();
7200 rc = 0;
7201out:
7202 return rc;
7203}
7204
7205subsys_initcall(net_dev_init);