/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

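/*
 * Illustrative sketch only (not part of this file's code): a pure reader
 * walking the device list under dev_base_lock, following the rules above.
 * The helper name example_count_up_devices() is hypothetical.
 *
 *	static int example_count_up_devices(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int count = 0;
 *
 *		read_lock(&dev_base_lock);
 *		for_each_netdev(net, dev)
 *			if (dev->flags & IFF_UP)
 *				count++;
 *		read_unlock(&dev_base_lock);
 *		return count;
 *	}
 */
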
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

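/*
 * Illustrative sketch only (not part of this file's code): how a protocol
 * module typically pairs dev_add_pack() with dev_remove_pack().  The names
 * example_pt and example_rcv are hypothetical.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);			drop the frame
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_ARP),
 *		.func	= example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);		e.g. from module init
 *	...
 *	dev_remove_pack(&example_pt);		e.g. from module exit
 */
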
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

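/*
 * Illustrative sketch only (values are hypothetical): with the parsing above,
 * a kernel command line option such as
 *
 *	netdev=5,0x300,0,0,eth1
 *
 * records irq=5 and base_addr=0x300 for "eth1", which
 * netdev_boot_setup_check() then applies when that device is probed.
 */
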
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

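/*
 * Illustrative sketch only (not part of this file's code): the hold/put
 * discipline that dev_get_by_name() expects of callers.  The name "eth0"
 * is just an example.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		...use dev...
 *		dev_put(dev);
 *	}
 */
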
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

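/*
 * Illustrative sketch only (not part of this file's code): the locking that
 * dev_get_by_index_rcu() expects.  The pointer is only valid inside the RCU
 * read-side section unless a reference is taken with dev_hold().
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		dev_hold(dev);		or use dev directly before unlocking
 *	rcu_read_unlock();
 */
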


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf: scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

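/*
 * Illustrative sketch only (not part of this file's code): a driver passing
 * a format string to dev_alloc_name().  The "example%d" name is hypothetical.
 *
 *	err = dev_alloc_name(dev, "example%d");
 *	if (err < 0)
 *		goto fail;		no free unit number was available
 *	...dev->name is now e.g. "example0"...
 */
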

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can even be on a different CPU. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

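/*
 * Illustrative sketch only (not part of this file's code): a subsystem
 * watching device events through the notifier API above.  The names
 * example_netdev_event and example_nb are hypothetical.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 *	...
 *	unregister_netdevice_notifier(&example_nb);
 */
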
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
1328 * kernel structures and may then be reused. A negative errno code
1329 * is returned on a failure.
1330 */
1331
1332int unregister_netdevice_notifier(struct notifier_block *nb)
1333{
Herbert Xu9f514952006-03-25 01:24:25 -08001334 int err;
1335
1336 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001337 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001338 rtnl_unlock();
1339 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001341EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342
1343/**
1344 * call_netdevice_notifiers - call all network notifier blocks
1345 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001346 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 *
1348 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001349 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 */
1351
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001352int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001354 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355}
1356
1357/* When > 0 there are consumers of rx skb time stamps */
1358static atomic_t netstamp_needed = ATOMIC_INIT(0);
1359
1360void net_enable_timestamp(void)
1361{
1362 atomic_inc(&netstamp_needed);
1363}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001364EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
1366void net_disable_timestamp(void)
1367{
1368 atomic_dec(&netstamp_needed);
1369}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001370EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001372static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373{
1374 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001375 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001376 else
1377 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378}
1379
1380/*
1381 * Support routine. Sends outgoing frames to any network
1382 * taps currently in use.
1383 */
1384
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001385static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386{
1387 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001388
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001389#ifdef CONFIG_NET_CLS_ACT
1390 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1391 net_timestamp(skb);
1392#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001393 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001394#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
1396 rcu_read_lock();
1397 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1398 /* Never send packets back to the socket
1399 * they originated from - MvS (miquels@drinkel.ow.org)
1400 */
1401 if ((ptype->dev == dev || !ptype->dev) &&
1402 (ptype->af_packet_priv == NULL ||
1403 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001404 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 if (!skb2)
1406 break;
1407
1408 /* skb->nh should be correctly
1409 set by the sender, so that the check below is
1410 just protection against buggy protocols.
1411 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001412 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001414 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001415 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 if (net_ratelimit())
1417 printk(KERN_CRIT "protocol %04x is "
1418 "buggy, dev %s\n",
1419 ntohs(skb2->protocol), dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001420 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 }
1422
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001423 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001425 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 }
1427 }
1428 rcu_read_unlock();
1429}
1430
Denis Vlasenko56079432006-03-29 15:57:29 -08001431
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001432static inline void __netif_reschedule(struct Qdisc *q)
1433{
1434 struct softnet_data *sd;
1435 unsigned long flags;
1436
1437 local_irq_save(flags);
1438 sd = &__get_cpu_var(softnet_data);
1439 q->next_sched = sd->output_queue;
1440 sd->output_queue = q;
1441 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1442 local_irq_restore(flags);
1443}
1444
David S. Miller37437bb2008-07-16 02:15:04 -07001445void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001446{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001447 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1448 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001449}
1450EXPORT_SYMBOL(__netif_schedule);
1451
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001452void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001453{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001454 if (atomic_dec_and_test(&skb->users)) {
1455 struct softnet_data *sd;
1456 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001457
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001458 local_irq_save(flags);
1459 sd = &__get_cpu_var(softnet_data);
1460 skb->next = sd->completion_queue;
1461 sd->completion_queue = skb;
1462 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1463 local_irq_restore(flags);
1464 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001465}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001466EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001467
1468void dev_kfree_skb_any(struct sk_buff *skb)
1469{
1470 if (in_irq() || irqs_disabled())
1471 dev_kfree_skb_irq(skb);
1472 else
1473 dev_kfree_skb(skb);
1474}
1475EXPORT_SYMBOL(dev_kfree_skb_any);
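/*
 * Example (illustrative sketch, not part of this file; example_clean_tx_ring
 * and struct example_ring are invented): a driver's TX completion path may
 * run from hard interrupt context or from process context (e.g. a timeout
 * handler), so it frees completed skbs with dev_kfree_skb_any() and lets the
 * helper above pick the right variant.
 *
 *	static void example_clean_tx_ring(struct example_ring *ring)
 *	{
 *		unsigned int i = ring->next_to_clean;
 *
 *		while (i != ring->next_to_use) {
 *			if (ring->skb[i]) {
 *				dev_kfree_skb_any(ring->skb[i]);
 *				ring->skb[i] = NULL;
 *			}
 *			i = (i + 1) % ring->count;
 *		}
 *		ring->next_to_clean = i;
 *	}
 */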
1476
1477
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001478/**
1479 * netif_device_detach - mark device as removed
1480 * @dev: network device
1481 *
1482 * Mark device as removed from system and therefore no longer available.
1483 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001484void netif_device_detach(struct net_device *dev)
1485{
1486 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1487 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001488 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001489 }
1490}
1491EXPORT_SYMBOL(netif_device_detach);
1492
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001493/**
1494 * netif_device_attach - mark device as attached
1495 * @dev: network device
1496 *
1497 * Mark the device as attached to the system and restart if needed.
1498 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001499void netif_device_attach(struct net_device *dev)
1500{
1501 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1502 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001503 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001504 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001505 }
1506}
1507EXPORT_SYMBOL(netif_device_attach);
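/*
 * Example (illustrative sketch, not part of this file; the example_* names
 * are invented): a PCI driver's legacy suspend/resume hooks typically
 * bracket the hardware shutdown with netif_device_detach() and
 * netif_device_attach() so the stack stops queueing packets while the
 * device is powered down.
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		// stop DMA, free the IRQ, save PCI state, power down ...
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		// restore PCI state and reprogram the hardware ...
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */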
1508
Ben Hutchings6de329e2008-06-16 17:02:28 -07001509static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1510{
1511 return ((features & NETIF_F_GEN_CSUM) ||
1512 ((features & NETIF_F_IP_CSUM) &&
1513 protocol == htons(ETH_P_IP)) ||
1514 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001515 protocol == htons(ETH_P_IPV6)) ||
1516 ((features & NETIF_F_FCOE_CRC) &&
1517 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001518}
1519
1520static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1521{
1522 if (can_checksum_protocol(dev->features, skb->protocol))
1523 return true;
1524
1525 if (skb->protocol == htons(ETH_P_8021Q)) {
1526 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1527 if (can_checksum_protocol(dev->features & dev->vlan_features,
1528 veh->h_vlan_encapsulated_proto))
1529 return true;
1530 }
1531
1532 return false;
1533}
Denis Vlasenko56079432006-03-29 15:57:29 -08001534
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535/*
1536 * Invalidate hardware checksum when packet is to be mangled, and
1537 * complete checksum manually on outgoing path.
1538 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001539int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540{
Al Virod3bc23e2006-11-14 21:24:49 -08001541 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001542 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
Patrick McHardy84fa7932006-08-29 16:44:56 -07001544 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001545 goto out_set_summed;
1546
1547 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001548 /* Let GSO fix up the checksum. */
1549 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 }
1551
Herbert Xua0308472007-10-15 01:47:15 -07001552 offset = skb->csum_start - skb_headroom(skb);
1553 BUG_ON(offset >= skb_headlen(skb));
1554 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1555
1556 offset += skb->csum_offset;
1557 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1558
1559 if (skb_cloned(skb) &&
1560 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1562 if (ret)
1563 goto out;
1564 }
1565
Herbert Xua0308472007-10-15 01:47:15 -07001566 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001567out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001569out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 return ret;
1571}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001572EXPORT_SYMBOL(skb_checksum_help);
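/*
 * Example (illustrative sketch, not part of this file; example_hw_can_csum
 * is invented): a driver whose hardware cannot checksum a particular frame
 * can fall back to software checksumming from its ndo_start_xmit() before
 * handing the frame to the hardware.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */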
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001574/**
1575 * skb_gso_segment - Perform segmentation on skb.
1576 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001577 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001578 *
1579 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001580 *
1581 * It may return NULL if the skb requires no segmentation. This is
1582 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001583 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001584struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001585{
1586 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1587 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001588 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001589 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001590
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001591 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001592 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001593 __skb_pull(skb, skb->mac_len);
1594
Herbert Xu67fd1a72009-01-19 16:26:44 -08001595 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1596 struct net_device *dev = skb->dev;
1597 struct ethtool_drvinfo info = {};
1598
1599 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1600 dev->ethtool_ops->get_drvinfo(dev, &info);
1601
1602 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1603 "ip_summed=%d",
1604 info.driver, dev ? dev->features : 0L,
1605 skb->sk ? skb->sk->sk_route_caps : 0L,
1606 skb->len, skb->data_len, skb->ip_summed);
1607
Herbert Xua430a432006-07-08 13:34:56 -07001608 if (skb_header_cloned(skb) &&
1609 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1610 return ERR_PTR(err);
1611 }
1612
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001613 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001614 list_for_each_entry_rcu(ptype,
1615 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001616 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001617 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001618 err = ptype->gso_send_check(skb);
1619 segs = ERR_PTR(err);
1620 if (err || skb_gso_ok(skb, features))
1621 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001622 __skb_push(skb, (skb->data -
1623 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001624 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001625 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001626 break;
1627 }
1628 }
1629 rcu_read_unlock();
1630
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001631 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001632
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001633 return segs;
1634}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001635EXPORT_SYMBOL(skb_gso_segment);
1636
Herbert Xufb286bb2005-11-10 13:01:24 -08001637/* Take action when hardware reception checksum errors are detected. */
1638#ifdef CONFIG_BUG
1639void netdev_rx_csum_fault(struct net_device *dev)
1640{
1641 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001642 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001643 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001644 dump_stack();
1645 }
1646}
1647EXPORT_SYMBOL(netdev_rx_csum_fault);
1648#endif
1649
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650/* Actually, we should eliminate this check as soon as we know, that:
1651 * 1. IOMMU is present and allows to map all the memory.
1652 * 2. No high memory really exists on this machine.
1653 */
1654
1655static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1656{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001657#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 int i;
1659
1660 if (dev->features & NETIF_F_HIGHDMA)
1661 return 0;
1662
1663 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1664 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1665 return 1;
1666
Herbert Xu3d3a8532006-06-27 13:33:10 -07001667#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 return 0;
1669}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001671struct dev_gso_cb {
1672 void (*destructor)(struct sk_buff *skb);
1673};
1674
1675#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1676
1677static void dev_gso_skb_destructor(struct sk_buff *skb)
1678{
1679 struct dev_gso_cb *cb;
1680
1681 do {
1682 struct sk_buff *nskb = skb->next;
1683
1684 skb->next = nskb->next;
1685 nskb->next = NULL;
1686 kfree_skb(nskb);
1687 } while (skb->next);
1688
1689 cb = DEV_GSO_CB(skb);
1690 if (cb->destructor)
1691 cb->destructor(skb);
1692}
1693
1694/**
1695 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1696 * @skb: buffer to segment
1697 *
1698 * This function segments the given skb and stores the list of segments
1699 * in skb->next.
1700 */
1701static int dev_gso_segment(struct sk_buff *skb)
1702{
1703 struct net_device *dev = skb->dev;
1704 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001705 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1706 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001707
Herbert Xu576a30e2006-06-27 13:22:38 -07001708 segs = skb_gso_segment(skb, features);
1709
1710 /* Verifying header integrity only. */
1711 if (!segs)
1712 return 0;
1713
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001714 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001715 return PTR_ERR(segs);
1716
1717 skb->next = segs;
1718 DEV_GSO_CB(skb)->destructor = skb->destructor;
1719 skb->destructor = dev_gso_skb_destructor;
1720
1721 return 0;
1722}
1723
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001724int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1725 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001726{
Stephen Hemminger00829822008-11-20 20:14:53 -08001727 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001728 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001729
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001730 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001731 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001732 dev_queue_xmit_nit(skb, dev);
1733
Herbert Xu576a30e2006-06-27 13:22:38 -07001734 if (netif_needs_gso(dev, skb)) {
1735 if (unlikely(dev_gso_segment(skb)))
1736 goto out_kfree_skb;
1737 if (skb->next)
1738 goto gso;
1739 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001740
Eric Dumazet93f154b2009-05-18 22:19:19 -07001741 /*
1742 * If the device doesn't need skb->dst, release it right now while
1743 * it's hot in this cpu's cache
1744 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001745 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1746 skb_dst_drop(skb);
1747
Patrick Ohlyac45f602009-02-12 05:03:37 +00001748 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001749 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001750 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001751 /*
1752 * TODO: if skb_orphan() was called by
1753 * dev->hard_start_xmit() (for example, the unmodified
1754 * igb driver does that; bnx2 doesn't), then
1755 * skb_tx_software_timestamp() will be unable to send
1756 * back the time stamp.
1757 *
1758 * How can this be prevented? Always create another
1759 * reference to the socket before calling
1760 * dev->hard_start_xmit()? Prevent skb_orphan() from
1761 * doing anything in dev->hard_start_xmit() by clearing
1762 * the skb destructor before the call and restoring it
1763 * afterwards, then doing the skb_orphan() ourselves?
1764 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001765 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001766 }
1767
Herbert Xu576a30e2006-06-27 13:22:38 -07001768gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001769 do {
1770 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001771
1772 skb->next = nskb->next;
1773 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001774 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001775 if (unlikely(rc != NETDEV_TX_OK)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001776 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001777 skb->next = nskb;
1778 return rc;
1779 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001780 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001781 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001782 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001783 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001784
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001785 skb->destructor = DEV_GSO_CB(skb)->destructor;
1786
1787out_kfree_skb:
1788 kfree_skb(skb);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001789 return NETDEV_TX_OK;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001790}
1791
David S. Miller70192982009-01-27 16:34:47 -08001792static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001793
Stephen Hemminger92477442009-03-21 13:39:26 -07001794u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001795{
David S. Miller70192982009-01-27 16:34:47 -08001796 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001797
David S. Miller513de112009-05-03 14:43:10 -07001798 if (skb_rx_queue_recorded(skb)) {
1799 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001800 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001801 hash -= dev->real_num_tx_queues;
1802 return hash;
1803 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001804
1805 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001806 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001807 else
David S. Miller70192982009-01-27 16:34:47 -08001808 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001809
David S. Miller70192982009-01-27 16:34:47 -08001810 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001811
David S. Millerb6b2fed2008-07-21 09:48:06 -07001812 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001813}
Stephen Hemminger92477442009-03-21 13:39:26 -07001814EXPORT_SYMBOL(skb_tx_hash);
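/*
 * Example (illustrative sketch, not part of this file; example_select_queue
 * is invented): a multiqueue driver that wants the default flow spreading,
 * but reserves its last ring for control traffic, could use skb_tx_hash()
 * from its ndo_select_queue() hook.
 *
 *	static u16 example_select_queue(struct net_device *dev,
 *					struct sk_buff *skb)
 *	{
 *		if (skb->priority == TC_PRIO_CONTROL)
 *			return dev->real_num_tx_queues - 1;
 *		return skb_tx_hash(dev, skb);
 *	}
 */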
David S. Miller8f0f2222008-07-15 03:47:03 -07001815
David S. Millere8a04642008-07-17 00:34:19 -07001816static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1817 struct sk_buff *skb)
1818{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001819 u16 queue_index;
1820 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001821
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001822 if (sk_tx_queue_recorded(sk)) {
1823 queue_index = sk_tx_queue_get(sk);
1824 } else {
1825 const struct net_device_ops *ops = dev->netdev_ops;
1826
1827 if (ops->ndo_select_queue) {
1828 queue_index = ops->ndo_select_queue(dev, skb);
1829 } else {
1830 queue_index = 0;
1831 if (dev->real_num_tx_queues > 1)
1832 queue_index = skb_tx_hash(dev, skb);
1833
1834 if (sk && sk->sk_dst_cache)
1835 sk_tx_queue_set(sk, queue_index);
1836 }
1837 }
David S. Millereae792b2008-07-15 03:03:33 -07001838
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001839 skb_set_queue_mapping(skb, queue_index);
1840 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001841}
1842
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001843static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1844 struct net_device *dev,
1845 struct netdev_queue *txq)
1846{
1847 spinlock_t *root_lock = qdisc_lock(q);
1848 int rc;
1849
1850 spin_lock(root_lock);
1851 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1852 kfree_skb(skb);
1853 rc = NET_XMIT_DROP;
1854 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1855 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1856 /*
1857 * This is a work-conserving queue; there are no old skbs
1858 * waiting to be sent out; and the qdisc is not running -
1859 * xmit the skb directly.
1860 */
1861 __qdisc_update_bstats(q, skb->len);
1862 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1863 __qdisc_run(q);
1864 else
1865 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1866
1867 rc = NET_XMIT_SUCCESS;
1868 } else {
1869 rc = qdisc_enqueue_root(skb, q);
1870 qdisc_run(q);
1871 }
1872 spin_unlock(root_lock);
1873
1874 return rc;
1875}
1876
Dave Jonesd29f7492008-07-22 14:09:06 -07001877/**
1878 * dev_queue_xmit - transmit a buffer
1879 * @skb: buffer to transmit
1880 *
1881 * Queue a buffer for transmission to a network device. The caller must
1882 * have set the device and priority and built the buffer before calling
1883 * this function. The function can be called from an interrupt.
1884 *
1885 * A negative errno code is returned on a failure. A success does not
1886 * guarantee the frame will be transmitted as it may be dropped due
1887 * to congestion or traffic shaping.
1888 *
1889 * -----------------------------------------------------------------------------------
1890 * I notice this method can also return errors from the queue disciplines,
1891 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1892 * be positive.
1893 *
1894 * Regardless of the return value, the skb is consumed, so it is currently
1895 * difficult to retry a send to this method. (You can bump the ref count
1896 * before sending to hold a reference for retry if you are careful.)
1897 *
1898 * When calling this method, interrupts MUST be enabled. This is because
1899 * the BH enable code must have IRQs enabled so that it will not deadlock.
1900 * --BLG
1901 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902int dev_queue_xmit(struct sk_buff *skb)
1903{
1904 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001905 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 struct Qdisc *q;
1907 int rc = -ENOMEM;
1908
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001909 /* GSO will handle the following emulations directly. */
1910 if (netif_needs_gso(dev, skb))
1911 goto gso;
1912
David S. Miller4cf704f2009-06-09 00:18:51 -07001913 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001915 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 goto out_kfree_skb;
1917
1918 /* Fragmented skb is linearized if device does not support SG,
1919 * or if at least one of the fragments is in highmem and the device
1920 * does not support DMA from it.
1921 */
1922 if (skb_shinfo(skb)->nr_frags &&
1923 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001924 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 goto out_kfree_skb;
1926
1927 /* If packet is not checksummed and device does not support
1928 * checksumming for this protocol, complete checksumming here.
1929 */
Herbert Xu663ead32007-04-09 11:59:07 -07001930 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1931 skb_set_transport_header(skb, skb->csum_start -
1932 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001933 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1934 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001935 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001937gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001938 /* Disable soft irqs for various locks below. Also
1939 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001941 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
David S. Millereae792b2008-07-15 03:03:33 -07001943 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001944 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001945
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001947 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948#endif
1949 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001950 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07001951 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 }
1953
1954 /* The device has no queue. Common case for software devices:
1955 loopback, all the sorts of tunnels...
1956
Herbert Xu932ff272006-06-09 12:20:56 -07001957 Really, it is unlikely that netif_tx_lock protection is necessary
1958 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 counters.)
1960 However, it is possible that they rely on the protection
1961 made by us here.
1962
1963 Check this and shoot the lock. It is not prone to deadlocks.
1964 Either shoot the noqueue qdisc, it is even simpler 8)
1965 */
1966 if (dev->flags & IFF_UP) {
1967 int cpu = smp_processor_id(); /* ok because BHs are off */
1968
David S. Millerc773e842008-07-08 23:13:53 -07001969 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
David S. Millerc773e842008-07-08 23:13:53 -07001971 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001973 if (!netif_tx_queue_stopped(txq)) {
Krishna Kumar03a9a442009-08-29 20:21:36 +00001974 rc = NET_XMIT_SUCCESS;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001975 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001976 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 goto out;
1978 }
1979 }
David S. Millerc773e842008-07-08 23:13:53 -07001980 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 if (net_ratelimit())
1982 printk(KERN_CRIT "Virtual device %s asks to "
1983 "queue packet!\n", dev->name);
1984 } else {
1985 /* Recursion is detected! It is possible,
1986 * unfortunately */
1987 if (net_ratelimit())
1988 printk(KERN_CRIT "Dead loop on virtual device "
1989 "%s, fix it urgently!\n", dev->name);
1990 }
1991 }
1992
1993 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001994 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995
1996out_kfree_skb:
1997 kfree_skb(skb);
1998 return rc;
1999out:
Herbert Xud4828d82006-06-22 02:28:18 -07002000 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 return rc;
2002}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002003EXPORT_SYMBOL(dev_queue_xmit);
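/*
 * Example (illustrative sketch, not part of this file; ETH_P_EXAMPLE and
 * dest_hw are invented placeholders): a protocol that has already resolved
 * the output device builds the link-layer header and hands the frame to the
 * device layer like this.  The skb is consumed whatever the return value.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_EXAMPLE);
 *	if (dev_hard_header(skb, dev, ETH_P_EXAMPLE, dest_hw,
 *			    dev->dev_addr, skb->len) < 0) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 *	err = dev_queue_xmit(skb);	// may be 0 or a positive NET_XMIT_* code
 */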
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
2005
2006/*=======================================================================
2007 Receiver routines
2008 =======================================================================*/
2009
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002010int netdev_max_backlog __read_mostly = 1000;
2011int netdev_budget __read_mostly = 300;
2012int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013
2014DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2015
2016
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017/**
2018 * netif_rx - post buffer to the network code
2019 * @skb: buffer to post
2020 *
2021 * This function receives a packet from a device driver and queues it for
2022 * the upper (protocol) levels to process. It always succeeds. The buffer
2023 * may be dropped during processing for congestion control or by the
2024 * protocol layers.
2025 *
2026 * return values:
2027 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 * NET_RX_DROP (packet was dropped)
2029 *
2030 */
2031
2032int netif_rx(struct sk_buff *skb)
2033{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 struct softnet_data *queue;
2035 unsigned long flags;
2036
2037 /* if netpoll wants it, pretend we never saw it */
2038 if (netpoll_rx(skb))
2039 return NET_RX_DROP;
2040
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002041 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002042 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043
2044 /*
2045 * The code is rearranged so that the path is the
2046 * shortest when the CPU is congested, but is still operating.
2047 */
2048 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 queue = &__get_cpu_var(softnet_data);
2050
2051 __get_cpu_var(netdev_rx_stat).total++;
2052 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2053 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002057 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 }
2059
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002060 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 goto enqueue;
2062 }
2063
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 __get_cpu_var(netdev_rx_stat).dropped++;
2065 local_irq_restore(flags);
2066
2067 kfree_skb(skb);
2068 return NET_RX_DROP;
2069}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002070EXPORT_SYMBOL(netif_rx);
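/*
 * Example (illustrative sketch, not part of this file; pkt_len and rx_buf
 * are invented placeholders): a non-NAPI driver queues each received frame
 * from its interrupt handler with netif_rx().
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (!skb) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */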
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
2072int netif_rx_ni(struct sk_buff *skb)
2073{
2074 int err;
2075
2076 preempt_disable();
2077 err = netif_rx(skb);
2078 if (local_softirq_pending())
2079 do_softirq();
2080 preempt_enable();
2081
2082 return err;
2083}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084EXPORT_SYMBOL(netif_rx_ni);
2085
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086static void net_tx_action(struct softirq_action *h)
2087{
2088 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2089
2090 if (sd->completion_queue) {
2091 struct sk_buff *clist;
2092
2093 local_irq_disable();
2094 clist = sd->completion_queue;
2095 sd->completion_queue = NULL;
2096 local_irq_enable();
2097
2098 while (clist) {
2099 struct sk_buff *skb = clist;
2100 clist = clist->next;
2101
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002102 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 __kfree_skb(skb);
2104 }
2105 }
2106
2107 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002108 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109
2110 local_irq_disable();
2111 head = sd->output_queue;
2112 sd->output_queue = NULL;
2113 local_irq_enable();
2114
2115 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002116 struct Qdisc *q = head;
2117 spinlock_t *root_lock;
2118
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 head = head->next_sched;
2120
David S. Miller5fb66222008-08-02 20:02:43 -07002121 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002122 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002123 smp_mb__before_clear_bit();
2124 clear_bit(__QDISC_STATE_SCHED,
2125 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002126 qdisc_run(q);
2127 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002129 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002130 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002131 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002132 } else {
2133 smp_mb__before_clear_bit();
2134 clear_bit(__QDISC_STATE_SCHED,
2135 &q->state);
2136 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 }
2138 }
2139 }
2140}
2141
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002142static inline int deliver_skb(struct sk_buff *skb,
2143 struct packet_type *pt_prev,
2144 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145{
2146 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002147 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148}
2149
2150#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002151
2152#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2153/* This hook is defined here for ATM LANE */
2154int (*br_fdb_test_addr_hook)(struct net_device *dev,
2155 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002156EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002157#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
Stephen Hemminger6229e362007-03-21 13:38:47 -07002159/*
2160 * If bridge module is loaded call bridging hook.
2161 * returns NULL if packet was consumed.
2162 */
2163struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2164 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002165EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002166
Stephen Hemminger6229e362007-03-21 13:38:47 -07002167static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2168 struct packet_type **pt_prev, int *ret,
2169 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170{
2171 struct net_bridge_port *port;
2172
Stephen Hemminger6229e362007-03-21 13:38:47 -07002173 if (skb->pkt_type == PACKET_LOOPBACK ||
2174 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2175 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
2177 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002178 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002180 }
2181
Stephen Hemminger6229e362007-03-21 13:38:47 -07002182 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183}
2184#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002185#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186#endif
2187
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002188#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2189struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2190EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2191
2192static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2193 struct packet_type **pt_prev,
2194 int *ret,
2195 struct net_device *orig_dev)
2196{
2197 if (skb->dev->macvlan_port == NULL)
2198 return skb;
2199
2200 if (*pt_prev) {
2201 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2202 *pt_prev = NULL;
2203 }
2204 return macvlan_handle_frame_hook(skb);
2205}
2206#else
2207#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2208#endif
2209
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210#ifdef CONFIG_NET_CLS_ACT
2211/* TODO: Maybe we should just force sch_ingress to be compiled in
2212 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless instructions
2213 * (a compare and 2 extra stores) right now if we don't have it on
2214 * but have CONFIG_NET_CLS_ACT
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002215 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 * the ingress scheduler, you just can't add policies on ingress.
2217 *
2218 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002219static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002222 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002223 struct netdev_queue *rxq;
2224 int result = TC_ACT_OK;
2225 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002226
Herbert Xuf697c3e2007-10-14 00:38:47 -07002227 if (MAX_RED_LOOP < ttl++) {
2228 printk(KERN_WARNING
2229 "Redir loop detected Dropping packet (%d->%d)\n",
2230 skb->iif, dev->ifindex);
2231 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 }
2233
Herbert Xuf697c3e2007-10-14 00:38:47 -07002234 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2235 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2236
David S. Miller555353c2008-07-08 17:33:13 -07002237 rxq = &dev->rx_queue;
2238
David S. Miller83874002008-07-17 00:53:03 -07002239 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002240 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002241 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002242 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2243 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002244 spin_unlock(qdisc_lock(q));
2245 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002246
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 return result;
2248}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002249
2250static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2251 struct packet_type **pt_prev,
2252 int *ret, struct net_device *orig_dev)
2253{
David S. Miller8d50b532008-07-30 02:37:46 -07002254 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002255 goto out;
2256
2257 if (*pt_prev) {
2258 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2259 *pt_prev = NULL;
2260 } else {
2261 /* Huh? Why does turning on AF_PACKET affect this? */
2262 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2263 }
2264
2265 switch (ing_filter(skb)) {
2266 case TC_ACT_SHOT:
2267 case TC_ACT_STOLEN:
2268 kfree_skb(skb);
2269 return NULL;
2270 }
2271
2272out:
2273 skb->tc_verd = 0;
2274 return skb;
2275}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276#endif
2277
Patrick McHardybc1d0412008-07-14 22:49:30 -07002278/*
2279 * netif_nit_deliver - deliver received packets to network taps
2280 * @skb: buffer
2281 *
2282 * This function is used to deliver incoming packets to network
2283 * taps. It should be used when the normal netif_receive_skb path
2284 * is bypassed, for example because of VLAN acceleration.
2285 */
2286void netif_nit_deliver(struct sk_buff *skb)
2287{
2288 struct packet_type *ptype;
2289
2290 if (list_empty(&ptype_all))
2291 return;
2292
2293 skb_reset_network_header(skb);
2294 skb_reset_transport_header(skb);
2295 skb->mac_len = skb->network_header - skb->mac_header;
2296
2297 rcu_read_lock();
2298 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2299 if (!ptype->dev || ptype->dev == skb->dev)
2300 deliver_skb(skb, ptype, skb->dev);
2301 }
2302 rcu_read_unlock();
2303}
2304
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002305/**
2306 * netif_receive_skb - process receive buffer from network
2307 * @skb: buffer to process
2308 *
2309 * netif_receive_skb() is the main receive data processing function.
2310 * It always succeeds. The buffer may be dropped during processing
2311 * for congestion control or by the protocol layers.
2312 *
2313 * This function may only be called from softirq context and interrupts
2314 * should be enabled.
2315 *
2316 * Return values (usually ignored):
2317 * NET_RX_SUCCESS: no congestion
2318 * NET_RX_DROP: packet was dropped
2319 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320int netif_receive_skb(struct sk_buff *skb)
2321{
2322 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002323 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002324 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002326 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002328 if (!skb->tstamp.tv64)
2329 net_timestamp(skb);
2330
Eric Dumazet05423b22009-10-26 18:40:35 -07002331 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002332 return NET_RX_SUCCESS;
2333
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002335 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 return NET_RX_DROP;
2337
Patrick McHardyc01003c2007-03-29 11:46:52 -07002338 if (!skb->iif)
2339 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002340
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002341 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002342 orig_dev = skb->dev;
2343 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002344 if (skb_bond_should_drop(skb))
2345 null_or_orig = orig_dev; /* deliver only exact match */
2346 else
2347 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002348 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002349
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 __get_cpu_var(netdev_rx_stat).total++;
2351
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002352 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002353 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002354 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355
2356 pt_prev = NULL;
2357
2358 rcu_read_lock();
2359
2360#ifdef CONFIG_NET_CLS_ACT
2361 if (skb->tc_verd & TC_NCLS) {
2362 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2363 goto ncls;
2364 }
2365#endif
2366
2367 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002368 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2369 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002370 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002371 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 pt_prev = ptype;
2373 }
2374 }
2375
2376#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002377 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2378 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380ncls:
2381#endif
2382
Stephen Hemminger6229e362007-03-21 13:38:47 -07002383 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2384 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002386 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2387 if (!skb)
2388 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389
2390 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002391 list_for_each_entry_rcu(ptype,
2392 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002394 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2395 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002396 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002397 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 pt_prev = ptype;
2399 }
2400 }
2401
2402 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002403 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 } else {
2405 kfree_skb(skb);
2406 /* Jamal, now you will not be able to escape explaining
2407 * to me how you were going to use this. :-)
2408 */
2409 ret = NET_RX_DROP;
2410 }
2411
2412out:
2413 rcu_read_unlock();
2414 return ret;
2415}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002416EXPORT_SYMBOL(netif_receive_skb);
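/*
 * Example (illustrative sketch, not part of this file; the example_* names
 * are invented): a NAPI driver's poll routine hands completed frames to the
 * stack with netif_receive_skb() and completes NAPI only when it did not
 * exhaust its budget.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_adapter *adapter =
 *			container_of(napi, struct example_adapter, napi);
 *		struct sk_buff *skb;
 *		int work_done = 0;
 *
 *		while (work_done < budget &&
 *		       (skb = example_rx_frame(adapter)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, adapter->netdev);
 *			netif_receive_skb(skb);
 *			work_done++;
 *		}
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			example_enable_rx_irq(adapter);
 *		}
 *		return work_done;
 *	}
 */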
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002418/* Network device is going away, flush any packets still pending */
2419static void flush_backlog(void *arg)
2420{
2421 struct net_device *dev = arg;
2422 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2423 struct sk_buff *skb, *tmp;
2424
2425 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2426 if (skb->dev == dev) {
2427 __skb_unlink(skb, &queue->input_pkt_queue);
2428 kfree_skb(skb);
2429 }
2430}
2431
Herbert Xud565b0a2008-12-15 23:38:52 -08002432static int napi_gro_complete(struct sk_buff *skb)
2433{
2434 struct packet_type *ptype;
2435 __be16 type = skb->protocol;
2436 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2437 int err = -ENOENT;
2438
Herbert Xufc59f9a2009-04-14 15:11:06 -07002439 if (NAPI_GRO_CB(skb)->count == 1) {
2440 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002441 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002442 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002443
2444 rcu_read_lock();
2445 list_for_each_entry_rcu(ptype, head, list) {
2446 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2447 continue;
2448
2449 err = ptype->gro_complete(skb);
2450 break;
2451 }
2452 rcu_read_unlock();
2453
2454 if (err) {
2455 WARN_ON(&ptype->list == head);
2456 kfree_skb(skb);
2457 return NET_RX_SUCCESS;
2458 }
2459
2460out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002461 return netif_receive_skb(skb);
2462}
2463
2464void napi_gro_flush(struct napi_struct *napi)
2465{
2466 struct sk_buff *skb, *next;
2467
2468 for (skb = napi->gro_list; skb; skb = next) {
2469 next = skb->next;
2470 skb->next = NULL;
2471 napi_gro_complete(skb);
2472 }
2473
Herbert Xu4ae55442009-02-08 18:00:36 +00002474 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002475 napi->gro_list = NULL;
2476}
2477EXPORT_SYMBOL(napi_gro_flush);
2478
Herbert Xu96e93ea2009-01-06 10:49:34 -08002479int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002480{
2481 struct sk_buff **pp = NULL;
2482 struct packet_type *ptype;
2483 __be16 type = skb->protocol;
2484 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002485 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002486 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002487 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002488
2489 if (!(skb->dev->features & NETIF_F_GRO))
2490 goto normal;
2491
David S. Miller4cf704f2009-06-09 00:18:51 -07002492 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002493 goto normal;
2494
Herbert Xud565b0a2008-12-15 23:38:52 -08002495 rcu_read_lock();
2496 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002497 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2498 continue;
2499
Herbert Xu86911732009-01-29 14:19:50 +00002500 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002501 mac_len = skb->network_header - skb->mac_header;
2502 skb->mac_len = mac_len;
2503 NAPI_GRO_CB(skb)->same_flow = 0;
2504 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002505 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002506
Herbert Xud565b0a2008-12-15 23:38:52 -08002507 pp = ptype->gro_receive(&napi->gro_list, skb);
2508 break;
2509 }
2510 rcu_read_unlock();
2511
2512 if (&ptype->list == head)
2513 goto normal;
2514
Herbert Xu0da2afd52008-12-26 14:57:42 -08002515 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002516 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002517
Herbert Xud565b0a2008-12-15 23:38:52 -08002518 if (pp) {
2519 struct sk_buff *nskb = *pp;
2520
2521 *pp = nskb->next;
2522 nskb->next = NULL;
2523 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002524 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002525 }
2526
Herbert Xu0da2afd52008-12-26 14:57:42 -08002527 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002528 goto ok;
2529
Herbert Xu4ae55442009-02-08 18:00:36 +00002530 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002531 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002532
Herbert Xu4ae55442009-02-08 18:00:36 +00002533 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002534 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002535 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002536 skb->next = napi->gro_list;
2537 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002538 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002539
Herbert Xuad0f9902009-02-01 01:24:55 -08002540pull:
Herbert Xucb189782009-05-26 18:50:31 +00002541 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2542 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2543
2544 BUG_ON(skb->end - skb->tail < grow);
2545
2546 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2547
2548 skb->tail += grow;
2549 skb->data_len -= grow;
2550
2551 skb_shinfo(skb)->frags[0].page_offset += grow;
2552 skb_shinfo(skb)->frags[0].size -= grow;
2553
2554 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2555 put_page(skb_shinfo(skb)->frags[0].page);
2556 memmove(skb_shinfo(skb)->frags,
2557 skb_shinfo(skb)->frags + 1,
2558 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2559 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002560 }
2561
Herbert Xud565b0a2008-12-15 23:38:52 -08002562ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002563 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002564
2565normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002566 ret = GRO_NORMAL;
2567 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002568}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002569EXPORT_SYMBOL(dev_gro_receive);
2570
2571static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2572{
2573 struct sk_buff *p;
2574
Herbert Xud1c76af2009-03-16 10:50:02 -07002575 if (netpoll_rx_on(skb))
2576 return GRO_NORMAL;
2577
Herbert Xu96e93ea2009-01-06 10:49:34 -08002578 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002579 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2580 && !compare_ether_header(skb_mac_header(p),
2581 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002582 NAPI_GRO_CB(p)->flush = 0;
2583 }
2584
2585 return dev_gro_receive(napi, skb);
2586}
Herbert Xu5d38a072009-01-04 16:13:40 -08002587
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002588int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002589{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002590 int err = NET_RX_SUCCESS;
2591
2592 switch (ret) {
2593 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002594 return netif_receive_skb(skb);
2595
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002596 case GRO_DROP:
2597 err = NET_RX_DROP;
2598 /* fall through */
2599
2600 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002601 kfree_skb(skb);
2602 break;
2603 }
2604
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002605 return err;
2606}
2607EXPORT_SYMBOL(napi_skb_finish);
2608
Herbert Xu78a478d2009-05-26 18:50:21 +00002609void skb_gro_reset_offset(struct sk_buff *skb)
2610{
2611 NAPI_GRO_CB(skb)->data_offset = 0;
2612 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002613 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002614
Herbert Xu78d3fd02009-05-26 18:50:23 +00002615 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002616 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002617 NAPI_GRO_CB(skb)->frag0 =
2618 page_address(skb_shinfo(skb)->frags[0].page) +
2619 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002620 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2621 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002622}
2623EXPORT_SYMBOL(skb_gro_reset_offset);
2624
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002625int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2626{
Herbert Xu86911732009-01-29 14:19:50 +00002627 skb_gro_reset_offset(skb);
2628
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002629 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002630}
2631EXPORT_SYMBOL(napi_gro_receive);
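/*
 * Example (illustrative sketch, not part of this file; adapter and netdev
 * are invented): a driver opts into GRO by setting NETIF_F_GRO in
 * dev->features and by substituting napi_gro_receive() for
 * netif_receive_skb() in its poll routine.
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&adapter->napi, skb);
 */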
2632
Herbert Xu96e93ea2009-01-06 10:49:34 -08002633void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2634{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002635 __skb_pull(skb, skb_headlen(skb));
2636 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2637
2638 napi->skb = skb;
2639}
2640EXPORT_SYMBOL(napi_reuse_skb);
2641
Herbert Xu76620aa2009-04-16 02:02:07 -07002642struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002643{
Herbert Xu5d38a072009-01-04 16:13:40 -08002644 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002645
2646 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002647 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2648 if (skb)
2649 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002650 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002651 return skb;
2652}
Herbert Xu76620aa2009-04-16 02:02:07 -07002653EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002654
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002655int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2656{
2657 int err = NET_RX_SUCCESS;
2658
2659 switch (ret) {
2660 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002661 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002662 skb->protocol = eth_type_trans(skb, napi->dev);
2663
2664 if (ret == GRO_NORMAL)
2665 return netif_receive_skb(skb);
2666
2667 skb_gro_pull(skb, -ETH_HLEN);
2668 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002669
2670 case GRO_DROP:
2671 err = NET_RX_DROP;
2672 /* fall through */
2673
2674 case GRO_MERGED_FREE:
2675 napi_reuse_skb(napi, skb);
2676 break;
2677 }
2678
2679 return err;
2680}
2681EXPORT_SYMBOL(napi_frags_finish);
2682
Herbert Xu76620aa2009-04-16 02:02:07 -07002683struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002684{
Herbert Xu76620aa2009-04-16 02:02:07 -07002685 struct sk_buff *skb = napi->skb;
2686 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002687 unsigned int hlen;
2688 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002689
2690 napi->skb = NULL;
2691
2692 skb_reset_mac_header(skb);
2693 skb_gro_reset_offset(skb);
2694
Herbert Xua5b1cf22009-05-26 18:50:28 +00002695 off = skb_gro_offset(skb);
2696 hlen = off + sizeof(*eth);
2697 eth = skb_gro_header_fast(skb, off);
2698 if (skb_gro_header_hard(skb, hlen)) {
2699 eth = skb_gro_header_slow(skb, hlen, off);
2700 if (unlikely(!eth)) {
2701 napi_reuse_skb(napi, skb);
2702 skb = NULL;
2703 goto out;
2704 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002705 }
2706
2707 skb_gro_pull(skb, sizeof(*eth));
2708
2709 /*
2710 * This works because the only protocols we care about don't require
2711 * special handling. We'll fix it up properly at the end.
2712 */
2713 skb->protocol = eth->h_proto;
2714
2715out:
2716 return skb;
2717}
2718EXPORT_SYMBOL(napi_frags_skb);
2719
2720int napi_gro_frags(struct napi_struct *napi)
2721{
2722 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002723
2724 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002725 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002726
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002727 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002728}
2729EXPORT_SYMBOL(napi_gro_frags);
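/*
 * Illustrative sketch (not part of the original file): a driver that
 * receives directly into pages can use the napi_get_frags()/
 * napi_gro_frags() pair instead of building a full skb per packet.
 * The "rx_buf" fields and "priv" structure are hypothetical.
 *
 *	struct sk_buff *skb = napi_get_frags(&priv->napi);
 *
 *	if (skb) {
 *		skb_fill_page_desc(skb, 0, rx_buf->page, rx_buf->offset, len);
 *		skb->len += len;
 *		skb->data_len += len;
 *		skb->truesize += len;
 *		napi_gro_frags(&priv->napi);
 *	}
 *
 * On GRO_MERGED_FREE or GRO_DROP the core recycles the skb back to
 * napi->skb through napi_reuse_skb(), so the driver never frees it.
 */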
2730
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002731static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732{
2733 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2735 unsigned long start_time = jiffies;
2736
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002737 napi->weight = weight_p;
2738 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740
2741 local_irq_disable();
2742 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002743 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002744 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002745 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002746 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002747 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 local_irq_enable();
2749
Herbert Xu8f1ead22009-03-26 00:59:10 -07002750 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002751 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002753 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754}
2755
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002756/**
2757 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002758 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002759 *
2760 * The entry's receive function will be scheduled to run
2761 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002762void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002763{
2764 unsigned long flags;
2765
2766 local_irq_save(flags);
2767 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2768 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2769 local_irq_restore(flags);
2770}
2771EXPORT_SYMBOL(__napi_schedule);
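/*
 * Illustrative sketch (not part of the original file): a typical driver
 * interrupt handler masks its RX interrupt and defers processing to NAPI.
 * The napi_schedule_prep()/__napi_schedule() pair is what napi_schedule()
 * does internally; the "foo" helpers are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_mask_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */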
2772
Herbert Xud565b0a2008-12-15 23:38:52 -08002773void __napi_complete(struct napi_struct *n)
2774{
2775 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2776 BUG_ON(n->gro_list);
2777
2778 list_del(&n->poll_list);
2779 smp_mb__before_clear_bit();
2780 clear_bit(NAPI_STATE_SCHED, &n->state);
2781}
2782EXPORT_SYMBOL(__napi_complete);
2783
2784void napi_complete(struct napi_struct *n)
2785{
2786 unsigned long flags;
2787
2788 /*
2789 * don't let napi dequeue from the cpu poll list
2790	 * just in case it's running on a different cpu
2791 */
2792 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2793 return;
2794
2795 napi_gro_flush(n);
2796 local_irq_save(flags);
2797 __napi_complete(n);
2798 local_irq_restore(flags);
2799}
2800EXPORT_SYMBOL(napi_complete);
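/*
 * Illustrative sketch (not part of the original file): a driver ->poll()
 * callback processes at most "budget" packets and calls napi_complete()
 * only when it did less work than allowed, i.e. the ring is drained.
 * The "foo" helper names are hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_clean_rx_ring(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			foo_unmask_rx_irq(priv);
 *		}
 *		return work_done;
 *	}
 */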
2801
2802void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2803 int (*poll)(struct napi_struct *, int), int weight)
2804{
2805 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002806 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002807 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002808 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002809 napi->poll = poll;
2810 napi->weight = weight;
2811 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002812 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002813#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002814 spin_lock_init(&napi->poll_lock);
2815 napi->poll_owner = -1;
2816#endif
2817 set_bit(NAPI_STATE_SCHED, &napi->state);
2818}
2819EXPORT_SYMBOL(netif_napi_add);
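/*
 * Illustrative sketch (not part of the original file): a driver registers
 * its NAPI context once at probe time, commonly with a weight of 64,
 *
 *	netif_napi_add(netdev, &priv->napi, foo_poll, 64);
 *
 * and tears it down again with netif_napi_del() before free_netdev().
 * "foo_poll" and "priv" are the hypothetical names from the sketches above.
 */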
2820
2821void netif_napi_del(struct napi_struct *napi)
2822{
2823 struct sk_buff *skb, *next;
2824
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002825 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002826 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002827
2828 for (skb = napi->gro_list; skb; skb = next) {
2829 next = skb->next;
2830 skb->next = NULL;
2831 kfree_skb(skb);
2832 }
2833
2834 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002835 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002836}
2837EXPORT_SYMBOL(netif_napi_del);
2838
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002839
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840static void net_rx_action(struct softirq_action *h)
2841{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002842 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002843 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002844 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002845 void *have;
2846
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 local_irq_disable();
2848
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002849 while (!list_empty(list)) {
2850 struct napi_struct *n;
2851 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002853		/* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002854		 * Allow this to run for 2 jiffies, which allows
2855 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002856 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002857 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 goto softnet_break;
2859
2860 local_irq_enable();
2861
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002862 /* Even though interrupts have been re-enabled, this
2863 * access is safe because interrupts can only add new
2864 * entries to the tail of this list, and only ->poll()
2865 * calls can remove this head entry from the list.
2866 */
2867 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002869 have = netpoll_poll_lock(n);
2870
2871 weight = n->weight;
2872
David S. Miller0a7606c2007-10-29 21:28:47 -07002873 /* This NAPI_STATE_SCHED test is for avoiding a race
2874 * with netpoll's poll_napi(). Only the entity which
2875 * obtains the lock and sees NAPI_STATE_SCHED set will
2876 * actually make the ->poll() call. Therefore we avoid
2877		 * accidentally calling ->poll() when NAPI is not scheduled.
2878 */
2879 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002880 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002881 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002882 trace_napi_poll(n);
2883 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002884
2885 WARN_ON_ONCE(work > weight);
2886
2887 budget -= work;
2888
2889 local_irq_disable();
2890
2891 /* Drivers must not modify the NAPI state if they
2892 * consume the entire weight. In such cases this code
2893 * still "owns" the NAPI instance and therefore can
2894 * move the instance around on the list at-will.
2895 */
David S. Millerfed17f32008-01-07 21:00:40 -08002896 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07002897 if (unlikely(napi_disable_pending(n))) {
2898 local_irq_enable();
2899 napi_complete(n);
2900 local_irq_disable();
2901 } else
David S. Millerfed17f32008-01-07 21:00:40 -08002902 list_move_tail(&n->poll_list, list);
2903 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002904
2905 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 }
2907out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002908 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002909
Chris Leechdb217332006-06-17 21:24:58 -07002910#ifdef CONFIG_NET_DMA
2911 /*
2912 * There may not be any more sk_buffs coming right now, so push
2913 * any pending DMA copies to hardware
2914 */
Dan Williams2ba05622009-01-06 11:38:14 -07002915 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002916#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002917
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 return;
2919
2920softnet_break:
2921 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2922 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2923 goto out;
2924}
2925
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002926static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927
2928/**
2929 * register_gifconf - register a SIOCGIF handler
2930 * @family: Address family
2931 * @gifconf: Function handler
2932 *
2933 * Register protocol dependent address dumping routines. The handler
2934 * that is passed must not be freed or reused until it has been replaced
2935 * by another handler.
2936 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002937int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938{
2939 if (family >= NPROTO)
2940 return -EINVAL;
2941 gifconf_list[family] = gifconf;
2942 return 0;
2943}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002944EXPORT_SYMBOL(register_gifconf);
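/*
 * Illustrative sketch (not part of the original file): address families
 * register their SIOCGIFCONF helper at init time, roughly
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * so that dev_ifconf() below can ask each registered family to dump its
 * addresses into the user buffer.
 */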
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
2946
2947/*
2948 * Map an interface index to its name (SIOCGIFNAME)
2949 */
2950
2951/*
2952 * We need this ioctl for efficient implementation of the
2953 * if_indextoname() function required by the IPv6 API. Without
2954 * it, we would have to search all the interfaces to find a
2955 * match. --pb
2956 */
2957
Eric W. Biederman881d9662007-09-17 11:56:21 -07002958static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959{
2960 struct net_device *dev;
2961 struct ifreq ifr;
2962
2963 /*
2964 * Fetch the caller's info block.
2965 */
2966
2967 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2968 return -EFAULT;
2969
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00002970 rcu_read_lock();
2971 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00002973 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 return -ENODEV;
2975 }
2976
2977 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00002978 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979
2980 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2981 return -EFAULT;
2982 return 0;
2983}
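/*
 * Illustrative sketch (not part of the original file): from userspace the
 * SIOCGIFNAME handler above is essentially what if_indextoname() boils
 * down to; "idx" and "fd" are hypothetical.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = idx;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("index %d is %s\n", idx, ifr.ifr_name);
 */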
2984
2985/*
2986 * Perform a SIOCGIFCONF call. This structure will change
2987 * size eventually, and there is nothing I can do about it.
2988 * Thus we will need a 'compatibility mode'.
2989 */
2990
Eric W. Biederman881d9662007-09-17 11:56:21 -07002991static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992{
2993 struct ifconf ifc;
2994 struct net_device *dev;
2995 char __user *pos;
2996 int len;
2997 int total;
2998 int i;
2999
3000 /*
3001 * Fetch the caller's info block.
3002 */
3003
3004 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3005 return -EFAULT;
3006
3007 pos = ifc.ifc_buf;
3008 len = ifc.ifc_len;
3009
3010 /*
3011 * Loop over the interfaces, and write an info block for each.
3012 */
3013
3014 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003015 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 for (i = 0; i < NPROTO; i++) {
3017 if (gifconf_list[i]) {
3018 int done;
3019 if (!pos)
3020 done = gifconf_list[i](dev, NULL, 0);
3021 else
3022 done = gifconf_list[i](dev, pos + total,
3023 len - total);
3024 if (done < 0)
3025 return -EFAULT;
3026 total += done;
3027 }
3028 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003029 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030
3031 /*
3032 * All done. Write the updated control block back to the caller.
3033 */
3034 ifc.ifc_len = total;
3035
3036 /*
3037 * Both BSD and Solaris return 0 here, so we do too.
3038 */
3039 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3040}
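/*
 * Illustrative sketch (not part of the original file): the classic
 * userspace use of SIOCGIFCONF is to dump all configured IPv4 interfaces
 * into a fixed-size buffer; buffer size and variable names are made up.
 *
 *	struct ifconf ifc;
 *	char buf[4096];
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ifc.ifc_len = sizeof(buf);
 *	ifc.ifc_buf = buf;
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
 *		int n = ifc.ifc_len / sizeof(struct ifreq);
 *		// ifc.ifc_req[0..n-1] now hold name/address pairs
 *	}
 */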
3041
3042#ifdef CONFIG_PROC_FS
3043/*
3044 * This is invoked by the /proc filesystem handler to display a device
3045 * in detail.
3046 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003048 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049{
Denis V. Luneve372c412007-11-19 22:31:54 -08003050 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003051 loff_t off;
3052 struct net_device *dev;
3053
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003055 if (!*pos)
3056 return SEQ_START_TOKEN;
3057
3058 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003059 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003060 if (off++ == *pos)
3061 return dev;
3062
3063 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064}
3065
3066void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3067{
Denis V. Luneve372c412007-11-19 22:31:54 -08003068 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07003070 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07003071 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072}
3073
3074void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003075 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076{
3077 read_unlock(&dev_base_lock);
3078}
3079
3080static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3081{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003082 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083
Rusty Russell5a1b5892007-04-28 21:04:03 -07003084 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3085 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3086 dev->name, stats->rx_bytes, stats->rx_packets,
3087 stats->rx_errors,
3088 stats->rx_dropped + stats->rx_missed_errors,
3089 stats->rx_fifo_errors,
3090 stats->rx_length_errors + stats->rx_over_errors +
3091 stats->rx_crc_errors + stats->rx_frame_errors,
3092 stats->rx_compressed, stats->multicast,
3093 stats->tx_bytes, stats->tx_packets,
3094 stats->tx_errors, stats->tx_dropped,
3095 stats->tx_fifo_errors, stats->collisions,
3096 stats->tx_carrier_errors +
3097 stats->tx_aborted_errors +
3098 stats->tx_window_errors +
3099 stats->tx_heartbeat_errors,
3100 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101}
3102
3103/*
3104 * Called from the PROCfs module. This now uses the new arbitrary sized
3105 * /proc/net interface to create /proc/net/dev
3106 */
3107static int dev_seq_show(struct seq_file *seq, void *v)
3108{
3109 if (v == SEQ_START_TOKEN)
3110 seq_puts(seq, "Inter-| Receive "
3111 " | Transmit\n"
3112 " face |bytes packets errs drop fifo frame "
3113 "compressed multicast|bytes packets errs "
3114 "drop fifo colls carrier compressed\n");
3115 else
3116 dev_seq_printf_stats(seq, v);
3117 return 0;
3118}
3119
3120static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3121{
3122 struct netif_rx_stats *rc = NULL;
3123
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003124 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003125 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 rc = &per_cpu(netdev_rx_stat, *pos);
3127 break;
3128 } else
3129 ++*pos;
3130 return rc;
3131}
3132
3133static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3134{
3135 return softnet_get_online(pos);
3136}
3137
3138static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3139{
3140 ++*pos;
3141 return softnet_get_online(pos);
3142}
3143
3144static void softnet_seq_stop(struct seq_file *seq, void *v)
3145{
3146}
3147
3148static int softnet_seq_show(struct seq_file *seq, void *v)
3149{
3150 struct netif_rx_stats *s = v;
3151
3152 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003153 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003154 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003155 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 return 0;
3157}
3158
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003159static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160 .start = dev_seq_start,
3161 .next = dev_seq_next,
3162 .stop = dev_seq_stop,
3163 .show = dev_seq_show,
3164};
3165
3166static int dev_seq_open(struct inode *inode, struct file *file)
3167{
Denis V. Luneve372c412007-11-19 22:31:54 -08003168 return seq_open_net(inode, file, &dev_seq_ops,
3169 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170}
3171
Arjan van de Ven9a321442007-02-12 00:55:35 -08003172static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173 .owner = THIS_MODULE,
3174 .open = dev_seq_open,
3175 .read = seq_read,
3176 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003177 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178};
3179
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003180static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 .start = softnet_seq_start,
3182 .next = softnet_seq_next,
3183 .stop = softnet_seq_stop,
3184 .show = softnet_seq_show,
3185};
3186
3187static int softnet_seq_open(struct inode *inode, struct file *file)
3188{
3189 return seq_open(file, &softnet_seq_ops);
3190}
3191
Arjan van de Ven9a321442007-02-12 00:55:35 -08003192static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 .owner = THIS_MODULE,
3194 .open = softnet_seq_open,
3195 .read = seq_read,
3196 .llseek = seq_lseek,
3197 .release = seq_release,
3198};
3199
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003200static void *ptype_get_idx(loff_t pos)
3201{
3202 struct packet_type *pt = NULL;
3203 loff_t i = 0;
3204 int t;
3205
3206 list_for_each_entry_rcu(pt, &ptype_all, list) {
3207 if (i == pos)
3208 return pt;
3209 ++i;
3210 }
3211
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003212 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003213 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3214 if (i == pos)
3215 return pt;
3216 ++i;
3217 }
3218 }
3219 return NULL;
3220}
3221
3222static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003223 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003224{
3225 rcu_read_lock();
3226 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3227}
3228
3229static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3230{
3231 struct packet_type *pt;
3232 struct list_head *nxt;
3233 int hash;
3234
3235 ++*pos;
3236 if (v == SEQ_START_TOKEN)
3237 return ptype_get_idx(0);
3238
3239 pt = v;
3240 nxt = pt->list.next;
3241 if (pt->type == htons(ETH_P_ALL)) {
3242 if (nxt != &ptype_all)
3243 goto found;
3244 hash = 0;
3245 nxt = ptype_base[0].next;
3246 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003247 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003248
3249 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003250 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003251 return NULL;
3252 nxt = ptype_base[hash].next;
3253 }
3254found:
3255 return list_entry(nxt, struct packet_type, list);
3256}
3257
3258static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003259 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003260{
3261 rcu_read_unlock();
3262}
3263
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003264static int ptype_seq_show(struct seq_file *seq, void *v)
3265{
3266 struct packet_type *pt = v;
3267
3268 if (v == SEQ_START_TOKEN)
3269 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003270 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003271 if (pt->type == htons(ETH_P_ALL))
3272 seq_puts(seq, "ALL ");
3273 else
3274 seq_printf(seq, "%04x", ntohs(pt->type));
3275
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003276 seq_printf(seq, " %-8s %pF\n",
3277 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003278 }
3279
3280 return 0;
3281}
3282
3283static const struct seq_operations ptype_seq_ops = {
3284 .start = ptype_seq_start,
3285 .next = ptype_seq_next,
3286 .stop = ptype_seq_stop,
3287 .show = ptype_seq_show,
3288};
3289
3290static int ptype_seq_open(struct inode *inode, struct file *file)
3291{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003292 return seq_open_net(inode, file, &ptype_seq_ops,
3293 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003294}
3295
3296static const struct file_operations ptype_seq_fops = {
3297 .owner = THIS_MODULE,
3298 .open = ptype_seq_open,
3299 .read = seq_read,
3300 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003301 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003302};
3303
3304
Pavel Emelyanov46650792007-10-08 20:38:39 -07003305static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306{
3307 int rc = -ENOMEM;
3308
Eric W. Biederman881d9662007-09-17 11:56:21 -07003309 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003311 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003312 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003313 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003314 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003315
Eric W. Biederman881d9662007-09-17 11:56:21 -07003316 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003317 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318 rc = 0;
3319out:
3320 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003321out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003322 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003324 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003326 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327 goto out;
3328}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003329
Pavel Emelyanov46650792007-10-08 20:38:39 -07003330static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003331{
3332 wext_proc_exit(net);
3333
3334 proc_net_remove(net, "ptype");
3335 proc_net_remove(net, "softnet_stat");
3336 proc_net_remove(net, "dev");
3337}
3338
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003339static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003340 .init = dev_proc_net_init,
3341 .exit = dev_proc_net_exit,
3342};
3343
3344static int __init dev_proc_init(void)
3345{
3346 return register_pernet_subsys(&dev_proc_ops);
3347}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348#else
3349#define dev_proc_init() 0
3350#endif /* CONFIG_PROC_FS */
3351
3352
3353/**
3354 * netdev_set_master - set up master/slave pair
3355 * @slave: slave device
3356 * @master: new master device
3357 *
3358 * Changes the master device of the slave. Pass %NULL to break the
3359 * bonding. The caller must hold the RTNL semaphore. On a failure
3360 * a negative errno code is returned. On success the reference counts
3361 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3362 * function returns zero.
3363 */
3364int netdev_set_master(struct net_device *slave, struct net_device *master)
3365{
3366 struct net_device *old = slave->master;
3367
3368 ASSERT_RTNL();
3369
3370 if (master) {
3371 if (old)
3372 return -EBUSY;
3373 dev_hold(master);
3374 }
3375
3376 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003377
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 synchronize_net();
3379
3380 if (old)
3381 dev_put(old);
3382
3383 if (master)
3384 slave->flags |= IFF_SLAVE;
3385 else
3386 slave->flags &= ~IFF_SLAVE;
3387
3388 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3389 return 0;
3390}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003391EXPORT_SYMBOL(netdev_set_master);
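/*
 * Illustrative sketch (not part of the original file): the bonding driver
 * is the main caller of netdev_set_master(); enslaving and releasing look
 * roughly like this, always under RTNL and with error handling omitted.
 *
 *	err = netdev_set_master(slave_dev, bond_dev);	// enslave
 *	...
 *	netdev_set_master(slave_dev, NULL);		// release
 */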
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003393static void dev_change_rx_flags(struct net_device *dev, int flags)
3394{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003395 const struct net_device_ops *ops = dev->netdev_ops;
3396
3397 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3398 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003399}
3400
Wang Chendad9b332008-06-18 01:48:28 -07003401static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003402{
3403 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003404 uid_t uid;
3405 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003406
Patrick McHardy24023452007-07-14 18:51:31 -07003407 ASSERT_RTNL();
3408
Wang Chendad9b332008-06-18 01:48:28 -07003409 dev->flags |= IFF_PROMISC;
3410 dev->promiscuity += inc;
3411 if (dev->promiscuity == 0) {
3412 /*
3413 * Avoid overflow.
3414 * If inc causes overflow, untouch promisc and return error.
3415 */
3416 if (inc < 0)
3417 dev->flags &= ~IFF_PROMISC;
3418 else {
3419 dev->promiscuity -= inc;
3420 printk(KERN_WARNING "%s: promiscuity touches roof, "
3421 "set promiscuity failed, promiscuity feature "
3422 "of device might be broken.\n", dev->name);
3423 return -EOVERFLOW;
3424 }
3425 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003426 if (dev->flags != old_flags) {
3427 printk(KERN_INFO "device %s %s promiscuous mode\n",
3428 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3429 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003430 if (audit_enabled) {
3431 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003432 audit_log(current->audit_context, GFP_ATOMIC,
3433 AUDIT_ANOM_PROMISCUOUS,
3434 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3435 dev->name, (dev->flags & IFF_PROMISC),
3436 (old_flags & IFF_PROMISC),
3437 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003438 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003439 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003440 }
Patrick McHardy24023452007-07-14 18:51:31 -07003441
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003442 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003443 }
Wang Chendad9b332008-06-18 01:48:28 -07003444 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003445}
3446
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447/**
3448 * dev_set_promiscuity - update promiscuity count on a device
3449 * @dev: device
3450 * @inc: modifier
3451 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003452 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 * remains above zero the interface remains promiscuous. Once it hits zero
3454 * the device reverts back to normal filtering operation. A negative inc
3455 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003456 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457 */
Wang Chendad9b332008-06-18 01:48:28 -07003458int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459{
3460 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003461 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462
Wang Chendad9b332008-06-18 01:48:28 -07003463 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003464 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003465 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003466 if (dev->flags != old_flags)
3467 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003468 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003470EXPORT_SYMBOL(dev_set_promiscuity);
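/*
 * Illustrative sketch (not part of the original file): users such as
 * packet sockets or bridging bump the counter when they need to see all
 * traffic and drop it again on release, always under RTNL.
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, 1);	// start seeing all frames
 *	...
 *	dev_set_promiscuity(dev, -1);	// matching decrement later
 *	rtnl_unlock();
 */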
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471
3472/**
3473 * dev_set_allmulti - update allmulti count on a device
3474 * @dev: device
3475 * @inc: modifier
3476 *
3477 * Add or remove reception of all multicast frames to a device. While the
3478 *	count in the device remains above zero the interface remains listening
3479 *	to all multicast frames. Once it hits zero the device reverts back to normal
3480 * filtering operation. A negative @inc value is used to drop the counter
3481 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003482 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483 */
3484
Wang Chendad9b332008-06-18 01:48:28 -07003485int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486{
3487 unsigned short old_flags = dev->flags;
3488
Patrick McHardy24023452007-07-14 18:51:31 -07003489 ASSERT_RTNL();
3490
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003492 dev->allmulti += inc;
3493 if (dev->allmulti == 0) {
3494 /*
3495 * Avoid overflow.
3496 * If inc causes overflow, untouch allmulti and return error.
3497 */
3498 if (inc < 0)
3499 dev->flags &= ~IFF_ALLMULTI;
3500 else {
3501 dev->allmulti -= inc;
3502 printk(KERN_WARNING "%s: allmulti touches roof, "
3503 "set allmulti failed, allmulti feature of "
3504 "device might be broken.\n", dev->name);
3505 return -EOVERFLOW;
3506 }
3507 }
Patrick McHardy24023452007-07-14 18:51:31 -07003508 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003509 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003510 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003511 }
Wang Chendad9b332008-06-18 01:48:28 -07003512 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003513}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003514EXPORT_SYMBOL(dev_set_allmulti);
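/*
 * Illustrative sketch (not part of the original file): code that needs
 * every multicast frame uses the same counted pattern as promiscuity,
 * under RTNL.
 *
 *	dev_set_allmulti(dev, 1);
 *	...
 *	dev_set_allmulti(dev, -1);
 */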
Patrick McHardy4417da62007-06-27 01:28:10 -07003515
3516/*
3517 * Upload unicast and multicast address lists to device and
3518 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003519 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003520 * are present.
3521 */
3522void __dev_set_rx_mode(struct net_device *dev)
3523{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003524 const struct net_device_ops *ops = dev->netdev_ops;
3525
Patrick McHardy4417da62007-06-27 01:28:10 -07003526 /* dev_open will call this function so the list will stay sane. */
3527 if (!(dev->flags&IFF_UP))
3528 return;
3529
3530 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003531 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003532
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003533 if (ops->ndo_set_rx_mode)
3534 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003535 else {
3536 /* Unicast addresses changes may only happen under the rtnl,
3537 * therefore calling __dev_set_promiscuity here is safe.
3538 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003539 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003540 __dev_set_promiscuity(dev, 1);
3541 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003542 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003543 __dev_set_promiscuity(dev, -1);
3544 dev->uc_promisc = 0;
3545 }
3546
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003547 if (ops->ndo_set_multicast_list)
3548 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003549 }
3550}
3551
3552void dev_set_rx_mode(struct net_device *dev)
3553{
David S. Millerb9e40852008-07-15 00:15:08 -07003554 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003555 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003556 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557}
3558
Jiri Pirkof001fde2009-05-05 02:48:28 +00003559/* hw addresses list handling functions */
3560
Jiri Pirko31278e72009-06-17 01:12:19 +00003561static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3562 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003563{
3564 struct netdev_hw_addr *ha;
3565 int alloc_size;
3566
3567 if (addr_len > MAX_ADDR_LEN)
3568 return -EINVAL;
3569
Jiri Pirko31278e72009-06-17 01:12:19 +00003570 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003571 if (!memcmp(ha->addr, addr, addr_len) &&
3572 ha->type == addr_type) {
3573 ha->refcount++;
3574 return 0;
3575 }
3576 }
3577
3578
Jiri Pirkof001fde2009-05-05 02:48:28 +00003579 alloc_size = sizeof(*ha);
3580 if (alloc_size < L1_CACHE_BYTES)
3581 alloc_size = L1_CACHE_BYTES;
3582 ha = kmalloc(alloc_size, GFP_ATOMIC);
3583 if (!ha)
3584 return -ENOMEM;
3585 memcpy(ha->addr, addr, addr_len);
3586 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003587 ha->refcount = 1;
3588 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003589 list_add_tail_rcu(&ha->list, &list->list);
3590 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003591 return 0;
3592}
3593
3594static void ha_rcu_free(struct rcu_head *head)
3595{
3596 struct netdev_hw_addr *ha;
3597
3598 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3599 kfree(ha);
3600}
3601
Jiri Pirko31278e72009-06-17 01:12:19 +00003602static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3603 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003604{
3605 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003606
Jiri Pirko31278e72009-06-17 01:12:19 +00003607 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003608 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003609 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003610 if (--ha->refcount)
3611 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003612 list_del_rcu(&ha->list);
3613 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003614 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003615 return 0;
3616 }
3617 }
3618 return -ENOENT;
3619}
3620
Jiri Pirko31278e72009-06-17 01:12:19 +00003621static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3622 struct netdev_hw_addr_list *from_list,
3623 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003624 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003625{
3626 int err;
3627 struct netdev_hw_addr *ha, *ha2;
3628 unsigned char type;
3629
Jiri Pirko31278e72009-06-17 01:12:19 +00003630 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003631 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003632 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003633 if (err)
3634 goto unroll;
3635 }
3636 return 0;
3637
3638unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003639 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003640 if (ha2 == ha)
3641 break;
3642 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003643 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003644 }
3645 return err;
3646}
3647
Jiri Pirko31278e72009-06-17 01:12:19 +00003648static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3649 struct netdev_hw_addr_list *from_list,
3650 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003651 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003652{
3653 struct netdev_hw_addr *ha;
3654 unsigned char type;
3655
Jiri Pirko31278e72009-06-17 01:12:19 +00003656 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003657 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003658		__hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003659 }
3660}
3661
Jiri Pirko31278e72009-06-17 01:12:19 +00003662static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3663 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003664 int addr_len)
3665{
3666 int err = 0;
3667 struct netdev_hw_addr *ha, *tmp;
3668
Jiri Pirko31278e72009-06-17 01:12:19 +00003669 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003670 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003671 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003672 addr_len, ha->type);
3673 if (err)
3674 break;
3675 ha->synced = true;
3676 ha->refcount++;
3677 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003678 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3679 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003680 }
3681 }
3682 return err;
3683}
3684
Jiri Pirko31278e72009-06-17 01:12:19 +00003685static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3686 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003687 int addr_len)
3688{
3689 struct netdev_hw_addr *ha, *tmp;
3690
Jiri Pirko31278e72009-06-17 01:12:19 +00003691 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003692 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003693 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003694 addr_len, ha->type);
3695 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003696 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003697 addr_len, ha->type);
3698 }
3699 }
3700}
3701
Jiri Pirko31278e72009-06-17 01:12:19 +00003702static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003703{
3704 struct netdev_hw_addr *ha, *tmp;
3705
Jiri Pirko31278e72009-06-17 01:12:19 +00003706 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003707 list_del_rcu(&ha->list);
3708 call_rcu(&ha->rcu_head, ha_rcu_free);
3709 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003710 list->count = 0;
3711}
3712
3713static void __hw_addr_init(struct netdev_hw_addr_list *list)
3714{
3715 INIT_LIST_HEAD(&list->list);
3716 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003717}
3718
3719/* Device addresses handling functions */
3720
3721static void dev_addr_flush(struct net_device *dev)
3722{
3723 /* rtnl_mutex must be held here */
3724
Jiri Pirko31278e72009-06-17 01:12:19 +00003725 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003726 dev->dev_addr = NULL;
3727}
3728
3729static int dev_addr_init(struct net_device *dev)
3730{
3731 unsigned char addr[MAX_ADDR_LEN];
3732 struct netdev_hw_addr *ha;
3733 int err;
3734
3735 /* rtnl_mutex must be held here */
3736
Jiri Pirko31278e72009-06-17 01:12:19 +00003737 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003738 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003739 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003740 NETDEV_HW_ADDR_T_LAN);
3741 if (!err) {
3742 /*
3743 * Get the first (previously created) address from the list
3744 * and set dev_addr pointer to this location.
3745 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003746 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003747 struct netdev_hw_addr, list);
3748 dev->dev_addr = ha->addr;
3749 }
3750 return err;
3751}
3752
3753/**
3754 * dev_addr_add - Add a device address
3755 * @dev: device
3756 * @addr: address to add
3757 * @addr_type: address type
3758 *
3759 * Add a device address to the device or increase the reference count if
3760 * it already exists.
3761 *
3762 * The caller must hold the rtnl_mutex.
3763 */
3764int dev_addr_add(struct net_device *dev, unsigned char *addr,
3765 unsigned char addr_type)
3766{
3767 int err;
3768
3769 ASSERT_RTNL();
3770
Jiri Pirko31278e72009-06-17 01:12:19 +00003771 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003772 if (!err)
3773 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3774 return err;
3775}
3776EXPORT_SYMBOL(dev_addr_add);
3777
3778/**
3779 * dev_addr_del - Release a device address.
3780 * @dev: device
3781 * @addr: address to delete
3782 * @addr_type: address type
3783 *
3784 * Release reference to a device address and remove it from the device
3785 * if the reference count drops to zero.
3786 *
3787 * The caller must hold the rtnl_mutex.
3788 */
3789int dev_addr_del(struct net_device *dev, unsigned char *addr,
3790 unsigned char addr_type)
3791{
3792 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003793 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003794
3795 ASSERT_RTNL();
3796
Jiri Pirkoccffad252009-05-22 23:22:17 +00003797 /*
3798 * We can not remove the first address from the list because
3799 * dev->dev_addr points to that.
3800 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003801 ha = list_first_entry(&dev->dev_addrs.list,
3802 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003803 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3804 return -ENOENT;
3805
Jiri Pirko31278e72009-06-17 01:12:19 +00003806 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003807 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003808 if (!err)
3809 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3810 return err;
3811}
3812EXPORT_SYMBOL(dev_addr_del);
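/*
 * Illustrative sketch (not part of the original file): a driver or
 * stacked device can attach an extra MAC address to a netdev and drop it
 * later; the address value here is made up.
 *
 *	unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	rtnl_lock();
 *	dev_addr_add(dev, mac, NETDEV_HW_ADDR_T_LAN);
 *	...
 *	dev_addr_del(dev, mac, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */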
3813
3814/**
3815 * dev_addr_add_multiple - Add device addresses from another device
3816 * @to_dev: device to which addresses will be added
3817 * @from_dev: device from which addresses will be added
3818 * @addr_type: address type - 0 means type will be used from from_dev
3819 *
3820 *	Add the device addresses of one device to another.
Jiri Pirkof001fde2009-05-05 02:48:28 +00003821 *
3822 * The caller must hold the rtnl_mutex.
3823 */
3824int dev_addr_add_multiple(struct net_device *to_dev,
3825 struct net_device *from_dev,
3826 unsigned char addr_type)
3827{
3828 int err;
3829
3830 ASSERT_RTNL();
3831
3832 if (from_dev->addr_len != to_dev->addr_len)
3833 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003834 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003835 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003836 if (!err)
3837 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3838 return err;
3839}
3840EXPORT_SYMBOL(dev_addr_add_multiple);
3841
3842/**
3843 * dev_addr_del_multiple - Delete device addresses by another device
3844 * @to_dev: device where the addresses will be deleted
3845 *	@from_dev: device supplying the addresses to be deleted
3846 *	@addr_type: address type - 0 means the type will be taken from from_dev
3847 *
3848 *	Deletes the addresses in @to_dev that appear in @from_dev's address list.
3849 *
3850 * The caller must hold the rtnl_mutex.
3851 */
3852int dev_addr_del_multiple(struct net_device *to_dev,
3853 struct net_device *from_dev,
3854 unsigned char addr_type)
3855{
3856 ASSERT_RTNL();
3857
3858 if (from_dev->addr_len != to_dev->addr_len)
3859 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003860 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003861 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003862 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3863 return 0;
3864}
3865EXPORT_SYMBOL(dev_addr_del_multiple);
3866
Jiri Pirko31278e72009-06-17 01:12:19 +00003867/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003868
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003869int __dev_addr_delete(struct dev_addr_list **list, int *count,
3870 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003871{
3872 struct dev_addr_list *da;
3873
3874 for (; (da = *list) != NULL; list = &da->next) {
3875 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3876 alen == da->da_addrlen) {
3877 if (glbl) {
3878 int old_glbl = da->da_gusers;
3879 da->da_gusers = 0;
3880 if (old_glbl == 0)
3881 break;
3882 }
3883 if (--da->da_users)
3884 return 0;
3885
3886 *list = da->next;
3887 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003888 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003889 return 0;
3890 }
3891 }
3892 return -ENOENT;
3893}
3894
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003895int __dev_addr_add(struct dev_addr_list **list, int *count,
3896 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003897{
3898 struct dev_addr_list *da;
3899
3900 for (da = *list; da != NULL; da = da->next) {
3901 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3902 da->da_addrlen == alen) {
3903 if (glbl) {
3904 int old_glbl = da->da_gusers;
3905 da->da_gusers = 1;
3906 if (old_glbl)
3907 return 0;
3908 }
3909 da->da_users++;
3910 return 0;
3911 }
3912 }
3913
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003914 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003915 if (da == NULL)
3916 return -ENOMEM;
3917 memcpy(da->da_addr, addr, alen);
3918 da->da_addrlen = alen;
3919 da->da_users = 1;
3920 da->da_gusers = glbl ? 1 : 0;
3921 da->next = *list;
3922 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003923 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003924 return 0;
3925}
3926
Patrick McHardy4417da62007-06-27 01:28:10 -07003927/**
3928 * dev_unicast_delete - Release secondary unicast address.
3929 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003930 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003931 *
3932 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003933 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003934 *
3935 * The caller must hold the rtnl_mutex.
3936 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003937int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003938{
3939 int err;
3940
3941 ASSERT_RTNL();
3942
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003943 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003944 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3945 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003946 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003947 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003948 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003949 return err;
3950}
3951EXPORT_SYMBOL(dev_unicast_delete);
3952
3953/**
3954 * dev_unicast_add - add a secondary unicast address
3955 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003956 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003957 *
3958 * Add a secondary unicast address to the device or increase
3959 * the reference count if it already exists.
3960 *
3961 * The caller must hold the rtnl_mutex.
3962 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003963int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003964{
3965 int err;
3966
3967 ASSERT_RTNL();
3968
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003969 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003970 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
3971 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003972 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003973 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003974 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003975 return err;
3976}
3977EXPORT_SYMBOL(dev_unicast_add);
3978
Chris Leeche83a2ea2008-01-31 16:53:23 -08003979int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3980 struct dev_addr_list **from, int *from_count)
3981{
3982 struct dev_addr_list *da, *next;
3983 int err = 0;
3984
3985 da = *from;
3986 while (da != NULL) {
3987 next = da->next;
3988 if (!da->da_synced) {
3989 err = __dev_addr_add(to, to_count,
3990 da->da_addr, da->da_addrlen, 0);
3991 if (err < 0)
3992 break;
3993 da->da_synced = 1;
3994 da->da_users++;
3995 } else if (da->da_users == 1) {
3996 __dev_addr_delete(to, to_count,
3997 da->da_addr, da->da_addrlen, 0);
3998 __dev_addr_delete(from, from_count,
3999 da->da_addr, da->da_addrlen, 0);
4000 }
4001 da = next;
4002 }
4003 return err;
4004}
Johannes Bergc4029082009-06-17 17:43:30 +02004005EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004006
4007void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4008 struct dev_addr_list **from, int *from_count)
4009{
4010 struct dev_addr_list *da, *next;
4011
4012 da = *from;
4013 while (da != NULL) {
4014 next = da->next;
4015 if (da->da_synced) {
4016 __dev_addr_delete(to, to_count,
4017 da->da_addr, da->da_addrlen, 0);
4018 da->da_synced = 0;
4019 __dev_addr_delete(from, from_count,
4020 da->da_addr, da->da_addrlen, 0);
4021 }
4022 da = next;
4023 }
4024}
Johannes Bergc4029082009-06-17 17:43:30 +02004025EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004026
4027/**
4028 * dev_unicast_sync - Synchronize device's unicast list to another device
4029 * @to: destination device
4030 * @from: source device
4031 *
4032 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004033 * addresses that have no users left. The source device must be
4034 *	addresses that have no users left. The source device must be
 *	locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004035 *
4036 * This function is intended to be called from the dev->set_rx_mode
4037 * function of layered software devices.
4038 */
4039int dev_unicast_sync(struct net_device *to, struct net_device *from)
4040{
4041 int err = 0;
4042
Jiri Pirkoccffad252009-05-22 23:22:17 +00004043 if (to->addr_len != from->addr_len)
4044 return -EINVAL;
4045
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004046 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004047 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004048 if (!err)
4049 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004050 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004051 return err;
4052}
4053EXPORT_SYMBOL(dev_unicast_sync);
4054
4055/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004056 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004057 * @to: destination device
4058 * @from: source device
4059 *
4060 * Remove all addresses that were added to the destination device by
4061 * dev_unicast_sync(). This function is intended to be called from the
4062 * dev->stop function of layered software devices.
4063 */
4064void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4065{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004066 if (to->addr_len != from->addr_len)
4067 return;
4068
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004069 netif_addr_lock_bh(from);
4070 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004071 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004072 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004073 netif_addr_unlock(to);
4074 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004075}
4076EXPORT_SYMBOL(dev_unicast_unsync);
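/*
 * Illustrative sketch (not part of the original file): a stacked device
 * such as a VLAN or bonding master propagates its unicast filter to the
 * underlying device from its ndo_set_rx_mode() and cleans up when it is
 * stopped.  "foo_get_lower" and the upper device layout are hypothetical.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct net_device *lower = foo_get_lower(dev);
 *
 *		dev_unicast_sync(lower, dev);
 *	}
 *
 *	// and from the upper device's ndo_stop():
 *	dev_unicast_unsync(lower, dev);
 */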
4077
Jiri Pirkoccffad252009-05-22 23:22:17 +00004078static void dev_unicast_flush(struct net_device *dev)
4079{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004080 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004081 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004082 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004083}
4084
4085static void dev_unicast_init(struct net_device *dev)
4086{
Jiri Pirko31278e72009-06-17 01:12:19 +00004087 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004088}
4089
4090
Denis Cheng12972622007-07-18 02:12:56 -07004091static void __dev_addr_discard(struct dev_addr_list **list)
4092{
4093 struct dev_addr_list *tmp;
4094
4095 while (*list != NULL) {
4096 tmp = *list;
4097 *list = tmp->next;
4098 if (tmp->da_users > tmp->da_gusers)
4099			printk(KERN_WARNING "__dev_addr_discard: address leakage! "
4100			       "da_users=%d\n", tmp->da_users);
4101 kfree(tmp);
4102 }
4103}
4104
Denis Cheng26cc2522007-07-18 02:12:03 -07004105static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004106{
David S. Millerb9e40852008-07-15 00:15:08 -07004107 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004108
Denis Cheng456ad752007-07-18 02:10:54 -07004109 __dev_addr_discard(&dev->mc_list);
4110 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004111
David S. Millerb9e40852008-07-15 00:15:08 -07004112 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004113}
4114
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004115/**
4116 * dev_get_flags - get flags reported to userspace
4117 * @dev: device
4118 *
4119 * Get the combination of flag bits exported through APIs to userspace.
4120 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121unsigned dev_get_flags(const struct net_device *dev)
4122{
4123 unsigned flags;
4124
4125 flags = (dev->flags & ~(IFF_PROMISC |
4126 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004127 IFF_RUNNING |
4128 IFF_LOWER_UP |
4129 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130 (dev->gflags & (IFF_PROMISC |
4131 IFF_ALLMULTI));
4132
Stefan Rompfb00055a2006-03-20 17:09:11 -08004133 if (netif_running(dev)) {
4134 if (netif_oper_up(dev))
4135 flags |= IFF_RUNNING;
4136 if (netif_carrier_ok(dev))
4137 flags |= IFF_LOWER_UP;
4138 if (netif_dormant(dev))
4139 flags |= IFF_DORMANT;
4140 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004141
4142 return flags;
4143}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004144EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004146/**
4147 * dev_change_flags - change device settings
4148 * @dev: device
4149 * @flags: device state flags
4150 *
4151 * Change settings on the device based on the supplied state flags.
4152 * The flags are in the userspace exported format.
4153 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154int dev_change_flags(struct net_device *dev, unsigned flags)
4155{
Thomas Graf7c355f52007-06-05 16:03:03 -07004156 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157 int old_flags = dev->flags;
4158
Patrick McHardy24023452007-07-14 18:51:31 -07004159 ASSERT_RTNL();
4160
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161 /*
4162 * Set the flags on our device.
4163 */
4164
4165 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4166 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4167 IFF_AUTOMEDIA)) |
4168 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4169 IFF_ALLMULTI));
4170
4171 /*
4172 * Load in the correct multicast list now the flags have changed.
4173 */
4174
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004175 if ((old_flags ^ flags) & IFF_MULTICAST)
4176 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004177
Patrick McHardy4417da62007-06-27 01:28:10 -07004178 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179
4180 /*
4181 * Have we downed the interface? We handle IFF_UP ourselves
4182 * according to user attempts to set it, rather than blindly
4183 * setting it.
4184 */
4185
4186 ret = 0;
4187 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4188 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4189
4190 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004191 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 }
4193
4194 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004195 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004197 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198
4199 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004200 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4201
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 dev->gflags ^= IFF_PROMISC;
4203 dev_set_promiscuity(dev, inc);
4204 }
4205
4206 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4207 is important. Some (broken) drivers set IFF_PROMISC when
4208 IFF_ALLMULTI is requested, without asking us and without reporting it.
4209 */
4210 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004211 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4212
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 dev->gflags ^= IFF_ALLMULTI;
4214 dev_set_allmulti(dev, inc);
4215 }
4216
Thomas Graf7c355f52007-06-05 16:03:03 -07004217 /* Exclude state transition flags, already notified */
4218 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4219 if (changes)
4220 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221
4222 return ret;
4223}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004224EXPORT_SYMBOL(dev_change_flags);
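/*
 * Illustrative sketch, not from the original source: an in-kernel user
 * turning on promiscuous mode through the userspace-style flag
 * interface.  Callers must hold the RTNL lock, as asserted above.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_PROMISC);
 *	rtnl_unlock();
 *
 * Code that only needs the reference-counted toggle usually calls
 * dev_set_promiscuity() directly instead.
 */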
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004226/**
4227 * dev_set_mtu - Change maximum transfer unit
4228 * @dev: device
4229 * @new_mtu: new transfer unit
4230 *
4231 * Change the maximum transfer size of the network device.
4232 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233int dev_set_mtu(struct net_device *dev, int new_mtu)
4234{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004235 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 int err;
4237
4238 if (new_mtu == dev->mtu)
4239 return 0;
4240
4241 /* MTU must be positive. */
4242 if (new_mtu < 0)
4243 return -EINVAL;
4244
4245 if (!netif_device_present(dev))
4246 return -ENODEV;
4247
4248 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004249 if (ops->ndo_change_mtu)
4250 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 else
4252 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004253
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004255 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 return err;
4257}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004258EXPORT_SYMBOL(dev_set_mtu);
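/*
 * Illustrative sketch, not from the original source: a tunnel-style
 * caller shrinking a device MTU to leave room for its own header.
 * The overhead value is hypothetical; the RTNL lock is held here the
 * same way the ioctl path below holds it.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, ETH_DATA_LEN - overhead);
 *	rtnl_unlock();
 */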
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004260/**
4261 * dev_set_mac_address - Change Media Access Control Address
4262 * @dev: device
4263 * @sa: new address
4264 *
4265 * Change the hardware (MAC) address of the device
4266 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004267int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4268{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004269 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270 int err;
4271
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004272 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273 return -EOPNOTSUPP;
4274 if (sa->sa_family != dev->type)
4275 return -EINVAL;
4276 if (!netif_device_present(dev))
4277 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004278 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004280 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281 return err;
4282}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004283EXPORT_SYMBOL(dev_set_mac_address);
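/*
 * Illustrative sketch, not from the original source: programming a new
 * hardware address from inside the kernel.  new_addr is a hypothetical
 * buffer of dev->addr_len bytes; sa_family must match dev->type
 * (ARPHRD_ETHER for Ethernet), as checked above.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */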
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284
4285/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004286 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004288static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289{
4290 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004291 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292
4293 if (!dev)
4294 return -ENODEV;
4295
4296 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004297 case SIOCGIFFLAGS: /* Get interface flags */
4298 ifr->ifr_flags = (short) dev_get_flags(dev);
4299 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004301 case SIOCGIFMETRIC: /* Get the metric on the interface
4302 (currently unused) */
4303 ifr->ifr_metric = 0;
4304 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004306 case SIOCGIFMTU: /* Get the MTU of a device */
4307 ifr->ifr_mtu = dev->mtu;
4308 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004310 case SIOCGIFHWADDR:
4311 if (!dev->addr_len)
4312 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4313 else
4314 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4315 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4316 ifr->ifr_hwaddr.sa_family = dev->type;
4317 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004319 case SIOCGIFSLAVE:
4320 err = -EINVAL;
4321 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004322
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004323 case SIOCGIFMAP:
4324 ifr->ifr_map.mem_start = dev->mem_start;
4325 ifr->ifr_map.mem_end = dev->mem_end;
4326 ifr->ifr_map.base_addr = dev->base_addr;
4327 ifr->ifr_map.irq = dev->irq;
4328 ifr->ifr_map.dma = dev->dma;
4329 ifr->ifr_map.port = dev->if_port;
4330 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004331
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004332 case SIOCGIFINDEX:
4333 ifr->ifr_ifindex = dev->ifindex;
4334 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004335
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004336 case SIOCGIFTXQLEN:
4337 ifr->ifr_qlen = dev->tx_queue_len;
4338 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004339
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004340 default:
4341 /* dev_ioctl() should ensure this case
4342 * is never reached
4343 */
4344 WARN_ON(1);
4345 err = -EINVAL;
4346 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004347
4348 }
4349 return err;
4350}
4351
4352/*
4353 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4354 */
4355static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4356{
4357 int err;
4358 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004359 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004360
4361 if (!dev)
4362 return -ENODEV;
4363
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004364 ops = dev->netdev_ops;
4365
Jeff Garzik14e3e072007-10-08 00:06:32 -07004366 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004367 case SIOCSIFFLAGS: /* Set interface flags */
4368 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004369
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004370 case SIOCSIFMETRIC: /* Set the metric on the interface
4371 (currently unused) */
4372 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004373
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004374 case SIOCSIFMTU: /* Set the MTU of a device */
4375 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004376
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004377 case SIOCSIFHWADDR:
4378 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004379
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004380 case SIOCSIFHWBROADCAST:
4381 if (ifr->ifr_hwaddr.sa_family != dev->type)
4382 return -EINVAL;
4383 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4384 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4385 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4386 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004388 case SIOCSIFMAP:
4389 if (ops->ndo_set_config) {
4390 if (!netif_device_present(dev))
4391 return -ENODEV;
4392 return ops->ndo_set_config(dev, &ifr->ifr_map);
4393 }
4394 return -EOPNOTSUPP;
4395
4396 case SIOCADDMULTI:
4397 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4398 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4399 return -EINVAL;
4400 if (!netif_device_present(dev))
4401 return -ENODEV;
4402 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4403 dev->addr_len, 1);
4404
4405 case SIOCDELMULTI:
4406 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4407 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4408 return -EINVAL;
4409 if (!netif_device_present(dev))
4410 return -ENODEV;
4411 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4412 dev->addr_len, 1);
4413
4414 case SIOCSIFTXQLEN:
4415 if (ifr->ifr_qlen < 0)
4416 return -EINVAL;
4417 dev->tx_queue_len = ifr->ifr_qlen;
4418 return 0;
4419
4420 case SIOCSIFNAME:
4421 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4422 return dev_change_name(dev, ifr->ifr_newname);
4423
4424 /*
4425 * Unknown or private ioctl
4426 */
4427 default:
4428 if ((cmd >= SIOCDEVPRIVATE &&
4429 cmd <= SIOCDEVPRIVATE + 15) ||
4430 cmd == SIOCBONDENSLAVE ||
4431 cmd == SIOCBONDRELEASE ||
4432 cmd == SIOCBONDSETHWADDR ||
4433 cmd == SIOCBONDSLAVEINFOQUERY ||
4434 cmd == SIOCBONDINFOQUERY ||
4435 cmd == SIOCBONDCHANGEACTIVE ||
4436 cmd == SIOCGMIIPHY ||
4437 cmd == SIOCGMIIREG ||
4438 cmd == SIOCSMIIREG ||
4439 cmd == SIOCBRADDIF ||
4440 cmd == SIOCBRDELIF ||
4441 cmd == SIOCSHWTSTAMP ||
4442 cmd == SIOCWANDEV) {
4443 err = -EOPNOTSUPP;
4444 if (ops->ndo_do_ioctl) {
4445 if (netif_device_present(dev))
4446 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4447 else
4448 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004450 } else
4451 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452
4453 }
4454 return err;
4455}
4456
4457/*
4458 * This function handles all "interface"-type I/O control requests. The actual
4459 * 'doing' part of this is dev_ifsioc above.
4460 */
4461
4462/**
4463 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004464 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 * @cmd: command to issue
4466 * @arg: pointer to a struct ifreq in user space
4467 *
4468 * Issue ioctl functions to devices. This is normally called by the
4469 * user space syscall interfaces but can sometimes be useful for
4470 * other purposes. The return value is the return from the syscall if
4471 * positive or a negative errno code on error.
4472 */
4473
Eric W. Biederman881d9662007-09-17 11:56:21 -07004474int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475{
4476 struct ifreq ifr;
4477 int ret;
4478 char *colon;
4479
4480 /* One special case: SIOCGIFCONF takes ifconf argument
4481 and requires shared lock, because it sleeps writing
4482 to user space.
4483 */
4484
4485 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004486 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004487 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004488 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489 return ret;
4490 }
4491 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004492 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493
4494 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4495 return -EFAULT;
4496
4497 ifr.ifr_name[IFNAMSIZ-1] = 0;
4498
4499 colon = strchr(ifr.ifr_name, ':');
4500 if (colon)
4501 *colon = 0;
4502
4503 /*
4504 * See which interface the caller is talking about.
4505 */
4506
4507 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004508 /*
4509 * These ioctl calls:
4510 * - can be done by all.
4511 * - atomic and do not require locking.
4512 * - return a value
4513 */
4514 case SIOCGIFFLAGS:
4515 case SIOCGIFMETRIC:
4516 case SIOCGIFMTU:
4517 case SIOCGIFHWADDR:
4518 case SIOCGIFSLAVE:
4519 case SIOCGIFMAP:
4520 case SIOCGIFINDEX:
4521 case SIOCGIFTXQLEN:
4522 dev_load(net, ifr.ifr_name);
4523 read_lock(&dev_base_lock);
4524 ret = dev_ifsioc_locked(net, &ifr, cmd);
4525 read_unlock(&dev_base_lock);
4526 if (!ret) {
4527 if (colon)
4528 *colon = ':';
4529 if (copy_to_user(arg, &ifr,
4530 sizeof(struct ifreq)))
4531 ret = -EFAULT;
4532 }
4533 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004534
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004535 case SIOCETHTOOL:
4536 dev_load(net, ifr.ifr_name);
4537 rtnl_lock();
4538 ret = dev_ethtool(net, &ifr);
4539 rtnl_unlock();
4540 if (!ret) {
4541 if (colon)
4542 *colon = ':';
4543 if (copy_to_user(arg, &ifr,
4544 sizeof(struct ifreq)))
4545 ret = -EFAULT;
4546 }
4547 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004549 /*
4550 * These ioctl calls:
4551 * - require superuser power.
4552 * - require strict serialization.
4553 * - return a value
4554 */
4555 case SIOCGMIIPHY:
4556 case SIOCGMIIREG:
4557 case SIOCSIFNAME:
4558 if (!capable(CAP_NET_ADMIN))
4559 return -EPERM;
4560 dev_load(net, ifr.ifr_name);
4561 rtnl_lock();
4562 ret = dev_ifsioc(net, &ifr, cmd);
4563 rtnl_unlock();
4564 if (!ret) {
4565 if (colon)
4566 *colon = ':';
4567 if (copy_to_user(arg, &ifr,
4568 sizeof(struct ifreq)))
4569 ret = -EFAULT;
4570 }
4571 return ret;
4572
4573 /*
4574 * These ioctl calls:
4575 * - require superuser power.
4576 * - require strict serialization.
4577 * - do not return a value
4578 */
4579 case SIOCSIFFLAGS:
4580 case SIOCSIFMETRIC:
4581 case SIOCSIFMTU:
4582 case SIOCSIFMAP:
4583 case SIOCSIFHWADDR:
4584 case SIOCSIFSLAVE:
4585 case SIOCADDMULTI:
4586 case SIOCDELMULTI:
4587 case SIOCSIFHWBROADCAST:
4588 case SIOCSIFTXQLEN:
4589 case SIOCSMIIREG:
4590 case SIOCBONDENSLAVE:
4591 case SIOCBONDRELEASE:
4592 case SIOCBONDSETHWADDR:
4593 case SIOCBONDCHANGEACTIVE:
4594 case SIOCBRADDIF:
4595 case SIOCBRDELIF:
4596 case SIOCSHWTSTAMP:
4597 if (!capable(CAP_NET_ADMIN))
4598 return -EPERM;
4599 /* fall through */
4600 case SIOCBONDSLAVEINFOQUERY:
4601 case SIOCBONDINFOQUERY:
4602 dev_load(net, ifr.ifr_name);
4603 rtnl_lock();
4604 ret = dev_ifsioc(net, &ifr, cmd);
4605 rtnl_unlock();
4606 return ret;
4607
4608 case SIOCGIFMEM:
4609 /* Get the per device memory space. We can add this but
4610 * currently do not support it */
4611 case SIOCSIFMEM:
4612 /* Set the per device memory buffer space.
4613 * Not applicable in our case */
4614 case SIOCSIFLINK:
4615 return -EINVAL;
4616
4617 /*
4618 * Unknown or private ioctl.
4619 */
4620 default:
4621 if (cmd == SIOCWANDEV ||
4622 (cmd >= SIOCDEVPRIVATE &&
4623 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004624 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004626 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004628 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004629 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004630 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004631 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004632 }
4633 /* Take care of Wireless Extensions */
4634 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4635 return wext_handle_ioctl(net, &ifr, cmd, arg);
4636 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637 }
4638}
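/*
 * Illustrative sketch, not from the original source: the user-space
 * side of the ioctl path handled above, reading an interface MTU with
 * SIOCGIFMTU.  Error handling is omitted.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGIFMTU, &ifr);
 *	printf("mtu = %d\n", ifr.ifr_mtu);
 *	close(fd);
 */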
4639
4640
4641/**
4642 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004643 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644 *
4645 * Returns a suitable unique value for a new device interface
4646 * number. The caller must hold the rtnl semaphore or the
4647 * dev_base_lock to be sure it remains unique.
4648 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004649static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650{
4651 static int ifindex;
4652 for (;;) {
4653 if (++ifindex <= 0)
4654 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004655 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 return ifindex;
4657 }
4658}
4659
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004661static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004663static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666}
4667
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004668static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004669{
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004670 struct net_device *dev;
4671
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004672 BUG_ON(dev_boot_phase);
4673 ASSERT_RTNL();
4674
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004675 list_for_each_entry(dev, head, unreg_list) {
4676 /* Some devices call without registering
4677 * for initialization unwind.
4678 */
4679 if (dev->reg_state == NETREG_UNINITIALIZED) {
4680 pr_debug("unregister_netdevice: device %s/%p never "
4681 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004682
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004683 WARN_ON(1);
4684 return;
4685 }
4686
4687 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4688
4689 /* If device is running, close it first. */
4690 dev_close(dev);
4691
4692 /* And unlink it from device chain. */
4693 unlist_netdevice(dev);
4694
4695 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004696 }
4697
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004698 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004699
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004700 list_for_each_entry(dev, head, unreg_list) {
4701 /* Shutdown queueing discipline. */
4702 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004703
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004704
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004705 /* Notify protocols, that we are about to destroy
4706 this device. They should clean all the things.
4707 */
4708 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4709
4710 /*
4711 * Flush the unicast and multicast chains
4712 */
4713 dev_unicast_flush(dev);
4714 dev_addr_discard(dev);
4715
4716 if (dev->netdev_ops->ndo_uninit)
4717 dev->netdev_ops->ndo_uninit(dev);
4718
4719 /* Notifier chain MUST detach us from master device. */
4720 WARN_ON(dev->master);
4721
4722 /* Remove entries from kobject tree */
4723 netdev_unregister_kobject(dev);
4724 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004725
4726 synchronize_net();
4727
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004728 list_for_each_entry(dev, head, unreg_list)
4729 dev_put(dev);
4730}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004731
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004732static void rollback_registered(struct net_device *dev)
4733{
4734 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004735
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004736 list_add(&dev->unreg_list, &single);
4737 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004738}
4739
David S. Millere8a04642008-07-17 00:34:19 -07004740static void __netdev_init_queue_locks_one(struct net_device *dev,
4741 struct netdev_queue *dev_queue,
4742 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004743{
4744 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004745 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004746 dev_queue->xmit_lock_owner = -1;
4747}
4748
4749static void netdev_init_queue_locks(struct net_device *dev)
4750{
David S. Millere8a04642008-07-17 00:34:19 -07004751 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4752 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004753}
4754
Herbert Xub63365a2008-10-23 01:11:29 -07004755unsigned long netdev_fix_features(unsigned long features, const char *name)
4756{
4757 /* Fix illegal SG+CSUM combinations. */
4758 if ((features & NETIF_F_SG) &&
4759 !(features & NETIF_F_ALL_CSUM)) {
4760 if (name)
4761 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4762 "checksum feature.\n", name);
4763 features &= ~NETIF_F_SG;
4764 }
4765
4766 /* TSO requires that SG is present as well. */
4767 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4768 if (name)
4769 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4770 "SG feature.\n", name);
4771 features &= ~NETIF_F_TSO;
4772 }
4773
4774 if (features & NETIF_F_UFO) {
4775 if (!(features & NETIF_F_GEN_CSUM)) {
4776 if (name)
4777 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4778 "since no NETIF_F_HW_CSUM feature.\n",
4779 name);
4780 features &= ~NETIF_F_UFO;
4781 }
4782
4783 if (!(features & NETIF_F_SG)) {
4784 if (name)
4785 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4786 "since no NETIF_F_SG feature.\n", name);
4787 features &= ~NETIF_F_UFO;
4788 }
4789 }
4790
4791 return features;
4792}
4793EXPORT_SYMBOL(netdev_fix_features);
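/*
 * Illustrative sketch, not from the original source: a virtual device
 * that inherits offloads from a lower device can run the inherited set
 * through netdev_fix_features() to drop illegal combinations.  The
 * lower_dev variable is hypothetical.
 *
 *	dev->features = lower_dev->features &
 *			(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO);
 *	dev->features = netdev_fix_features(dev->features, dev->name);
 */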
4794
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795/**
4796 * register_netdevice - register a network device
4797 * @dev: device to register
4798 *
4799 * Take a completed network device structure and add it to the kernel
4800 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4801 * chain. 0 is returned on success. A negative errno code is returned
4802 * on a failure to set up the device, or if the name is a duplicate.
4803 *
4804 * Callers must hold the rtnl semaphore. You may want
4805 * register_netdev() instead of this.
4806 *
4807 * BUGS:
4808 * The locking appears insufficient to guarantee two parallel registers
4809 * will not get the same name.
4810 */
4811
4812int register_netdevice(struct net_device *dev)
4813{
4814 struct hlist_head *head;
4815 struct hlist_node *p;
4816 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004817 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818
4819 BUG_ON(dev_boot_phase);
4820 ASSERT_RTNL();
4821
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004822 might_sleep();
4823
Linus Torvalds1da177e2005-04-16 15:20:36 -07004824 /* When net_device's are persistent, this will be fatal. */
4825 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004826 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827
David S. Millerf1f28aa2008-07-15 00:08:33 -07004828 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004829 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004830 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831
Linus Torvalds1da177e2005-04-16 15:20:36 -07004832 dev->iflink = -1;
4833
4834 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004835 if (dev->netdev_ops->ndo_init) {
4836 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004837 if (ret) {
4838 if (ret > 0)
4839 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004840 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004841 }
4842 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004843
Linus Torvalds1da177e2005-04-16 15:20:36 -07004844 if (!dev_valid_name(dev->name)) {
4845 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004846 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004847 }
4848
Eric W. Biederman881d9662007-09-17 11:56:21 -07004849 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850 if (dev->iflink == -1)
4851 dev->iflink = dev->ifindex;
4852
4853 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004854 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004855 hlist_for_each(p, head) {
4856 struct net_device *d
4857 = hlist_entry(p, struct net_device, name_hlist);
4858 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4859 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004860 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004862 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004864 /* Fix illegal checksum combinations */
4865 if ((dev->features & NETIF_F_HW_CSUM) &&
4866 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4867 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4868 dev->name);
4869 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4870 }
4871
4872 if ((dev->features & NETIF_F_NO_CSUM) &&
4873 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4874 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4875 dev->name);
4876 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4877 }
4878
Herbert Xub63365a2008-10-23 01:11:29 -07004879 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004881 /* Enable software GSO if SG is supported. */
4882 if (dev->features & NETIF_F_SG)
4883 dev->features |= NETIF_F_GSO;
4884
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004885 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00004886
4887 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4888 ret = notifier_to_errno(ret);
4889 if (ret)
4890 goto err_uninit;
4891
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004892 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004893 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004894 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004895 dev->reg_state = NETREG_REGISTERED;
4896
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897 /*
4898 * Default initial state at registry is that the
4899 * device is present.
4900 */
4901
4902 set_bit(__LINK_STATE_PRESENT, &dev->state);
4903
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004906 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004907
4908 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004909 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004910 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004911 if (ret) {
4912 rollback_registered(dev);
4913 dev->reg_state = NETREG_UNREGISTERED;
4914 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004915
4916out:
4917 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004918
4919err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004920 if (dev->netdev_ops->ndo_uninit)
4921 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004922 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004923}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004924EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925
4926/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004927 * init_dummy_netdev - init a dummy network device for NAPI
4928 * @dev: device to init
4929 *
4930 * This takes a network device structure and initializes the minimum
4931 * set of fields so it can be used to schedule NAPI polls without
4932 * registering a full blown interface. This is to be used by drivers
4933 * that need to tie several hardware interfaces to a single NAPI
4934 * poll scheduler due to HW limitations.
4935 */
4936int init_dummy_netdev(struct net_device *dev)
4937{
4938 /* Clear everything. Note we don't initialize spinlocks
4939 * as they aren't supposed to be taken by any of the
4940 * NAPI code and this dummy netdev is supposed to be
4941 * only ever used for NAPI polls
4942 */
4943 memset(dev, 0, sizeof(struct net_device));
4944
4945 /* make sure we BUG if trying to hit standard
4946 * register/unregister code path
4947 */
4948 dev->reg_state = NETREG_DUMMY;
4949
4950 /* initialize the ref count */
4951 atomic_set(&dev->refcnt, 1);
4952
4953 /* NAPI wants this */
4954 INIT_LIST_HEAD(&dev->napi_list);
4955
4956 /* a dummy interface is started by default */
4957 set_bit(__LINK_STATE_PRESENT, &dev->state);
4958 set_bit(__LINK_STATE_START, &dev->state);
4959
4960 return 0;
4961}
4962EXPORT_SYMBOL_GPL(init_dummy_netdev);
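/*
 * Illustrative sketch, not from the original source: a driver with one
 * interrupt serving several hardware ports can hang its NAPI context
 * off a dummy netdev embedded in its adapter structure.  The adapter
 * layout, my_poll() and the weight of 64 are hypothetical.
 *
 *	init_dummy_netdev(&adapter->napi_dev);
 *	netif_napi_add(&adapter->napi_dev, &adapter->napi, my_poll, 64);
 *	napi_enable(&adapter->napi);
 */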
4963
4964
4965/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966 * register_netdev - register a network device
4967 * @dev: device to register
4968 *
4969 * Take a completed network device structure and add it to the kernel
4970 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4971 * chain. 0 is returned on success. A negative errno code is returned
4972 * on a failure to set up the device, or if the name is a duplicate.
4973 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004974 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 * and expands the device name if you passed a format string to
4976 * alloc_netdev.
4977 */
4978int register_netdev(struct net_device *dev)
4979{
4980 int err;
4981
4982 rtnl_lock();
4983
4984 /*
4985 * If the name is a format string the caller wants us to do a
4986 * name allocation.
4987 */
4988 if (strchr(dev->name, '%')) {
4989 err = dev_alloc_name(dev, dev->name);
4990 if (err < 0)
4991 goto out;
4992 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004993
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994 err = register_netdevice(dev);
4995out:
4996 rtnl_unlock();
4997 return err;
4998}
4999EXPORT_SYMBOL(register_netdev);
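/*
 * Illustrative sketch, not from the original source: the usual probe
 * sequence around register_netdev() for an Ethernet driver.  The
 * my_priv structure and my_netdev_ops are hypothetical; ether_setup()
 * fills in the generic Ethernet defaults.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */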
5000
5001/*
5002 * netdev_wait_allrefs - wait until all references are gone.
5003 *
5004 * This is called when unregistering network devices.
5005 *
5006 * Any protocol or device that holds a reference should register
5007 * for netdevice notification, and cleanup and put back the
5008 * reference if they receive an UNREGISTER event.
5009 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005010 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005011 */
5012static void netdev_wait_allrefs(struct net_device *dev)
5013{
5014 unsigned long rebroadcast_time, warning_time;
5015
5016 rebroadcast_time = warning_time = jiffies;
5017 while (atomic_read(&dev->refcnt) != 0) {
5018 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005019 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020
5021 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005022 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023
5024 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5025 &dev->state)) {
5026 /* We must not have linkwatch events
5027 * pending on unregister. If this
5028 * happens, we simply run the queue
5029 * unscheduled, resulting in a noop
5030 * for this device.
5031 */
5032 linkwatch_run_queue();
5033 }
5034
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005035 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005036
5037 rebroadcast_time = jiffies;
5038 }
5039
5040 msleep(250);
5041
5042 if (time_after(jiffies, warning_time + 10 * HZ)) {
5043 printk(KERN_EMERG "unregister_netdevice: "
5044 "waiting for %s to become free. Usage "
5045 "count = %d\n",
5046 dev->name, atomic_read(&dev->refcnt));
5047 warning_time = jiffies;
5048 }
5049 }
5050}
5051
5052/* The sequence is:
5053 *
5054 * rtnl_lock();
5055 * ...
5056 * register_netdevice(x1);
5057 * register_netdevice(x2);
5058 * ...
5059 * unregister_netdevice(y1);
5060 * unregister_netdevice(y2);
5061 * ...
5062 * rtnl_unlock();
5063 * free_netdev(y1);
5064 * free_netdev(y2);
5065 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005066 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005068 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069 * without deadlocking with linkwatch via keventd.
5070 * 2) Since we run with the RTNL semaphore not held, we can sleep
5071 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005072 *
5073 * We must not return until all unregister events added during
5074 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076void netdev_run_todo(void)
5077{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005078 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005079
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005081 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005082
5083 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005084
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085 while (!list_empty(&list)) {
5086 struct net_device *dev
5087 = list_entry(list.next, struct net_device, todo_list);
5088 list_del(&dev->todo_list);
5089
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005090 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091 printk(KERN_ERR "network todo '%s' but state %d\n",
5092 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005093 dump_stack();
5094 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005096
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005097 dev->reg_state = NETREG_UNREGISTERED;
5098
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005099 on_each_cpu(flush_backlog, dev, 1);
5100
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005101 netdev_wait_allrefs(dev);
5102
5103 /* paranoia */
5104 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005105 WARN_ON(dev->ip_ptr);
5106 WARN_ON(dev->ip6_ptr);
5107 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005108
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005109 if (dev->destructor)
5110 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005111
5112 /* Free network device */
5113 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005115}
5116
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005117/**
5118 * dev_get_stats - get network device statistics
5119 * @dev: device to get statistics from
5120 *
5121 * Get network statistics from device. The device driver may provide
5122 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5123 * the internal statistics structure is used.
5124 */
5125const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005126{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005127 const struct net_device_ops *ops = dev->netdev_ops;
5128
5129 if (ops->ndo_get_stats)
5130 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005131 else {
5132 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5133 struct net_device_stats *stats = &dev->stats;
5134 unsigned int i;
5135 struct netdev_queue *txq;
5136
5137 for (i = 0; i < dev->num_tx_queues; i++) {
5138 txq = netdev_get_tx_queue(dev, i);
5139 tx_bytes += txq->tx_bytes;
5140 tx_packets += txq->tx_packets;
5141 tx_dropped += txq->tx_dropped;
5142 }
5143 if (tx_bytes || tx_packets || tx_dropped) {
5144 stats->tx_bytes = tx_bytes;
5145 stats->tx_packets = tx_packets;
5146 stats->tx_dropped = tx_dropped;
5147 }
5148 return stats;
5149 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005150}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005151EXPORT_SYMBOL(dev_get_stats);
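/*
 * Illustrative sketch, not from the original source: a driver that
 * keeps its counters in hardware can override the default path above
 * by providing its own ndo_get_stats.  The my_priv structure and
 * my_read_counter() are hypothetical.
 *
 *	static struct net_device_stats *my_get_stats(struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		dev->stats.rx_packets = my_read_counter(priv, MY_RX_PKTS);
 *		dev->stats.tx_packets = my_read_counter(priv, MY_TX_PKTS);
 *		return &dev->stats;
 *	}
 */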
Rusty Russellc45d2862007-03-28 14:29:08 -07005152
David S. Millerdc2b4842008-07-08 17:18:23 -07005153static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005154 struct netdev_queue *queue,
5155 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005156{
David S. Millerdc2b4842008-07-08 17:18:23 -07005157 queue->dev = dev;
5158}
5159
David S. Millerbb949fb2008-07-08 16:55:56 -07005160static void netdev_init_queues(struct net_device *dev)
5161{
David S. Millere8a04642008-07-17 00:34:19 -07005162 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5163 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005164 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005165}
5166
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005168 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169 * @sizeof_priv: size of private data to allocate space for
5170 * @name: device name format string
5171 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005172 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173 *
5174 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005175 * and performs basic initialization. Also allocates subqueue structs
5176 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005178struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5179 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180{
David S. Millere8a04642008-07-17 00:34:19 -07005181 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005183 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005184 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005186 BUG_ON(strlen(name) >= sizeof(dev->name));
5187
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005188 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005189 if (sizeof_priv) {
5190 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005191 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005192 alloc_size += sizeof_priv;
5193 }
5194 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005195 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005197 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005199 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200 return NULL;
5201 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202
Stephen Hemminger79439862008-07-21 13:28:44 -07005203 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005204 if (!tx) {
5205 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5206 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005207 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005208 }
5209
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005210 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005212
5213 if (dev_addr_init(dev))
5214 goto free_tx;
5215
Jiri Pirkoccffad252009-05-22 23:22:17 +00005216 dev_unicast_init(dev);
5217
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005218 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219
David S. Millere8a04642008-07-17 00:34:19 -07005220 dev->_tx = tx;
5221 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005222 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005223
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005224 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225
David S. Millerbb949fb2008-07-08 16:55:56 -07005226 netdev_init_queues(dev);
5227
Herbert Xud565b0a2008-12-15 23:38:52 -08005228 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005229 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 setup(dev);
5231 strcpy(dev->name, name);
5232 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005233
5234free_tx:
5235 kfree(tx);
5236
5237free_p:
5238 kfree(p);
5239 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005240}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005241EXPORT_SYMBOL(alloc_netdev_mq);
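/*
 * Illustrative sketch, not from the original source: allocating a
 * four-queue device and reaching its private area.  The my_priv
 * structure and my_setup() are hypothetical.
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
 *			      my_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 */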
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242
5243/**
5244 * free_netdev - free network device
5245 * @dev: device
5246 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005247 * This function does the last stage of destroying an allocated device
5248 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249 * If this is the last reference then it will be freed.
5250 */
5251void free_netdev(struct net_device *dev)
5252{
Herbert Xud565b0a2008-12-15 23:38:52 -08005253 struct napi_struct *p, *n;
5254
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005255 release_net(dev_net(dev));
5256
David S. Millere8a04642008-07-17 00:34:19 -07005257 kfree(dev->_tx);
5258
Jiri Pirkof001fde2009-05-05 02:48:28 +00005259 /* Flush device addresses */
5260 dev_addr_flush(dev);
5261
Herbert Xud565b0a2008-12-15 23:38:52 -08005262 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5263 netif_napi_del(p);
5264
Stephen Hemminger3041a062006-05-26 13:25:24 -07005265 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266 if (dev->reg_state == NETREG_UNINITIALIZED) {
5267 kfree((char *)dev - dev->padded);
5268 return;
5269 }
5270
5271 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5272 dev->reg_state = NETREG_RELEASED;
5273
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005274 /* will free via device release */
5275 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005276}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005277EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005278
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005279/**
5280 * synchronize_net - Synchronize with packet receive processing
5281 *
5282 * Wait for packets currently being received to be done.
5283 * Does not block later packets from starting.
5284 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005285void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286{
5287 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005288 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005290EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005291
5292/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005293 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005294 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005295 * @head: list
5296 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005297 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005298 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005299 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300 *
5301 * Callers must hold the rtnl semaphore. You may want
5302 * unregister_netdev() instead of this.
5303 */
5304
Eric Dumazet44a08732009-10-27 07:03:04 +00005305void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306{
Herbert Xua6620712007-12-12 19:21:56 -08005307 ASSERT_RTNL();
5308
Eric Dumazet44a08732009-10-27 07:03:04 +00005309 if (head) {
5310 list_add_tail(&dev->unreg_list, head);
5311 } else {
5312 rollback_registered(dev);
5313 /* Finish processing unregister after unlock */
5314 net_set_todo(dev);
5315 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316}
Eric Dumazet44a08732009-10-27 07:03:04 +00005317EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318
5319/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005320 * unregister_netdevice_many - unregister many devices
5321 * @head: list of devices
5322 *
5323 */
5324void unregister_netdevice_many(struct list_head *head)
5325{
5326 struct net_device *dev;
5327
5328 if (!list_empty(head)) {
5329 rollback_registered_many(head);
5330 list_for_each_entry(dev, head, unreg_list)
5331 net_set_todo(dev);
5332 }
5333}
Eric Dumazet63c80992009-10-27 07:06:49 +00005334EXPORT_SYMBOL(unregister_netdevice_many);
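/*
 * Illustrative sketch, not from the original source: tearing down a
 * group of devices with a single round of notifiers by batching them
 * on a local list.  The my_devs array and count n are hypothetical.
 *
 *	LIST_HEAD(unreg_list);
 *
 *	rtnl_lock();
 *	for (i = 0; i < n; i++)
 *		unregister_netdevice_queue(my_devs[i], &unreg_list);
 *	unregister_netdevice_many(&unreg_list);
 *	rtnl_unlock();
 */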
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005335
5336/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005337 * unregister_netdev - remove device from the kernel
5338 * @dev: device
5339 *
5340 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005341 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342 *
5343 * This is just a wrapper for unregister_netdevice that takes
5344 * the rtnl semaphore. In general you want to use this and not
5345 * unregister_netdevice.
5346 */
5347void unregister_netdev(struct net_device *dev)
5348{
5349 rtnl_lock();
5350 unregister_netdevice(dev);
5351 rtnl_unlock();
5352}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005353EXPORT_SYMBOL(unregister_netdev);
5354
Eric W. Biedermance286d32007-09-12 13:53:49 +02005355/**
5356 * dev_change_net_namespace - move device to a different network namespace
5357 * @dev: device
5358 * @net: network namespace
5359 * @pat: If not NULL name pattern to try if the current device name
5360 * is already taken in the destination network namespace.
5361 *
5362 * This function shuts down a device interface and moves it
5363 * to a new network namespace. On success 0 is returned, on
5364 * a failure a negative errno code is returned.
5365 *
5366 * Callers must hold the rtnl semaphore.
5367 */
5368
5369int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5370{
5371 char buf[IFNAMSIZ];
5372 const char *destname;
5373 int err;
5374
5375 ASSERT_RTNL();
5376
5377 /* Don't allow namespace local devices to be moved. */
5378 err = -EINVAL;
5379 if (dev->features & NETIF_F_NETNS_LOCAL)
5380 goto out;
5381
Eric W. Biederman38918452008-10-27 17:51:47 -07005382#ifdef CONFIG_SYSFS
5383 /* Don't allow real devices to be moved when sysfs
5384 * is enabled.
5385 */
5386 err = -EINVAL;
5387 if (dev->dev.parent)
5388 goto out;
5389#endif
5390
Eric W. Biedermance286d32007-09-12 13:53:49 +02005391 /* Ensure the device has been registered */
5392 err = -EINVAL;
5393 if (dev->reg_state != NETREG_REGISTERED)
5394 goto out;
5395
5396 /* Get out if there is nothing to do */
5397 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005398 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005399 goto out;
5400
5401 /* Pick the destination device name, and ensure
5402 * we can use it in the destination network namespace.
5403 */
5404 err = -EEXIST;
5405 destname = dev->name;
5406 if (__dev_get_by_name(net, destname)) {
5407 /* We get here if we can't use the current device name */
5408 if (!pat)
5409 goto out;
5410 if (!dev_valid_name(pat))
5411 goto out;
5412 if (strchr(pat, '%')) {
5413 if (__dev_alloc_name(net, pat, buf) < 0)
5414 goto out;
5415 destname = buf;
5416 } else
5417 destname = pat;
5418 if (__dev_get_by_name(net, destname))
5419 goto out;
5420 }
5421
5422 /*
5423 * And now a mini version of register_netdevice and unregister_netdevice.
5424 */
5425
5426 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005427 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005428
5429 /* And unlink it from device chain */
5430 err = -ENODEV;
5431 unlist_netdevice(dev);
5432
5433 synchronize_net();
5434
5435 /* Shutdown queueing discipline. */
5436 dev_shutdown(dev);
5437
5438 /* Notify protocols, that we are about to destroy
5439 this device. They should clean all the things.
5440 */
5441 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5442
5443 /*
5444 * Flush the unicast and multicast chains
5445 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005446 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005447 dev_addr_discard(dev);
5448
Eric W. Biederman38918452008-10-27 17:51:47 -07005449 netdev_unregister_kobject(dev);
5450
Eric W. Biedermance286d32007-09-12 13:53:49 +02005451 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005452 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005453
5454 /* Assign the new device name */
5455 if (destname != dev->name)
5456 strcpy(dev->name, destname);
5457
5458 /* If there is an ifindex conflict assign a new one */
5459 if (__dev_get_by_index(net, dev->ifindex)) {
5460 int iflink = (dev->iflink == dev->ifindex);
5461 dev->ifindex = dev_new_index(net);
5462 if (iflink)
5463 dev->iflink = dev->ifindex;
5464 }
5465
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005466 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005467 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005468 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005469
5470 /* Add the device back in the hashes */
5471 list_netdevice(dev);
5472
5473 /* Notify protocols, that a new device appeared. */
5474 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5475
5476 synchronize_net();
5477 err = 0;
5478out:
5479 return err;
5480}
Johannes Berg463d0182009-07-14 00:33:35 +02005481EXPORT_SYMBOL_GPL(dev_change_net_namespace);
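/*
 * Illustrative sketch, not from the original source: moving a device
 * into another namespace, falling back to a templated name if the
 * current one is already taken there.  The target_net pointer is
 * hypothetical.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 */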
Eric W. Biedermance286d32007-09-12 13:53:49 +02005482
Linus Torvalds1da177e2005-04-16 15:20:36 -07005483static int dev_cpu_callback(struct notifier_block *nfb,
5484 unsigned long action,
5485 void *ocpu)
5486{
5487 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005488 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489 struct sk_buff *skb;
5490 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5491 struct softnet_data *sd, *oldsd;
5492
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005493 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005494 return NOTIFY_OK;
5495
5496 local_irq_disable();
5497 cpu = smp_processor_id();
5498 sd = &per_cpu(softnet_data, cpu);
5499 oldsd = &per_cpu(softnet_data, oldcpu);
5500
5501 /* Find end of our completion_queue. */
5502 list_skb = &sd->completion_queue;
5503 while (*list_skb)
5504 list_skb = &(*list_skb)->next;
5505 /* Append completion queue from offline CPU. */
5506 *list_skb = oldsd->completion_queue;
5507 oldsd->completion_queue = NULL;
5508
5509 /* Find end of our output_queue. */
5510 list_net = &sd->output_queue;
5511 while (*list_net)
5512 list_net = &(*list_net)->next_sched;
5513 /* Append output queue from offline CPU. */
5514 *list_net = oldsd->output_queue;
5515 oldsd->output_queue = NULL;
5516
5517 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5518 local_irq_enable();
5519
5520 /* Process offline CPU's input_pkt_queue */
5521 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5522 netif_rx(skb);
5523
5524 return NOTIFY_OK;
5525}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005526
5527
Herbert Xu7f353bf2007-08-10 15:47:58 -07005528/**
Herbert Xub63365a2008-10-23 01:11:29 -07005529 * netdev_increment_features - increment feature set by one
5530 * @all: current feature set
5531 * @one: new feature set
5532 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005533 *
5534 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005535 * @one to the master device with current feature set @all. Will not
5536 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005537 */
Herbert Xub63365a2008-10-23 01:11:29 -07005538unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5539 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005540{
Herbert Xub63365a2008-10-23 01:11:29 -07005541 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005542 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005543 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5544 else if (mask & NETIF_F_ALL_CSUM) {
5545 /* If one device supports v4/v6 checksumming, set for all. */
5546 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5547 !(all & NETIF_F_GEN_CSUM)) {
5548 all &= ~NETIF_F_ALL_CSUM;
5549 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5550 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005551
Herbert Xub63365a2008-10-23 01:11:29 -07005552 /* If one device supports hw checksumming, set for all. */
5553 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5554 all &= ~NETIF_F_ALL_CSUM;
5555 all |= NETIF_F_HW_CSUM;
5556 }
5557 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005558
Herbert Xub63365a2008-10-23 01:11:29 -07005559 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005560
Herbert Xub63365a2008-10-23 01:11:29 -07005561 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005562 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005563 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005564
5565 return all;
5566}
Herbert Xub63365a2008-10-23 01:11:29 -07005567EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07005568
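/* Illustrative sketch (not lifted verbatim from any in-tree driver):
 * a master device such as a bond typically recomputes its combined
 * feature set by folding in each slave with this helper, roughly:
 *
 *	unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
 *	struct net_device *slave;
 *
 *	for_each_slave(master, slave)		// driver-specific iterator
 *		features = netdev_increment_features(features,
 *						     slave->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *	master->features = features;
 */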
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005569static struct hlist_head *netdev_create_hash(void)
5570{
5571 int i;
5572 struct hlist_head *hash;
5573
5574 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5575 if (hash != NULL)
5576 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5577 INIT_HLIST_HEAD(&hash[i]);
5578
5579 return hash;
5580}
5581
Eric W. Biederman881d9662007-09-17 11:56:21 -07005582/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005583static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005584{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005585 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005586
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005587 net->dev_name_head = netdev_create_hash();
5588 if (net->dev_name_head == NULL)
5589 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005590
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005591 net->dev_index_head = netdev_create_hash();
5592 if (net->dev_index_head == NULL)
5593 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005594
5595 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005596
5597err_idx:
5598 kfree(net->dev_name_head);
5599err_name:
5600 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005601}
5602
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005603/**
5604 * netdev_drivername - network driver for the device
5605 * @dev: network device
5606 * @buffer: buffer for resulting name
5607 * @len: size of buffer
5608 *
5609	 * Determine the name of the network driver bound to the device.
5610 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005611char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005612{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005613 const struct device_driver *driver;
5614 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005615
5616 if (len <= 0 || !buffer)
5617 return buffer;
5618 buffer[0] = 0;
5619
5620 parent = dev->dev.parent;
5621
5622 if (!parent)
5623 return buffer;
5624
5625 driver = parent->driver;
5626 if (driver && driver->name)
5627 strlcpy(buffer, driver->name, len);
5628 return buffer;
5629}
5630
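/* Example of the intended use, loosely modelled on the transmit
 * watchdog in sch_generic.c: the caller passes a small stack buffer
 * and gets back the bound driver's name, or an empty string when no
 * parent driver is attached.
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */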
Pavel Emelyanov46650792007-10-08 20:38:39 -07005631static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005632{
5633 kfree(net->dev_name_head);
5634 kfree(net->dev_index_head);
5635}
5636
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005637static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005638 .init = netdev_init,
5639 .exit = netdev_exit,
5640};
5641
Pavel Emelyanov46650792007-10-08 20:38:39 -07005642static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005643{
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005644 struct net_device *dev;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005645 /*
5646	 * Push all migratable network devices back to the
5647	 * initial network namespace
5648 */
5649 rtnl_lock();
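	/* Deleting or moving a device below modifies the list being
	 * walked, so jump back to "restart" after every such change
	 * instead of continuing a now-stale iteration.
	 */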
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005650restart:
5651 for_each_netdev(net, dev) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005652 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005653 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005654
5655	/* Ignore unmovable devices (e.g. loopback) */
5656 if (dev->features & NETIF_F_NETNS_LOCAL)
5657 continue;
5658
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005659 /* Delete virtual devices */
5660 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
Eric Dumazet23289a32009-10-27 07:06:36 +00005661 dev->rtnl_link_ops->dellink(dev, NULL);
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005662 goto restart;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005663 }
5664
Eric W. Biedermance286d32007-09-12 13:53:49 +02005665	/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005666 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5667 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005668 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005669 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005670 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005671 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005672 }
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005673 goto restart;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005674 }
5675 rtnl_unlock();
5676}
5677
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005678static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005679 .exit = default_device_exit,
5680};
5681
Linus Torvalds1da177e2005-04-16 15:20:36 -07005682/*
5683 * Initialize the DEV module. At boot time this walks the device list and
5684 * unhooks any devices that fail to initialise (normally hardware not
5685 * present) and leaves us with a valid list of present and active devices.
5686 *
5687 */
5688
5689/*
5690 * This is called single threaded during boot, so no need
5691 * to take the rtnl semaphore.
5692 */
5693static int __init net_dev_init(void)
5694{
5695 int i, rc = -ENOMEM;
5696
5697 BUG_ON(!dev_boot_phase);
5698
Linus Torvalds1da177e2005-04-16 15:20:36 -07005699 if (dev_proc_init())
5700 goto out;
5701
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005702 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703 goto out;
5704
5705 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005706 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005707 INIT_LIST_HEAD(&ptype_base[i]);
5708
Eric W. Biederman881d9662007-09-17 11:56:21 -07005709 if (register_pernet_subsys(&netdev_net_ops))
5710 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711
5712 /*
5713 * Initialise the packet receive queues.
5714 */
5715
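	/*
	 * Each CPU gets a "backlog" pseudo-NAPI instance: packets queued
	 * by netif_rx() land in input_pkt_queue and are later drained by
	 * process_backlog() from net_rx_action(), scheduled like any
	 * other NAPI poller with weight weight_p.
	 */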
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005716 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005717 struct softnet_data *queue;
5718
5719 queue = &per_cpu(softnet_data, i);
5720 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721 queue->completion_queue = NULL;
5722 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005723
5724 queue->backlog.poll = process_backlog;
5725 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005726 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005727 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005728 }
5729
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730 dev_boot_phase = 0;
5731
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005732	/* The loopback device is special: if any other network device
5733	 * is present in a network namespace, the loopback device must
5734	 * be present too.  Since we now dynamically allocate and free
5735	 * the loopback device, ensure this invariant is maintained by
5736	 * keeping the loopback device as the first device on the
5737	 * list of network devices, so that it is the first device
5738	 * that appears and the last network device
5739	 * that disappears.
5740	 */
5741 if (register_pernet_device(&loopback_net_ops))
5742 goto out;
5743
5744 if (register_pernet_device(&default_device_ops))
5745 goto out;
5746
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005747 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5748 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005749
5750 hotcpu_notifier(dev_cpu_callback, 0);
5751 dst_init();
5752 dev_mcast_init();
5753 rc = 0;
5754out:
5755 return rc;
5756}
5757
5758subsys_initcall(net_dev_init);
5759
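/* Seed the secret used by skb_tx_hash() when spreading flows across a
 * multiqueue device's transmit queues.
 */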
Krishna Kumare88721f2009-02-18 17:55:02 -08005760static int __init initialize_hashrnd(void)
5761{
5762 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5763 return 0;
5764}
5765
5766late_initcall_sync(initialize_hashrnd);
5767