#include <linux/if_link.h>
#ifdef __KERNEL__
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/timer.h>
+#include <linux/bug.h>
#include <linux/delay.h>
-#include <linux/mm.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
-#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
+#include <linux/dynamic_queue_limits.h>
#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
+#include <net/netprio_cgroup.h>
+
+#include <linux/netdev_features.h>
-struct vlan_group;
struct netpoll_info;
+struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
#define SET_ETHTOOL_OPS(netdev,ops) \
( (netdev)->ethtool_ops = (ops) )
-#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
- functions are available. */
-#define HAVE_FREE_NETDEV /* free_netdev() */
-#define HAVE_NETDEV_PRIV /* netdev_priv() */
-
/* hardware address assignment types */
#define NET_ADDR_PERM 0 /* address is permanent (default) */
#define NET_ADDR_RANDOM 1 /* address is generated randomly */
* used.
*/
-#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96
# endif
-#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif
-#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
- !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
- !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
- !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
+#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
+ !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#include <linux/cache.h>
#include <linux/skbuff.h>
+#ifdef CONFIG_RPS
+#include <linux/static_key.h>
+extern struct static_key rps_needed;
+#endif
+
struct neighbour;
struct neigh_parms;
struct sk_buff;
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
struct hh_cache {
- struct hh_cache *hh_next; /* Next entry */
- atomic_t hh_refcnt; /* number of users */
-/*
- * We want hh_output, hh_len, hh_lock and hh_data be a in a separate
- * cache line on SMP.
- * They are mostly read, but hh_refcnt may be changed quite frequently,
- * incurring cache line ping pongs.
- */
- __be16 hh_type ____cacheline_aligned_in_smp;
- /* protocol identifier, f.e ETH_P_IP
- * NOTE: For VLANs, this will be the
- * encapuslated type. --BLG
- */
- u16 hh_len; /* length of header */
- int (*hh_output)(struct sk_buff *skb);
+ u16 hh_len;
+ u16 __pad;
seqlock_t hh_lock;
/* cached hardware header; allow for machine alignment needs. */
unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
-static inline void hh_cache_put(struct hh_cache *hh)
-{
- if (atomic_dec_and_test(&hh->hh_refcnt))
- kfree(hh);
-}
-
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
* Alternative is:
* dev->hard_header_len ? (dev->hard_header_len +
*
* We could use other alignment values, but we must maintain the
* relationship HH alignment <= LL alignment.
- *
- * LL_ALLOCATED_SPACE also takes into account the tailroom the device
- * may need.
*/
#define LL_RESERVED_SPACE(dev) \
((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
-#define LL_ALLOCATED_SPACE(dev) \
-	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
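
For example, an output path typically reserves this space up front before building headers (common pattern; payload_len is hypothetical, and callers must now account for dev->needed_tailroom themselves since LL_ALLOCATED_SPACE is gone):

	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	/* build link-layer and protocol headers, then the payload */
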
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned len);
int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
int (*rebuild)(struct sk_buff *skb);
-#define HAVE_HEADER_CACHE
- int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
+ int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
extern void __napi_schedule(struct napi_struct *n);
-static inline int napi_disable_pending(struct napi_struct *n)
+static inline bool napi_disable_pending(struct napi_struct *n)
{
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
 * ensure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
-static inline int napi_schedule_prep(struct napi_struct *n)
+static inline bool napi_schedule_prep(struct napi_struct *n)
{
return !napi_disable_pending(n) &&
!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
-static inline int napi_reschedule(struct napi_struct *napi)
+static inline bool napi_reschedule(struct napi_struct *napi)
{
if (napi_schedule_prep(napi)) {
__napi_schedule(napi);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
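
As an illustration, a driver's poll callback built on these helpers might look like this (sketch only; the my_* helpers are hypothetical, not taken from any in-tree driver):

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int work = my_clean_rx_ring(napi, budget);	/* hypothetical RX cleanup */

	if (work < budget) {
		napi_complete(napi);
		my_enable_rx_irq(napi);			/* hypothetical */
		/* if new packets raced in, get back on the poll list */
		if (my_rx_pending(napi))
			napi_reschedule(napi);
	}
	return work;
}
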
/**
#endif
enum netdev_queue_state_t {
- __QUEUE_STATE_XOFF,
+ __QUEUE_STATE_DRV_XOFF,
+ __QUEUE_STATE_STACK_XOFF,
__QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
- (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
+ (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
+ (1 << __QUEUE_STATE_FROZEN))
};
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
+ * netif_tx_* functions below are used to manipulate this flag. The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently. The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or the stack (either
+ * of the XOFF bits is set in the state). Drivers should not need to call
+ * the netif_xmit_*stopped functions; they should only use netif_tx_*.
+ */
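
For example, a driver stops its own queue from the xmit path when its ring fills (sketch; my_tx_ring_full() is hypothetical):

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	if (my_tx_ring_full(dev)) {		/* hypothetical */
		netif_tx_stop_queue(txq);	/* sets __QUEUE_STATE_DRV_XOFF */
		return NETDEV_TX_BUSY;
	}
	/* post the descriptor; the completion path wakes the queue again
	 * via netif_tx_wake_queue() once ring space is reclaimed */
	return NETDEV_TX_OK;
}
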
struct netdev_queue {
/*
*/
struct net_device *dev;
struct Qdisc *qdisc;
- unsigned long state;
struct Qdisc *qdisc_sleeping;
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
struct kobject kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
* please use this field instead of dev->trans_start
*/
unsigned long trans_start;
+
+ /*
+ * Number of TX timeouts for this queue
+ * (/sys/class/net/DEV/Q/trans_timeout)
+ */
+ unsigned long trans_timeout;
+
+ unsigned long state;
+
+#ifdef CONFIG_BQL
+ struct dql dql;
+#endif
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
struct rcu_head rcu;
u16 cpus[0];
};
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
/*
* The rps_dev_flow structure contains the mapping of a flow to a CPU, the
struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
- (_num * sizeof(struct rps_dev_flow)))
+ ((_num) * sizeof(struct rps_dev_flow)))
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
- (_num * sizeof(u16)))
+ ((_num) * sizeof(u16)))
#define RPS_NO_CPU 0xffff
struct rcu_head rcu;
u16 queues[0];
};
-#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
/ sizeof(u16))
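
Each *_SIZE macro sizes a structure ending in a flexible array member; e.g. net-sysfs allocates an RPS map roughly along these lines (sketch; nr_ids is hypothetical and rps_map's leading len field, not shown in this hunk, is assumed):

	struct rps_map *map;

	map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(nr_ids),
			    L1_CACHE_BYTES), GFP_KERNEL);
	if (map)
		map->len = nr_ids;	/* 'len' precedes rcu/cpus in the full struct */
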
u16 offset;
};
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+/*
+ * This structure is to hold information about the device
+ * configured to run FCoE protocol stack.
+ */
+struct netdev_fcoe_hbainfo {
+ char manufacturer[64];
+ char serial_number[64];
+ char hardware_version[64];
+ char driver_version[64];
+ char optionrom_version[64];
+ char firmware_version[64];
+ char model[256];
+ char model_description[256];
+};
+#endif
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
 * This function is called when the device's address list filtering changes.
- *
- * void (*ndo_set_multicast_list)(struct net_device *dev);
- * This function is called when the multicast address list changes.
+ * If the driver handles unicast address filtering, it should set
+ * IFF_UNICAST_FLT in its priv_flags.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
- * If device support VLAN receive acceleration
- * (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
- * when vlan groups for the device changes. Note: grp is NULL
- * if no vlan's groups are being used.
- *
- * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is registered.
*
- * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is unregistered.
*
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
* int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
* int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
+ * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_config)(struct net_device *dev,
* int vf, struct ifla_vf_info *ivf);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* perform necessary setup and returns 1 to indicate the device is set up
 * successfully to perform DDP on this I/O; otherwise it returns 0.
*
+ * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
+ * struct netdev_fcoe_hbainfo *hbainfo);
+ * Called when the FCoE protocol stack wants information on the underlying
+ * device. This information is utilized by the FCoE protocol stack to
+ * register attributes with the Fibre Channel management service as per the
+ * FC-GS Fabric Device Management Information (FDMI) specification.
+ *
* int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
* Called when the underlying device wants to override default World Wide
* Name (WWN) generation mechanism in FCoE protocol stack to pass its own
* Called to release previously enslaved netdev.
*
* Feature/offload setting functions.
- * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ * netdev_features_t features);
* Adjusts the requested feature flags according to device-specific
* constraints, and returns the resulting flags. Must not modify
* the device state.
*
- * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
* Called to update device configuration to new features. Passed
 * feature set might be less than what was returned by ndo_fix_features().
* Must return >0 or -errno if it changed dev->features itself.
*
*/
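
For instance, a driver whose hardware cannot segment without also checksumming might clamp the requested set like so (hypothetical constraint, shown only to illustrate the masking idiom):

static netdev_features_t my_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	/* hypothetical: this hardware cannot do TSO without checksum offload */
	if (!(features & NETIF_F_ALL_CSUM))
		features &= ~NETIF_F_ALL_TSO;
	return features;
}
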
-#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
void (*ndo_uninit)(struct net_device *dev);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
- void (*ndo_set_multicast_list)(struct net_device *dev);
int (*ndo_set_mac_address)(struct net_device *dev,
void *addr);
int (*ndo_validate_addr)(struct net_device *dev);
struct rtnl_link_stats64 *storage);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
- void (*ndo_vlan_rx_register)(struct net_device *dev,
- struct vlan_group *grp);
- void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
+ int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid);
- void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
+ int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
int queue, u16 vlan, u8 qos);
int (*ndo_set_vf_tx_rate)(struct net_device *dev,
int vf, int rate);
+ int (*ndo_set_vf_spoofchk)(struct net_device *dev,
+ int vf, bool setting);
int (*ndo_get_vf_config)(struct net_device *dev,
int vf,
struct ifla_vf_info *ivf);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
u16 xid,
struct scatterlist *sgl,
unsigned int sgc);
+ int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
+ struct netdev_fcoe_hbainfo *hbainfo);
+#endif
+
+#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
int (*ndo_fcoe_get_wwn)(struct net_device *dev,
u64 *wwn, int type);
#endif
+
#ifdef CONFIG_RFS_ACCEL
int (*ndo_rx_flow_steer)(struct net_device *dev,
const struct sk_buff *skb,
struct net_device *slave_dev);
int (*ndo_del_slave)(struct net_device *dev,
struct net_device *slave_dev);
- u32 (*ndo_fix_features)(struct net_device *dev,
- u32 features);
+ netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
- u32 features);
+ netdev_features_t features);
+ int (*ndo_neigh_construct)(struct neighbour *n);
+ void (*ndo_neigh_destroy)(struct neighbour *n);
};
/*
*/
char name[IFNAMSIZ];
- struct pm_qos_request_list pm_qos_req;
+ struct pm_qos_request pm_qos_req;
/* device name hash chain */
struct hlist_node name_hlist;
struct list_head unreg_list;
/* currently active device features */
- u32 features;
+ netdev_features_t features;
/* user-changeable features */
- u32 hw_features;
+ netdev_features_t hw_features;
/* user-requested features */
- u32 wanted_features;
+ netdev_features_t wanted_features;
/* mask of features inheritable by VLAN devices */
- u32 vlan_features;
-
- /* Net device feature bits; if you change something,
- * also update netdev_features_strings[] in ethtool.c */
-
-#define NETIF_F_SG 1 /* Scatter/gather IO. */
-#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
-#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
-#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
-#define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
-#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
-#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
-#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
-#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
-#define NETIF_F_GSO 2048 /* Enable software GSO. */
-#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
- /* do not use LLTX in new drivers */
-#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
-#define NETIF_F_GRO 16384 /* Generic receive offload */
-#define NETIF_F_LRO 32768 /* large receive offload */
-
-/* the GSO_MASK reserves bits 16 through 23 */
-#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
-#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
-#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
-#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
-#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
-#define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */
-#define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */
-#define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */
-
- /* Segmentation offload features */
-#define NETIF_F_GSO_SHIFT 16
-#define NETIF_F_GSO_MASK 0x00ff0000
-#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
-#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
-
- /* Features valid for ethtool to change */
- /* = all defined minus driver/device-class-related */
-#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
- NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
-#define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE)
-
- /* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
- NETIF_F_TSO6 | NETIF_F_UFO)
-
-
-#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
-#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
-#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
-#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
-
-#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-
-#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
- NETIF_F_FSO)
-
- /*
- * If one device supports one of these features, then enable them
- * for all in netdev_increment_features.
- */
-#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
- NETIF_F_SG | NETIF_F_HIGHDMA | \
- NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
- /*
- * If one device doesn't support one of these features, then disable it
- * for all in netdev_increment_features.
- */
-#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
-
- /* changeable features with no special hardware requirements */
-#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+ netdev_features_t vlan_features;
/* Interface index. Unique device identifier */
int ifindex;
const struct header_ops *header_ops;
unsigned int flags; /* interface flags (a la BSD) */
- unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */
+ unsigned int priv_flags; /* Like 'flags' but invisible to userspace.
+ * See if.h for definitions. */
unsigned short gflags;
unsigned short padded; /* How much padding added by alloc_netdev() */
unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
unsigned char addr_assign_type; /* hw address assignment type */
unsigned char addr_len; /* hardware address length */
+ unsigned char neigh_priv_len;
unsigned short dev_id; /* for shared network cards */
spinlock_t addr_list_lock;
struct netdev_hw_addr_list uc; /* Unicast mac addresses */
struct netdev_hw_addr_list mc; /* Multicast mac addresses */
- int uc_promisc;
+ bool uc_promisc;
unsigned int promiscuity;
unsigned int allmulti;
/* Protocol specific pointers */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- struct vlan_group __rcu *vlgrp; /* VLAN group */
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ struct vlan_info __rcu *vlan_info; /* VLAN info */
#endif
-#ifdef CONFIG_NET_DSA
- void *dsa_ptr; /* dsa specific data */
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
#endif
void *atalk_ptr; /* AppleTalk link */
struct in_device __rcu *ip_ptr; /* IPv4 specific data */
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
struct kset *queues_kset;
+#endif
+#ifdef CONFIG_RPS
struct netdev_rx_queue *_rx;
/* Number of RX queues allocated at register_netdev() time */
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
/* max exchange id for FCoE LRO by ddp */
unsigned int fcoe_ddp_xid;
#endif
- /* n-tuple filter list attached to this device */
- struct ethtool_rx_ntuple_list ethtool_ntuple_list;
-
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+ struct netprio_map __rcu *priomap;
+#endif
/* phy device may attach itself for hardware timestamping */
struct phy_device *phydev;
struct packet_type *,
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- u32 features);
+ netdev_features_t features);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
struct list_head list;
};
-#include <linux/interrupt.h>
#include <linux/notifier.h>
+/* netdevice notifier chain. Please remember to update the rtnetlink
+ * notification exclusion list in rtnetlink_event() when adding new
+ * types.
+ */
+#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
+#define NETDEV_DOWN 0x0002
+#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
+				   detected a hardware crash and restarted
+				   - we can use this, e.g., to kick TCP sessions
+				   once done */
+#define NETDEV_CHANGE 0x0004 /* Notify device state change */
+#define NETDEV_REGISTER 0x0005
+#define NETDEV_UNREGISTER 0x0006
+#define NETDEV_CHANGEMTU 0x0007
+#define NETDEV_CHANGEADDR 0x0008
+#define NETDEV_GOING_DOWN 0x0009
+#define NETDEV_CHANGENAME 0x000A
+#define NETDEV_FEAT_CHANGE 0x000B
+#define NETDEV_BONDING_FAILOVER 0x000C
+#define NETDEV_PRE_UP 0x000D
+#define NETDEV_PRE_TYPE_CHANGE 0x000E
+#define NETDEV_POST_TYPE_CHANGE 0x000F
+#define NETDEV_POST_INIT 0x0010
+#define NETDEV_UNREGISTER_BATCH 0x0011
+#define NETDEV_RELEASE 0x0012
+#define NETDEV_NOTIFY_PEERS 0x0013
+#define NETDEV_JOIN 0x0014
+
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+
+
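A subsystem consumes these events by registering a notifier_block; a minimal sketch (at this point in the tree the callback's ptr argument is the net_device itself):

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
	.notifier_call = my_netdev_event,
};
/* register_netdevice_notifier(&my_netdev_nb) at init time,
 * unregister_netdevice_notifier(&my_netdev_nb) at exit. */
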
extern rwlock_t dev_base_lock; /* Device list lock */
extern int netdev_refcnt_read(const struct net_device *dev);
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
-extern int register_netdevice_notifier(struct notifier_block *nb);
-extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
unsigned int offset)
{
+ if (!pskb_may_pull(skb, hlen))
+ return NULL;
+
NAPI_GRO_CB(skb)->frag0 = NULL;
NAPI_GRO_CB(skb)->frag0_len = 0;
- return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
+ return skb->data + offset;
}
static inline void *skb_gro_mac_header(struct sk_buff *skb)
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
-#define HAVE_NETIF_QUEUE
-
extern void __netif_schedule(struct Qdisc *q);
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
- if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+ if (!(txq->state & QUEUE_STATE_ANY_XOFF))
__netif_schedule(txq->qdisc);
}
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
- clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
return;
}
#endif
- if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
__netif_schedule(dev_queue->qdisc);
}
pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
return;
}
- set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
}
}
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
- return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
*
* Test if transmit queue on device is currently unable to send.
*/
-static inline int netif_queue_stopped(const struct net_device *dev)
+static inline bool netif_queue_stopped(const struct net_device *dev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
+{
+ return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
- return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+ return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
+}
+
+static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes)
+{
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+
+ if (likely(dql_avail(&dev_queue->dql) >= 0))
+ return;
+
+ set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+
+ /*
+	 * The XOFF flag must be set before checking dql_avail below,
+	 * because in netdev_tx_completed_queue we update dql_completed
+ * before checking the XOFF flag.
+ */
+ smp_mb();
+
+	/* check again in case another CPU has just made room available */
+ if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+ clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+#endif
+}
+
+static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
+{
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
+}
+
+static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
+ unsigned pkts, unsigned bytes)
+{
+#ifdef CONFIG_BQL
+ if (unlikely(!bytes))
+ return;
+
+ dql_completed(&dev_queue->dql, bytes);
+
+ /*
+	 * Without the memory barrier there is a small possibility that
+	 * netdev_tx_sent_queue will miss the update and cause the queue to
+	 * be stopped forever.
+ */
+ smp_mb();
+
+ if (dql_avail(&dev_queue->dql) < 0)
+ return;
+
+ if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
+ netif_schedule_queue(dev_queue);
+#endif
+}
+
+static inline void netdev_completed_queue(struct net_device *dev,
+ unsigned pkts, unsigned bytes)
+{
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
+}
+
+static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+{
+#ifdef CONFIG_BQL
+ clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
+ dql_reset(&q->dql);
+#endif
+}
+
+static inline void netdev_reset_queue(struct net_device *dev)
+{
+	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
}
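
A BQL-aware driver pairs these calls across its transmit and completion paths; in outline (txq, pkts_done and bytes_done are the driver's own bookkeeping):

	/* in ndo_start_xmit, once the descriptor is posted: */
	netdev_tx_sent_queue(txq, skb->len);

	/* in the TX completion handler, after reclaiming descriptors: */
	netdev_tx_completed_queue(txq, pkts_done, bytes_done);

	/* after a ring reset, so the dql state matches the empty ring: */
	netdev_tx_reset_queue(txq);
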
/**
*
* Test if the device has been brought up.
*/
-static inline int netif_running(const struct net_device *dev)
+static inline bool netif_running(const struct net_device *dev)
{
return test_bit(__LINK_STATE_START, &dev->state);
}
*
* Check individual transmit queue of a device with multiple transmit queues.
*/
-static inline int __netif_subqueue_stopped(const struct net_device *dev,
- u16 queue_index)
+static inline bool __netif_subqueue_stopped(const struct net_device *dev,
+ u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
return netif_tx_queue_stopped(txq);
}
-static inline int netif_subqueue_stopped(const struct net_device *dev,
- struct sk_buff *skb)
+static inline bool netif_subqueue_stopped(const struct net_device *dev,
+ struct sk_buff *skb)
{
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
if (netpoll_trap())
return;
#endif
- if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
__netif_schedule(txq->qdisc);
}
*
* Check if device has multiple transmit queues
*/
-static inline int netif_is_multiqueue(const struct net_device *dev)
+static inline bool netif_is_multiqueue(const struct net_device *dev)
{
return dev->num_tx_queues > 1;
}
*/
extern void dev_kfree_skb_any(struct sk_buff *skb);
-#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
-#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t dev_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
-extern int dev_valid_name(const char *name);
+extern bool dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
*/
static inline void dev_put(struct net_device *dev)
{
- irqsafe_cpu_dec(*dev->pcpu_refcnt);
+ this_cpu_dec(*dev->pcpu_refcnt);
}
/**
*/
static inline void dev_hold(struct net_device *dev)
{
- irqsafe_cpu_inc(*dev->pcpu_refcnt);
+ this_cpu_inc(*dev->pcpu_refcnt);
}
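
Taken together, dev_hold()/dev_put() bracket any period during which a struct net_device pointer is stashed away; a sketch (priv->peer is a hypothetical long-lived reference):

	dev_hold(dev);		/* per-cpu increment; cheap in any context */
	priv->peer = dev;

	/* ... later, when the stashed pointer is dropped ... */
	dev_put(priv->peer);	/* lets netdev_wait_allrefs() make progress */
	priv->peer = NULL;
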
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
*
* Check if carrier is present on device
*/
-static inline int netif_carrier_ok(const struct net_device *dev)
+static inline bool netif_carrier_ok(const struct net_device *dev)
{
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
*
 * Check if the device is dormant.
*/
-static inline int netif_dormant(const struct net_device *dev)
+static inline bool netif_dormant(const struct net_device *dev)
{
return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
*
* Check if carrier is operational
*/
-static inline int netif_oper_up(const struct net_device *dev)
+static inline bool netif_oper_up(const struct net_device *dev)
{
return (dev->operstate == IF_OPER_UP ||
dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
*
* Check if device has not been removed from system.
*/
-static inline int netif_device_present(struct net_device *dev)
+static inline bool netif_device_present(struct net_device *dev)
{
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
/*
* Network interface message level settings
*/
-#define HAVE_NETIF_MSG 1
enum {
NETIF_MSG_DRV = 0x0001,
txq->xmit_lock_owner = smp_processor_id();
}
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
+static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
- int ok = spin_trylock(&txq->_xmit_lock);
+ bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok))
txq->xmit_lock_owner = smp_processor_id();
return ok;
spin_lock(&dev->addr_list_lock);
}
+static inline void netif_addr_lock_nested(struct net_device *dev)
+{
+ spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+}
+
static inline void netif_addr_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->addr_list_lock);
extern void dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
+extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int netdev_set_bond_master(struct net_device *dev,
struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void linkwatch_run_queue(void);
-static inline u32 netdev_get_wanted_features(struct net_device *dev)
+static inline netdev_features_t netdev_get_wanted_features(
+ struct net_device *dev)
{
return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
-u32 netdev_increment_features(u32 all, u32 one, u32 mask);
-u32 netdev_fix_features(struct net_device *dev, u32 features);
+netdev_features_t netdev_increment_features(netdev_features_t all,
+ netdev_features_t one, netdev_features_t mask);
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
-u32 netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_features(struct sk_buff *skb);
-static inline int net_gso_ok(u32 features, int gso_type)
+static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
- int feature = gso_type << NETIF_F_GSO_SHIFT;
+ netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+
+ /* check flags correspondence */
+ BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+
return (features & feature) == feature;
}
-static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
+static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline int netif_needs_gso(struct sk_buff *skb, int features)
+static inline bool netif_needs_gso(struct sk_buff *skb,
+ netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
- unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
+ unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
+ (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
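
The core transmit path applies these predicates roughly as follows (condensed sketch, not the verbatim dev_hard_start_xmit() code; the drop label is assumed):

	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs))
			goto drop;		/* hypothetical error label */
		consume_skb(skb);		/* transmit each segment instead */
		skb = segs;
	}
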
static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
-static inline int netif_is_bond_slave(struct net_device *dev)
+static inline bool netif_is_bond_slave(struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
-extern struct pernet_operations __net_initdata loopback_net_ops;
-
-int dev_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd);
-
-static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
+static inline bool netif_supports_nofcs(struct net_device *dev)
{
- if (dev->features & NETIF_F_RXCSUM)
- return 1;
- if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
- return 0;
- return dev->ethtool_ops->get_rx_csum(dev);
+ return dev->priv_flags & IFF_SUPP_NOFCS;
}
-static inline u32 dev_ethtool_get_flags(struct net_device *dev)
-{
- if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
- return 0;
- return dev->ethtool_ops->get_flags(dev);
-}
+extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */
return dev->name;
}
-extern int netdev_printk(const char *level, const struct net_device *dev,
- const char *format, ...)
- __attribute__ ((format (printf, 3, 4)));
-extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern int netdev_alert(const struct net_device *dev, const char *format, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern int netdev_crit(const struct net_device *dev, const char *format, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern int netdev_err(const struct net_device *dev, const char *format, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern int netdev_warn(const struct net_device *dev, const char *format, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern int netdev_notice(const struct net_device *dev, const char *format, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern int netdev_info(const struct net_device *dev, const char *format, ...)
- __attribute__ ((format (printf, 2, 3)));
+extern int __netdev_printk(const char *level, const struct net_device *dev,
+ struct va_format *vaf);
+
+extern __printf(3, 4)
+int netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...);
+extern __printf(2, 3)
+int netdev_emerg(const struct net_device *dev, const char *format, ...);
+extern __printf(2, 3)
+int netdev_alert(const struct net_device *dev, const char *format, ...);
+extern __printf(2, 3)
+int netdev_crit(const struct net_device *dev, const char *format, ...);
+extern __printf(2, 3)
+int netdev_err(const struct net_device *dev, const char *format, ...);
+extern __printf(2, 3)
+int netdev_warn(const struct net_device *dev, const char *format, ...);
+extern __printf(2, 3)
+int netdev_notice(const struct net_device *dev, const char *format, ...);
+extern __printf(2, 3)
+int netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
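
These wrappers take a struct net_device rather than a struct device and prefix each message with the driver and netdev name; usage is analogous to dev_err() and friends (qid and speed below are hypothetical):

	netdev_err(dev, "TX watchdog fired on queue %d\n", qid);
	netdev_info(dev, "link up, %u Mbps\n", speed);
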
-#if defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
- dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
- netdev_name(__dev), ##args); \
+ dynamic_netdev_dbg(__dev, format, ##args); \
} while (0)
+#elif defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...) \
({ \
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
if (netif_msg_##type(priv)) \
- dynamic_dev_dbg((netdev)->dev.parent, \
- "%s: " format, \
- netdev_name(netdev), ##args); \
+ dynamic_netdev_dbg(netdev, format, ##args); \
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...) \