/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

struct netpoll;

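/*
 * One of these is filled in by a netpoll client (netconsole,
 * kgdb-over-ethernet, ...) and handed to netpoll_setup().  A minimal,
 * netconsole-style static initialization might look like the sketch
 * below; the values and the "myclient" name are illustrative only:
 *
 *	static struct netpoll np = {
 *		.name		= "myclient",
 *		.dev_name	= "eth0",
 *		.local_port	= 6665,
 *		.remote_port	= 6666,
 *		.remote_mac	= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 */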
struct netpoll {
	struct net_device *dev;
	char dev_name[16], *name;
	void (*rx_hook)(struct netpoll *, int, char *, int);
	void (*drop)(struct sk_buff *skb);
	u32 local_ip, remote_ip;
	u16 local_port, remote_port;
	unsigned char local_mac[6], remote_mac[6];
};

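/*
 * Per-device netpoll state, allocated by netpoll_setup() and hung off
 * net_device->npinfo.  poll_lock/poll_owner serialize the device's
 * ->poll() path; rx_lock protects rx_np and rx_flags.
 */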
struct netpoll_info {
	atomic_t refcnt;
	spinlock_t poll_lock;
	int poll_owner;
	int tries;
	int rx_flags;
	spinlock_t rx_lock;
	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
};

void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_queue(struct sk_buff *skb);

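/*
 * Sketch of the call sequence a client is expected to follow, based only
 * on the declarations above (the option string and the error handling
 * are illustrative):
 *
 *	if (netpoll_parse_options(&np, "6665@10.0.0.1/eth0,6666@10.0.0.2/"))
 *		return -EINVAL;
 *	if (netpoll_setup(&np))
 *		return -EINVAL;
 *	netpoll_send_udp(&np, msg, strlen(msg));
 *	...
 *	netpoll_cleanup(&np);
 */
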
#ifdef CONFIG_NETPOLL
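/*
 * Called early in the packet receive path (e.g. netif_rx()); a non-zero
 * return means netpoll consumed the skb and normal processing should
 * stop.  Illustrative caller pattern:
 *
 *	if (netpoll_rx(skb))
 *		return NET_RX_DROP;
 */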
static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	unsigned long flags;
	int ret = 0;

	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}

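/*
 * netpoll_poll_lock()/netpoll_poll_unlock() are expected to be paired
 * around a device's ->poll() callback, e.g. (illustrative sketch of the
 * rx softirq usage):
 *
 *	void *have = netpoll_poll_lock(dev);
 *	dev->poll(dev, &budget);
 *	netpoll_poll_unlock(have);
 */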
static inline void *netpoll_poll_lock(struct net_device *dev)
{
	rcu_read_lock(); /* deal with race on ->npinfo */
	if (dev->npinfo) {
		spin_lock(&dev->npinfo->poll_lock);
		dev->npinfo->poll_owner = smp_processor_id();
		return dev->npinfo;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct netpoll_info *npi = have;

	if (npi) {
		npi->poll_owner = -1;
		spin_unlock(&npi->poll_lock);
	}
	rcu_read_unlock();
}

#else
#define netpoll_rx(a) 0
#define netpoll_poll_lock(a) NULL
#define netpoll_poll_unlock(a)
#endif

#endif