/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
  "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
  "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
  "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
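/* Rough sense of scale: assuming a struct sk_buff of about 256 bytes,
 * _SK_MEM_OVERHEAD is roughly 512 bytes, so the maxima above come to
 * about 512 * 256 = 128 KiB per direction; the exact figure differs by
 * platform, which is why it is computed rather than hard-coded.
 */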

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

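/*
 * Convert a user-supplied struct timeval into a timeout in jiffies.
 * A negative tv_sec is clamped to "expire immediately" (with a
 * rate-limited warning), and {0, 0} means wait forever
 * (MAX_SCHEDULE_TIMEOUT).
 */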
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}


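/*
 * Queue an skb on sk's receive queue: run the socket filter, charge
 * the receive buffer, then append under the queue lock and notify via
 * sk_data_ready().  Drops are counted in sk->sk_drops and reported as
 * -ENOMEM (receive buffer full) or -ENOBUFS (memory accounting).
 */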
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

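/*
 * Deliver an skb to a protocol handler: if the socket is not owned by
 * a user context, run sk_backlog_rcv() directly under the bottom-half
 * lock; otherwise park the skb on the socket backlog.  A reference to
 * sk is consumed on all paths (sock_put() at "out").
 */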
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

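/*
 * Validate the cached route: if the dst is marked obsolete and its
 * ->check() rejects the cookie, drop the cached entry (and the cached
 * tx queue mapping) and return NULL so the caller re-routes.
 */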
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		rcu_assign_pointer(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

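/*
 * SO_BINDTODEVICE: resolve the interface name to an ifindex under RCU
 * (CAP_NET_RAW required), bind the socket to it and reset the cached
 * route.  An empty name or zero-length option unbinds the socket.
 */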
static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

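/* Set or clear a socket flag according to a boolean option value. */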
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	/* We implement the SO_SNDLOWAT etc to
	   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}

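/*
 * Generic getsockopt: each case fills the union below with an int, a
 * struct linger or a struct timeval and sets lv to that value's size;
 * the common tail then copies min(len, lv) bytes to userspace and
 * reports the length actually written.
 */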
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
		     sizeof(osk->sk_tx_queue_mapping));
	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			/*
			 * caches using SLAB_DESTROY_BY_RCU should leave
			 * sk_node.next unmodified. Special care is taken
			 * when initializing object to zero.
			 */
			if (offsetof(struct sock, sk_node.next) != 0)
				memset(sk, 0, offsetof(struct sock, sk_node.next));
			memset(&sk->sk_node.pprev, 0,
			       prot->obj_size - offsetof(struct sock,
							 sk_node.pprev));
		}
	}
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

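/* Counterpart of sk_prot_alloc(): free the security state, return the
 * object to its slab cache (or kfree() it) and drop the module ref.
 */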
static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid = task_cls_classid(current);

	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * Last sock_put should drop reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking reference to stopping namespace
 * is not an option.
 * Take reference to a socket to remove it from hash _alive_ and after that
 * destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

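/*
 * Scale the buffer sysctl defaults to the machine: assuming 4 KiB
 * pages, 4096 pages is 16 MB (clamp everything to ~32 KiB) and
 * 131072 pages is 512 MB (raise only the maxima to ~128 KiB).
 */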
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284void __init sk_init(void)
1285{
Jan Beulich44813742009-09-21 17:03:05 -07001286 if (totalram_pages <= 4096) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 sysctl_wmem_max = 32767;
1288 sysctl_rmem_max = 32767;
1289 sysctl_wmem_default = 32767;
1290 sysctl_rmem_default = 32767;
Jan Beulich44813742009-09-21 17:03:05 -07001291 } else if (totalram_pages >= 131072) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 sysctl_wmem_max = 131071;
1293 sysctl_rmem_max = 131071;
1294 }
1295}
1296
1297/*
1298 * Simple resource managers for sockets.
1299 */
1300
1301
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001302/*
1303 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 */
1305void sock_wfree(struct sk_buff *skb)
1306{
1307 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001308 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309
Eric Dumazetd99927f2009-09-24 10:49:24 +00001310 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1311 /*
1312 * Keep a reference on sk_wmem_alloc, this will be released
1313 * after sk_write_space() call
1314 */
1315 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001317 len = 1;
1318 }
	/*
	 * If sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets.
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_uncharge(skb->sk, skb->truesize);
}
EXPORT_SYMBOL(sock_rfree);
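
/*
 * Pairing sketch (illustrative): skb_set_owner_r() is the charging
 * side of sock_rfree() above; it adds skb->truesize to sk_rmem_alloc
 * and installs sock_rfree as skb->destructor, so the charge is undone
 * automatically when the skb is freed:
 *
 *	skb_set_owner_r(skb, sk);		// charge receive memory
 *	skb_queue_tail(&sk->sk_receive_queue, skb);
 *	...
 *	kfree_skb(skb);				// sock_rfree() uncharges
 */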


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
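
/*
 * Pairing sketch (illustrative): callers must free option memory with
 * the same size they allocated, because sock_kfree_s() only trusts the
 * size it is handed when adjusting sk_omem_alloc:
 *
 *	void *opt = sock_kmalloc(sk, size, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, size);		// same size as above
 */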

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 * Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
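
/*
 * Typical call pattern (simplified sketch of a datagram sendmsg path):
 * the helper blocks until write space appears, the timeout expires or
 * a signal arrives, and reports the reason through *errcode:
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;	// err is -EAGAIN, -EPIPE, -ERESTARTSYS, ...
 */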

static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
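
/*
 * Call pattern sketch (simplified; error and signal handling omitted):
 * a blocking recvmsg loops on its receive queue with the socket lock
 * held and lets sk_wait_data() sleep for it; sk_wait_event() releases
 * and retakes the lock around schedule_timeout():
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		sk_wait_data(sk, &timeo);
 *	}
 */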

/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	int allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* Guarantee minimum buffer size under pressure. */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
			return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_sub(amt, prot->memory_allocated);
	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
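
/*
 * Worked example of the accounting above, assuming SK_MEM_QUANTUM ==
 * PAGE_SIZE == 4096 (numbers are for illustration only): scheduling
 * size = 6000 gives amt = sk_mem_pages(6000) = 2, so sk_forward_alloc
 * grows by 8192 bytes while memory_allocated grows by 2 quanta; after
 * the 6000-byte charge, the 2192 bytes of slack left in
 * sk_forward_alloc can satisfy later small charges without touching
 * the shared counter again.
 */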

/**
 * __sk_mem_reclaim - reclaim memory_allocated
 * @sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
		   prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
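
/*
 * Wiring sketch (field subset and names hypothetical): a protocol that
 * does not support part of the proto_ops surface points the unsupported
 * entries at these stubs instead of providing empty methods:
 *
 *	static const struct proto_ops example_ops = {
 *		.family		= PF_EXAMPLE,
 *		.accept		= sock_no_accept,
 *		.socketpair	= sock_no_socketpair,
 *		.mmap		= sock_no_mmap,
 *		// remaining fields point at real handlers
 *	};
 */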

/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
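
/*
 * Reference-count contract (illustrative): sk_reset_timer() takes a
 * sock reference only when it arms a previously idle timer, and either
 * sk_stop_timer() or a sock_put() in the expiry handler releases it:
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);	// sock_hold
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);			// __sock_put
 */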

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off = 0;

	sk->sk_peercred.pid = 0;
	sk->sk_peercred.uid = -1;
	sk->sk_peercred.gid = -1;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
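
/*
 * Usage sketch: process context brackets socket state changes with
 * lock_sock()/release_sock(); packets that softirq context queued to
 * the backlog while the lock was owned are replayed by __release_sock()
 * on the way out:
 *
 *	lock_sock(sk);
 *	// mutate sk, walk its queues, sleep if needed
 *	release_sock(sk);	// also flushes sk->sk_backlog
 */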

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: the fast path returns with BH still disabled.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
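
/*
 * Pairing sketch, assuming the companion helper unlock_sock_fast() from
 * the same interface: the boolean result tells the caller which unlock
 * side matches the path that was taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *	// short critical section
 *	unlock_sock_fast(sk, slow);	// spin_unlock_bh() or release_sock()
 */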

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		sock_set_flag(sk, flag);
		/*
		 * We just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one.
		 */
		if (!sock_flag(sk,
			       flag == SOCK_TIMESTAMP ?
			       SOCK_TIMESTAMPING_RX_SOFTWARE :
			       SOCK_TIMESTAMP))
			net_enable_timestamp();
	}
}

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sock_common_release is called, processes have
	 * no access to the socket, but the net still has.
	 * Step one: detach it from networking.
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	int cpu = smp_processor_id();
	per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
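
/*
 * Registration sketch (names hypothetical): a protocol module declares
 * its struct proto and registers it at init time, requesting a slab for
 * its sockets, and unregisters it symmetrically on exit:
 *
 *	static struct proto example_proto = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	err = proto_register(&example_proto, 1);	// 1 => allocate slab
 *	...
 *	proto_unregister(&example_proto);
 */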

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_lock)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_lock)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}


static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */