/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
  "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
  "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
  "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

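/*
 * Convert a user supplied struct timeval into a timeout in jiffies.
 * A zero timeval means "no timeout" (MAX_SCHEDULE_TIMEOUT); a negative
 * tv_sec is clamped to an immediate timeout with a rate-limited warning.
 * Used below by SO_RCVTIMEO and SO_SNDTIMEO.
 */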
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
				current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}


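/*
 * Queue a received skb on sk->sk_receive_queue after the receive-buffer
 * limit, socket filter and memory accounting checks have passed, then
 * wake the socket via sk_data_ready().  Returns 0 on success or a
 * negative error (-ENOMEM, -ENOBUFS, or the filter's verdict).
 */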
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* We escape from the RCU-protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		rcu_assign_pointer(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

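/*
 * SO_BINDTODEVICE helper: resolve the interface name under RCU and pin
 * the socket to its ifindex (0 unbinds).  Requires CAP_NET_RAW.
 */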
static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

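/*
 * Illustrative sketch (not kernel code): a plain setsockopt() at the
 * SOL_SOCKET level from userspace ends up here, e.g.
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 */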
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}

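/*
 * Read back a SOL_SOCKET option.  Results are staged in a small union
 * (int, struct linger or struct timeval) and copied out truncated to
 * the caller supplied length, which is written back through optlen.
 */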
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
		     sizeof(osk->sk_tx_queue_mapping));
	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			/*
			 * caches using SLAB_DESTROY_BY_RCU should let
			 * sk_node.next un-modified. Special care is taken
			 * when initializing object to zero.
			 */
			if (offsetof(struct sock, sk_node.next) != 0)
				memset(sk, 0, offsetof(struct sock, sk_node.next));
			memset(&sk->sk_node.pprev, 0,
			       prot->obj_size - offsetof(struct sock,
							 sk_node.pprev));
		}
	}
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid = task_cls_classid(current);

	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

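/*
 * Final teardown of a socket.  Reached either from sk_free(), when no
 * transmit packets are in flight, or later from sock_wfree() once the
 * last in-flight skb releases sk_wmem_alloc.
 */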
static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to the stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash while it is still
 * _alive_, and after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

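/*
 * Duplicate an existing socket, e.g. when a connection-oriented
 * protocol creates a child socket for an accepted connection.  The
 * parent is memcpy'd via sock_copy() and the per-socket state (locks,
 * queues, refcounts, filter charge) is then reinitialized.
 */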
struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

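/*
 * Attach a route to the socket and derive the offload capabilities
 * (scatter/gather, checksum, GSO) the transmit path may rely on from
 * the output device's features.
 */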
Andi Kleen99580892007-04-20 17:12:43 -07001273void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1274{
1275 __sk_dst_set(sk, dst);
1276 sk->sk_route_caps = dst->dev->features;
1277 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001278 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001279 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001280 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001281 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001282 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001283 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001284 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001285 sk->sk_gso_max_size = dst->dev->gso_max_size;
1286 }
Andi Kleen99580892007-04-20 17:12:43 -07001287 }
1288}
1289EXPORT_SYMBOL_GPL(sk_setup_caps);
1290
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291void __init sk_init(void)
1292{
Jan Beulich44813742009-09-21 17:03:05 -07001293 if (totalram_pages <= 4096) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 sysctl_wmem_max = 32767;
1295 sysctl_rmem_max = 32767;
1296 sysctl_wmem_default = 32767;
1297 sysctl_rmem_default = 32767;
Jan Beulich44813742009-09-21 17:03:05 -07001298 } else if (totalram_pages >= 131072) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 sysctl_wmem_max = 131071;
1300 sysctl_rmem_max = 131071;
1301 }
1302}
1303
1304/*
1305 * Simple resource managers for sockets.
1306 */
1307
1308
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001309/*
1310 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 */
1312void sock_wfree(struct sk_buff *skb)
1313{
1314 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001315 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316
Eric Dumazetd99927f2009-09-24 10:49:24 +00001317 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1318 /*
1319 * Keep a reference on sk_wmem_alloc, this will be released
1320 * after sk_write_space() call
1321 */
1322 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001324 len = 1;
1325 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001326 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001327 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1328 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001329 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001330 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001331 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332}
Eric Dumazet2a915252009-05-27 11:30:05 +00001333EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001335/*
1336 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 */
1338void sock_rfree(struct sk_buff *skb)
1339{
1340 struct sock *sk = skb->sk;
1341
1342 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001343 sk_mem_uncharge(skb->sk, skb->truesize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344}
Eric Dumazet2a915252009-05-27 11:30:05 +00001345EXPORT_SYMBOL(sock_rfree);
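
/*
 * Illustrative note, not taken from this file: these destructors are not
 * called directly by protocols.  skb_set_owner_w() charges skb->truesize
 * to sk_wmem_alloc and installs sock_wfree(); skb_set_owner_r() charges
 * sk_rmem_alloc and installs sock_rfree().  A minimal sketch of the
 * receive side:
 *
 *	skb_set_owner_r(skb, sk);		destructor becomes sock_rfree
 *	skb_queue_tail(&sk->sk_receive_queue, skb);
 */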
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
1347
1348int sock_i_uid(struct sock *sk)
1349{
1350 int uid;
1351
1352 read_lock(&sk->sk_callback_lock);
1353 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1354 read_unlock(&sk->sk_callback_lock);
1355 return uid;
1356}
Eric Dumazet2a915252009-05-27 11:30:05 +00001357EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358
1359unsigned long sock_i_ino(struct sock *sk)
1360{
1361 unsigned long ino;
1362
1363 read_lock(&sk->sk_callback_lock);
1364 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1365 read_unlock(&sk->sk_callback_lock);
1366 return ino;
1367}
Eric Dumazet2a915252009-05-27 11:30:05 +00001368EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369
1370/*
1371 * Allocate a skb from the socket's send buffer.
1372 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001373struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001374 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375{
1376 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001377 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 if (skb) {
1379 skb_set_owner_w(skb, sk);
1380 return skb;
1381 }
1382 }
1383 return NULL;
1384}
Eric Dumazet2a915252009-05-27 11:30:05 +00001385EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
1387/*
1388 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001389 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001390struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001391 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392{
1393 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1394 struct sk_buff *skb = alloc_skb(size, priority);
1395 if (skb) {
1396 skb_set_owner_r(skb, sk);
1397 return skb;
1398 }
1399 }
1400 return NULL;
1401}
1402
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001403/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001405 */
Al Virodd0fc662005-10-07 07:46:04 +01001406void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407{
1408 if ((unsigned)size <= sysctl_optmem_max &&
1409 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1410 void *mem;
1411 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001412 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 */
1414 atomic_add(size, &sk->sk_omem_alloc);
1415 mem = kmalloc(size, priority);
1416 if (mem)
1417 return mem;
1418 atomic_sub(size, &sk->sk_omem_alloc);
1419 }
1420 return NULL;
1421}
Eric Dumazet2a915252009-05-27 11:30:05 +00001422EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
1424/*
1425 * Free an option memory block.
1426 */
1427void sock_kfree_s(struct sock *sk, void *mem, int size)
1428{
1429 kfree(mem);
1430 atomic_sub(size, &sk->sk_omem_alloc);
1431}
Eric Dumazet2a915252009-05-27 11:30:05 +00001432EXPORT_SYMBOL(sock_kfree_s);
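
/*
 * Illustrative pairing, not taken from this file: option memory obtained
 * with sock_kmalloc() must be returned with sock_kfree_s() using the same
 * size, so that sk_omem_alloc stays balanced against sysctl_optmem_max.
 *
 *	void *opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 */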
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433
1434/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1435 I think these locks should be removed for datagram sockets.
1436 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001437static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438{
1439 DEFINE_WAIT(wait);
1440
1441 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1442 for (;;) {
1443 if (!timeo)
1444 break;
1445 if (signal_pending(current))
1446 break;
1447 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001448 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1450 break;
1451 if (sk->sk_shutdown & SEND_SHUTDOWN)
1452 break;
1453 if (sk->sk_err)
1454 break;
1455 timeo = schedule_timeout(timeo);
1456 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001457 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 return timeo;
1459}
1460
1461
1462/*
1463 * Generic send/receive buffer handlers
1464 */
1465
Herbert Xu4cc7f682009-02-04 16:55:54 -08001466struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1467 unsigned long data_len, int noblock,
1468 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469{
1470 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001471 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 long timeo;
1473 int err;
1474
1475 gfp_mask = sk->sk_allocation;
1476 if (gfp_mask & __GFP_WAIT)
1477 gfp_mask |= __GFP_REPEAT;
1478
1479 timeo = sock_sndtimeo(sk, noblock);
1480 while (1) {
1481 err = sock_error(sk);
1482 if (err != 0)
1483 goto failure;
1484
1485 err = -EPIPE;
1486 if (sk->sk_shutdown & SEND_SHUTDOWN)
1487 goto failure;
1488
1489 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001490 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 if (skb) {
1492 int npages;
1493 int i;
1494
1495 /* No pages, we're done... */
1496 if (!data_len)
1497 break;
1498
1499 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1500 skb->truesize += data_len;
1501 skb_shinfo(skb)->nr_frags = npages;
1502 for (i = 0; i < npages; i++) {
1503 struct page *page;
1504 skb_frag_t *frag;
1505
1506 page = alloc_pages(sk->sk_allocation, 0);
1507 if (!page) {
1508 err = -ENOBUFS;
1509 skb_shinfo(skb)->nr_frags = i;
1510 kfree_skb(skb);
1511 goto failure;
1512 }
1513
1514 frag = &skb_shinfo(skb)->frags[i];
1515 frag->page = page;
1516 frag->page_offset = 0;
1517 frag->size = (data_len >= PAGE_SIZE ?
1518 PAGE_SIZE :
1519 data_len);
1520 data_len -= PAGE_SIZE;
1521 }
1522
1523 /* Full success... */
1524 break;
1525 }
1526 err = -ENOBUFS;
1527 goto failure;
1528 }
1529 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1530 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1531 err = -EAGAIN;
1532 if (!timeo)
1533 goto failure;
1534 if (signal_pending(current))
1535 goto interrupted;
1536 timeo = sock_wait_for_wmem(sk, timeo);
1537 }
1538
1539 skb_set_owner_w(skb, sk);
1540 return skb;
1541
1542interrupted:
1543 err = sock_intr_errno(timeo);
1544failure:
1545 *errcode = err;
1546 return NULL;
1547}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001548EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001550struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 int noblock, int *errcode)
1552{
1553 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1554}
Eric Dumazet2a915252009-05-27 11:30:05 +00001555EXPORT_SYMBOL(sock_alloc_send_skb);
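
/*
 * Illustrative sketch, not taken from this file: a datagram protocol's
 * sendmsg() implementation would typically allocate its buffer through
 * sock_alloc_send_skb(), which blocks (subject to the socket's send
 * timeout) until write space is available.  "hlen" is assumed to cover
 * the headers the protocol needs in the linear area.
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 */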
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556
1557static void __lock_sock(struct sock *sk)
1558{
1559 DEFINE_WAIT(wait);
1560
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001561 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1563 TASK_UNINTERRUPTIBLE);
1564 spin_unlock_bh(&sk->sk_lock.slock);
1565 schedule();
1566 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001567 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 break;
1569 }
1570 finish_wait(&sk->sk_lock.wq, &wait);
1571}
1572
1573static void __release_sock(struct sock *sk)
1574{
1575 struct sk_buff *skb = sk->sk_backlog.head;
1576
1577 do {
1578 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1579 bh_unlock_sock(sk);
1580
1581 do {
1582 struct sk_buff *next = skb->next;
1583
Eric Dumazet7fee2262010-05-11 23:19:48 +00001584 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001586 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587
1588 /*
1589 * We are in process context here with softirqs
1590 * disabled; use cond_resched_softirq() to preempt.
1591 * This is safe to do because we've taken the backlog
1592 * queue private:
1593 */
1594 cond_resched_softirq();
1595
1596 skb = next;
1597 } while (skb != NULL);
1598
1599 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001600 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001601
1602 /*
1603 * Doing the zeroing here guarantees we cannot loop forever
1604 * while a wild producer attempts to flood us.
1605 */
1606 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607}
1608
1609/**
1610 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001611 * @sk: sock to wait on
1612 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 *
1614 * Now socket state including sk->sk_err is changed only under lock,
1615 * hence we may omit checks after joining the wait queue.
1616 * We check the receive queue before schedule() only as an optimization;
1617 * it is very likely that release_sock() added new data.
1618 */
1619int sk_wait_data(struct sock *sk, long *timeo)
1620{
1621 int rc;
1622 DEFINE_WAIT(wait);
1623
Eric Dumazetaa395142010-04-20 13:03:51 +00001624 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1626 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1627 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001628 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 return rc;
1630}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631EXPORT_SYMBOL(sk_wait_data);
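
/*
 * Illustrative sketch, not taken from this file: a typical blocking
 * recvmsg() loop waits for data roughly like this, with the socket lock
 * held around the queue checks (sk_wait_data() releases and re-takes it
 * via sk_wait_event()):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */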
1632
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001633/**
1634 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1635 * @sk: socket
1636 * @size: memory size to allocate
1637 * @kind: allocation type
1638 *
1639 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1640 * rmem allocation. This function assumes that protocols which have
1641 * memory_pressure use sk_wmem_queued as write buffer accounting.
1642 */
1643int __sk_mem_schedule(struct sock *sk, int size, int kind)
1644{
1645 struct proto *prot = sk->sk_prot;
1646 int amt = sk_mem_pages(size);
1647 int allocated;
1648
1649 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1650 allocated = atomic_add_return(amt, prot->memory_allocated);
1651
1652 /* Under limit. */
1653 if (allocated <= prot->sysctl_mem[0]) {
1654 if (prot->memory_pressure && *prot->memory_pressure)
1655 *prot->memory_pressure = 0;
1656 return 1;
1657 }
1658
1659 /* Under pressure. */
1660 if (allocated > prot->sysctl_mem[1])
1661 if (prot->enter_memory_pressure)
Pavel Emelyanov5c52ba12008-07-16 20:28:10 -07001662 prot->enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001663
1664 /* Over hard limit. */
1665 if (allocated > prot->sysctl_mem[2])
1666 goto suppress_allocation;
1667
1668 /* guarantee minimum buffer size under pressure */
1669 if (kind == SK_MEM_RECV) {
1670 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1671 return 1;
1672 } else { /* SK_MEM_SEND */
1673 if (sk->sk_type == SOCK_STREAM) {
1674 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1675 return 1;
1676 } else if (atomic_read(&sk->sk_wmem_alloc) <
1677 prot->sysctl_wmem[0])
1678 return 1;
1679 }
1680
1681 if (prot->memory_pressure) {
Eric Dumazet17483762008-11-25 21:16:35 -08001682 int alloc;
1683
1684 if (!*prot->memory_pressure)
1685 return 1;
1686 alloc = percpu_counter_read_positive(prot->sockets_allocated);
1687 if (prot->sysctl_mem[2] > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001688 sk_mem_pages(sk->sk_wmem_queued +
1689 atomic_read(&sk->sk_rmem_alloc) +
1690 sk->sk_forward_alloc))
1691 return 1;
1692 }
1693
1694suppress_allocation:
1695
1696 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1697 sk_stream_moderate_sndbuf(sk);
1698
1699 /* Fail only if socket is _under_ its sndbuf.
1700 * In this case we cannot block, so we have to fail.
1701 */
1702 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1703 return 1;
1704 }
1705
1706 /* Alas. Undo changes. */
1707 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1708 atomic_sub(amt, prot->memory_allocated);
1709 return 0;
1710}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001711EXPORT_SYMBOL(__sk_mem_schedule);
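
/*
 * Illustrative note, not taken from this file: protocols rarely call
 * __sk_mem_schedule() directly; they normally go through the
 * sk_wmem_schedule()/sk_rmem_schedule() wrappers, which only drop into
 * this slow path once sk_forward_alloc is exhausted, e.g. on receive:
 *
 *	if (!sk_rmem_schedule(sk, skb->truesize))
 *		goto drop;
 *	skb_set_owner_r(skb, sk);
 */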
1712
1713/**
1714 * __sk_mem_reclaim - reclaim memory_allocated
1715 * @sk: socket
1716 */
1717void __sk_mem_reclaim(struct sock *sk)
1718{
1719 struct proto *prot = sk->sk_prot;
1720
Eric Dumazet680a5a52007-12-31 15:00:50 -08001721 atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001722 prot->memory_allocated);
1723 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1724
1725 if (prot->memory_pressure && *prot->memory_pressure &&
1726 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1727 *prot->memory_pressure = 0;
1728}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001729EXPORT_SYMBOL(__sk_mem_reclaim);
1730
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732/*
1733 * Set of default routines for initialising struct proto_ops when
1734 * the protocol does not support a particular function. In certain
1735 * cases where it makes no sense for a protocol to have a "do nothing"
1736 * function, some default processing is provided.
1737 */
1738
1739int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1740{
1741 return -EOPNOTSUPP;
1742}
Eric Dumazet2a915252009-05-27 11:30:05 +00001743EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001745int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 int len, int flags)
1747{
1748 return -EOPNOTSUPP;
1749}
Eric Dumazet2a915252009-05-27 11:30:05 +00001750EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
1752int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1753{
1754 return -EOPNOTSUPP;
1755}
Eric Dumazet2a915252009-05-27 11:30:05 +00001756EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757
1758int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1759{
1760 return -EOPNOTSUPP;
1761}
Eric Dumazet2a915252009-05-27 11:30:05 +00001762EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001764int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 int *len, int peer)
1766{
1767 return -EOPNOTSUPP;
1768}
Eric Dumazet2a915252009-05-27 11:30:05 +00001769EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Eric Dumazet2a915252009-05-27 11:30:05 +00001771unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772{
1773 return 0;
1774}
Eric Dumazet2a915252009-05-27 11:30:05 +00001775EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
1777int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1778{
1779 return -EOPNOTSUPP;
1780}
Eric Dumazet2a915252009-05-27 11:30:05 +00001781EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
1783int sock_no_listen(struct socket *sock, int backlog)
1784{
1785 return -EOPNOTSUPP;
1786}
Eric Dumazet2a915252009-05-27 11:30:05 +00001787EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
1789int sock_no_shutdown(struct socket *sock, int how)
1790{
1791 return -EOPNOTSUPP;
1792}
Eric Dumazet2a915252009-05-27 11:30:05 +00001793EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794
1795int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07001796 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797{
1798 return -EOPNOTSUPP;
1799}
Eric Dumazet2a915252009-05-27 11:30:05 +00001800EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801
1802int sock_no_getsockopt(struct socket *sock, int level, int optname,
1803 char __user *optval, int __user *optlen)
1804{
1805 return -EOPNOTSUPP;
1806}
Eric Dumazet2a915252009-05-27 11:30:05 +00001807EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1810 size_t len)
1811{
1812 return -EOPNOTSUPP;
1813}
Eric Dumazet2a915252009-05-27 11:30:05 +00001814EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815
1816int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1817 size_t len, int flags)
1818{
1819 return -EOPNOTSUPP;
1820}
Eric Dumazet2a915252009-05-27 11:30:05 +00001821EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
1823int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1824{
1825 /* Mirror missing mmap method error code */
1826 return -ENODEV;
1827}
Eric Dumazet2a915252009-05-27 11:30:05 +00001828EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
1830ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1831{
1832 ssize_t res;
1833 struct msghdr msg = {.msg_flags = flags};
1834 struct kvec iov;
1835 char *kaddr = kmap(page);
1836 iov.iov_base = kaddr + offset;
1837 iov.iov_len = size;
1838 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1839 kunmap(page);
1840 return res;
1841}
Eric Dumazet2a915252009-05-27 11:30:05 +00001842EXPORT_SYMBOL(sock_no_sendpage);
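
/*
 * Illustrative sketch, not taken from this file: a protocol that does not
 * implement some of these operations simply points the corresponding
 * proto_ops members at the stubs above.  The family, the my_*() handlers
 * and the struct name are hypothetical; only the sock_no_*() and
 * datagram_poll() entries are real.
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.release	= my_release,
 *		.bind		= my_bind,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.getname	= my_getname,
 *		.poll		= datagram_poll,
 *		.ioctl		= sock_no_ioctl,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.setsockopt	= sock_no_setsockopt,
 *		.getsockopt	= sock_no_getsockopt,
 *		.sendmsg	= my_sendmsg,
 *		.recvmsg	= my_recvmsg,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */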
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843
1844/*
1845 * Default Socket Callbacks
1846 */
1847
1848static void sock_def_wakeup(struct sock *sk)
1849{
Eric Dumazet43815482010-04-29 11:01:49 +00001850 struct socket_wq *wq;
1851
1852 rcu_read_lock();
1853 wq = rcu_dereference(sk->sk_wq);
1854 if (wq_has_sleeper(wq))
1855 wake_up_interruptible_all(&wq->wait);
1856 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857}
1858
1859static void sock_def_error_report(struct sock *sk)
1860{
Eric Dumazet43815482010-04-29 11:01:49 +00001861 struct socket_wq *wq;
1862
1863 rcu_read_lock();
1864 wq = rcu_dereference(sk->sk_wq);
1865 if (wq_has_sleeper(wq))
1866 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001867 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00001868 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869}
1870
1871static void sock_def_readable(struct sock *sk, int len)
1872{
Eric Dumazet43815482010-04-29 11:01:49 +00001873 struct socket_wq *wq;
1874
1875 rcu_read_lock();
1876 wq = rcu_dereference(sk->sk_wq);
1877 if (wq_has_sleeper(wq))
1878 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
Davide Libenzi37e55402009-03-31 15:24:21 -07001879 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001880 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00001881 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882}
1883
1884static void sock_def_write_space(struct sock *sk)
1885{
Eric Dumazet43815482010-04-29 11:01:49 +00001886 struct socket_wq *wq;
1887
1888 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
1890 /* Do not wake up a writer until he can make "significant"
1891 * progress. --DaveM
1892 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001893 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00001894 wq = rcu_dereference(sk->sk_wq);
1895 if (wq_has_sleeper(wq))
1896 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07001897 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
1899 /* Should agree with poll, otherwise some programs break */
1900 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001901 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 }
1903
Eric Dumazet43815482010-04-29 11:01:49 +00001904 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905}
1906
1907static void sock_def_destruct(struct sock *sk)
1908{
Jesper Juhla51482b2005-11-08 09:41:34 -08001909 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910}
1911
1912void sk_send_sigurg(struct sock *sk)
1913{
1914 if (sk->sk_socket && sk->sk_socket->file)
1915 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001916 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917}
Eric Dumazet2a915252009-05-27 11:30:05 +00001918EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
1920void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1921 unsigned long expires)
1922{
1923 if (!mod_timer(timer, expires))
1924 sock_hold(sk);
1925}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926EXPORT_SYMBOL(sk_reset_timer);
1927
1928void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1929{
1930 if (timer_pending(timer) && del_timer(timer))
1931 __sock_put(sk);
1932}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933EXPORT_SYMBOL(sk_stop_timer);
1934
1935void sock_init_data(struct socket *sock, struct sock *sk)
1936{
1937 skb_queue_head_init(&sk->sk_receive_queue);
1938 skb_queue_head_init(&sk->sk_write_queue);
1939 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001940#ifdef CONFIG_NET_DMA
1941 skb_queue_head_init(&sk->sk_async_wait_queue);
1942#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
1944 sk->sk_send_head = NULL;
1945
1946 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001947
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 sk->sk_allocation = GFP_KERNEL;
1949 sk->sk_rcvbuf = sysctl_rmem_default;
1950 sk->sk_sndbuf = sysctl_wmem_default;
1951 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07001952 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
1954 sock_set_flag(sk, SOCK_ZAPPED);
1955
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001956 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00001958 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 sock->sk = sk;
1960 } else
Eric Dumazet43815482010-04-29 11:01:49 +00001961 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
Eric Dumazetb6c67122010-04-08 23:03:29 +00001963 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001965 lockdep_set_class_and_name(&sk->sk_callback_lock,
1966 af_callback_keys + sk->sk_family,
1967 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
1969 sk->sk_state_change = sock_def_wakeup;
1970 sk->sk_data_ready = sock_def_readable;
1971 sk->sk_write_space = sock_def_write_space;
1972 sk->sk_error_report = sock_def_error_report;
1973 sk->sk_destruct = sock_def_destruct;
1974
1975 sk->sk_sndmsg_page = NULL;
1976 sk->sk_sndmsg_off = 0;
1977
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001978 sk->sk_peer_pid = NULL;
1979 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 sk->sk_write_pending = 0;
1981 sk->sk_rcvlowat = 1;
1982 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1983 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1984
Eric Dumazetf37f0af2008-04-13 21:39:26 -07001985 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001987 /*
1988 * Before updating sk_refcnt, we must commit prior changes to memory
1989 * (Documentation/RCU/rculist_nulls.txt for details)
1990 */
1991 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08001993 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994}
Eric Dumazet2a915252009-05-27 11:30:05 +00001995EXPORT_SYMBOL(sock_init_data);
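
/*
 * Illustrative sketch, not taken from this file: a protocol's create
 * routine usually calls sock_init_data() right after sk_alloc(), then
 * overrides the default callbacks it cares about.  The family, proto
 * and my_*() names are hypothetical.
 *
 *	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &my_proto);
 *	if (!sk)
 *		return -ENOMEM;
 *	sock_init_data(sock, sk);
 *	sk->sk_destruct = my_destruct;
 */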
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
Harvey Harrisonb5606c22008-02-13 15:03:16 -08001997void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998{
1999 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002000 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002001 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002003 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002004 spin_unlock(&sk->sk_lock.slock);
2005 /*
2006 * The sk_lock has mutex_lock() semantics here:
2007 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002008 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002009 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002011EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002013void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002015 /*
2016 * The sk_lock has mutex_unlock() semantics:
2017 */
2018 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2019
2020 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 if (sk->sk_backlog.tail)
2022 __release_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002023 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002024 if (waitqueue_active(&sk->sk_lock.wq))
2025 wake_up(&sk->sk_lock.wq);
2026 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027}
2028EXPORT_SYMBOL(release_sock);
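
/*
 * Illustrative sketch, not taken from this file: process-context code
 * brackets its work on the socket with lock_sock()/release_sock(); any
 * packets that land on the backlog in between are processed by
 * __release_sock() before the lock is handed back.
 *
 *	lock_sock(sk);
 *	... modify socket state ...
 *	release_sock(sk);
 */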
2029
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002030/**
2031 * lock_sock_fast - fast version of lock_sock
2032 * @sk: socket
2033 *
2034 * This version should be used for very small sections, where the process won't block.
2035 * Returns false if the fast path is taken:
2036 *   sk_lock.slock locked, owned = 0, BH disabled
2037 * Returns true if the slow path is taken:
2038 *   sk_lock.slock unlocked, owned = 1, BH enabled
2039 */
2040bool lock_sock_fast(struct sock *sk)
2041{
2042 might_sleep();
2043 spin_lock_bh(&sk->sk_lock.slock);
2044
2045 if (!sk->sk_lock.owned)
2046 /*
2047 * Note: fast path taken; we return with sk_lock.slock held and BH disabled
2048 */
2049 return false;
2050
2051 __lock_sock(sk);
2052 sk->sk_lock.owned = 1;
2053 spin_unlock(&sk->sk_lock.slock);
2054 /*
2055 * The sk_lock has mutex_lock() semantics here:
2056 */
2057 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2058 local_bh_enable();
2059 return true;
2060}
2061EXPORT_SYMBOL(lock_sock_fast);
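
/*
 * Illustrative sketch, not taken from this file: lock_sock_fast() is meant
 * to be paired with unlock_sock_fast() (in include/net/sock.h), which does
 * the matching spin_unlock_bh() or release_sock() depending on which path
 * was taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short non-blocking section ...
 *	unlock_sock_fast(sk, slow);
 */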
2062
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002064{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002065 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002067 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002068 tv = ktime_to_timeval(sk->sk_stamp);
2069 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002071 if (tv.tv_sec == 0) {
2072 sk->sk_stamp = ktime_get_real();
2073 tv = ktime_to_timeval(sk->sk_stamp);
2074 }
2075 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002076}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077EXPORT_SYMBOL(sock_get_timestamp);
2078
Eric Dumazetae40eb12007-03-18 17:33:16 -07002079int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2080{
2081 struct timespec ts;
2082 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002083 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002084 ts = ktime_to_timespec(sk->sk_stamp);
2085 if (ts.tv_sec == -1)
2086 return -ENOENT;
2087 if (ts.tv_sec == 0) {
2088 sk->sk_stamp = ktime_get_real();
2089 ts = ktime_to_timespec(sk->sk_stamp);
2090 }
2091 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2092}
2093EXPORT_SYMBOL(sock_get_timestampns);
2094
Patrick Ohly20d49472009-02-12 05:03:38 +00002095void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002096{
Patrick Ohly20d49472009-02-12 05:03:38 +00002097 if (!sock_flag(sk, flag)) {
2098 sock_set_flag(sk, flag);
2099 /*
2100 * we just set one of the two flags which require net
2101 * time stamping, but time stamping might have been on
2102 * already because of the other one
2103 */
2104 if (!sock_flag(sk,
2105 flag == SOCK_TIMESTAMP ?
2106 SOCK_TIMESTAMPING_RX_SOFTWARE :
2107 SOCK_TIMESTAMP))
2108 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 }
2110}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111
2112/*
2113 * Get a socket option on a socket.
2114 *
2115 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2116 * asynchronous errors should be reported by getsockopt. We assume
2117 * this means if you specify SO_ERROR (otherwise what's the point of it).
2118 */
2119int sock_common_getsockopt(struct socket *sock, int level, int optname,
2120 char __user *optval, int __user *optlen)
2121{
2122 struct sock *sk = sock->sk;
2123
2124 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2125}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126EXPORT_SYMBOL(sock_common_getsockopt);
2127
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002128#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002129int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2130 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002131{
2132 struct sock *sk = sock->sk;
2133
Johannes Berg1e51f952007-03-06 13:44:06 -08002134 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002135 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2136 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002137 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2138}
2139EXPORT_SYMBOL(compat_sock_common_getsockopt);
2140#endif
2141
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2143 struct msghdr *msg, size_t size, int flags)
2144{
2145 struct sock *sk = sock->sk;
2146 int addr_len = 0;
2147 int err;
2148
2149 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2150 flags & ~MSG_DONTWAIT, &addr_len);
2151 if (err >= 0)
2152 msg->msg_namelen = addr_len;
2153 return err;
2154}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155EXPORT_SYMBOL(sock_common_recvmsg);
2156
2157/*
2158 * Set socket options on an inet socket.
2159 */
2160int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002161 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162{
2163 struct sock *sk = sock->sk;
2164
2165 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2166}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167EXPORT_SYMBOL(sock_common_setsockopt);
2168
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002169#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002170int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002171 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002172{
2173 struct sock *sk = sock->sk;
2174
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002175 if (sk->sk_prot->compat_setsockopt != NULL)
2176 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2177 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002178 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2179}
2180EXPORT_SYMBOL(compat_sock_common_setsockopt);
2181#endif
2182
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183void sk_common_release(struct sock *sk)
2184{
2185 if (sk->sk_prot->destroy)
2186 sk->sk_prot->destroy(sk);
2187
2188 /*
2189 * Observation: when sk_common_release() is called, processes have
2190 * no access to the socket, but the network stack still does.
2191 * Step one, detach it from networking:
2192 *
2193 * A. Remove from hash tables.
2194 */
2195
2196 sk->sk_prot->unhash(sk);
2197
2198 /*
2199 * At this point the socket cannot receive new packets, but it is possible
2200 * that some packets are in flight because some CPU is running the receiver
2201 * and did the hash table lookup before we unhashed the socket. They will
2202 * reach the receive queue and be purged by the socket destructor.
2203 *
2204 * Also, we still have packets pending on the receive queue and probably
2205 * our own packets waiting in device queues. sock_destroy will drain the
2206 * receive queue, but transmitted packets will delay socket destruction
2207 * until the last reference is released.
2208 */
2209
2210 sock_orphan(sk);
2211
2212 xfrm_sk_free_policy(sk);
2213
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002214 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 sock_put(sk);
2216}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217EXPORT_SYMBOL(sk_common_release);
2218
2219static DEFINE_RWLOCK(proto_list_lock);
2220static LIST_HEAD(proto_list);
2221
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002222#ifdef CONFIG_PROC_FS
2223#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002224struct prot_inuse {
2225 int val[PROTO_INUSE_NR];
2226};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002227
2228static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002229
2230#ifdef CONFIG_NET_NS
2231void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2232{
2233 int cpu = smp_processor_id();
2234 per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
2235}
2236EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2237
2238int sock_prot_inuse_get(struct net *net, struct proto *prot)
2239{
2240 int cpu, idx = prot->inuse_idx;
2241 int res = 0;
2242
2243 for_each_possible_cpu(cpu)
2244 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2245
2246 return res >= 0 ? res : 0;
2247}
2248EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2249
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002250static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002251{
2252 net->core.inuse = alloc_percpu(struct prot_inuse);
2253 return net->core.inuse ? 0 : -ENOMEM;
2254}
2255
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002256static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002257{
2258 free_percpu(net->core.inuse);
2259}
2260
2261static struct pernet_operations net_inuse_ops = {
2262 .init = sock_inuse_init_net,
2263 .exit = sock_inuse_exit_net,
2264};
2265
2266static __init int net_inuse_init(void)
2267{
2268 if (register_pernet_subsys(&net_inuse_ops))
2269 panic("Cannot initialize net inuse counters");
2270
2271 return 0;
2272}
2273
2274core_initcall(net_inuse_init);
2275#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002276static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2277
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002278void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002279{
2280 __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
2281}
2282EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2283
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002284int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002285{
2286 int cpu, idx = prot->inuse_idx;
2287 int res = 0;
2288
2289 for_each_possible_cpu(cpu)
2290 res += per_cpu(prot_inuse, cpu).val[idx];
2291
2292 return res >= 0 ? res : 0;
2293}
2294EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002295#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002296
2297static void assign_proto_idx(struct proto *prot)
2298{
2299 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2300
2301 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2302 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2303 return;
2304 }
2305
2306 set_bit(prot->inuse_idx, proto_inuse_idx);
2307}
2308
2309static void release_proto_idx(struct proto *prot)
2310{
2311 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2312 clear_bit(prot->inuse_idx, proto_inuse_idx);
2313}
2314#else
2315static inline void assign_proto_idx(struct proto *prot)
2316{
2317}
2318
2319static inline void release_proto_idx(struct proto *prot)
2320{
2321}
2322#endif
2323
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324int proto_register(struct proto *prot, int alloc_slab)
2325{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 if (alloc_slab) {
2327 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002328 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2329 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330
2331 if (prot->slab == NULL) {
2332 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2333 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002334 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002336
2337 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002338 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002339 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002340 goto out_free_sock_slab;
2341
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002342 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002343 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002344 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002345
2346 if (prot->rsk_prot->slab == NULL) {
2347 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2348 prot->name);
2349 goto out_free_request_sock_slab_name;
2350 }
2351 }
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002352
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002353 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002354 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002355
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002356 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002357 goto out_free_request_sock_slab;
2358
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002359 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002360 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002361 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002362 0,
2363 SLAB_HWCACHE_ALIGN |
2364 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002365 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002366 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002367 goto out_free_timewait_sock_slab_name;
2368 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 }
2370
Arnaldo Carvalho de Melo2a278052005-04-16 15:24:09 -07002371 write_lock(&proto_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002373 assign_proto_idx(prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 write_unlock(&proto_list_lock);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002375 return 0;
2376
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002377out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002378 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002379out_free_request_sock_slab:
2380 if (prot->rsk_prot && prot->rsk_prot->slab) {
2381 kmem_cache_destroy(prot->rsk_prot->slab);
2382 prot->rsk_prot->slab = NULL;
2383 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002384out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002385 if (prot->rsk_prot)
2386 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002387out_free_sock_slab:
2388 kmem_cache_destroy(prot->slab);
2389 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002390out:
2391 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393EXPORT_SYMBOL(proto_register);
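
/*
 * Illustrative sketch, not taken from this file: a protocol module
 * registers its struct proto once at init time and unregisters it on
 * exit.  The names are hypothetical; obj_size must cover the protocol's
 * private socket structure, which embeds struct sock first.
 *
 *	static struct proto my_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 *	...
 *	proto_unregister(&my_proto);
 */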
2394
2395void proto_unregister(struct proto *prot)
2396{
2397 write_lock(&proto_list_lock);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002398 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002399 list_del(&prot->node);
2400 write_unlock(&proto_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401
2402 if (prot->slab != NULL) {
2403 kmem_cache_destroy(prot->slab);
2404 prot->slab = NULL;
2405 }
2406
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002407 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002408 kmem_cache_destroy(prot->rsk_prot->slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002409 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002410 prot->rsk_prot->slab = NULL;
2411 }
2412
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002413 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002414 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002415 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002416 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002417 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419EXPORT_SYMBOL(proto_unregister);
2420
2421#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002423 __acquires(proto_list_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424{
2425 read_lock(&proto_list_lock);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002426 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427}
2428
2429static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2430{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002431 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432}
2433
2434static void proto_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002435 __releases(proto_list_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436{
2437 read_unlock(&proto_list_lock);
2438}
2439
2440static char proto_method_implemented(const void *method)
2441{
2442 return method == NULL ? 'n' : 'y';
2443}
2444
2445static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2446{
2447 seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
2448 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2449 proto->name,
2450 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002451 sock_prot_inuse_get(seq_file_net(seq), proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
2453 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2454 proto->max_header,
2455 proto->slab == NULL ? "no" : "yes",
2456 module_name(proto->owner),
2457 proto_method_implemented(proto->close),
2458 proto_method_implemented(proto->connect),
2459 proto_method_implemented(proto->disconnect),
2460 proto_method_implemented(proto->accept),
2461 proto_method_implemented(proto->ioctl),
2462 proto_method_implemented(proto->init),
2463 proto_method_implemented(proto->destroy),
2464 proto_method_implemented(proto->shutdown),
2465 proto_method_implemented(proto->setsockopt),
2466 proto_method_implemented(proto->getsockopt),
2467 proto_method_implemented(proto->sendmsg),
2468 proto_method_implemented(proto->recvmsg),
2469 proto_method_implemented(proto->sendpage),
2470 proto_method_implemented(proto->bind),
2471 proto_method_implemented(proto->backlog_rcv),
2472 proto_method_implemented(proto->hash),
2473 proto_method_implemented(proto->unhash),
2474 proto_method_implemented(proto->get_port),
2475 proto_method_implemented(proto->enter_memory_pressure));
2476}
2477
2478static int proto_seq_show(struct seq_file *seq, void *v)
2479{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002480 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2482 "protocol",
2483 "size",
2484 "sockets",
2485 "memory",
2486 "press",
2487 "maxhdr",
2488 "slab",
2489 "module",
2490 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2491 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002492 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 return 0;
2494}
2495
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002496static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 .start = proto_seq_start,
2498 .next = proto_seq_next,
2499 .stop = proto_seq_stop,
2500 .show = proto_seq_show,
2501};
2502
2503static int proto_seq_open(struct inode *inode, struct file *file)
2504{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002505 return seq_open_net(inode, file, &proto_seq_ops,
2506 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507}
2508
Arjan van de Ven9a321442007-02-12 00:55:35 -08002509static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 .owner = THIS_MODULE,
2511 .open = proto_seq_open,
2512 .read = seq_read,
2513 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002514 .release = seq_release_net,
2515};
2516
2517static __net_init int proto_init_net(struct net *net)
2518{
2519 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2520 return -ENOMEM;
2521
2522 return 0;
2523}
2524
2525static __net_exit void proto_exit_net(struct net *net)
2526{
2527 proc_net_remove(net, "protocols");
2528}
2529
2530
2531static __net_initdata struct pernet_operations proto_net_ops = {
2532 .init = proto_init_net,
2533 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534};
2535
2536static int __init proto_init(void)
2537{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002538 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539}
2540
2541subsys_initcall(proto_init);
2542
2543#endif /* PROC_FS */