1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Generic socket support routines. Memory allocators, socket lock/release
7  *              handler for protocols to use and generic option handler.
8  *
9  *
10  * Authors:     Ross Biro
11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *              Florian La Roche, <flla@stud.uni-sb.de>
13  *              Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *              Alan Cox        :       Numerous verify_area() problems
17  *              Alan Cox        :       Connecting on a connecting socket
18  *                                      now returns an error for tcp.
19  *              Alan Cox        :       sock->protocol is set correctly.
20  *                                      and is not sometimes left as 0.
21  *              Alan Cox        :       connect handles icmp errors on a
22  *                                      connect properly. Unfortunately there
23  *                                      is a restart syscall nasty there. I
24  *                                      can't match BSD without hacking the C
25  *                                      library. Ideas urgently sought!
26  *              Alan Cox        :       Disallow bind() to addresses that are
27  *                                      not ours - especially broadcast ones!!
28  *              Alan Cox        :       Socket 1024 _IS_ ok for users. (fencepost)
29  *              Alan Cox        :       sock_wfree/sock_rfree don't destroy sockets,
30  *                                      instead they leave that for the DESTROY timer.
31  *              Alan Cox        :       Clean up error flag in accept
32  *              Alan Cox        :       TCP ack handling is buggy, the DESTROY timer
33  *                                      was buggy. Put a remove_sock() in the handler
34  *                                      for memory when we hit 0. Also altered the timer
35  *                                      code. The ACK stuff can wait and needs major
36  *                                      TCP layer surgery.
37  *              Alan Cox        :       Fixed TCP ack bug, removed remove sock
38  *                                      and fixed timer/inet_bh race.
39  *              Alan Cox        :       Added zapped flag for TCP
40  *              Alan Cox        :       Move kfree_skb into skbuff.c and tidied up surplus code
41  *              Alan Cox        :       for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *              Alan Cox        :       kfree_s calls now are kfree_skbmem so we can track skb resources
43  *              Alan Cox        :       Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *              Alan Cox        :       Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *              Rick Sladkey    :       Relaxed UDP rules for matching packets.
46  *              C.E.Hawkins     :       IFF_PROMISC/SIOCGHWADDR support
47  *      Pauline Middelink       :       identd support
48  *              Alan Cox        :       Fixed connect() taking signals I think.
49  *              Alan Cox        :       SO_LINGER supported
50  *              Alan Cox        :       Error reporting fixes
51  *              Anonymous       :       inet_create tidied up (sk->reuse setting)
52  *              Alan Cox        :       inet sockets don't set sk->type!
53  *              Alan Cox        :       Split socket option code
54  *              Alan Cox        :       Callbacks
55  *              Alan Cox        :       Nagle flag for Charles & Johannes stuff
56  *              Alex            :       Removed restriction on inet fioctl
57  *              Alan Cox        :       Splitting INET from NET core
58  *              Alan Cox        :       Fixed bogus SO_TYPE handling in getsockopt()
59  *              Adam Caldwell   :       Missing return in SO_DONTROUTE/SO_DEBUG code
60  *              Alan Cox        :       Split IP from generic code
61  *              Alan Cox        :       New kfree_skbmem()
62  *              Alan Cox        :       Make SO_DEBUG superuser only.
63  *              Alan Cox        :       Allow anyone to clear SO_DEBUG
64  *                                      (compatibility fix)
65  *              Alan Cox        :       Added optimistic memory grabbing for AF_UNIX throughput.
66  *              Alan Cox        :       Allocator for a socket is settable.
67  *              Alan Cox        :       SO_ERROR includes soft errors.
68  *              Alan Cox        :       Allow NULL arguments on some SO_ opts
69  *              Alan Cox        :       Generic socket allocation to make hooks
70  *                                      easier (suggested by Craig Metz).
71  *              Michael Pall    :       SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *              Jay Schulist    :       Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *              Andi Kleen      :       Add sock_kmalloc()/sock_kfree_s()
79  *              Andi Kleen      :       Fix write_space callback
80  *              Chris Evans     :       Security fixes - signedness again
81  *              Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  *
85  *
86  *              This program is free software; you can redistribute it and/or
87  *              modify it under the terms of the GNU General Public License
88  *              as published by the Free Software Foundation; either version
89  *              2 of the License, or (at your option) any later version.
90  *
91  * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
92  */
93
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/types.h>
97 #include <linux/socket.h>
98 #include <linux/in.h>
99 #include <linux/kernel.h>
100 #include <linux/module.h>
101 #include <linux/proc_fs.h>
102 #include <linux/seq_file.h>
103 #include <linux/sched.h>
104 #include <linux/timer.h>
105 #include <linux/string.h>
106 #include <linux/sockios.h>
107 #include <linux/net.h>
108 #include <linux/mm.h>
109 #include <linux/slab.h>
110 #include <linux/interrupt.h>
111 #include <linux/poll.h>
112 #include <linux/tcp.h>
113 #include <linux/init.h>
114 #include <linux/highmem.h>
115 #include <linux/user_namespace.h>
116 #include <linux/static_key.h>
117 #include <linux/memcontrol.h>
118
119 #include <asm/uaccess.h>
120
121 #include <linux/netdevice.h>
122 #include <net/protocol.h>
123 #include <linux/skbuff.h>
124 #include <net/net_namespace.h>
125 #include <net/request_sock.h>
126 #include <net/sock.h>
127 #include <linux/net_tstamp.h>
128 #include <net/xfrm.h>
129 #include <linux/ipsec.h>
130 #include <net/cls_cgroup.h>
131 #include <net/netprio_cgroup.h>
132
133 #include <linux/filter.h>
134
135 #include <trace/events/sock.h>
136
137 #ifdef CONFIG_INET
138 #include <net/tcp.h>
139 #endif
140
141 #include <linux/eventpoll.h>
142
143 static DEFINE_MUTEX(proto_list_mutex);
144 static LIST_HEAD(proto_list);
145
146 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
147 int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
148 {
149         struct proto *proto;
150         int ret = 0;
151
152         mutex_lock(&proto_list_mutex);
153         list_for_each_entry(proto, &proto_list, node) {
154                 if (proto->init_cgroup) {
155                         ret = proto->init_cgroup(cgrp, ss);
156                         if (ret)
157                                 goto out;
158                 }
159         }
160
161         mutex_unlock(&proto_list_mutex);
162         return ret;
163 out:
164         list_for_each_entry_continue_reverse(proto, &proto_list, node)
165                 if (proto->destroy_cgroup)
166                         proto->destroy_cgroup(cgrp);
167         mutex_unlock(&proto_list_mutex);
168         return ret;
169 }
170
171 void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
172 {
173         struct proto *proto;
174
175         mutex_lock(&proto_list_mutex);
176         list_for_each_entry_reverse(proto, &proto_list, node)
177                 if (proto->destroy_cgroup)
178                         proto->destroy_cgroup(cgrp);
179         mutex_unlock(&proto_list_mutex);
180 }
181 #endif
182
183 /*
184  * Each address family might have different locking rules, so we have
185  * one slock key per address family:
186  */
187 static struct lock_class_key af_family_keys[AF_MAX];
188 static struct lock_class_key af_family_slock_keys[AF_MAX];
189
190 struct static_key memcg_socket_limit_enabled;
191 EXPORT_SYMBOL(memcg_socket_limit_enabled);
192
193 /*
194  * Make lock validator output more readable. (We pre-construct these
195  * strings at build time, so that runtime initialization of socket
196  * locks is fast):
197  */
198 static const char *const af_family_key_strings[AF_MAX+1] = {
199   "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
200   "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
201   "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
202   "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
203   "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
204   "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
205   "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
206   "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
207   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
208   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
209   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
210   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
211   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
212   "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
213 };
214 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
215   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
216   "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
217   "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
218   "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
219   "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
220   "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
221   "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
222   "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
223   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
224   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
225   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
226   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
227   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
228   "slock-AF_NFC"   , "slock-AF_MAX"
229 };
230 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
231   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
232   "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
233   "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
234   "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
235   "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
236   "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
237   "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
238   "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
239   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
240   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
241   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
242   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
243   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
244   "clock-AF_NFC"   , "clock-AF_MAX"
245 };
246
247 /*
248  * sk_callback_lock locking rules are per-address-family,
249  * so split the lock classes by using a per-AF key:
250  */
251 static struct lock_class_key af_callback_keys[AF_MAX];
252
253 /* Take into consideration the size of the struct sk_buff overhead in the
254  * determination of these values, since that is non-constant across
255  * platforms.  This makes socket queueing behavior and performance
256  * not depend upon such differences.
257  */
258 #define _SK_MEM_PACKETS         256
259 #define _SK_MEM_OVERHEAD        SKB_TRUESIZE(256)
260 #define SK_WMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
261 #define SK_RMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
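
/*
 * Illustrative arithmetic (not part of the original source): each of the
 * 256 packets is charged at SKB_TRUESIZE(256), i.e. 256 bytes of payload
 * plus the aligned struct sk_buff and struct skb_shared_info overhead.
 * On a typical 64-bit build that is on the order of 1 KiB per packet, so
 * SK_WMEM_MAX/SK_RMEM_MAX come out somewhere around 200 KiB; the exact
 * figure varies with architecture and config, which is exactly why the
 * limits are expressed in packets rather than bytes.
 */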
262
263 /* Run time adjustable parameters. */
264 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
265 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
266 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
267 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
268
269 /* Maximal space eaten by iovec or ancillary data plus some space */
270 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
271 EXPORT_SYMBOL(sysctl_optmem_max);
272
273 #if defined(CONFIG_CGROUPS)
274 #if !defined(CONFIG_NET_CLS_CGROUP)
275 int net_cls_subsys_id = -1;
276 EXPORT_SYMBOL_GPL(net_cls_subsys_id);
277 #endif
278 #if !defined(CONFIG_NETPRIO_CGROUP)
279 int net_prio_subsys_id = -1;
280 EXPORT_SYMBOL_GPL(net_prio_subsys_id);
281 #endif
282 #endif
283
284 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
285 {
286         struct timeval tv;
287
288         if (optlen < sizeof(tv))
289                 return -EINVAL;
290         if (copy_from_user(&tv, optval, sizeof(tv)))
291                 return -EFAULT;
292         if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
293                 return -EDOM;
294
295         if (tv.tv_sec < 0) {
296                 static int warned __read_mostly;
297
298                 *timeo_p = 0;
299                 if (warned < 10 && net_ratelimit()) {
300                         warned++;
301                         printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
302                                "tries to set negative timeout\n",
303                                 current->comm, task_pid_nr(current));
304                 }
305                 return 0;
306         }
307         *timeo_p = MAX_SCHEDULE_TIMEOUT;
308         if (tv.tv_sec == 0 && tv.tv_usec == 0)
309                 return 0;
310         if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
311                 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
312         return 0;
313 }
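
/*
 * Hypothetical userspace view of the conversion above (illustrative only,
 * not part of the original file); "sock_fd" is assumed to be an open
 * socket descriptor:
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	setsockopt(sock_fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * A negative tv_sec is clamped to a zero timeout (with a rate-limited
 * warning), tv_sec == 0 && tv_usec == 0 selects MAX_SCHEDULE_TIMEOUT
 * (block forever), and anything else is rounded up to whole jiffies.
 */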
314
315 static void sock_warn_obsolete_bsdism(const char *name)
316 {
317         static int warned;
318         static char warncomm[TASK_COMM_LEN];
319         if (strcmp(warncomm, current->comm) && warned < 5) {
320                 strcpy(warncomm,  current->comm);
321                 printk(KERN_WARNING "process `%s' is using obsolete "
322                        "%s SO_BSDCOMPAT\n", warncomm, name);
323                 warned++;
324         }
325 }
326
327 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
328
329 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
330 {
331         if (sk->sk_flags & flags) {
332                 sk->sk_flags &= ~flags;
333                 if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
334                         net_disable_timestamp();
335         }
336 }
337
338
339 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
340 {
341         int err;
342         int skb_len;
343         unsigned long flags;
344         struct sk_buff_head *list = &sk->sk_receive_queue;
345
346         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
347                 atomic_inc(&sk->sk_drops);
348                 trace_sock_rcvqueue_full(sk, skb);
349                 return -ENOMEM;
350         }
351
352         err = sk_filter(sk, skb);
353         if (err)
354                 return err;
355
356         if (!sk_rmem_schedule(sk, skb->truesize)) {
357                 atomic_inc(&sk->sk_drops);
358                 return -ENOBUFS;
359         }
360
361         skb->dev = NULL;
362         skb_set_owner_r(skb, sk);
363
364         /* Cache the SKB length before we tack it onto the receive
365          * queue.  Once it is added it no longer belongs to us and
366          * may be freed by other threads of control pulling packets
367          * from the queue.
368          */
369         skb_len = skb->len;
370
371         /* We escape from the RCU-protected region; make sure we don't leak
372          * an unrefcounted dst
373          */
374         skb_dst_force(skb);
375
376         spin_lock_irqsave(&list->lock, flags);
377         skb->dropcount = atomic_read(&sk->sk_drops);
378         __skb_queue_tail(list, skb);
379         spin_unlock_irqrestore(&list->lock, flags);
380
381         if (!sock_flag(sk, SOCK_DEAD))
382                 sk->sk_data_ready(sk, skb_len);
383         return 0;
384 }
385 EXPORT_SYMBOL(sock_queue_rcv_skb);
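
/*
 * Sketch of how a protocol receive path typically hands a packet to the
 * socket layer with the helper above (hypothetical caller, shown for
 * context only): on error the skb still belongs to the caller and must
 * be freed.
 *
 *	err = sock_queue_rcv_skb(sk, skb);
 *	if (err < 0)
 *		kfree_skb(skb);
 */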
386
387 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
388 {
389         int rc = NET_RX_SUCCESS;
390
391         if (sk_filter(sk, skb))
392                 goto discard_and_relse;
393
394         skb->dev = NULL;
395
396         if (sk_rcvqueues_full(sk, skb)) {
397                 atomic_inc(&sk->sk_drops);
398                 goto discard_and_relse;
399         }
400         if (nested)
401                 bh_lock_sock_nested(sk);
402         else
403                 bh_lock_sock(sk);
404         if (!sock_owned_by_user(sk)) {
405                 /*
406                  * trylock + unlock semantics:
407                  */
408                 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
409
410                 rc = sk_backlog_rcv(sk, skb);
411
412                 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
413         } else if (sk_add_backlog(sk, skb)) {
414                 bh_unlock_sock(sk);
415                 atomic_inc(&sk->sk_drops);
416                 goto discard_and_relse;
417         }
418
419         bh_unlock_sock(sk);
420 out:
421         sock_put(sk);
422         return rc;
423 discard_and_relse:
424         kfree_skb(skb);
425         goto out;
426 }
427 EXPORT_SYMBOL(sk_receive_skb);
428
429 void sk_reset_txq(struct sock *sk)
430 {
431         sk_tx_queue_clear(sk);
432 }
433 EXPORT_SYMBOL(sk_reset_txq);
434
435 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
436 {
437         struct dst_entry *dst = __sk_dst_get(sk);
438
439         if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
440                 sk_tx_queue_clear(sk);
441                 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
442                 dst_release(dst);
443                 return NULL;
444         }
445
446         return dst;
447 }
448 EXPORT_SYMBOL(__sk_dst_check);
449
450 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
451 {
452         struct dst_entry *dst = sk_dst_get(sk);
453
454         if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
455                 sk_dst_reset(sk);
456                 dst_release(dst);
457                 return NULL;
458         }
459
460         return dst;
461 }
462 EXPORT_SYMBOL(sk_dst_check);
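
/*
 * Typical (hypothetical) caller pattern for the dst validation helpers
 * above, shown for context only: reuse the cached route while it is still
 * valid and fall back to a fresh lookup when the check fails.
 *
 *	struct dst_entry *dst = sk_dst_check(sk, 0);
 *
 *	if (!dst)
 *		dst = proto_reroute(sk);	(proto_reroute() is hypothetical)
 */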
463
464 static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
465 {
466         int ret = -ENOPROTOOPT;
467 #ifdef CONFIG_NETDEVICES
468         struct net *net = sock_net(sk);
469         char devname[IFNAMSIZ];
470         int index;
471
472         /* Sorry... */
473         ret = -EPERM;
474         if (!capable(CAP_NET_RAW))
475                 goto out;
476
477         ret = -EINVAL;
478         if (optlen < 0)
479                 goto out;
480
481         /* Bind this socket to a particular device like "eth0",
482          * as specified in the passed interface name. If the
483          * name is "" or the option length is zero the socket
484          * is not bound.
485          */
486         if (optlen > IFNAMSIZ - 1)
487                 optlen = IFNAMSIZ - 1;
488         memset(devname, 0, sizeof(devname));
489
490         ret = -EFAULT;
491         if (copy_from_user(devname, optval, optlen))
492                 goto out;
493
494         index = 0;
495         if (devname[0] != '\0') {
496                 struct net_device *dev;
497
498                 rcu_read_lock();
499                 dev = dev_get_by_name_rcu(net, devname);
500                 if (dev)
501                         index = dev->ifindex;
502                 rcu_read_unlock();
503                 ret = -ENODEV;
504                 if (!dev)
505                         goto out;
506         }
507
508         lock_sock(sk);
509         sk->sk_bound_dev_if = index;
510         sk_dst_reset(sk);
511         release_sock(sk);
512
513         ret = 0;
514
515 out:
516 #endif
517
518         return ret;
519 }
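
/*
 * Hypothetical userspace counterpart of the helper above (illustrative
 * only): bind a socket "sock_fd" to eth0 and later unbind it again by
 * passing an empty name.  CAP_NET_RAW is required in both cases.
 *
 *	setsockopt(sock_fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", 5);
 *	setsockopt(sock_fd, SOL_SOCKET, SO_BINDTODEVICE, "", 0);
 */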
520
521 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
522 {
523         if (valbool)
524                 sock_set_flag(sk, bit);
525         else
526                 sock_reset_flag(sk, bit);
527 }
528
529 /*
530  *      This is meant for all protocols to use and covers goings on
531  *      at the socket level. Everything here is generic.
532  */
533
534 int sock_setsockopt(struct socket *sock, int level, int optname,
535                     char __user *optval, unsigned int optlen)
536 {
537         struct sock *sk = sock->sk;
538         int val;
539         int valbool;
540         struct linger ling;
541         int ret = 0;
542
543         /*
544          *      Options without arguments
545          */
546
547         if (optname == SO_BINDTODEVICE)
548                 return sock_bindtodevice(sk, optval, optlen);
549
550         if (optlen < sizeof(int))
551                 return -EINVAL;
552
553         if (get_user(val, (int __user *)optval))
554                 return -EFAULT;
555
556         valbool = val ? 1 : 0;
557
558         lock_sock(sk);
559
560         switch (optname) {
561         case SO_DEBUG:
562                 if (val && !capable(CAP_NET_ADMIN))
563                         ret = -EACCES;
564                 else
565                         sock_valbool_flag(sk, SOCK_DBG, valbool);
566                 break;
567         case SO_REUSEADDR:
568                 sk->sk_reuse = valbool;
569                 break;
570         case SO_TYPE:
571         case SO_PROTOCOL:
572         case SO_DOMAIN:
573         case SO_ERROR:
574                 ret = -ENOPROTOOPT;
575                 break;
576         case SO_DONTROUTE:
577                 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
578                 break;
579         case SO_BROADCAST:
580                 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
581                 break;
582         case SO_SNDBUF:
583                 /* Don't error on this; BSD doesn't, and if you think
584                    about it this is right. Otherwise apps have to
585                    play 'guess the biggest size' games. RCVBUF/SNDBUF
586                    are treated in BSD as hints */
587
588                 if (val > sysctl_wmem_max)
589                         val = sysctl_wmem_max;
590 set_sndbuf:
591                 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
592                 if ((val * 2) < SOCK_MIN_SNDBUF)
593                         sk->sk_sndbuf = SOCK_MIN_SNDBUF;
594                 else
595                         sk->sk_sndbuf = val * 2;
596
597                 /*
598                  *      Wake up sending tasks if we
599                  *      upped the value.
600                  */
601                 sk->sk_write_space(sk);
602                 break;
603
604         case SO_SNDBUFFORCE:
605                 if (!capable(CAP_NET_ADMIN)) {
606                         ret = -EPERM;
607                         break;
608                 }
609                 goto set_sndbuf;
610
611         case SO_RCVBUF:
612                 /* Don't error on this; BSD doesn't, and if you think
613                    about it this is right. Otherwise apps have to
614                    play 'guess the biggest size' games. RCVBUF/SNDBUF
615                    are treated in BSD as hints */
616
617                 if (val > sysctl_rmem_max)
618                         val = sysctl_rmem_max;
619 set_rcvbuf:
620                 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
621                 /*
622                  * We double it on the way in to account for
623                  * "struct sk_buff" etc. overhead.   Applications
624                  * assume that the SO_RCVBUF setting they make will
625                  * allow that much actual data to be received on that
626                  * socket.
627                  *
628                  * Applications are unaware that "struct sk_buff" and
629                  * other overheads allocate from the receive buffer
630                  * during socket buffer allocation.
631                  *
632                  * And after considering the possible alternatives,
633                  * returning the value we actually used in getsockopt
634                  * is the most desirable behavior.
635                  */
636                 if ((val * 2) < SOCK_MIN_RCVBUF)
637                         sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
638                 else
639                         sk->sk_rcvbuf = val * 2;
640                 break;
641
642         case SO_RCVBUFFORCE:
643                 if (!capable(CAP_NET_ADMIN)) {
644                         ret = -EPERM;
645                         break;
646                 }
647                 goto set_rcvbuf;
648
649         case SO_KEEPALIVE:
650 #ifdef CONFIG_INET
651                 if (sk->sk_protocol == IPPROTO_TCP &&
652                     sk->sk_type == SOCK_STREAM)
653                         tcp_set_keepalive(sk, valbool);
654 #endif
655                 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
656                 break;
657
658         case SO_OOBINLINE:
659                 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
660                 break;
661
662         case SO_NO_CHECK:
663                 sk->sk_no_check = valbool;
664                 break;
665
666         case SO_PRIORITY:
667                 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
668                         sk->sk_priority = val;
669                 else
670                         ret = -EPERM;
671                 break;
672
673         case SO_LINGER:
674                 if (optlen < sizeof(ling)) {
675                         ret = -EINVAL;  /* 1003.1g */
676                         break;
677                 }
678                 if (copy_from_user(&ling, optval, sizeof(ling))) {
679                         ret = -EFAULT;
680                         break;
681                 }
682                 if (!ling.l_onoff)
683                         sock_reset_flag(sk, SOCK_LINGER);
684                 else {
685 #if (BITS_PER_LONG == 32)
686                         if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
687                                 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
688                         else
689 #endif
690                                 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
691                         sock_set_flag(sk, SOCK_LINGER);
692                 }
693                 break;
694
695         case SO_BSDCOMPAT:
696                 sock_warn_obsolete_bsdism("setsockopt");
697                 break;
698
699         case SO_PASSCRED:
700                 if (valbool)
701                         set_bit(SOCK_PASSCRED, &sock->flags);
702                 else
703                         clear_bit(SOCK_PASSCRED, &sock->flags);
704                 break;
705
706         case SO_TIMESTAMP:
707         case SO_TIMESTAMPNS:
708                 if (valbool)  {
709                         if (optname == SO_TIMESTAMP)
710                                 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
711                         else
712                                 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
713                         sock_set_flag(sk, SOCK_RCVTSTAMP);
714                         sock_enable_timestamp(sk, SOCK_TIMESTAMP);
715                 } else {
716                         sock_reset_flag(sk, SOCK_RCVTSTAMP);
717                         sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
718                 }
719                 break;
720
721         case SO_TIMESTAMPING:
722                 if (val & ~SOF_TIMESTAMPING_MASK) {
723                         ret = -EINVAL;
724                         break;
725                 }
726                 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
727                                   val & SOF_TIMESTAMPING_TX_HARDWARE);
728                 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
729                                   val & SOF_TIMESTAMPING_TX_SOFTWARE);
730                 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
731                                   val & SOF_TIMESTAMPING_RX_HARDWARE);
732                 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
733                         sock_enable_timestamp(sk,
734                                               SOCK_TIMESTAMPING_RX_SOFTWARE);
735                 else
736                         sock_disable_timestamp(sk,
737                                                (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
738                 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
739                                   val & SOF_TIMESTAMPING_SOFTWARE);
740                 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
741                                   val & SOF_TIMESTAMPING_SYS_HARDWARE);
742                 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
743                                   val & SOF_TIMESTAMPING_RAW_HARDWARE);
744                 break;
745
746         case SO_RCVLOWAT:
747                 if (val < 0)
748                         val = INT_MAX;
749                 sk->sk_rcvlowat = val ? : 1;
750                 break;
751
752         case SO_RCVTIMEO:
753                 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
754                 break;
755
756         case SO_SNDTIMEO:
757                 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
758                 break;
759
760         case SO_ATTACH_FILTER:
761                 ret = -EINVAL;
762                 if (optlen == sizeof(struct sock_fprog)) {
763                         struct sock_fprog fprog;
764
765                         ret = -EFAULT;
766                         if (copy_from_user(&fprog, optval, sizeof(fprog)))
767                                 break;
768
769                         ret = sk_attach_filter(&fprog, sk);
770                 }
771                 break;
772
773         case SO_DETACH_FILTER:
774                 ret = sk_detach_filter(sk);
775                 break;
776
777         case SO_PASSSEC:
778                 if (valbool)
779                         set_bit(SOCK_PASSSEC, &sock->flags);
780                 else
781                         clear_bit(SOCK_PASSSEC, &sock->flags);
782                 break;
783         case SO_MARK:
784                 if (!capable(CAP_NET_ADMIN))
785                         ret = -EPERM;
786                 else
787                         sk->sk_mark = val;
788                 break;
789
790                 /* We implement the SO_SNDLOWAT etc to
791                    not be settable (1003.1g 5.3) */
792         case SO_RXQ_OVFL:
793                 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
794                 break;
795
796         case SO_WIFI_STATUS:
797                 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
798                 break;
799
800         case SO_PEEK_OFF:
801                 if (sock->ops->set_peek_off)
802                         sock->ops->set_peek_off(sk, val);
803                 else
804                         ret = -EOPNOTSUPP;
805                 break;
806
807         case SO_NOFCS:
808                 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
809                 break;
810
811         default:
812                 ret = -ENOPROTOOPT;
813                 break;
814         }
815         release_sock(sk);
816         return ret;
817 }
818 EXPORT_SYMBOL(sock_setsockopt);
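
/*
 * Worked example (not from the original source) of the SO_RCVBUF/SO_SNDBUF
 * doubling above, as seen from a hypothetical application and assuming
 * the requested size fits under sysctl_rmem_max:
 *
 *	int val = 65536;
 *	socklen_t len = sizeof(val);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);	now val == 131072
 *
 * The kernel stores twice the requested value so that struct sk_buff and
 * other bookkeeping overhead does not eat into the payload space the
 * application asked for, and getsockopt reports the doubled value that is
 * actually in use.
 */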
819
820
821 void cred_to_ucred(struct pid *pid, const struct cred *cred,
822                    struct ucred *ucred, bool use_effective)
823 {
824         ucred->pid = pid_vnr(pid);
825         ucred->uid = ucred->gid = -1;
826         if (cred) {
827                 struct user_namespace *current_ns = current_user_ns();
828
829                 if (use_effective) {
830                         ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
831                         ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
832                 } else {
833                         ucred->uid = user_ns_map_uid(current_ns, cred, cred->uid);
834                         ucred->gid = user_ns_map_gid(current_ns, cred, cred->gid);
835                 }
836         }
837 }
838 EXPORT_SYMBOL_GPL(cred_to_ucred);
839
840 int sock_getsockopt(struct socket *sock, int level, int optname,
841                     char __user *optval, int __user *optlen)
842 {
843         struct sock *sk = sock->sk;
844
845         union {
846                 int val;
847                 struct linger ling;
848                 struct timeval tm;
849         } v;
850
851         int lv = sizeof(int);
852         int len;
853
854         if (get_user(len, optlen))
855                 return -EFAULT;
856         if (len < 0)
857                 return -EINVAL;
858
859         memset(&v, 0, sizeof(v));
860
861         switch (optname) {
862         case SO_DEBUG:
863                 v.val = sock_flag(sk, SOCK_DBG);
864                 break;
865
866         case SO_DONTROUTE:
867                 v.val = sock_flag(sk, SOCK_LOCALROUTE);
868                 break;
869
870         case SO_BROADCAST:
871                 v.val = !!sock_flag(sk, SOCK_BROADCAST);
872                 break;
873
874         case SO_SNDBUF:
875                 v.val = sk->sk_sndbuf;
876                 break;
877
878         case SO_RCVBUF:
879                 v.val = sk->sk_rcvbuf;
880                 break;
881
882         case SO_REUSEADDR:
883                 v.val = sk->sk_reuse;
884                 break;
885
886         case SO_KEEPALIVE:
887                 v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
888                 break;
889
890         case SO_TYPE:
891                 v.val = sk->sk_type;
892                 break;
893
894         case SO_PROTOCOL:
895                 v.val = sk->sk_protocol;
896                 break;
897
898         case SO_DOMAIN:
899                 v.val = sk->sk_family;
900                 break;
901
902         case SO_ERROR:
903                 v.val = -sock_error(sk);
904                 if (v.val == 0)
905                         v.val = xchg(&sk->sk_err_soft, 0);
906                 break;
907
908         case SO_OOBINLINE:
909                 v.val = !!sock_flag(sk, SOCK_URGINLINE);
910                 break;
911
912         case SO_NO_CHECK:
913                 v.val = sk->sk_no_check;
914                 break;
915
916         case SO_PRIORITY:
917                 v.val = sk->sk_priority;
918                 break;
919
920         case SO_LINGER:
921                 lv              = sizeof(v.ling);
922                 v.ling.l_onoff  = !!sock_flag(sk, SOCK_LINGER);
923                 v.ling.l_linger = sk->sk_lingertime / HZ;
924                 break;
925
926         case SO_BSDCOMPAT:
927                 sock_warn_obsolete_bsdism("getsockopt");
928                 break;
929
930         case SO_TIMESTAMP:
931                 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
932                                 !sock_flag(sk, SOCK_RCVTSTAMPNS);
933                 break;
934
935         case SO_TIMESTAMPNS:
936                 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
937                 break;
938
939         case SO_TIMESTAMPING:
940                 v.val = 0;
941                 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
942                         v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
943                 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
944                         v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
945                 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
946                         v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
947                 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
948                         v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
949                 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
950                         v.val |= SOF_TIMESTAMPING_SOFTWARE;
951                 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
952                         v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
953                 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
954                         v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
955                 break;
956
957         case SO_RCVTIMEO:
958                 lv = sizeof(struct timeval);
959                 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
960                         v.tm.tv_sec = 0;
961                         v.tm.tv_usec = 0;
962                 } else {
963                         v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
964                         v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
965                 }
966                 break;
967
968         case SO_SNDTIMEO:
969                 lv = sizeof(struct timeval);
970                 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
971                         v.tm.tv_sec = 0;
972                         v.tm.tv_usec = 0;
973                 } else {
974                         v.tm.tv_sec = sk->sk_sndtimeo / HZ;
975                         v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
976                 }
977                 break;
978
979         case SO_RCVLOWAT:
980                 v.val = sk->sk_rcvlowat;
981                 break;
982
983         case SO_SNDLOWAT:
984                 v.val = 1;
985                 break;
986
987         case SO_PASSCRED:
988                 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
989                 break;
990
991         case SO_PEERCRED:
992         {
993                 struct ucred peercred;
994                 if (len > sizeof(peercred))
995                         len = sizeof(peercred);
996                 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred,
997                               &peercred, true);
998                 if (copy_to_user(optval, &peercred, len))
999                         return -EFAULT;
1000                 goto lenout;
1001         }
1002
1003         case SO_PEERNAME:
1004         {
1005                 char address[128];
1006
1007                 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1008                         return -ENOTCONN;
1009                 if (lv < len)
1010                         return -EINVAL;
1011                 if (copy_to_user(optval, address, len))
1012                         return -EFAULT;
1013                 goto lenout;
1014         }
1015
1016         /* Dubious BSD thing... Probably nobody even uses it, but
1017          * the UNIX standard wants it for whatever reason... -DaveM
1018          */
1019         case SO_ACCEPTCONN:
1020                 v.val = sk->sk_state == TCP_LISTEN;
1021                 break;
1022
1023         case SO_PASSSEC:
1024                 v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
1025                 break;
1026
1027         case SO_PEERSEC:
1028                 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1029
1030         case SO_MARK:
1031                 v.val = sk->sk_mark;
1032                 break;
1033
1034         case SO_RXQ_OVFL:
1035                 v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
1036                 break;
1037
1038         case SO_WIFI_STATUS:
1039                 v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
1040                 break;
1041
1042         case SO_PEEK_OFF:
1043                 if (!sock->ops->set_peek_off)
1044                         return -EOPNOTSUPP;
1045
1046                 v.val = sk->sk_peek_off;
1047                 break;
1048         case SO_NOFCS:
1049                 v.val = !!sock_flag(sk, SOCK_NOFCS);
1050                 break;
1051         default:
1052                 return -ENOPROTOOPT;
1053         }
1054
1055         if (len > lv)
1056                 len = lv;
1057         if (copy_to_user(optval, &v, len))
1058                 return -EFAULT;
1059 lenout:
1060         if (put_user(len, optlen))
1061                 return -EFAULT;
1062         return 0;
1063 }
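
/*
 * Illustrative (hypothetical) use of the SO_PEERCRED branch above from
 * userspace, e.g. on a connected AF_UNIX socket:
 *
 *	struct ucred cr;
 *	socklen_t len = sizeof(cr);
 *
 *	if (!getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &len))
 *		printf("peer pid %d uid %d gid %d\n", cr.pid, cr.uid, cr.gid);
 *
 * The values are produced by cred_to_ucred() and translated into the
 * caller's user namespace; uid/gid are reported as -1 when no credentials
 * are attached.
 */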
1064
1065 /*
1066  * Initialize an sk_lock.
1067  *
1068  * (We also register the sk_lock with the lock validator.)
1069  */
1070 static inline void sock_lock_init(struct sock *sk)
1071 {
1072         sock_lock_init_class_and_name(sk,
1073                         af_family_slock_key_strings[sk->sk_family],
1074                         af_family_slock_keys + sk->sk_family,
1075                         af_family_key_strings[sk->sk_family],
1076                         af_family_keys + sk->sk_family);
1077 }
1078
1079 /*
1080  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1081  * even temporarily, because of RCU lookups. sk_node should also be left as is.
1082  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1083  */
1084 static void sock_copy(struct sock *nsk, const struct sock *osk)
1085 {
1086 #ifdef CONFIG_SECURITY_NETWORK
1087         void *sptr = nsk->sk_security;
1088 #endif
1089         memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1090
1091         memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1092                osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1093
1094 #ifdef CONFIG_SECURITY_NETWORK
1095         nsk->sk_security = sptr;
1096         security_sk_clone(osk, nsk);
1097 #endif
1098 }
1099
1100 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1101 {
1102         unsigned long nulls1, nulls2;
1103
1104         nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1105         nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1106         if (nulls1 > nulls2)
1107                 swap(nulls1, nulls2);
1108
1109         if (nulls1 != 0)
1110                 memset((char *)sk, 0, nulls1);
1111         memset((char *)sk + nulls1 + sizeof(void *), 0,
1112                nulls2 - nulls1 - sizeof(void *));
1113         memset((char *)sk + nulls2 + sizeof(void *), 0,
1114                size - nulls2 - sizeof(void *));
1115 }
1116 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1117
1118 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1119                 int family)
1120 {
1121         struct sock *sk;
1122         struct kmem_cache *slab;
1123
1124         slab = prot->slab;
1125         if (slab != NULL) {
1126                 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1127                 if (!sk)
1128                         return sk;
1129                 if (priority & __GFP_ZERO) {
1130                         if (prot->clear_sk)
1131                                 prot->clear_sk(sk, prot->obj_size);
1132                         else
1133                                 sk_prot_clear_nulls(sk, prot->obj_size);
1134                 }
1135         } else
1136                 sk = kmalloc(prot->obj_size, priority);
1137
1138         if (sk != NULL) {
1139                 kmemcheck_annotate_bitfield(sk, flags);
1140
1141                 if (security_sk_alloc(sk, family, priority))
1142                         goto out_free;
1143
1144                 if (!try_module_get(prot->owner))
1145                         goto out_free_sec;
1146                 sk_tx_queue_clear(sk);
1147         }
1148
1149         return sk;
1150
1151 out_free_sec:
1152         security_sk_free(sk);
1153 out_free:
1154         if (slab != NULL)
1155                 kmem_cache_free(slab, sk);
1156         else
1157                 kfree(sk);
1158         return NULL;
1159 }
1160
1161 static void sk_prot_free(struct proto *prot, struct sock *sk)
1162 {
1163         struct kmem_cache *slab;
1164         struct module *owner;
1165
1166         owner = prot->owner;
1167         slab = prot->slab;
1168
1169         security_sk_free(sk);
1170         if (slab != NULL)
1171                 kmem_cache_free(slab, sk);
1172         else
1173                 kfree(sk);
1174         module_put(owner);
1175 }
1176
1177 #ifdef CONFIG_CGROUPS
1178 void sock_update_classid(struct sock *sk)
1179 {
1180         u32 classid;
1181
1182         rcu_read_lock();  /* doing current task, which cannot vanish. */
1183         classid = task_cls_classid(current);
1184         rcu_read_unlock();
1185         if (classid && classid != sk->sk_classid)
1186                 sk->sk_classid = classid;
1187 }
1188 EXPORT_SYMBOL(sock_update_classid);
1189
1190 void sock_update_netprioidx(struct sock *sk)
1191 {
1192         if (in_interrupt())
1193                 return;
1194
1195         sk->sk_cgrp_prioidx = task_netprioidx(current);
1196 }
1197 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1198 #endif
1199
1200 /**
1201  *      sk_alloc - All socket objects are allocated here
1202  *      @net: the applicable net namespace
1203  *      @family: protocol family
1204  *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1205  *      @prot: struct proto associated with this new sock instance
1206  */
1207 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1208                       struct proto *prot)
1209 {
1210         struct sock *sk;
1211
1212         sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1213         if (sk) {
1214                 sk->sk_family = family;
1215                 /*
1216                  * See comment in struct sock definition to understand
1217                  * why we need sk_prot_creator -acme
1218                  */
1219                 sk->sk_prot = sk->sk_prot_creator = prot;
1220                 sock_lock_init(sk);
1221                 sock_net_set(sk, get_net(net));
1222                 atomic_set(&sk->sk_wmem_alloc, 1);
1223
1224                 sock_update_classid(sk);
1225                 sock_update_netprioidx(sk);
1226         }
1227
1228         return sk;
1229 }
1230 EXPORT_SYMBOL(sk_alloc);
1231
1232 static void __sk_free(struct sock *sk)
1233 {
1234         struct sk_filter *filter;
1235
1236         if (sk->sk_destruct)
1237                 sk->sk_destruct(sk);
1238
1239         filter = rcu_dereference_check(sk->sk_filter,
1240                                        atomic_read(&sk->sk_wmem_alloc) == 0);
1241         if (filter) {
1242                 sk_filter_uncharge(sk, filter);
1243                 RCU_INIT_POINTER(sk->sk_filter, NULL);
1244         }
1245
1246         sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1247
1248         if (atomic_read(&sk->sk_omem_alloc))
1249                 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
1250                        __func__, atomic_read(&sk->sk_omem_alloc));
1251
1252         if (sk->sk_peer_cred)
1253                 put_cred(sk->sk_peer_cred);
1254         put_pid(sk->sk_peer_pid);
1255         put_net(sock_net(sk));
1256         sk_prot_free(sk->sk_prot_creator, sk);
1257 }
1258
1259 void sk_free(struct sock *sk)
1260 {
1261         /*
1262          * We subtract one from sk_wmem_alloc so we can tell whether
1263          * some packets are still in some tx queue.
1264          * If it is not zero, sock_wfree() will call __sk_free(sk) later
1265          */
1266         if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1267                 __sk_free(sk);
1268 }
1269 EXPORT_SYMBOL(sk_free);
1270
1271 /*
1272  * The last sock_put should drop the reference to sk->sk_net. It has already
1273  * been dropped in sk_change_net. Taking a reference to the stopping namespace
1274  * is not an option.
1275  * Take a reference to the socket to remove it from the hash while it is still
1276  * _alive_, and after that destroy it in the context of init_net.
1277  */
1278 void sk_release_kernel(struct sock *sk)
1279 {
1280         if (sk == NULL || sk->sk_socket == NULL)
1281                 return;
1282
1283         sock_hold(sk);
1284         sock_release(sk->sk_socket);
1285         release_net(sock_net(sk));
1286         sock_net_set(sk, get_net(&init_net));
1287         sock_put(sk);
1288 }
1289 EXPORT_SYMBOL(sk_release_kernel);
1290
1291 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1292 {
1293         if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1294                 sock_update_memcg(newsk);
1295 }
1296
1297 /**
1298  *      sk_clone_lock - clone a socket, and lock its clone
1299  *      @sk: the socket to clone
1300  *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1301  *
1302  *      Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1303  */
1304 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1305 {
1306         struct sock *newsk;
1307
1308         newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1309         if (newsk != NULL) {
1310                 struct sk_filter *filter;
1311
1312                 sock_copy(newsk, sk);
1313
1314                 /* SANITY */
1315                 get_net(sock_net(newsk));
1316                 sk_node_init(&newsk->sk_node);
1317                 sock_lock_init(newsk);
1318                 bh_lock_sock(newsk);
1319                 newsk->sk_backlog.head  = newsk->sk_backlog.tail = NULL;
1320                 newsk->sk_backlog.len = 0;
1321
1322                 atomic_set(&newsk->sk_rmem_alloc, 0);
1323                 /*
1324                  * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1325                  */
1326                 atomic_set(&newsk->sk_wmem_alloc, 1);
1327                 atomic_set(&newsk->sk_omem_alloc, 0);
1328                 skb_queue_head_init(&newsk->sk_receive_queue);
1329                 skb_queue_head_init(&newsk->sk_write_queue);
1330 #ifdef CONFIG_NET_DMA
1331                 skb_queue_head_init(&newsk->sk_async_wait_queue);
1332 #endif
1333
1334                 spin_lock_init(&newsk->sk_dst_lock);
1335                 rwlock_init(&newsk->sk_callback_lock);
1336                 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1337                                 af_callback_keys + newsk->sk_family,
1338                                 af_family_clock_key_strings[newsk->sk_family]);
1339
1340                 newsk->sk_dst_cache     = NULL;
1341                 newsk->sk_wmem_queued   = 0;
1342                 newsk->sk_forward_alloc = 0;
1343                 newsk->sk_send_head     = NULL;
1344                 newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1345
1346                 sock_reset_flag(newsk, SOCK_DONE);
1347                 skb_queue_head_init(&newsk->sk_error_queue);
1348
1349                 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1350                 if (filter != NULL)
1351                         sk_filter_charge(newsk, filter);
1352
1353                 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1354                         /* It is still a raw copy of the parent, so invalidate
1355                          * the destructor and do a plain sk_free() */
1356                         newsk->sk_destruct = NULL;
1357                         bh_unlock_sock(newsk);
1358                         sk_free(newsk);
1359                         newsk = NULL;
1360                         goto out;
1361                 }
1362
1363                 newsk->sk_err      = 0;
1364                 newsk->sk_priority = 0;
1365                 /*
1366                  * Before updating sk_refcnt, we must commit prior changes to memory
1367                  * (Documentation/RCU/rculist_nulls.txt for details)
1368                  */
1369                 smp_wmb();
1370                 atomic_set(&newsk->sk_refcnt, 2);
1371
1372                 /*
1373                  * Increment the counter in the same struct proto as the master
1374                  * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1375                  * is the same as sk->sk_prot->socks, as this field was copied
1376                  * with memcpy).
1377                  *
1378                  * This _changes_ the previous behaviour, where
1379                  * tcp_create_openreq_child always was incrementing the
1380                  * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1381                  * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1382                  */
1383                 sk_refcnt_debug_inc(newsk);
1384                 sk_set_socket(newsk, NULL);
1385                 newsk->sk_wq = NULL;
1386
1387                 sk_update_clone(sk, newsk);
1388
1389                 if (newsk->sk_prot->sockets_allocated)
1390                         sk_sockets_allocated_inc(newsk);
1391
1392                 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1393                         net_enable_timestamp();
1394         }
1395 out:
1396         return newsk;
1397 }
1398 EXPORT_SYMBOL_GPL(sk_clone_lock);
1399
1400 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1401 {
1402         __sk_dst_set(sk, dst);
1403         sk->sk_route_caps = dst->dev->features;
1404         if (sk->sk_route_caps & NETIF_F_GSO)
1405                 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1406         sk->sk_route_caps &= ~sk->sk_route_nocaps;
1407         if (sk_can_gso(sk)) {
1408                 if (dst->header_len) {
1409                         sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1410                 } else {
1411                         sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1412                         sk->sk_gso_max_size = dst->dev->gso_max_size;
1413                         sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1414                 }
1415         }
1416 }
1417 EXPORT_SYMBOL_GPL(sk_setup_caps);
1418
1419 void __init sk_init(void)
1420 {
1421         if (totalram_pages <= 4096) {
1422                 sysctl_wmem_max = 32767;
1423                 sysctl_rmem_max = 32767;
1424                 sysctl_wmem_default = 32767;
1425                 sysctl_rmem_default = 32767;
1426         } else if (totalram_pages >= 131072) {
1427                 sysctl_wmem_max = 131071;
1428                 sysctl_rmem_max = 131071;
1429         }
1430 }
1431
1432 /*
1433  *      Simple resource managers for sockets.
1434  */
1435
1436
1437 /*
1438  * Write buffer destructor automatically called from kfree_skb.
1439  */
1440 void sock_wfree(struct sk_buff *skb)
1441 {
1442         struct sock *sk = skb->sk;
1443         unsigned int len = skb->truesize;
1444
1445         if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1446                 /*
1447                  * Keep a reference on sk_wmem_alloc; it will be released
1448                  * after the sk_write_space() call
1449                  */
1450                 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1451                 sk->sk_write_space(sk);
1452                 len = 1;
1453         }
1454         /*
1455          * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1456          * could not do because of in-flight packets
1457          */
1458         if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1459                 __sk_free(sk);
1460 }
1461 EXPORT_SYMBOL(sock_wfree);
1462
1463 /*
1464  * Read buffer destructor automatically called from kfree_skb.
1465  */
1466 void sock_rfree(struct sk_buff *skb)
1467 {
1468         struct sock *sk = skb->sk;
1469         unsigned int len = skb->truesize;
1470
1471         atomic_sub(len, &sk->sk_rmem_alloc);
1472         sk_mem_uncharge(sk, len);
1473 }
1474 EXPORT_SYMBOL(sock_rfree);
1475
1476
1477 int sock_i_uid(struct sock *sk)
1478 {
1479         int uid;
1480
1481         read_lock_bh(&sk->sk_callback_lock);
1482         uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1483         read_unlock_bh(&sk->sk_callback_lock);
1484         return uid;
1485 }
1486 EXPORT_SYMBOL(sock_i_uid);
1487
1488 unsigned long sock_i_ino(struct sock *sk)
1489 {
1490         unsigned long ino;
1491
1492         read_lock_bh(&sk->sk_callback_lock);
1493         ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1494         read_unlock_bh(&sk->sk_callback_lock);
1495         return ino;
1496 }
1497 EXPORT_SYMBOL(sock_i_ino);
1498
1499 /*
1500  * Allocate a skb from the socket's send buffer.
1501  */
1502 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1503                              gfp_t priority)
1504 {
1505         if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1506                 struct sk_buff *skb = alloc_skb(size, priority);
1507                 if (skb) {
1508                         skb_set_owner_w(skb, sk);
1509                         return skb;
1510                 }
1511         }
1512         return NULL;
1513 }
1514 EXPORT_SYMBOL(sock_wmalloc);
1515
1516 /*
1517  * Allocate a skb from the socket's receive buffer.
1518  */
1519 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1520                              gfp_t priority)
1521 {
1522         if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1523                 struct sk_buff *skb = alloc_skb(size, priority);
1524                 if (skb) {
1525                         skb_set_owner_r(skb, sk);
1526                         return skb;
1527                 }
1528         }
1529         return NULL;
1530 }
1531
1532 /*
1533  * Allocate a memory block from the socket's option memory buffer.
1534  */
1535 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1536 {
1537         if ((unsigned)size <= sysctl_optmem_max &&
1538             atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1539                 void *mem;
1540                 /* First do the add, to avoid the race if kmalloc
1541                  * might sleep.
1542                  */
1543                 atomic_add(size, &sk->sk_omem_alloc);
1544                 mem = kmalloc(size, priority);
1545                 if (mem)
1546                         return mem;
1547                 atomic_sub(size, &sk->sk_omem_alloc);
1548         }
1549         return NULL;
1550 }
1551 EXPORT_SYMBOL(sock_kmalloc);
1552
1553 /*
1554  * Free an option memory block.
1555  */
1556 void sock_kfree_s(struct sock *sk, void *mem, int size)
1557 {
1558         kfree(mem);
1559         atomic_sub(size, &sk->sk_omem_alloc);
1560 }
1561 EXPORT_SYMBOL(sock_kfree_s);
1562
1563 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1564    I think these locks should be removed for datagram sockets.
1565  */
1566 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1567 {
1568         DEFINE_WAIT(wait);
1569
1570         clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1571         for (;;) {
1572                 if (!timeo)
1573                         break;
1574                 if (signal_pending(current))
1575                         break;
1576                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1577                 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1578                 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1579                         break;
1580                 if (sk->sk_shutdown & SEND_SHUTDOWN)
1581                         break;
1582                 if (sk->sk_err)
1583                         break;
1584                 timeo = schedule_timeout(timeo);
1585         }
1586         finish_wait(sk_sleep(sk), &wait);
1587         return timeo;
1588 }
1589
1590
1591 /*
1592  *      Generic send/receive buffer handlers
1593  */
1594
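/*
 * sock_alloc_send_pskb - allocate an skb charged to the socket's send buffer.
 * @header_len bytes are placed in the linear area and @data_len bytes are
 * spread over page fragments.  If the send buffer is full the caller sleeps,
 * bounded by the socket's send timeout, unless @noblock is set.
 */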
1595 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1596                                      unsigned long data_len, int noblock,
1597                                      int *errcode)
1598 {
1599         struct sk_buff *skb;
1600         gfp_t gfp_mask;
1601         long timeo;
1602         int err;
1603         int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1604
1605         err = -EMSGSIZE;
1606         if (npages > MAX_SKB_FRAGS)
1607                 goto failure;
1608
1609         gfp_mask = sk->sk_allocation;
1610         if (gfp_mask & __GFP_WAIT)
1611                 gfp_mask |= __GFP_REPEAT;
1612
1613         timeo = sock_sndtimeo(sk, noblock);
1614         while (1) {
1615                 err = sock_error(sk);
1616                 if (err != 0)
1617                         goto failure;
1618
1619                 err = -EPIPE;
1620                 if (sk->sk_shutdown & SEND_SHUTDOWN)
1621                         goto failure;
1622
1623                 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1624                         skb = alloc_skb(header_len, gfp_mask);
1625                         if (skb) {
1626                                 int i;
1627
1628                                 /* No pages, we're done... */
1629                                 if (!data_len)
1630                                         break;
1631
1632                                 skb->truesize += data_len;
1633                                 skb_shinfo(skb)->nr_frags = npages;
1634                                 for (i = 0; i < npages; i++) {
1635                                         struct page *page;
1636
1637                                         page = alloc_pages(sk->sk_allocation, 0);
1638                                         if (!page) {
1639                                                 err = -ENOBUFS;
1640                                                 skb_shinfo(skb)->nr_frags = i;
1641                                                 kfree_skb(skb);
1642                                                 goto failure;
1643                                         }
1644
1645                                         __skb_fill_page_desc(skb, i,
1646                                                         page, 0,
1647                                                         (data_len >= PAGE_SIZE ?
1648                                                          PAGE_SIZE :
1649                                                          data_len));
1650                                         data_len -= PAGE_SIZE;
1651                                 }
1652
1653                                 /* Full success... */
1654                                 break;
1655                         }
1656                         err = -ENOBUFS;
1657                         goto failure;
1658                 }
1659                 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1660                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1661                 err = -EAGAIN;
1662                 if (!timeo)
1663                         goto failure;
1664                 if (signal_pending(current))
1665                         goto interrupted;
1666                 timeo = sock_wait_for_wmem(sk, timeo);
1667         }
1668
1669         skb_set_owner_w(skb, sk);
1670         return skb;
1671
1672 interrupted:
1673         err = sock_intr_errno(timeo);
1674 failure:
1675         *errcode = err;
1676         return NULL;
1677 }
1678 EXPORT_SYMBOL(sock_alloc_send_pskb);
1679
1680 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1681                                     int noblock, int *errcode)
1682 {
1683         return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1684 }
1685 EXPORT_SYMBOL(sock_alloc_send_skb);
1686
1687 static void __lock_sock(struct sock *sk)
1688         __releases(&sk->sk_lock.slock)
1689         __acquires(&sk->sk_lock.slock)
1690 {
1691         DEFINE_WAIT(wait);
1692
1693         for (;;) {
1694                 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1695                                         TASK_UNINTERRUPTIBLE);
1696                 spin_unlock_bh(&sk->sk_lock.slock);
1697                 schedule();
1698                 spin_lock_bh(&sk->sk_lock.slock);
1699                 if (!sock_owned_by_user(sk))
1700                         break;
1701         }
1702         finish_wait(&sk->sk_lock.wq, &wait);
1703 }
1704
1705 static void __release_sock(struct sock *sk)
1706         __releases(&sk->sk_lock.slock)
1707         __acquires(&sk->sk_lock.slock)
1708 {
1709         struct sk_buff *skb = sk->sk_backlog.head;
1710
1711         do {
1712                 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1713                 bh_unlock_sock(sk);
1714
1715                 do {
1716                         struct sk_buff *next = skb->next;
1717
1718                         WARN_ON_ONCE(skb_dst_is_noref(skb));
1719                         skb->next = NULL;
1720                         sk_backlog_rcv(sk, skb);
1721
1722                         /*
1723                          * We are in process context here with softirqs
1724                          * disabled, use cond_resched_softirq() to preempt.
1725                          * This is safe to do because we've taken the backlog
1726                          * queue private:
1727                          */
1728                         cond_resched_softirq();
1729
1730                         skb = next;
1731                 } while (skb != NULL);
1732
1733                 bh_lock_sock(sk);
1734         } while ((skb = sk->sk_backlog.head) != NULL);
1735
1736         /*
1737          * Doing the zeroing here guarantees we cannot loop forever
1738          * while a wild producer attempts to flood us.
1739          */
1740         sk->sk_backlog.len = 0;
1741 }
1742
1743 /**
1744  * sk_wait_data - wait for data to arrive at sk_receive_queue
1745  * @sk:    sock to wait on
1746  * @timeo: for how long
1747  *
1748  * Now socket state including sk->sk_err is changed only under lock,
1749  * hence we may omit checks after joining wait queue.
1750  * We check receive queue before schedule() only as optimization;
1751  * it is very likely that release_sock() added new data.
1752  */
1753 int sk_wait_data(struct sock *sk, long *timeo)
1754 {
1755         int rc;
1756         DEFINE_WAIT(wait);
1757
1758         prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1759         set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1760         rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1761         clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1762         finish_wait(sk_sleep(sk), &wait);
1763         return rc;
1764 }
1765 EXPORT_SYMBOL(sk_wait_data);
1766
1767 /**
1768  *      __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1769  *      @sk: socket
1770  *      @size: memory size to allocate
1771  *      @kind: allocation type
1772  *
1773  *      If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1774  *      rmem allocation. This function assumes that protocols which have
1775  *      memory_pressure use sk_wmem_queued as write buffer accounting.
1776  */
1777 int __sk_mem_schedule(struct sock *sk, int size, int kind)
1778 {
1779         struct proto *prot = sk->sk_prot;
1780         int amt = sk_mem_pages(size);
1781         long allocated;
1782         int parent_status = UNDER_LIMIT;
1783
1784         sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1785
1786         allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1787
1788         /* Under limit. */
1789         if (parent_status == UNDER_LIMIT &&
1790                         allocated <= sk_prot_mem_limits(sk, 0)) {
1791                 sk_leave_memory_pressure(sk);
1792                 return 1;
1793         }
1794
1795         /* Under pressure. (we or our parents) */
1796         if ((parent_status > SOFT_LIMIT) ||
1797                         allocated > sk_prot_mem_limits(sk, 1))
1798                 sk_enter_memory_pressure(sk);
1799
1800         /* Over hard limit (we or our parents) */
1801         if ((parent_status == OVER_LIMIT) ||
1802                         (allocated > sk_prot_mem_limits(sk, 2)))
1803                 goto suppress_allocation;
1804
1805         /* guarantee minimum buffer size under pressure */
1806         if (kind == SK_MEM_RECV) {
1807                 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1808                         return 1;
1809
1810         } else { /* SK_MEM_SEND */
1811                 if (sk->sk_type == SOCK_STREAM) {
1812                         if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1813                                 return 1;
1814                 } else if (atomic_read(&sk->sk_wmem_alloc) <
1815                            prot->sysctl_wmem[0])
1816                                 return 1;
1817         }
1818
1819         if (sk_has_memory_pressure(sk)) {
1820                 int alloc;
1821
1822                 if (!sk_under_memory_pressure(sk))
1823                         return 1;
1824                 alloc = sk_sockets_allocated_read_positive(sk);
1825                 if (sk_prot_mem_limits(sk, 2) > alloc *
1826                     sk_mem_pages(sk->sk_wmem_queued +
1827                                  atomic_read(&sk->sk_rmem_alloc) +
1828                                  sk->sk_forward_alloc))
1829                         return 1;
1830         }
1831
1832 suppress_allocation:
1833
1834         if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1835                 sk_stream_moderate_sndbuf(sk);
1836
1837                 /* Fail only if socket is _under_ its sndbuf.
1838                  * In this case we cannot block, so we have to fail.
1839                  */
1840                 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1841                         return 1;
1842         }
1843
1844         trace_sock_exceed_buf_limit(sk, prot, allocated);
1845
1846         /* Alas. Undo changes. */
1847         sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1848
1849         sk_memory_allocated_sub(sk, amt);
1850
1851         return 0;
1852 }
1853 EXPORT_SYMBOL(__sk_mem_schedule);
1854
1855 /**
1856  *      __sk_mem_reclaim - reclaim memory_allocated
1857  *      @sk: socket
1858  */
1859 void __sk_mem_reclaim(struct sock *sk)
1860 {
1861         sk_memory_allocated_sub(sk,
1862                                 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
1863         sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1864
1865         if (sk_under_memory_pressure(sk) &&
1866             (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1867                 sk_leave_memory_pressure(sk);
1868 }
1869 EXPORT_SYMBOL(__sk_mem_reclaim);
1870
1871
1872 /*
1873  * Set of default routines for initialising struct proto_ops when
1874  * the protocol does not support a particular function. In certain
1875  * cases where it makes no sense for a protocol to have a "do nothing"
1876  * function, some default processing is provided.
1877  */
1878
1879 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1880 {
1881         return -EOPNOTSUPP;
1882 }
1883 EXPORT_SYMBOL(sock_no_bind);
1884
1885 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1886                     int len, int flags)
1887 {
1888         return -EOPNOTSUPP;
1889 }
1890 EXPORT_SYMBOL(sock_no_connect);
1891
1892 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1893 {
1894         return -EOPNOTSUPP;
1895 }
1896 EXPORT_SYMBOL(sock_no_socketpair);
1897
1898 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1899 {
1900         return -EOPNOTSUPP;
1901 }
1902 EXPORT_SYMBOL(sock_no_accept);
1903
1904 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1905                     int *len, int peer)
1906 {
1907         return -EOPNOTSUPP;
1908 }
1909 EXPORT_SYMBOL(sock_no_getname);
1910
1911 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
1912 {
1913         return 0;
1914 }
1915 EXPORT_SYMBOL(sock_no_poll);
1916
1917 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1918 {
1919         return -EOPNOTSUPP;
1920 }
1921 EXPORT_SYMBOL(sock_no_ioctl);
1922
1923 int sock_no_listen(struct socket *sock, int backlog)
1924 {
1925         return -EOPNOTSUPP;
1926 }
1927 EXPORT_SYMBOL(sock_no_listen);
1928
1929 int sock_no_shutdown(struct socket *sock, int how)
1930 {
1931         return -EOPNOTSUPP;
1932 }
1933 EXPORT_SYMBOL(sock_no_shutdown);
1934
1935 int sock_no_setsockopt(struct socket *sock, int level, int optname,
1936                     char __user *optval, unsigned int optlen)
1937 {
1938         return -EOPNOTSUPP;
1939 }
1940 EXPORT_SYMBOL(sock_no_setsockopt);
1941
1942 int sock_no_getsockopt(struct socket *sock, int level, int optname,
1943                     char __user *optval, int __user *optlen)
1944 {
1945         return -EOPNOTSUPP;
1946 }
1947 EXPORT_SYMBOL(sock_no_getsockopt);
1948
1949 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1950                     size_t len)
1951 {
1952         return -EOPNOTSUPP;
1953 }
1954 EXPORT_SYMBOL(sock_no_sendmsg);
1955
1956 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1957                     size_t len, int flags)
1958 {
1959         return -EOPNOTSUPP;
1960 }
1961 EXPORT_SYMBOL(sock_no_recvmsg);
1962
1963 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1964 {
1965         /* Mirror missing mmap method error code */
1966         return -ENODEV;
1967 }
1968 EXPORT_SYMBOL(sock_no_mmap);
1969
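/*
 * Fallback for protocols that lack a real sendpage: kmap the page and push
 * the data through kernel_sendmsg() as an ordinary kvec.
 */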
1970 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1971 {
1972         ssize_t res;
1973         struct msghdr msg = {.msg_flags = flags};
1974         struct kvec iov;
1975         char *kaddr = kmap(page);
1976         iov.iov_base = kaddr + offset;
1977         iov.iov_len = size;
1978         res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1979         kunmap(page);
1980         return res;
1981 }
1982 EXPORT_SYMBOL(sock_no_sendpage);
1983
1984 /*
1985  *      Default Socket Callbacks
1986  */
1987
1988 static void sock_def_wakeup(struct sock *sk)
1989 {
1990         struct socket_wq *wq;
1991
1992         rcu_read_lock();
1993         wq = rcu_dereference(sk->sk_wq);
1994         if (wq_has_sleeper(wq))
1995                 wake_up_interruptible_all(&wq->wait);
1996         rcu_read_unlock();
1997 }
1998
1999 static void sock_def_error_report(struct sock *sk)
2000 {
2001         struct socket_wq *wq;
2002
2003         rcu_read_lock();
2004         wq = rcu_dereference(sk->sk_wq);
2005         if (wq_has_sleeper(wq))
2006                 wake_up_interruptible_poll(&wq->wait, POLLERR);
2007         sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2008         rcu_read_unlock();
2009 }
2010
2011 static void sock_def_readable(struct sock *sk, int len)
2012 {
2013         struct socket_wq *wq;
2014
2015         rcu_read_lock();
2016         wq = rcu_dereference(sk->sk_wq);
2017         if (wq_has_sleeper(wq))
2018                 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2019                                                 POLLRDNORM | POLLRDBAND);
2020         sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2021         rcu_read_unlock();
2022 }
2023
2024 static void sock_def_write_space(struct sock *sk)
2025 {
2026         struct socket_wq *wq;
2027
2028         rcu_read_lock();
2029
2030         /* Do not wake up a writer until he can make "significant"
2031          * progress.  --DaveM
2032          */
2033         if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2034                 wq = rcu_dereference(sk->sk_wq);
2035                 if (wq_has_sleeper(wq))
2036                         wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2037                                                 POLLWRNORM | POLLWRBAND);
2038
2039                 /* Should agree with poll, otherwise some programs break */
2040                 if (sock_writeable(sk))
2041                         sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2042         }
2043
2044         rcu_read_unlock();
2045 }
2046
2047 static void sock_def_destruct(struct sock *sk)
2048 {
2049         kfree(sk->sk_protinfo);
2050 }
2051
2052 void sk_send_sigurg(struct sock *sk)
2053 {
2054         if (sk->sk_socket && sk->sk_socket->file)
2055                 if (send_sigurg(&sk->sk_socket->file->f_owner))
2056                         sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2057 }
2058 EXPORT_SYMBOL(sk_send_sigurg);
2059
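/*
 * Socket timer helpers: take a reference on the socket when arming a timer
 * that was not already pending, and drop it again when a pending timer is
 * cancelled, so the sock cannot be freed while a timer still references it.
 */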
2060 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2061                     unsigned long expires)
2062 {
2063         if (!mod_timer(timer, expires))
2064                 sock_hold(sk);
2065 }
2066 EXPORT_SYMBOL(sk_reset_timer);
2067
2068 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2069 {
2070         if (timer_pending(timer) && del_timer(timer))
2071                 __sock_put(sk);
2072 }
2073 EXPORT_SYMBOL(sk_stop_timer);
2074
2075 void sock_init_data(struct socket *sock, struct sock *sk)
2076 {
2077         skb_queue_head_init(&sk->sk_receive_queue);
2078         skb_queue_head_init(&sk->sk_write_queue);
2079         skb_queue_head_init(&sk->sk_error_queue);
2080 #ifdef CONFIG_NET_DMA
2081         skb_queue_head_init(&sk->sk_async_wait_queue);
2082 #endif
2083
2084         sk->sk_send_head        =       NULL;
2085
2086         init_timer(&sk->sk_timer);
2087
2088         sk->sk_allocation       =       GFP_KERNEL;
2089         sk->sk_rcvbuf           =       sysctl_rmem_default;
2090         sk->sk_sndbuf           =       sysctl_wmem_default;
2091         sk->sk_state            =       TCP_CLOSE;
2092         sk_set_socket(sk, sock);
2093
2094         sock_set_flag(sk, SOCK_ZAPPED);
2095
2096         if (sock) {
2097                 sk->sk_type     =       sock->type;
2098                 sk->sk_wq       =       sock->wq;
2099                 sock->sk        =       sk;
2100         } else
2101                 sk->sk_wq       =       NULL;
2102
2103         spin_lock_init(&sk->sk_dst_lock);
2104         rwlock_init(&sk->sk_callback_lock);
2105         lockdep_set_class_and_name(&sk->sk_callback_lock,
2106                         af_callback_keys + sk->sk_family,
2107                         af_family_clock_key_strings[sk->sk_family]);
2108
2109         sk->sk_state_change     =       sock_def_wakeup;
2110         sk->sk_data_ready       =       sock_def_readable;
2111         sk->sk_write_space      =       sock_def_write_space;
2112         sk->sk_error_report     =       sock_def_error_report;
2113         sk->sk_destruct         =       sock_def_destruct;
2114
2115         sk->sk_sndmsg_page      =       NULL;
2116         sk->sk_sndmsg_off       =       0;
2117         sk->sk_peek_off         =       -1;
2118
2119         sk->sk_peer_pid         =       NULL;
2120         sk->sk_peer_cred        =       NULL;
2121         sk->sk_write_pending    =       0;
2122         sk->sk_rcvlowat         =       1;
2123         sk->sk_rcvtimeo         =       MAX_SCHEDULE_TIMEOUT;
2124         sk->sk_sndtimeo         =       MAX_SCHEDULE_TIMEOUT;
2125
2126         sk->sk_stamp = ktime_set(-1L, 0);
2127
2128         /*
2129          * Before updating sk_refcnt, we must commit prior changes to memory
2130          * (Documentation/RCU/rculist_nulls.txt for details)
2131          */
2132         smp_wmb();
2133         atomic_set(&sk->sk_refcnt, 1);
2134         atomic_set(&sk->sk_drops, 0);
2135 }
2136 EXPORT_SYMBOL(sock_init_data);
2137
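/*
 * The socket lock is a spinlock plus an "owned" flag.  Process context takes
 * the spinlock, waits in __lock_sock() until no other owner remains, marks
 * itself the owner and drops the spinlock again.  Bottom-half context only
 * takes the spinlock and, if the lock is owned, queues packets to the
 * backlog, which release_sock() drains via __release_sock().
 */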
2138 void lock_sock_nested(struct sock *sk, int subclass)
2139 {
2140         might_sleep();
2141         spin_lock_bh(&sk->sk_lock.slock);
2142         if (sk->sk_lock.owned)
2143                 __lock_sock(sk);
2144         sk->sk_lock.owned = 1;
2145         spin_unlock(&sk->sk_lock.slock);
2146         /*
2147          * The sk_lock has mutex_lock() semantics here:
2148          */
2149         mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2150         local_bh_enable();
2151 }
2152 EXPORT_SYMBOL(lock_sock_nested);
2153
2154 void release_sock(struct sock *sk)
2155 {
2156         /*
2157          * The sk_lock has mutex_unlock() semantics:
2158          */
2159         mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2160
2161         spin_lock_bh(&sk->sk_lock.slock);
2162         if (sk->sk_backlog.tail)
2163                 __release_sock(sk);
2164         sk->sk_lock.owned = 0;
2165         if (waitqueue_active(&sk->sk_lock.wq))
2166                 wake_up(&sk->sk_lock.wq);
2167         spin_unlock_bh(&sk->sk_lock.slock);
2168 }
2169 EXPORT_SYMBOL(release_sock);
2170
2171 /**
2172  * lock_sock_fast - fast version of lock_sock
2173  * @sk: socket
2174  *
2175  * This version should be used for very small sections, where the process won't
2176  * block.  Returns false if the fast path is taken:
2177  *   sk_lock.slock locked, owned = 0, BH disabled
2178  * Returns true if the slow path is taken:
2179  *   sk_lock.slock unlocked, owned = 1, BH enabled
2180  */
2181 bool lock_sock_fast(struct sock *sk)
2182 {
2183         might_sleep();
2184         spin_lock_bh(&sk->sk_lock.slock);
2185
2186         if (!sk->sk_lock.owned)
2187                 /*
2188                  * Note : fast path, we return with BH still disabled
2189                  */
2190                 return false;
2191
2192         __lock_sock(sk);
2193         sk->sk_lock.owned = 1;
2194         spin_unlock(&sk->sk_lock.slock);
2195         /*
2196          * The sk_lock has mutex_lock() semantics here:
2197          */
2198         mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2199         local_bh_enable();
2200         return true;
2201 }
2202 EXPORT_SYMBOL(lock_sock_fast);
2203
2204 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2205 {
2206         struct timeval tv;
2207         if (!sock_flag(sk, SOCK_TIMESTAMP))
2208                 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2209         tv = ktime_to_timeval(sk->sk_stamp);
2210         if (tv.tv_sec == -1)
2211                 return -ENOENT;
2212         if (tv.tv_sec == 0) {
2213                 sk->sk_stamp = ktime_get_real();
2214                 tv = ktime_to_timeval(sk->sk_stamp);
2215         }
2216         return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2217 }
2218 EXPORT_SYMBOL(sock_get_timestamp);
2219
2220 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2221 {
2222         struct timespec ts;
2223         if (!sock_flag(sk, SOCK_TIMESTAMP))
2224                 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2225         ts = ktime_to_timespec(sk->sk_stamp);
2226         if (ts.tv_sec == -1)
2227                 return -ENOENT;
2228         if (ts.tv_sec == 0) {
2229                 sk->sk_stamp = ktime_get_real();
2230                 ts = ktime_to_timespec(sk->sk_stamp);
2231         }
2232         return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2233 }
2234 EXPORT_SYMBOL(sock_get_timestampns);
2235
2236 void sock_enable_timestamp(struct sock *sk, int flag)
2237 {
2238         if (!sock_flag(sk, flag)) {
2239                 unsigned long previous_flags = sk->sk_flags;
2240
2241                 sock_set_flag(sk, flag);
2242                 /*
2243                  * we just set one of the two flags which require net
2244                  * time stamping, but time stamping might have been on
2245                  * already because of the other one
2246                  */
2247                 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2248                         net_enable_timestamp();
2249         }
2250 }
2251
2252 /*
2253  *      Get a socket option on a socket.
2254  *
2255  *      FIX: POSIX 1003.1g is very ambiguous here. It states that
2256  *      asynchronous errors should be reported by getsockopt. We assume
2257  *      this means if you specify SO_ERROR (otherwise what's the point of it).
2258  */
2259 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2260                            char __user *optval, int __user *optlen)
2261 {
2262         struct sock *sk = sock->sk;
2263
2264         return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2265 }
2266 EXPORT_SYMBOL(sock_common_getsockopt);
2267
2268 #ifdef CONFIG_COMPAT
2269 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2270                                   char __user *optval, int __user *optlen)
2271 {
2272         struct sock *sk = sock->sk;
2273
2274         if (sk->sk_prot->compat_getsockopt != NULL)
2275                 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2276                                                       optval, optlen);
2277         return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2278 }
2279 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2280 #endif
2281
2282 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2283                         struct msghdr *msg, size_t size, int flags)
2284 {
2285         struct sock *sk = sock->sk;
2286         int addr_len = 0;
2287         int err;
2288
2289         err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2290                                    flags & ~MSG_DONTWAIT, &addr_len);
2291         if (err >= 0)
2292                 msg->msg_namelen = addr_len;
2293         return err;
2294 }
2295 EXPORT_SYMBOL(sock_common_recvmsg);
2296
2297 /*
2298  *      Set socket options on an inet socket.
2299  */
2300 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2301                            char __user *optval, unsigned int optlen)
2302 {
2303         struct sock *sk = sock->sk;
2304
2305         return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2306 }
2307 EXPORT_SYMBOL(sock_common_setsockopt);
2308
2309 #ifdef CONFIG_COMPAT
2310 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2311                                   char __user *optval, unsigned int optlen)
2312 {
2313         struct sock *sk = sock->sk;
2314
2315         if (sk->sk_prot->compat_setsockopt != NULL)
2316                 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2317                                                       optval, optlen);
2318         return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2319 }
2320 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2321 #endif
2322
2323 void sk_common_release(struct sock *sk)
2324 {
2325         if (sk->sk_prot->destroy)
2326                 sk->sk_prot->destroy(sk);
2327
2328         /*
2329          * Observation: when sk_common_release() is called, processes have
2330          * no access to the socket, but the network stack still does.
2331          * Step one, detach it from networking:
2332          *
2333          * A. Remove from hash tables.
2334          */
2335
2336         sk->sk_prot->unhash(sk);
2337
2338         /*
2339          * At this point the socket cannot receive new packets, but it is possible
2340          * that some packets are still in flight because some CPU ran the receiver
2341          * and did its hash table lookup before we unhashed the socket. They will
2342          * reach the receive queue and be purged by the socket destructor.
2343          *
2344          * Also, we still have packets pending on the receive queue and probably
2345          * our own packets waiting in device queues. sock_destroy will drain the
2346          * receive queue, but transmitted packets will delay socket destruction
2347          * until the last reference is released.
2348          */
2349
2350         sock_orphan(sk);
2351
2352         xfrm_sk_free_policy(sk);
2353
2354         sk_refcnt_debug_release(sk);
2355         sock_put(sk);
2356 }
2357 EXPORT_SYMBOL(sk_common_release);
2358
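/**
 * sk_get_waiting_task_cmdline - find the task blocked on @sk and copy its
 *                               command line
 * @sk:      socket to inspect
 * @cmdline: caller-supplied buffer of at least PAGE_SIZE bytes
 *
 * The task is looked up via the socket's wait queue, its fasync owner, the
 * epoll file it is registered with, or the socket inode's i_private, in that
 * order.  The task's command line (and, if it looks truncated, part of its
 * environment) is read with access_process_vm() and " <pid> <comm>" is
 * appended.  Returns a pointer to the program name inside @cmdline; a
 * leading "/data/data" prefix or directory path is skipped.
 */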
2359 char *sk_get_waiting_task_cmdline(struct sock *sk, char *cmdline)
2360 {
2361         bool softirq_enabled = false;
2362         int res = 0;
2363         unsigned int len;
2364         char *program_name = cmdline;
2365         struct task_struct *task = NULL;
2366         struct mm_struct *mm = NULL;
2367         static char *apk_path_prefix = "/data/data";
2368         wait_queue_t *wq = NULL;
2369         struct list_head *lh = NULL;
2370         struct socket_wq *sk_wq = NULL;
2371         wait_queue_func_t wait_func;
2372         enum pid_type type;
2373         struct pid *pid = NULL;
2374         struct fown_struct *fown = NULL;
2375         struct file *file;
2376         int preempt_count = 0;
2377
2378         *program_name = '\0';
2379
2380         if (!sk || !sk->sk_wq)
2381                 goto out;
2382         lh = sk->sk_wq->wait.task_list.next;
2383         if (!wq_has_sleeper(sk->sk_wq)) {
2384                 sk_wq = sk->sk_wq;
2385                 if (sk_wq->fasync_list && sk_wq->fasync_list->fa_file) {
2386                         fown = &sk_wq->fasync_list->fa_file->f_owner;
2387                         pid = fown->pid;
2388                         type = fown->pid_type;
2389                         do_each_pid_task(pid, type, task) {
2390                                 if (task)
2391                                         break;
2392                         } while_each_pid_task(pid, type, task);
2393                         printk(KERN_DEBUG "Async wakeup process:%p\n", task);
2394                 }
2395         } else {
2396                 lh = sk->sk_wq->wait.task_list.next;
2397                 wq = list_entry(lh, wait_queue_t, task_list);
2398
2399                 wait_func = wq->func;
2400                 printk(KERN_DEBUG "Wakeup function:%p\n", wait_func);
2401                 if (wait_func == pollwake)
2402                         task = ((struct poll_wqueues *)
2403                                 (wq->private))->polling_task;
2404                 else if (wait_func == default_wake_function)
2405                         task = (struct task_struct *)(wq->private);
2406                 else if (wait_func == ep_poll_callback)
2407                         task = (struct task_struct *)(wq->private);
2408                 else if (wait_func == autoremove_wake_function)
2409                         task = (struct task_struct *)(wq->private);
2410                 else
2411                         printk(KERN_ERR "Unhandled wakeup:%p\n", wait_func);
2412
2413                 if (task)
2414                         task = get_thread_process(task);
2415         }
2416
2417 #ifdef CONFIG_EPOLL
2418         if (!task && sk->sk_socket) {
2419                 file = sk->sk_socket->file;
2420                 if (file)
2421                         task = get_epoll_file_task(file);
2422         }
2423 #endif
2424
2425         if (!task && sk && sk->sk_socket)
2426                 task = SOCK_INODE(sk->sk_socket)->i_private;
2427
2428         if (!task) {
2429                 printk(KERN_WARNING "Can't find a process for this sock.\n");
2430                 goto out;
2431         }
2432
2433         mm = get_task_mm(task);
2434         if (mm && mm->arg_end) {
2435                 len = mm->arg_end - mm->arg_start;
2436
2437                 if (len > PAGE_SIZE)
2438                         len = PAGE_SIZE;
2439
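                /*
                 * access_process_vm() may sleep, so temporarily re-enable
                 * bottom halves and clear the preempt count if we were
                 * called from atomic context; both are restored below.
                 */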
2440                 if (softirq_count()) {
2441                         softirq_enabled = true;
2442                         local_bh_enable();
2443                 }
2444                 if (preempt_count()) {
2445                         preempt_count = preempt_count();
2446                         preempt_count() = 0;
2447                 }
2448
2449                 res = access_process_vm(task, mm->arg_start, cmdline, len, 0);
2450
2451                 if (res > 0 && cmdline[res-1] != '\0' && len < PAGE_SIZE) {
2452                         len = strnlen(cmdline, res);
2453                         if (len < res) {
2454                                 res = len;
2455                         } else {
2456                                 len = mm->env_end - mm->env_start;
2457                                 if (len > PAGE_SIZE - res)
2458                                         len = PAGE_SIZE - res;
2459                                 res += access_process_vm(task,
2460                                         mm->env_start, cmdline+res, len, 0);
2461                                 res = strnlen(cmdline, res);
2462                         }
2463                 }
2464
2465                 if (preempt_count)
2466                         preempt_count() = preempt_count;
2467                 if (softirq_enabled)
2468                         local_bh_disable();
2469
2470                 if (res >= PAGE_SIZE)
2471                         cmdline[PAGE_SIZE-1] = '\0';
2472
2473                 len = strlen(apk_path_prefix);
2474                 if (!strncmp(apk_path_prefix, program_name, len))
2475                         program_name += len;
2476                 else
2477                         program_name = strrchr(cmdline, '/');
2478
2479                 if (program_name == NULL)
2480                         program_name = cmdline;
2481                 else
2482                         program_name++;
2483         }
2484
2485         if (mm)
2486                 mmput(mm);
2487
2488         len = strlen(program_name);
2489         snprintf(program_name + len, PAGE_SIZE-(program_name-cmdline)-len,
2490                  " %d %s", task->pid, task->comm);
2491 out:
2492         return program_name;
2493 }
2494 EXPORT_SYMBOL(sk_get_waiting_task_cmdline);
2495
2496
2497 #ifdef CONFIG_PROC_FS
2498 #define PROTO_INUSE_NR  64      /* should be enough for the first time */
2499 struct prot_inuse {
2500         int val[PROTO_INUSE_NR];
2501 };
2502
2503 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2504
2505 #ifdef CONFIG_NET_NS
2506 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2507 {
2508         __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2509 }
2510 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2511
2512 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2513 {
2514         int cpu, idx = prot->inuse_idx;
2515         int res = 0;
2516
2517         for_each_possible_cpu(cpu)
2518                 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2519
2520         return res >= 0 ? res : 0;
2521 }
2522 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2523
2524 static int __net_init sock_inuse_init_net(struct net *net)
2525 {
2526         net->core.inuse = alloc_percpu(struct prot_inuse);
2527         return net->core.inuse ? 0 : -ENOMEM;
2528 }
2529
2530 static void __net_exit sock_inuse_exit_net(struct net *net)
2531 {
2532         free_percpu(net->core.inuse);
2533 }
2534
2535 static struct pernet_operations net_inuse_ops = {
2536         .init = sock_inuse_init_net,
2537         .exit = sock_inuse_exit_net,
2538 };
2539
2540 static __init int net_inuse_init(void)
2541 {
2542         if (register_pernet_subsys(&net_inuse_ops))
2543                 panic("Cannot initialize net inuse counters");
2544
2545         return 0;
2546 }
2547
2548 core_initcall(net_inuse_init);
2549 #else
2550 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2551
2552 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2553 {
2554         __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2555 }
2556 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2557
2558 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2559 {
2560         int cpu, idx = prot->inuse_idx;
2561         int res = 0;
2562
2563         for_each_possible_cpu(cpu)
2564                 res += per_cpu(prot_inuse, cpu).val[idx];
2565
2566         return res >= 0 ? res : 0;
2567 }
2568 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2569 #endif
2570
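/*
 * Hand out "inuse" counter slots from the proto_inuse_idx bitmap.  The last
 * slot is reserved as an overflow marker: a proto that cannot get a free
 * slot is given inuse_idx == PROTO_INUSE_NR - 1, which is never set in the
 * bitmap and never released.
 */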
2571 static void assign_proto_idx(struct proto *prot)
2572 {
2573         prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2574
2575         if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2576                 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2577                 return;
2578         }
2579
2580         set_bit(prot->inuse_idx, proto_inuse_idx);
2581 }
2582
2583 static void release_proto_idx(struct proto *prot)
2584 {
2585         if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2586                 clear_bit(prot->inuse_idx, proto_inuse_idx);
2587 }
2588 #else
2589 static inline void assign_proto_idx(struct proto *prot)
2590 {
2591 }
2592
2593 static inline void release_proto_idx(struct proto *prot)
2594 {
2595 }
2596 #endif
2597
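/*
 * Register a protocol: optionally create the slab caches for its socks,
 * request socks and timewait socks, then add it to proto_list and assign it
 * an inuse slot for /proc/net/protocols accounting.
 */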
2598 int proto_register(struct proto *prot, int alloc_slab)
2599 {
2600         if (alloc_slab) {
2601                 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2602                                         SLAB_HWCACHE_ALIGN | prot->slab_flags,
2603                                         NULL);
2604
2605                 if (prot->slab == NULL) {
2606                         printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2607                                prot->name);
2608                         goto out;
2609                 }
2610
2611                 if (prot->rsk_prot != NULL) {
2612                         prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2613                         if (prot->rsk_prot->slab_name == NULL)
2614                                 goto out_free_sock_slab;
2615
2616                         prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2617                                                                  prot->rsk_prot->obj_size, 0,
2618                                                                  SLAB_HWCACHE_ALIGN, NULL);
2619
2620                         if (prot->rsk_prot->slab == NULL) {
2621                                 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2622                                        prot->name);
2623                                 goto out_free_request_sock_slab_name;
2624                         }
2625                 }
2626
2627                 if (prot->twsk_prot != NULL) {
2628                         prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2629
2630                         if (prot->twsk_prot->twsk_slab_name == NULL)
2631                                 goto out_free_request_sock_slab;
2632
2633                         prot->twsk_prot->twsk_slab =
2634                                 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2635                                                   prot->twsk_prot->twsk_obj_size,
2636                                                   0,
2637                                                   SLAB_HWCACHE_ALIGN |
2638                                                         prot->slab_flags,
2639                                                   NULL);
2640                         if (prot->twsk_prot->twsk_slab == NULL)
2641                                 goto out_free_timewait_sock_slab_name;
2642                 }
2643         }
2644
2645         mutex_lock(&proto_list_mutex);
2646         list_add(&prot->node, &proto_list);
2647         assign_proto_idx(prot);
2648         mutex_unlock(&proto_list_mutex);
2649         return 0;
2650
2651 out_free_timewait_sock_slab_name:
2652         kfree(prot->twsk_prot->twsk_slab_name);
2653 out_free_request_sock_slab:
2654         if (prot->rsk_prot && prot->rsk_prot->slab) {
2655                 kmem_cache_destroy(prot->rsk_prot->slab);
2656                 prot->rsk_prot->slab = NULL;
2657         }
2658 out_free_request_sock_slab_name:
2659         if (prot->rsk_prot)
2660                 kfree(prot->rsk_prot->slab_name);
2661 out_free_sock_slab:
2662         kmem_cache_destroy(prot->slab);
2663         prot->slab = NULL;
2664 out:
2665         return -ENOBUFS;
2666 }
2667 EXPORT_SYMBOL(proto_register);
2668
2669 void proto_unregister(struct proto *prot)
2670 {
2671         mutex_lock(&proto_list_mutex);
2672         release_proto_idx(prot);
2673         list_del(&prot->node);
2674         mutex_unlock(&proto_list_mutex);
2675
2676         if (prot->slab != NULL) {
2677                 kmem_cache_destroy(prot->slab);
2678                 prot->slab = NULL;
2679         }
2680
2681         if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2682                 kmem_cache_destroy(prot->rsk_prot->slab);
2683                 kfree(prot->rsk_prot->slab_name);
2684                 prot->rsk_prot->slab = NULL;
2685         }
2686
2687         if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2688                 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2689                 kfree(prot->twsk_prot->twsk_slab_name);
2690                 prot->twsk_prot->twsk_slab = NULL;
2691         }
2692 }
2693 EXPORT_SYMBOL(proto_unregister);
2694
2695 #ifdef CONFIG_PROC_FS
2696 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2697         __acquires(proto_list_mutex)
2698 {
2699         mutex_lock(&proto_list_mutex);
2700         return seq_list_start_head(&proto_list, *pos);
2701 }
2702
2703 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2704 {
2705         return seq_list_next(v, &proto_list, pos);
2706 }
2707
2708 static void proto_seq_stop(struct seq_file *seq, void *v)
2709         __releases(proto_list_mutex)
2710 {
2711         mutex_unlock(&proto_list_mutex);
2712 }
2713
2714 static char proto_method_implemented(const void *method)
2715 {
2716         return method == NULL ? 'n' : 'y';
2717 }
2718 static long sock_prot_memory_allocated(struct proto *proto)
2719 {
2720         return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
2721 }
2722
2723 static char *sock_prot_memory_pressure(struct proto *proto)
2724 {
2725         return proto->memory_pressure != NULL ?
2726         proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2727 }
2728
2729 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2730 {
2731
2732         seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2733                         "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2734                    proto->name,
2735                    proto->obj_size,
2736                    sock_prot_inuse_get(seq_file_net(seq), proto),
2737                    sock_prot_memory_allocated(proto),
2738                    sock_prot_memory_pressure(proto),
2739                    proto->max_header,
2740                    proto->slab == NULL ? "no" : "yes",
2741                    module_name(proto->owner),
2742                    proto_method_implemented(proto->close),
2743                    proto_method_implemented(proto->connect),
2744                    proto_method_implemented(proto->disconnect),
2745                    proto_method_implemented(proto->accept),
2746                    proto_method_implemented(proto->ioctl),
2747                    proto_method_implemented(proto->init),
2748                    proto_method_implemented(proto->destroy),
2749                    proto_method_implemented(proto->shutdown),
2750                    proto_method_implemented(proto->setsockopt),
2751                    proto_method_implemented(proto->getsockopt),
2752                    proto_method_implemented(proto->sendmsg),
2753                    proto_method_implemented(proto->recvmsg),
2754                    proto_method_implemented(proto->sendpage),
2755                    proto_method_implemented(proto->bind),
2756                    proto_method_implemented(proto->backlog_rcv),
2757                    proto_method_implemented(proto->hash),
2758                    proto_method_implemented(proto->unhash),
2759                    proto_method_implemented(proto->get_port),
2760                    proto_method_implemented(proto->enter_memory_pressure));
2761 }
2762
2763 static int proto_seq_show(struct seq_file *seq, void *v)
2764 {
2765         if (v == &proto_list)
2766                 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2767                            "protocol",
2768                            "size",
2769                            "sockets",
2770                            "memory",
2771                            "press",
2772                            "maxhdr",
2773                            "slab",
2774                            "module",
2775                            "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2776         else
2777                 proto_seq_printf(seq, list_entry(v, struct proto, node));
2778         return 0;
2779 }
2780
2781 static const struct seq_operations proto_seq_ops = {
2782         .start  = proto_seq_start,
2783         .next   = proto_seq_next,
2784         .stop   = proto_seq_stop,
2785         .show   = proto_seq_show,
2786 };
2787
2788 static int proto_seq_open(struct inode *inode, struct file *file)
2789 {
2790         return seq_open_net(inode, file, &proto_seq_ops,
2791                             sizeof(struct seq_net_private));
2792 }
2793
2794 static const struct file_operations proto_seq_fops = {
2795         .owner          = THIS_MODULE,
2796         .open           = proto_seq_open,
2797         .read           = seq_read,
2798         .llseek         = seq_lseek,
2799         .release        = seq_release_net,
2800 };
2801
2802 static __net_init int proto_init_net(struct net *net)
2803 {
2804         if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2805                 return -ENOMEM;
2806
2807         return 0;
2808 }
2809
2810 static __net_exit void proto_exit_net(struct net *net)
2811 {
2812         proc_net_remove(net, "protocols");
2813 }
2814
2815
2816 static __net_initdata struct pernet_operations proto_net_ops = {
2817         .init = proto_init_net,
2818         .exit = proto_exit_net,
2819 };
2820
2821 static int __init proto_init(void)
2822 {
2823         return register_pernet_subsys(&proto_net_ops);
2824 }
2825
2826 subsys_initcall(proto_init);
2827
2828 #endif /* PROC_FS */