[NET]: cleanup extra semicolons

net/ipv4/tcp.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:     $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *              Alan Cox        :       Numerous verify_area() calls
 *              Alan Cox        :       Set the ACK bit on a reset
 *              Alan Cox        :       Stopped it crashing if it closed while
 *                                      sk->inuse=1 and was trying to connect
 *                                      (tcp_err()).
 *              Alan Cox        :       All icmp error handling was broken
 *                                      pointers passed were wrong and the
 *                                      socket was looked up backwards. Nobody
 *                                      tested any icmp error code obviously.
 *              Alan Cox        :       tcp_err() now handled properly. It
 *                                      wakes people on errors. poll
 *                                      behaves and the icmp error race
 *                                      has gone by moving it into sock.c
 *              Alan Cox        :       tcp_send_reset() fixed to work for
 *                                      everything not just packets for
 *                                      unknown sockets.
 *              Alan Cox        :       tcp option processing.
 *              Alan Cox        :       Reset tweaked (still not 100%) [Had
 *                                      syn rule wrong]
 *              Herp Rosmanith  :       More reset fixes
 *              Alan Cox        :       No longer acks invalid rst frames.
 *                                      Acking any kind of RST is right out.
 *              Alan Cox        :       Sets an ignore me flag on an rst
 *                                      receive otherwise odd bits of prattle
 *                                      escape still
 *              Alan Cox        :       Fixed another acking RST frame bug.
 *                                      Should stop LAN workplace lockups.
 *              Alan Cox        :       Some tidyups using the new skb list
 *                                      facilities
 *              Alan Cox        :       sk->keepopen now seems to work
 *              Alan Cox        :       Pulls options out correctly on accepts
 *              Alan Cox        :       Fixed assorted sk->rqueue->next errors
 *              Alan Cox        :       PSH doesn't end a TCP read. Switched a
 *                                      bit to skb ops.
 *              Alan Cox        :       Tidied tcp_data to avoid a potential
 *                                      nasty.
 *              Alan Cox        :       Added some better commenting, as the
 *                                      tcp is hard to follow
 *              Alan Cox        :       Removed incorrect check for 20 * psh
 *      Michael O'Reilly        :       ack < copied bug fix.
 *      Johannes Stille         :       Misc tcp fixes (not all in yet).
 *              Alan Cox        :       FIN with no memory -> CRASH
 *              Alan Cox        :       Added socket option proto entries.
 *                                      Also added awareness of them to accept.
 *              Alan Cox        :       Added TCP options (SOL_TCP)
 *              Alan Cox        :       Switched wakeup calls to callbacks,
 *                                      so the kernel can layer network
 *                                      sockets.
 *              Alan Cox        :       Use ip_tos/ip_ttl settings.
 *              Alan Cox        :       Handle FIN (more) properly (we hope).
 *              Alan Cox        :       RST frames sent on unsynchronised
 *                                      state ack error.
 *              Alan Cox        :       Put in missing check for SYN bit.
 *              Alan Cox        :       Added tcp_select_window() aka NET2E
 *                                      window non shrink trick.
 *              Alan Cox        :       Added a couple of small NET2E timer
 *                                      fixes
 *              Charles Hedrick :       TCP fixes
 *              Toomas Tamm     :       TCP window fixes
 *              Alan Cox        :       Small URG fix to rlogin ^C ack fight
 *              Charles Hedrick :       Rewrote most of it to actually work
 *              Linus           :       Rewrote tcp_read() and URG handling
 *                                      completely
 *              Gerhard Koerting:       Fixed some missing timer handling
 *              Matthew Dillon  :       Reworked TCP machine states as per RFC
 *              Gerhard Koerting:       PC/TCP workarounds
 *              Adam Caldwell   :       Assorted timer/timing errors
 *              Matthew Dillon  :       Fixed another RST bug
 *              Alan Cox        :       Move to kernel side addressing changes.
 *              Alan Cox        :       Beginning work on TCP fastpathing
 *                                      (not yet usable)
 *              Arnt Gulbrandsen:       Turbocharged tcp_check() routine.
 *              Alan Cox        :       TCP fast path debugging
 *              Alan Cox        :       Window clamping
 *              Michael Riepe   :       Bug in tcp_check()
 *              Matt Dillon     :       More TCP improvements and RST bug fixes
 *              Matt Dillon     :       Yet more small nasties removed from the
 *                                      TCP code (Be very nice to this man if
 *                                      tcp finally works 100%) 8)
 *              Alan Cox        :       BSD accept semantics.
 *              Alan Cox        :       Reset on closedown bug.
 *      Peter De Schrijver      :       ENOTCONN check missing in tcp_sendto().
 *              Michael Pall    :       Handle poll() after URG properly in
 *                                      all cases.
 *              Michael Pall    :       Undo the last fix in tcp_read_urg()
 *                                      (multi URG PUSH broke rlogin).
 *              Michael Pall    :       Fix the multi URG PUSH problem in
 *                                      tcp_readable(), poll() after URG
 *                                      works now.
 *              Michael Pall    :       recv(...,MSG_OOB) never blocks in the
 *                                      BSD api.
 *              Alan Cox        :       Changed the semantics of sk->socket to
 *                                      fix a race and a signal problem with
 *                                      accept() and async I/O.
 *              Alan Cox        :       Relaxed the rules on tcp_sendto().
 *              Yury Shevchuk   :       Really fixed accept() blocking problem.
 *              Craig I. Hagan  :       Allow for BSD compatible TIME_WAIT for
 *                                      clients/servers which listen in on
 *                                      fixed ports.
 *              Alan Cox        :       Cleaned the above up and shrank it to
 *                                      a sensible code size.
 *              Alan Cox        :       Self connect lockup fix.
 *              Alan Cox        :       No connect to multicast.
 *              Ross Biro       :       Close unaccepted children on master
 *                                      socket close.
 *              Alan Cox        :       Reset tracing code.
 *              Alan Cox        :       Spurious resets on shutdown.
 *              Alan Cox        :       Giant 15 minute/60 second timer error
 *              Alan Cox        :       Small whoops in polling before an
 *                                      accept.
 *              Alan Cox        :       Kept the state trace facility since
 *                                      it's handy for debugging.
 *              Alan Cox        :       More reset handler fixes.
 *              Alan Cox        :       Started rewriting the code based on
 *                                      the RFC's for other useful protocol
 *                                      references see: Comer, KA9Q NOS, and
 *                                      for a reference on the difference
 *                                      between specifications and how BSD
 *                                      works see the 4.4lite source.
 *              A.N.Kuznetsov   :       Don't time wait on completion of tidy
 *                                      close.
 *              Linus Torvalds  :       Fin/Shutdown & copied_seq changes.
 *              Linus Torvalds  :       Fixed BSD port reuse to work first syn
 *              Alan Cox        :       Reimplemented timers as per the RFC
 *                                      and using multiple timers for sanity.
 *              Alan Cox        :       Small bug fixes, and a lot of new
 *                                      comments.
 *              Alan Cox        :       Fixed dual reader crash by locking
 *                                      the buffers (much like datagram.c)
 *              Alan Cox        :       Fixed stuck sockets in probe. A probe
 *                                      now gets fed up of retrying without
 *                                      (even a no space) answer.
 *              Alan Cox        :       Extracted closing code better
 *              Alan Cox        :       Fixed the closing state machine to
 *                                      resemble the RFC.
 *              Alan Cox        :       More 'per spec' fixes.
 *              Jorge Cwik      :       Even faster checksumming.
 *              Alan Cox        :       tcp_data() doesn't ack illegal PSH
 *                                      only frames. At least one pc tcp stack
 *                                      generates them.
 *              Alan Cox        :       Cache last socket.
 *              Alan Cox        :       Per route irtt.
 *              Matt Day        :       poll()->select() match BSD precisely on error
 *              Alan Cox        :       New buffers
 *              Marc Tamsky     :       Various sk->prot->retransmits and
 *                                      sk->retransmits misupdating fixed.
 *                                      Fixed tcp_write_timeout: stuck close,
 *                                      and TCP syn retries gets used now.
 *              Mark Yarvis     :       In tcp_read_wakeup(), don't send an
 *                                      ack if state is TCP_CLOSED.
 *              Alan Cox        :       Look up device on a retransmit - routes may
 *                                      change. Doesn't yet cope with MSS shrink right
 *                                      but it's a start!
 *              Marc Tamsky     :       Closing in closing fixes.
 *              Mike Shaver     :       RFC1122 verifications.
 *              Alan Cox        :       rcv_saddr errors.
 *              Alan Cox        :       Block double connect().
 *              Alan Cox        :       Small hooks for enSKIP.
 *              Alexey Kuznetsov:       Path MTU discovery.
 *              Alan Cox        :       Support soft errors.
 *              Alan Cox        :       Fix MTU discovery pathological case
 *                                      when the remote claims no mtu!
 *              Marc Tamsky     :       TCP_CLOSE fix.
 *              Colin (G3TNE)   :       Send a reset on syn ack replies in
 *                                      window but wrong (fixes NT lpd problems)
 *              Pedro Roque     :       Better TCP window handling, delayed ack.
 *              Joerg Reuter    :       No modification of locked buffers in
 *                                      tcp_do_retransmit()
 *              Eric Schenk     :       Changed receiver side silly window
 *                                      avoidance algorithm to BSD style
 *                                      algorithm. This doubles throughput
 *                                      against machines running Solaris,
 *                                      and seems to result in general
 *                                      improvement.
 *      Stefan Magdalinski      :       adjusted tcp_readable() to fix FIONREAD
 *      Willy Konynenberg       :       Transparent proxying support.
 *      Mike McLagan            :       Routing by source
 *              Keith Owens     :       Do proper merging with partial SKB's in
 *                                      tcp_do_sendmsg to avoid burstiness.
 *              Eric Schenk     :       Fix fast close down bug with
 *                                      shutdown() followed by close().
 *              Andi Kleen      :       Make poll agree with SIGIO
 *      Salvatore Sanfilippo    :       Support SO_LINGER with linger == 1 and
 *                                      lingertime == 0 (RFC 793 ABORT Call)
 *      Hirokazu Takahashi      :       Use copy_from_user() instead of
 *                                      csum_and_copy_from_user() if possible.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *      TCP_SYN_SENT            sent a connection request, waiting for ack
 *
 *      TCP_SYN_RECV            received a connection request, sent ack,
 *                              waiting for final ack in three-way handshake.
 *
 *      TCP_ESTABLISHED         connection established
 *
 *      TCP_FIN_WAIT1           our side has shutdown, waiting to complete
 *                              transmission of remaining buffered data
 *
 *      TCP_FIN_WAIT2           all buffered data sent, waiting for remote
 *                              to shutdown
 *
 *      TCP_CLOSING             both sides have shutdown but we still have
 *                              data we have to finish sending
 *
 *      TCP_TIME_WAIT           timeout to catch resent junk before entering
 *                              closed, can only be entered from FIN_WAIT2
 *                              or CLOSING.  Required because the other end
 *                              may not have gotten our last ACK causing it
 *                              to retransmit the data packet (which we ignore)
 *
 *      TCP_CLOSE_WAIT          remote side has shutdown and is waiting for
 *                              us to finish writing our data and to shutdown
 *                              (we have to close() to move on to LAST_ACK)
 *
 *      TCP_LAST_ACK            our side has shutdown after the remote has
 *                              shutdown.  There may still be data in our
 *                              buffer that we have to finish sending
 *
 *      TCP_CLOSE               socket is finished
 */
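
/*
 * For orientation, the usual paths through these states on each side
 * of a normal connection are:
 *
 *      active open:   CLOSE -> SYN_SENT -> ESTABLISHED
 *      passive open:  LISTEN -> SYN_RECV -> ESTABLISHED
 *      active close:  ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2
 *                     -> TIME_WAIT -> CLOSE
 *      passive close: ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 */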

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;  /* Current allocated memory. */
atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All of the sk_stream_mem_schedule() accounting is of this nature:
 * accounting is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
        if (!tcp_memory_pressure) {
                NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
                tcp_memory_pressure = 1;
        }
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *      Wait for a TCP event.
 *
 *      Note that we don't need to lock the socket, as the upper poll layers
 *      take care of normal races (between the test and the event) and we don't
 *      go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;
        struct tcp_sock *tp = tcp_sk(sk);

        poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
           by poll logic and correct handling of state changes
           made by other threads is impossible in any case.
         */

        mask = 0;
        if (sk->sk_err)
                mask = POLLERR;

        /*
         * POLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
         * socket the read side is more interesting.
         *
         * Some poll() documentation says that POLLHUP is incompatible
         * with the POLLOUT/POLLWR flags, so somebody should check this
         * all. But careful, it tends to be safer to return too many
         * bits than too few, and you can easily break real applications
         * if you don't tell them that something has hung up!
         *
         * Check-me.
         *
         * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
         * our fs/select.c). It means that after we received EOF,
         * poll always returns immediately, making poll() on write()
         * impossible in state CLOSE_WAIT. One solution is evident ---
         * to set POLLHUP if and only if shutdown has been made in both
         * directions. Actually, it is interesting to look at how Solaris
         * and DUX solve this dilemma. I would prefer, if POLLHUP were
         * maskable, then we could set it on SND_SHUTDOWN. BTW the examples
         * given in Stevens' books assume exactly this behaviour, which
         * explains why POLLHUP is incompatible with POLLOUT.    --ANK
         *
         * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
         * blocking on a fresh, not-connected or disconnected socket. --ANK
         */
        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;

        /* Connected? */
        if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                /* Potential race condition. If the read of tp below
                 * escapes above sk->sk_state, we can be illegally
                 * awakened in SYN_* states. */
                if ((tp->rcv_nxt != tp->copied_seq) &&
                    (tp->urg_seq != tp->copied_seq ||
                     tp->rcv_nxt != tp->copied_seq + 1 ||
                     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
                                        &sk->sk_socket->flags);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }

                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
        return mask;
}
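
/*
 * A minimal userspace sketch of what the mask computed above means to
 * a caller (assuming an ordinary connected TCP socket fd):
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *      poll(&pfd, 1, -1);
 *
 *      POLLIN  - data (or a FIN) can be read without blocking
 *      POLLOUT - at least sk_stream_min_wspace() of send buffer is free
 *      POLLPRI - valid urgent data is pending
 *      POLLHUP - both directions are shut down, or the socket is closed
 */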

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int answ;

        switch (cmd) {
        case SIOCINQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                lock_sock(sk);
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else if (sock_flag(sk, SOCK_URGINLINE) ||
                         !tp->urg_data ||
                         before(tp->urg_seq, tp->copied_seq) ||
                         !before(tp->urg_seq, tp->rcv_nxt)) {
                        answ = tp->rcv_nxt - tp->copied_seq;

                        /* Subtract 1, if FIN is in queue. */
                        if (answ && !skb_queue_empty(&sk->sk_receive_queue))
                                answ -= tcp_hdr((struct sk_buff *)
                                        sk->sk_receive_queue.prev)->fin;
                } else
                        answ = tp->urg_seq - tp->copied_seq;
                release_sock(sk);
                break;
        case SIOCATMARK:
                answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
                break;
        case SIOCOUTQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = tp->write_seq - tp->snd_una;
                break;
        default:
                return -ENOIOCTLCMD;
        }

        return put_user(answ, (int __user *)arg);
}
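
/*
 * Userspace sketch of these ioctls (SIOCINQ is the same as FIONREAD):
 *
 *      int unread, unsent, at_mark;
 *      ioctl(fd, SIOCINQ, &unread);     - bytes readable right now
 *      ioctl(fd, SIOCOUTQ, &unsent);    - bytes queued but not yet acked
 *      ioctl(fd, SIOCATMARK, &at_mark); - read pointer at the urgent mark?
 */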

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
        tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
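
/*
 * Example: with tp->max_window = 64KB, forced_push() becomes true once
 * more than 32KB have been queued since the last PSH, so a push is
 * forced at least every half of the largest window the peer has ever
 * advertised.
 */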

static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
                              struct sk_buff *skb)
{
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

        skb->csum    = 0;
        tcb->seq     = tcb->end_seq = tp->write_seq;
        tcb->flags   = TCPCB_FLAG_ACK;
        tcb->sacked  = 0;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk_charge_skb(sk, skb);
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
                                struct sk_buff *skb)
{
        if (flags & MSG_OOB) {
                tp->urg_mode = 1;
                tp->snd_up = tp->write_seq;
                TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
        }
}

static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
                            int mss_now, int nonagle)
{
        if (tcp_send_head(sk)) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);
                if (!(flags & MSG_MORE) || forced_push(tp))
                        tcp_mark_push(tp, skb);
                tcp_mark_urg(tp, flags, skb);
                __tcp_push_pending_frames(sk, tp, mss_now,
                                          (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
        }
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
                         size_t psize, int flags)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int mss_now, size_goal;
        int err;
        ssize_t copied;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;

        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
        size_goal = tp->xmit_size_goal;
        copied = 0;

        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto do_error;

        while (psize > 0) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);
                struct page *page = pages[poffset / PAGE_SIZE];
                int copy, i, can_coalesce;
                int offset = poffset % PAGE_SIZE;
                int size = min_t(size_t, psize, PAGE_SIZE - offset);

                if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;

                        skb = sk_stream_alloc_pskb(sk, 0, 0,
                                                   sk->sk_allocation);
                        if (!skb)
                                goto wait_for_memory;

                        skb_entail(sk, tp, skb);
                        copy = size_goal;
                }

                if (copy > size)
                        copy = size;

                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
                if (!can_coalesce && i >= MAX_SKB_FRAGS) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
                if (!sk_stream_wmem_schedule(sk, copy))
                        goto wait_for_memory;

                if (can_coalesce) {
                        skb_shinfo(skb)->frags[i - 1].size += copy;
                } else {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
                }

                skb->len += copy;
                skb->data_len += copy;
                skb->truesize += copy;
                sk->sk_wmem_queued += copy;
                sk->sk_forward_alloc -= copy;
                skb->ip_summed = CHECKSUM_PARTIAL;
                tp->write_seq += copy;
                TCP_SKB_CB(skb)->end_seq += copy;
                skb_shinfo(skb)->gso_segs = 0;

                if (!copied)
                        TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

                copied += copy;
                poffset += copy;
                if (!(psize -= copy))
                        goto out;

                if (skb->len < mss_now || (flags & MSG_OOB))
                        continue;

                if (forced_push(tp)) {
                        tcp_mark_push(tp, skb);
                        __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
                } else if (skb == tcp_send_head(sk))
                        tcp_push_one(sk, mss_now);
                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                if (copied)
                        tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

                if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                        goto do_error;

                mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
                size_goal = tp->xmit_size_goal;
        }

out:
        if (copied)
                tcp_push(sk, tp, flags, mss_now, tp->nonagle);
        return copied;

do_error:
        if (copied)
                goto out;
out_err:
        return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
                     size_t size, int flags)
{
        ssize_t res;
        struct sock *sk = sock->sk;

        if (!(sk->sk_route_caps & NETIF_F_SG) ||
            !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
                return sock_no_sendpage(sock, page, offset, size, flags);

        lock_sock(sk);
        TCP_CHECK_TIMER(sk);
        res = do_tcp_sendpages(sk, &page, offset, size, flags);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return res;
}

#define TCP_PAGE(sk)    (sk->sk_sndmsg_page)
#define TCP_OFF(sk)     (sk->sk_sndmsg_off)
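
/*
 * TCP_PAGE/TCP_OFF cache the last, possibly partially filled, page
 * used for copying user data, together with the offset of its first
 * free byte. Consecutive small writes can then keep filling the same
 * page instead of allocating a fresh one each time.
 */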

static inline int select_size(struct sock *sk, struct tcp_sock *tp)
{
        int tmp = tp->mss_cache;

        if (sk->sk_route_caps & NETIF_F_SG) {
                if (sk_can_gso(sk))
                        tmp = 0;
                else {
                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

                        if (tmp >= pgbreak &&
                            tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
                                tmp = pgbreak;
                }
        }

        return tmp;
}
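
/*
 * Example: on a non-SG route with mss_cache = 1460, select_size()
 * returns 1460, so the whole segment is copied into the skb head.
 * On an SG device capable of GSO it returns 0, and all payload is
 * placed in page fragments instead.
 */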

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t size)
{
        struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int iovlen, flags;
        int mss_now, size_goal;
        int err, copied;
        long timeo;

        lock_sock(sk);
        TCP_CHECK_TIMER(sk);

        flags = msg->msg_flags;
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;

        /* This should be in poll */
        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
        size_goal = tp->xmit_size_goal;

        /* Ok, commence sending. */
        iovlen = msg->msg_iovlen;
        iov = msg->msg_iov;
        copied = 0;

        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto do_error;

        while (--iovlen >= 0) {
                int seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;

                iov++;

                while (seglen > 0) {
                        int copy;

                        skb = tcp_write_queue_tail(sk);

                        if (!tcp_send_head(sk) ||
                            (copy = size_goal - skb->len) <= 0) {

new_segment:
                                /* Allocate a new segment. If the interface
                                 * is SG, allocate an skb that fits into a
                                 * single page.
                                 */
                                if (!sk_stream_memory_free(sk))
                                        goto wait_for_sndbuf;

                                skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
                                                           0, sk->sk_allocation);
                                if (!skb)
                                        goto wait_for_memory;

                                /*
                                 * Check whether we can use HW checksum.
                                 */
                                if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
                                        skb->ip_summed = CHECKSUM_PARTIAL;

                                skb_entail(sk, tp, skb);
                                copy = size_goal;
                        }

                        /* Try to append data to the end of skb. */
                        if (copy > seglen)
                                copy = seglen;

                        /* Where to copy to? */
                        if (skb_tailroom(skb) > 0) {
                                /* We have some space in skb head. Superb! */
                                if (copy > skb_tailroom(skb))
                                        copy = skb_tailroom(skb);
                                if ((err = skb_add_data(skb, from, copy)) != 0)
                                        goto do_fault;
                        } else {
                                int merge = 0;
                                int i = skb_shinfo(skb)->nr_frags;
                                struct page *page = TCP_PAGE(sk);
                                int off = TCP_OFF(sk);

                                if (skb_can_coalesce(skb, i, page, off) &&
                                    off != PAGE_SIZE) {
                                        /* We can extend the last page
                                         * fragment. */
                                        merge = 1;
                                } else if (i == MAX_SKB_FRAGS ||
                                           (!i &&
                                           !(sk->sk_route_caps & NETIF_F_SG))) {
                                        /* Need to add a new fragment and
                                         * cannot do this because the
                                         * interface is non-SG, or because
                                         * all the page slots are busy. */
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                } else if (page) {
                                        if (off == PAGE_SIZE) {
                                                put_page(page);
                                                TCP_PAGE(sk) = page = NULL;
                                                off = 0;
                                        }
                                } else
                                        off = 0;

                                if (copy > PAGE_SIZE - off)
                                        copy = PAGE_SIZE - off;

                                if (!sk_stream_wmem_schedule(sk, copy))
                                        goto wait_for_memory;

                                if (!page) {
                                        /* Allocate a new cache page. */
                                        if (!(page = sk_stream_alloc_page(sk)))
                                                goto wait_for_memory;
                                }

                                /* Time to copy data. We are close to
                                 * the end! */
                                err = skb_copy_to_page(sk, from, skb, page,
                                                       off, copy);
                                if (err) {
                                        /* If this page was new, give it to the
                                         * socket so it does not get leaked.
                                         */
                                        if (!TCP_PAGE(sk)) {
                                                TCP_PAGE(sk) = page;
                                                TCP_OFF(sk) = 0;
                                        }
                                        goto do_error;
                                }

                                /* Update the skb. */
                                if (merge) {
                                        skb_shinfo(skb)->frags[i - 1].size +=
                                                                        copy;
                                } else {
                                        skb_fill_page_desc(skb, i, page, off, copy);
                                        if (TCP_PAGE(sk)) {
                                                get_page(page);
                                        } else if (off + copy < PAGE_SIZE) {
                                                get_page(page);
                                                TCP_PAGE(sk) = page;
                                        }
                                }

                                TCP_OFF(sk) = off + copy;
                        }

                        if (!copied)
                                TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

                        tp->write_seq += copy;
                        TCP_SKB_CB(skb)->end_seq += copy;
                        skb_shinfo(skb)->gso_segs = 0;

                        from += copy;
                        copied += copy;
                        if ((seglen -= copy) == 0 && iovlen == 0)
                                goto out;

                        if (skb->len < mss_now || (flags & MSG_OOB))
                                continue;

                        if (forced_push(tp)) {
                                tcp_mark_push(tp, skb);
                                __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
                        } else if (skb == tcp_send_head(sk))
                                tcp_push_one(sk, mss_now);
                        continue;

wait_for_sndbuf:
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                        if (copied)
                                tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

                        if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                                goto do_error;

                        mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
                        size_goal = tp->xmit_size_goal;
                }
        }

out:
        if (copied)
                tcp_push(sk, tp, flags, mss_now, tp->nonagle);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;

do_fault:
        if (!skb->len) {
                tcp_unlink_write_queue(skb, sk);
                /* It is the one place in all of TCP, except connection
                 * reset, where we can be unlinking the send_head.
                 */
                tcp_check_send_head(sk, skb);
                sk_stream_free_skb(sk, skb);
        }

do_error:
        if (copied)
                goto out;
out_err:
        err = sk_stream_error(sk, flags, err);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
}

/*
 *      Handle reading urgent data. BSD has very simple semantics for
 *      this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
                        struct msghdr *msg, int len, int flags,
                        int *addr_len)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* No URG data to read. */
        if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
            tp->urg_data == TCP_URG_READ)
                return -EINVAL; /* Yes, this is right! */

        if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
                return -ENOTCONN;

        if (tp->urg_data & TCP_URG_VALID) {
                int err = 0;
                char c = tp->urg_data;

                if (!(flags & MSG_PEEK))
                        tp->urg_data = TCP_URG_READ;

                /* Read urgent data. */
                msg->msg_flags |= MSG_OOB;

                if (len > 0) {
                        if (!(flags & MSG_TRUNC))
                                err = memcpy_toiovec(msg->msg_iov, &c, 1);
                        len = 1;
                } else
                        msg->msg_flags |= MSG_TRUNC;

                return err ? -EFAULT : len;
        }

        if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
                return 0;

        /* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
         * the available implementations agree in this case:
         * this call should never block, independent of the
         * blocking state of the socket.
         * Mike <pall@rz.uni-karlsruhe.de>
         */
        return -EAGAIN;
}
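
/*
 * Userspace sketch: the single byte of urgent data is fetched with
 *
 *      char c;
 *      recv(fd, &c, 1, MSG_OOB);
 *
 * Note that with SO_OOBINLINE set this path returns -EINVAL (the first
 * check above) and the byte is read from the normal data stream
 * instead.
 */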

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far; it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int time_to_ack = 0;

#if TCP_DEBUG
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

        BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

        if (inet_csk_ack_scheduled(sk)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                   /* Delayed ACKs frequently hit locked sockets during bulk
                    * receive. */
                if (icsk->icsk_ack.blocked ||
                    /* Once-per-two-segments ACK was not sent by tcp_input.c */
                    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
                    /*
                     * If this read emptied the read buffer, we send an ACK
                     * when the connection is not bidirectional, the user
                     * drained the receive buffer, and there was a small
                     * segment in the queue.
                     */
                    (copied > 0 &&
                     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
                      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
                       !icsk->icsk_ack.pingpong)) &&
                      !atomic_read(&sk->sk_rmem_alloc)))
                        time_to_ack = 1;
        }

        /* We send an ACK if we can now advertise a non-zero window
         * which has been raised "significantly".
         *
         * Even if the window is raised up to infinity, do not send a window
         * open ACK in states where we will not receive more. It is useless.
         */
        if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
                __u32 rcv_window_now = tcp_receive_window(tp);

                /* Optimize, __tcp_select_window() is not cheap. */
                if (2 * rcv_window_now <= tp->window_clamp) {
                        __u32 new_window = __tcp_select_window(sk);

                        /* Send an ACK now if this read freed lots of space
                         * in our buffer. We can advertise the new window
                         * now, if it is not less than the current one.
                         * "Lots" means "at least twice" here.
                         */
                        if (new_window && new_window >= 2 * rcv_window_now)
                                time_to_ack = 1;
                }
        }
        if (time_to_ack)
                tcp_send_ack(sk);
}
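
/*
 * Example of the window-update branch above: with window_clamp = 64KB,
 * a window-update ACK is considered only once the advertised window
 * has dropped to 32KB or less, and it is sent if the read freed enough
 * space for __tcp_select_window() to offer at least twice the current
 * window.
 */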

static void tcp_prequeue_process(struct sock *sk)
{
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);

        NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

        /* RX process wants to run with disabled BHs, though it is not
         * necessary */
        local_bh_disable();
        while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                sk->sk_backlog_rcv(sk, skb);
        local_bh_enable();

        /* Clear memory counter. */
        tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
        struct sk_buff *skb;
        u32 offset;

        skb_queue_walk(&sk->sk_receive_queue, skb) {
                offset = seq - TCP_SKB_CB(skb)->seq;
                if (tcp_hdr(skb)->syn)
                        offset--;
                if (offset < skb->len || tcp_hdr(skb)->fin) {
                        *off = offset;
                        return skb;
                }
        }
        return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *      - It is assumed that the socket was locked by the caller.
 *      - The routine does not block.
 *      - At present, there is no support for reading OOB data
 *        or for 'peeking' the socket using this routine
 *        (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                  sk_read_actor_t recv_actor)
{
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);
        u32 seq = tp->copied_seq;
        u32 offset;
        int copied = 0;

        if (sk->sk_state == TCP_LISTEN)
                return -ENOTCONN;
        while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
                if (offset < skb->len) {
                        size_t used, len;

                        len = skb->len - offset;
                        /* Stop reading if we hit a patch of urgent data */
                        if (tp->urg_data) {
                                u32 urg_offset = tp->urg_seq - seq;
                                if (urg_offset < len)
                                        len = urg_offset;
                                if (!len)
                                        break;
                        }
                        used = recv_actor(desc, skb, offset, len);
                        if (used <= len) {
                                seq += used;
                                copied += used;
                                offset += used;
                        }
                        if (offset != skb->len)
                                break;
                }
                if (tcp_hdr(skb)->fin) {
                        sk_eat_skb(sk, skb, 0);
                        ++seq;
                        break;
                }
                sk_eat_skb(sk, skb, 0);
                if (!desc->count)
                        break;
        }
        tp->copied_seq = seq;

        tcp_rcv_space_adjust(sk);

        /* Clean up data we have read: This will do ACK frames. */
        if (copied)
                tcp_cleanup_rbuf(sk, copied);
        return copied;
}
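
/*
 * A sketch of how a caller drives tcp_read_sock(): the actor consumes
 * data straight from the queued skbs (no copy to a user iovec) and
 * returns how much it used; desc->count limits the total amount read.
 *
 *      static int my_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *                          unsigned int offset, size_t len)
 *      {
 *              size_t want = min_t(size_t, len, desc->count);
 *              // ... consume want bytes of skb data at offset ...
 *              desc->count -= want;
 *              return want;
 *      }
 *
 * (my_actor is a hypothetical example, not a helper in this file.)
 */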

/*
 *      This routine copies from a sock struct into the user buffer.
 *
 *      Technical note: in 2.3 we work on a _locked_ socket, so that
 *      tricks with *seq access order and skb->users are not required.
 *      Probably, the code can be improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t len, int nonblock, int flags, int *addr_len)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int copied = 0;
        u32 peek_seq;
        u32 *seq;
        unsigned long used;
        int err;
        int target;             /* Read at least this many bytes */
        long timeo;
        struct task_struct *user_recv = NULL;
        int copied_early = 0;

        lock_sock(sk);

        TCP_CHECK_TIMER(sk);

        err = -ENOTCONN;
        if (sk->sk_state == TCP_LISTEN)
                goto out;

        timeo = sock_rcvtimeo(sk, nonblock);

        /* Urgent data needs to be handled specially. */
        if (flags & MSG_OOB)
                goto recv_urg;

        seq = &tp->copied_seq;
        if (flags & MSG_PEEK) {
                peek_seq = tp->copied_seq;
                seq = &peek_seq;
        }

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
        tp->ucopy.dma_chan = NULL;
        preempt_disable();
        if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
            !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
                preempt_enable_no_resched();
                tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
        } else
                preempt_enable_no_resched();
#endif
1146
1147         do {
1148                 struct sk_buff *skb;
1149                 u32 offset;
1150
1151                 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1152                 if (tp->urg_data && tp->urg_seq == *seq) {
1153                         if (copied)
1154                                 break;
1155                         if (signal_pending(current)) {
1156                                 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1157                                 break;
1158                         }
1159                 }
1160
1161                 /* Next get a buffer. */
1162
1163                 skb = skb_peek(&sk->sk_receive_queue);
1164                 do {
1165                         if (!skb)
1166                                 break;
1167
1168                         /* Now that we have two receive queues this
1169                          * shouldn't happen.
1170                          */
1171                         if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1172                                 printk(KERN_INFO "recvmsg bug: copied %X "
1173                                        "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1174                                 break;
1175                         }
1176                         offset = *seq - TCP_SKB_CB(skb)->seq;
1177                         if (tcp_hdr(skb)->syn)
1178                                 offset--;
1179                         if (offset < skb->len)
1180                                 goto found_ok_skb;
1181                         if (tcp_hdr(skb)->fin)
1182                                 goto found_fin_ok;
1183                         BUG_TRAP(flags & MSG_PEEK);
1184                         skb = skb->next;
1185                 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1186
1187                 /* Well, if we have backlog, try to process it now yet. */
1188
1189                 if (copied >= target && !sk->sk_backlog.tail)
1190                         break;
1191
1192                 if (copied) {
1193                         if (sk->sk_err ||
1194                             sk->sk_state == TCP_CLOSE ||
1195                             (sk->sk_shutdown & RCV_SHUTDOWN) ||
1196                             !timeo ||
1197                             signal_pending(current) ||
1198                             (flags & MSG_PEEK))
1199                                 break;
1200                 } else {
1201                         if (sock_flag(sk, SOCK_DONE))
1202                                 break;
1203
1204                         if (sk->sk_err) {
1205                                 copied = sock_error(sk);
1206                                 break;
1207                         }
1208
1209                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1210                                 break;
1211
1212                         if (sk->sk_state == TCP_CLOSE) {
1213                                 if (!sock_flag(sk, SOCK_DONE)) {
1214                                         /* This occurs when user tries to read
1215                                          * from never connected socket.
1216                                          */
1217                                         copied = -ENOTCONN;
1218                                         break;
1219                                 }
1220                                 break;
1221                         }
1222
1223                         if (!timeo) {
1224                                 copied = -EAGAIN;
1225                                 break;
1226                         }
1227
1228                         if (signal_pending(current)) {
1229                                 copied = sock_intr_errno(timeo);
1230                                 break;
1231                         }
1232                 }
1233
1234                 tcp_cleanup_rbuf(sk, copied);
1235
1236                 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1237                         /* Install new reader */
1238                         if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1239                                 user_recv = current;
1240                                 tp->ucopy.task = user_recv;
1241                                 tp->ucopy.iov = msg->msg_iov;
1242                         }
1243
1244                         tp->ucopy.len = len;
1245
1246                         BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1247                                  (flags & (MSG_PEEK | MSG_TRUNC)));
1248
1249                         /* Ugly... If the prequeue is not empty, we have to
1250                          * process it before releasing the socket, otherwise
1251                          * ordering will be broken on the second iteration.
1252                          * A more elegant solution is required!!!
1253                          *
1254                          * Look: we have the following (pseudo)queues:
1255                          *
1256                          * 1. packets in flight
1257                          * 2. backlog
1258                          * 3. prequeue
1259                          * 4. receive_queue
1260                          *
1261                          * Each queue can be processed only if the next ones
1262                          * are empty. At this point we have an empty receive_queue.
1263                          * But the prequeue _can_ be non-empty after the 2nd iteration,
1264                          * when we jumped to the start of the loop because backlog
1265                          * processing added something to the receive_queue.
1266                          * We cannot release_sock(), because the backlog contains
1267                          * packets that arrived _after_ the prequeued ones.
1268                          *
1269                          * In short, the algorithm is clear --- process all
1270                          * the queues in order. We could do it more directly,
1271                          * requeueing packets from the backlog to the prequeue if
1272                          * it is not empty. That is more elegant, but eats cycles,
1273                          * unfortunately.
1274                          */
1275                         if (!skb_queue_empty(&tp->ucopy.prequeue))
1276                                 goto do_prequeue;
1277
1278                         /* __ Set realtime policy in scheduler __ */
1279                 }
1280
1281                 if (copied >= target) {
1282                         /* Do not sleep, just process backlog. */
1283                         release_sock(sk);
1284                         lock_sock(sk);
1285                 } else
1286                         sk_wait_data(sk, &timeo);
1287
1288 #ifdef CONFIG_NET_DMA
1289                 tp->ucopy.wakeup = 0;
1290 #endif
1291
1292                 if (user_recv) {
1293                         int chunk;
1294
1295                         /* __ Restore normal policy in scheduler __ */
1296
1297                         if ((chunk = len - tp->ucopy.len) != 0) {
1298                                 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1299                                 len -= chunk;
1300                                 copied += chunk;
1301                         }
1302
1303                         if (tp->rcv_nxt == tp->copied_seq &&
1304                             !skb_queue_empty(&tp->ucopy.prequeue)) {
1305 do_prequeue:
1306                                 tcp_prequeue_process(sk);
1307
1308                                 if ((chunk = len - tp->ucopy.len) != 0) {
1309                                         NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1310                                         len -= chunk;
1311                                         copied += chunk;
1312                                 }
1313                         }
1314                 }
1315                 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1316                         if (net_ratelimit())
1317                                 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1318                                        current->comm, current->pid);
1319                         peek_seq = tp->copied_seq;
1320                 }
1321                 continue;
1322
1323         found_ok_skb:
1324                 /* Ok so how much can we use? */
1325                 used = skb->len - offset;
1326                 if (len < used)
1327                         used = len;
1328
1329                 /* Do we have urgent data here? */
1330                 if (tp->urg_data) {
1331                         u32 urg_offset = tp->urg_seq - *seq;
1332                         if (urg_offset < used) {
1333                                 if (!urg_offset) {
1334                                         if (!sock_flag(sk, SOCK_URGINLINE)) {
1335                                                 ++*seq;
1336                                                 offset++;
1337                                                 used--;
1338                                                 if (!used)
1339                                                         goto skip_copy;
1340                                         }
1341                                 } else
1342                                         used = urg_offset;
1343                         }
1344                 }
1345
1346                 if (!(flags & MSG_TRUNC)) {
1347 #ifdef CONFIG_NET_DMA
1348                         if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1349                                 tp->ucopy.dma_chan = get_softnet_dma();
1350
1351                         if (tp->ucopy.dma_chan) {
1352                                 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1353                                         tp->ucopy.dma_chan, skb, offset,
1354                                         msg->msg_iov, used,
1355                                         tp->ucopy.pinned_list);
1356
1357                                 if (tp->ucopy.dma_cookie < 0) {
1358
1359                                         printk(KERN_ALERT "dma_cookie < 0\n");
1360
1361                                         /* Exception. Bailout! */
1362                                         if (!copied)
1363                                                 copied = -EFAULT;
1364                                         break;
1365                                 }
1366                                 if ((offset + used) == skb->len)
1367                                         copied_early = 1;
1368
1369                         } else
1370 #endif
1371                         {
1372                                 err = skb_copy_datagram_iovec(skb, offset,
1373                                                 msg->msg_iov, used);
1374                                 if (err) {
1375                                         /* Exception. Bailout! */
1376                                         if (!copied)
1377                                                 copied = -EFAULT;
1378                                         break;
1379                                 }
1380                         }
1381                 }
1382
1383                 *seq += used;
1384                 copied += used;
1385                 len -= used;
1386
1387                 tcp_rcv_space_adjust(sk);
1388
1389 skip_copy:
1390                 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1391                         tp->urg_data = 0;
1392                         tcp_fast_path_check(sk, tp);
1393                 }
1394                 if (used + offset < skb->len)
1395                         continue;
1396
1397                 if (tcp_hdr(skb)->fin)
1398                         goto found_fin_ok;
1399                 if (!(flags & MSG_PEEK)) {
1400                         sk_eat_skb(sk, skb, copied_early);
1401                         copied_early = 0;
1402                 }
1403                 continue;
1404
1405         found_fin_ok:
1406                 /* Process the FIN. */
1407                 ++*seq;
1408                 if (!(flags & MSG_PEEK)) {
1409                         sk_eat_skb(sk, skb, copied_early);
1410                         copied_early = 0;
1411                 }
1412                 break;
1413         } while (len > 0);
1414
1415         if (user_recv) {
1416                 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1417                         int chunk;
1418
1419                         tp->ucopy.len = copied > 0 ? len : 0;
1420
1421                         tcp_prequeue_process(sk);
1422
1423                         if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1424                                 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1425                                 len -= chunk;
1426                                 copied += chunk;
1427                         }
1428                 }
1429
1430                 tp->ucopy.task = NULL;
1431                 tp->ucopy.len = 0;
1432         }
1433
1434 #ifdef CONFIG_NET_DMA
1435         if (tp->ucopy.dma_chan) {
1436                 struct sk_buff *skb;
1437                 dma_cookie_t done, used;
1438
1439                 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1440
1441                 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1442                                                  tp->ucopy.dma_cookie, &done,
1443                                                  &used) == DMA_IN_PROGRESS) {
1444                         /* do partial cleanup of sk_async_wait_queue */
1445                         while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1446                                (dma_async_is_complete(skb->dma_cookie, done,
1447                                                       used) == DMA_SUCCESS)) {
1448                                 __skb_dequeue(&sk->sk_async_wait_queue);
1449                                 kfree_skb(skb);
1450                         }
1451                 }
1452
1453                 /* Safe to free early-copied skbs now */
1454                 __skb_queue_purge(&sk->sk_async_wait_queue);
1455                 dma_chan_put(tp->ucopy.dma_chan);
1456                 tp->ucopy.dma_chan = NULL;
1457         }
1458         if (tp->ucopy.pinned_list) {
1459                 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1460                 tp->ucopy.pinned_list = NULL;
1461         }
1462 #endif
1463
1464         /* According to UNIX98, msg_name/msg_namelen are ignored
1465          * on a connected socket. I was just happy when I found this 8) --ANK
1466          */
1467
1468         /* Clean up data we have read: This will do ACK frames. */
1469         tcp_cleanup_rbuf(sk, copied);
1470
1471         TCP_CHECK_TIMER(sk);
1472         release_sock(sk);
1473         return copied;
1474
1475 out:
1476         TCP_CHECK_TIMER(sk);
1477         release_sock(sk);
1478         return err;
1479
1480 recv_urg:
1481         err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1482         goto out;
1483 }
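
/*
 * Illustrative userspace sketch (not part of this file) of the receive
 * semantics implemented by tcp_recvmsg() above: MSG_PEEK leaves the data
 * queued so a later recv() sees the same bytes, and a non-blocking read
 * with nothing pending surfaces the -EAGAIN path as errno == EAGAIN.
 * Assumes POSIX socket headers; error handling abbreviated.
 */
#if 0	/* example only, never compiled */
#include <sys/socket.h>
#include <errno.h>
#include <stdio.h>

static void peek_then_read(int fd)
{
	char buf[128];
	ssize_t n;

	n = recv(fd, buf, sizeof(buf), MSG_PEEK);	/* data stays queued */
	if (n > 0)
		n = recv(fd, buf, sizeof(buf), 0);	/* same bytes again */
	else if (n < 0 && errno == EAGAIN)
		printf("no data yet on a non-blocking socket\n");
}
#endif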
1484
1485 /*
1486  *      State processing on a close. This implements the state shift for
1487  *      sending our FIN frame. Note that we only send a FIN for some
1488  *      states. A shutdown() may have already sent the FIN, or we may be
1489  *      closed.
1490  */
1491
1492 static const unsigned char new_state[16] = {
1493   /* current state:        new state:      action:      */
1494   /* (Invalid)          */ TCP_CLOSE,
1495   /* TCP_ESTABLISHED    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1496   /* TCP_SYN_SENT       */ TCP_CLOSE,
1497   /* TCP_SYN_RECV       */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1498   /* TCP_FIN_WAIT1      */ TCP_FIN_WAIT1,
1499   /* TCP_FIN_WAIT2      */ TCP_FIN_WAIT2,
1500   /* TCP_TIME_WAIT      */ TCP_CLOSE,
1501   /* TCP_CLOSE          */ TCP_CLOSE,
1502   /* TCP_CLOSE_WAIT     */ TCP_LAST_ACK  | TCP_ACTION_FIN,
1503   /* TCP_LAST_ACK       */ TCP_LAST_ACK,
1504   /* TCP_LISTEN         */ TCP_CLOSE,
1505   /* TCP_CLOSING        */ TCP_CLOSING,
1506 };
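
/*
 * A worked example of the encoding above: each entry packs the next state
 * in the low bits plus an optional TCP_ACTION_FIN flag, so
 *
 *	new_state[TCP_ESTABLISHED] == (TCP_FIN_WAIT1 | TCP_ACTION_FIN)
 *
 * i.e. tcp_close_state() below moves an established socket to FIN_WAIT1
 * and tells the caller to emit a FIN, while new_state[TCP_SYN_SENT] ==
 * TCP_CLOSE drops the connection without sending anything.
 */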
1507
1508 static int tcp_close_state(struct sock *sk)
1509 {
1510         int next = (int)new_state[sk->sk_state];
1511         int ns = next & TCP_STATE_MASK;
1512
1513         tcp_set_state(sk, ns);
1514
1515         return next & TCP_ACTION_FIN;
1516 }
1517
1518 /*
1519  *      Shutdown the sending side of a connection. Much like close except
1520  *      that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
1521  */
1522
1523 void tcp_shutdown(struct sock *sk, int how)
1524 {
1525         /*      We need to grab some memory, and put together a FIN,
1526          *      and then put it into the queue to be sent.
1527          *              Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1528          */
1529         if (!(how & SEND_SHUTDOWN))
1530                 return;
1531
1532         /* If we've already sent a FIN or we're in a closed state, skip this. */
1533         if ((1 << sk->sk_state) &
1534             (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1535              TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1536                 /* Clear out any half completed packets.  FIN if needed. */
1537                 if (tcp_close_state(sk))
1538                         tcp_send_fin(sk);
1539         }
1540 }
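
/*
 * Illustrative userspace sketch (not part of this file) of how
 * tcp_shutdown() above is typically reached: shutdown(fd, SHUT_WR) shuts
 * down only the sending side, so a FIN goes out while the receive side
 * stays open for the peer's remaining data.  Assumes POSIX socket headers.
 */
#if 0	/* example only, never compiled */
#include <sys/socket.h>
#include <unistd.h>

static void half_close(int fd)
{
	char buf[512];

	shutdown(fd, SHUT_WR);		/* send our FIN, keep reading */
	while (read(fd, buf, sizeof(buf)) > 0)
		;			/* drain the peer's final data */
	close(fd);
}
#endif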
1541
1542 void tcp_close(struct sock *sk, long timeout)
1543 {
1544         struct sk_buff *skb;
1545         int data_was_unread = 0;
1546         int state;
1547
1548         lock_sock(sk);
1549         sk->sk_shutdown = SHUTDOWN_MASK;
1550
1551         if (sk->sk_state == TCP_LISTEN) {
1552                 tcp_set_state(sk, TCP_CLOSE);
1553
1554                 /* Special case. */
1555                 inet_csk_listen_stop(sk);
1556
1557                 goto adjudge_to_death;
1558         }
1559
1560         /*  We need to flush the receive buffers.  We do this only on the
1561          *  descriptor close, not protocol-sourced closes, because the
1562          *  reader process may not have drained the data yet!
1563          */
1564         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1565                 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1566                           tcp_hdr(skb)->fin;
1567                 data_was_unread += len;
1568                 __kfree_skb(skb);
1569         }
1570
1571         sk_stream_mem_reclaim(sk);
1572
1573         /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1574          * 3.10, we send a RST here because data was lost.  To
1575          * witness the awful effects of the old behavior of always
1576          * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1577          * a bulk GET in an FTP client, suspend the process, wait
1578          * for the client to advertise a zero window, then kill -9
1579          * the FTP client, wheee...  Note: timeout is always zero
1580          * in such a case.
1581          */
1582         if (data_was_unread) {
1583                 /* Unread data was tossed, zap the connection. */
1584                 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1585                 tcp_set_state(sk, TCP_CLOSE);
1586                 tcp_send_active_reset(sk, GFP_KERNEL);
1587         } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1588                 /* Check zero linger _after_ checking for unread data. */
1589                 sk->sk_prot->disconnect(sk, 0);
1590                 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1591         } else if (tcp_close_state(sk)) {
1592                 /* We FIN if the application ate all the data before
1593                  * zapping the connection.
1594                  */
1595
1596                 /* RED-PEN. Formally speaking, we have broken the TCP state
1597                  * machine. State transitions:
1598                  *
1599                  * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1600                  * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1601                  * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1602                  *
1603                  * are legal only when the FIN has been sent (i.e. in window),
1604                  * rather than queued out of window. Purists would complain.
1605                  *
1606                  * E.g. the "RFC state" is ESTABLISHED
1607                  * if the Linux state is FIN-WAIT-1 but the FIN has not yet been sent.
1608                  *
1609                  * The visible deviations are that we sometimes enter the
1610                  * time-wait state when it is not really required (harmless),
1611                  * and do not send active resets when they are required by
1612                  * the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when they
1613                  * look like CLOSING or LAST_ACK to Linux).
1614                  * Probably I missed some more small holes.
1615                  *                                              --ANK
1616                  */
1617                 tcp_send_fin(sk);
1618         }
1619
1620         sk_stream_wait_close(sk, timeout);
1621
1622 adjudge_to_death:
1623         state = sk->sk_state;
1624         sock_hold(sk);
1625         sock_orphan(sk);
1626         atomic_inc(sk->sk_prot->orphan_count);
1627
1628         /* This is the last release_sock in its life. It will remove the backlog. */
1629         release_sock(sk);
1630
1631
1632         /* Now the socket is owned by the kernel and we acquire the BH lock
1633          * to finish the close. No need to check for user refs.
1634          */
1635         local_bh_disable();
1636         bh_lock_sock(sk);
1637         BUG_TRAP(!sock_owned_by_user(sk));
1638
1639         /* Have we already been destroyed by a softirq or backlog? */
1640         if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1641                 goto out;
1642
1643         /*      This is a (useful) BSD violation of the RFC. There is a
1644          *      problem with TCP as specified, in that the other end could
1645          *      keep a socket open forever with no application left at this end.
1646          *      We use a 3 minute timeout (about the same as BSD) and then kill
1647          *      our end. If they send after that then tough - BUT: long enough
1648          *      that we won't repeat the old "4*rto = almost no time - whoops,
1649          *      reset" mistake.
1650          *
1651          *      Nope, it was not a mistake. It is really the desired behaviour,
1652          *      e.g. on HTTP servers, where such sockets are useless but
1653          *      consume significant resources. Let's do it with the special
1654          *      linger2 option.                                 --ANK
1655          */
1656
1657         if (sk->sk_state == TCP_FIN_WAIT2) {
1658                 struct tcp_sock *tp = tcp_sk(sk);
1659                 if (tp->linger2 < 0) {
1660                         tcp_set_state(sk, TCP_CLOSE);
1661                         tcp_send_active_reset(sk, GFP_ATOMIC);
1662                         NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1663                 } else {
1664                         const int tmo = tcp_fin_time(sk);
1665
1666                         if (tmo > TCP_TIMEWAIT_LEN) {
1667                                 inet_csk_reset_keepalive_timer(sk,
1668                                                 tmo - TCP_TIMEWAIT_LEN);
1669                         } else {
1670                                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1671                                 goto out;
1672                         }
1673                 }
1674         }
1675         if (sk->sk_state != TCP_CLOSE) {
1676                 sk_stream_mem_reclaim(sk);
1677                 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
1678                     (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1679                      atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1680                         if (net_ratelimit())
1681                                 printk(KERN_INFO "TCP: too many orphaned "
1682                                        "sockets\n");
1683                         tcp_set_state(sk, TCP_CLOSE);
1684                         tcp_send_active_reset(sk, GFP_ATOMIC);
1685                         NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1686                 }
1687         }
1688
1689         if (sk->sk_state == TCP_CLOSE)
1690                 inet_csk_destroy_sock(sk);
1691         /* Otherwise, socket is reprieved until protocol close. */
1692
1693 out:
1694         bh_unlock_sock(sk);
1695         local_bh_enable();
1696         sock_put(sk);
1697 }
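
/*
 * Illustrative userspace sketch (not part of this file) of the zero-linger
 * branch in tcp_close() above: SO_LINGER with l_linger == 0 makes close()
 * take the disconnect path, aborting the connection with a RST instead of
 * the normal FIN handshake.  Assumes POSIX socket headers.
 */
#if 0	/* example only, never compiled */
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger lin = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
	close(fd);	/* connection is reset; no FIN, no TIME_WAIT here */
}
#endif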
1698
1699 /* These states need RST on ABORT according to RFC793 */
1700
1701 static inline int tcp_need_reset(int state)
1702 {
1703         return (1 << state) &
1704                (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1705                 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1706 }
1707
1708 int tcp_disconnect(struct sock *sk, int flags)
1709 {
1710         struct inet_sock *inet = inet_sk(sk);
1711         struct inet_connection_sock *icsk = inet_csk(sk);
1712         struct tcp_sock *tp = tcp_sk(sk);
1713         int err = 0;
1714         int old_state = sk->sk_state;
1715
1716         if (old_state != TCP_CLOSE)
1717                 tcp_set_state(sk, TCP_CLOSE);
1718
1719         /* ABORT function of RFC793 */
1720         if (old_state == TCP_LISTEN) {
1721                 inet_csk_listen_stop(sk);
1722         } else if (tcp_need_reset(old_state) ||
1723                    (tp->snd_nxt != tp->write_seq &&
1724                     (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1725                 /* The last check adjusts for the discrepancy between the
1726                  * Linux and the RFC states.
1727                  */
1728                 tcp_send_active_reset(sk, gfp_any());
1729                 sk->sk_err = ECONNRESET;
1730         } else if (old_state == TCP_SYN_SENT)
1731                 sk->sk_err = ECONNRESET;
1732
1733         tcp_clear_xmit_timers(sk);
1734         __skb_queue_purge(&sk->sk_receive_queue);
1735         tcp_write_queue_purge(sk);
1736         __skb_queue_purge(&tp->out_of_order_queue);
1737 #ifdef CONFIG_NET_DMA
1738         __skb_queue_purge(&sk->sk_async_wait_queue);
1739 #endif
1740
1741         inet->dport = 0;
1742
1743         if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1744                 inet_reset_saddr(sk);
1745
1746         sk->sk_shutdown = 0;
1747         sock_reset_flag(sk, SOCK_DONE);
1748         tp->srtt = 0;
1749         if ((tp->write_seq += tp->max_window + 2) == 0)
1750                 tp->write_seq = 1;
1751         icsk->icsk_backoff = 0;
1752         tp->snd_cwnd = 2;
1753         icsk->icsk_probes_out = 0;
1754         tp->packets_out = 0;
1755         tp->snd_ssthresh = 0x7fffffff;
1756         tp->snd_cwnd_cnt = 0;
1757         tp->bytes_acked = 0;
1758         tcp_set_ca_state(sk, TCP_CA_Open);
1759         tcp_clear_retrans(tp);
1760         inet_csk_delack_init(sk);
1761         tcp_init_send_head(sk);
1762         tp->rx_opt.saw_tstamp = 0;
1763         tcp_sack_reset(&tp->rx_opt);
1764         __sk_dst_reset(sk);
1765
1766         BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1767
1768         sk->sk_error_report(sk);
1769         return err;
1770 }
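
/*
 * Illustrative userspace sketch (not part of this file) of one way
 * tcp_disconnect() above is invoked: connect() with sa_family == AF_UNSPEC
 * dissolves the association and returns the socket to an unconnected state.
 * Assumes POSIX socket headers.
 */
#if 0	/* example only, never compiled */
#include <sys/socket.h>
#include <string.h>

static int tcp_unconnect(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}
#endif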
1771
1772 /*
1773  *      Socket option code for TCP.
1774  */
1775 static int do_tcp_setsockopt(struct sock *sk, int level,
1776                 int optname, char __user *optval, int optlen)
1777 {
1778         struct tcp_sock *tp = tcp_sk(sk);
1779         struct inet_connection_sock *icsk = inet_csk(sk);
1780         int val;
1781         int err = 0;
1782
1783         /* This is a string value; all the others are ints. */
1784         if (optname == TCP_CONGESTION) {
1785                 char name[TCP_CA_NAME_MAX];
1786
1787                 if (optlen < 1)
1788                         return -EINVAL;
1789
1790                 val = strncpy_from_user(name, optval,
1791                                         min(TCP_CA_NAME_MAX-1, optlen));
1792                 if (val < 0)
1793                         return -EFAULT;
1794                 name[val] = 0;
1795
1796                 lock_sock(sk);
1797                 err = tcp_set_congestion_control(sk, name);
1798                 release_sock(sk);
1799                 return err;
1800         }
1801
1802         if (optlen < sizeof(int))
1803                 return -EINVAL;
1804
1805         if (get_user(val, (int __user *)optval))
1806                 return -EFAULT;
1807
1808         lock_sock(sk);
1809
1810         switch (optname) {
1811         case TCP_MAXSEG:
1812                 /* Values greater than the interface MTU won't take effect.
1813                  * However, at the point when this call is made we typically
1814                  * don't yet know which interface is going to be used. */
1815                 if (val < 8 || val > MAX_TCP_WINDOW) {
1816                         err = -EINVAL;
1817                         break;
1818                 }
1819                 tp->rx_opt.user_mss = val;
1820                 break;
1821
1822         case TCP_NODELAY:
1823                 if (val) {
1824                         /* TCP_NODELAY is weaker than TCP_CORK, so that
1825                          * this option on corked socket is remembered, but
1826                          * it is not activated until cork is cleared.
1827                          *
1828                          * However, when TCP_NODELAY is set we make
1829                          * an explicit push, which overrides even TCP_CORK
1830                          * for currently queued segments.
1831                          */
1832                         tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1833                         tcp_push_pending_frames(sk, tp);
1834                 } else {
1835                         tp->nonagle &= ~TCP_NAGLE_OFF;
1836                 }
1837                 break;
1838
1839         case TCP_CORK:
1840                 /* When set, always queue non-full frames.  Later the
1841                  * user clears this option and we transmit any pending
1842                  * partial frames in the queue.  This is meant to be
1843                  * used alongside sendfile() to get properly filled
1844                  * frames when the user (for example) must write out
1845                  * headers with a write() call first and then use
1846                  * sendfile() to send out the data parts.
1847                  *
1848                  * TCP_CORK can be set together with TCP_NODELAY and it is
1849                  * stronger than TCP_NODELAY.
1850                  */
1851                 if (val) {
1852                         tp->nonagle |= TCP_NAGLE_CORK;
1853                 } else {
1854                         tp->nonagle &= ~TCP_NAGLE_CORK;
1855                         if (tp->nonagle&TCP_NAGLE_OFF)
1856                                 tp->nonagle |= TCP_NAGLE_PUSH;
1857                         tcp_push_pending_frames(sk, tp);
1858                 }
1859                 break;
1860
1861         case TCP_KEEPIDLE:
1862                 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1863                         err = -EINVAL;
1864                 else {
1865                         tp->keepalive_time = val * HZ;
1866                         if (sock_flag(sk, SOCK_KEEPOPEN) &&
1867                             !((1 << sk->sk_state) &
1868                               (TCPF_CLOSE | TCPF_LISTEN))) {
1869                                 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1870                                 if (tp->keepalive_time > elapsed)
1871                                         elapsed = tp->keepalive_time - elapsed;
1872                                 else
1873                                         elapsed = 0;
1874                                 inet_csk_reset_keepalive_timer(sk, elapsed);
1875                         }
1876                 }
1877                 break;
1878         case TCP_KEEPINTVL:
1879                 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1880                         err = -EINVAL;
1881                 else
1882                         tp->keepalive_intvl = val * HZ;
1883                 break;
1884         case TCP_KEEPCNT:
1885                 if (val < 1 || val > MAX_TCP_KEEPCNT)
1886                         err = -EINVAL;
1887                 else
1888                         tp->keepalive_probes = val;
1889                 break;
1890         case TCP_SYNCNT:
1891                 if (val < 1 || val > MAX_TCP_SYNCNT)
1892                         err = -EINVAL;
1893                 else
1894                         icsk->icsk_syn_retries = val;
1895                 break;
1896
1897         case TCP_LINGER2:
1898                 if (val < 0)
1899                         tp->linger2 = -1;
1900                 else if (val > sysctl_tcp_fin_timeout / HZ)
1901                         tp->linger2 = 0;
1902                 else
1903                         tp->linger2 = val * HZ;
1904                 break;
1905
1906         case TCP_DEFER_ACCEPT:
1907                 icsk->icsk_accept_queue.rskq_defer_accept = 0;
1908                 if (val > 0) {
1909                         /* Translate value in seconds to number of
1910                          * retransmits */
1911                         while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
1912                                val > ((TCP_TIMEOUT_INIT / HZ) <<
1913                                        icsk->icsk_accept_queue.rskq_defer_accept))
1914                                 icsk->icsk_accept_queue.rskq_defer_accept++;
1915                         icsk->icsk_accept_queue.rskq_defer_accept++;
1916                 }
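                /* A worked example of the translation above, assuming
                 * TCP_TIMEOUT_INIT / HZ == 3 seconds: for val == 10 the
                 * loop compares 10 against 3 << 0 == 3 and 3 << 1 == 6
                 * (incrementing each time), stops at 3 << 2 == 12, and the
                 * final increment yields rskq_defer_accept == 3 retransmits.
                 */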
1917                 break;
1918
1919         case TCP_WINDOW_CLAMP:
1920                 if (!val) {
1921                         if (sk->sk_state != TCP_CLOSE) {
1922                                 err = -EINVAL;
1923                                 break;
1924                         }
1925                         tp->window_clamp = 0;
1926                 } else
1927                         tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1928                                                 SOCK_MIN_RCVBUF / 2 : val;
1929                 break;
1930
1931         case TCP_QUICKACK:
1932                 if (!val) {
1933                         icsk->icsk_ack.pingpong = 1;
1934                 } else {
1935                         icsk->icsk_ack.pingpong = 0;
1936                         if ((1 << sk->sk_state) &
1937                             (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
1938                             inet_csk_ack_scheduled(sk)) {
1939                                 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
1940                                 tcp_cleanup_rbuf(sk, 1);
1941                                 if (!(val & 1))
1942                                         icsk->icsk_ack.pingpong = 1;
1943                         }
1944                 }
1945                 break;
1946
1947 #ifdef CONFIG_TCP_MD5SIG
1948         case TCP_MD5SIG:
1949                 /* Read the IP->Key mappings from userspace */
1950                 err = tp->af_specific->md5_parse(sk, optval, optlen);
1951                 break;
1952 #endif
1953
1954         default:
1955                 err = -ENOPROTOOPT;
1956                 break;
1957         }
1958
1959         release_sock(sk);
1960         return err;
1961 }
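
/*
 * Illustrative userspace sketch (not part of this file) of the TCP_CORK
 * pattern described in the case above: cork, write the headers, hand the
 * payload to sendfile(), then uncork so any pending partial frame is
 * pushed out.  Assumes POSIX socket headers; error handling omitted.
 */
#if 0	/* example only, never compiled */
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_response(int sock, int filefd, const char *hdr,
			  size_t hdrlen, size_t filelen)
{
	int on = 1, off = 0;

	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(sock, hdr, hdrlen);		/* queued, not yet sent */
	sendfile(sock, filefd, NULL, filelen);	/* coalesced with header */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}
#endif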
1962
1963 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1964                    int optlen)
1965 {
1966         struct inet_connection_sock *icsk = inet_csk(sk);
1967
1968         if (level != SOL_TCP)
1969                 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1970                                                      optval, optlen);
1971         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1972 }
1973
1974 #ifdef CONFIG_COMPAT
1975 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1976                           char __user *optval, int optlen)
1977 {
1978         if (level != SOL_TCP)
1979                 return inet_csk_compat_setsockopt(sk, level, optname,
1980                                                   optval, optlen);
1981         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1982 }
1983
1984 EXPORT_SYMBOL(compat_tcp_setsockopt);
1985 #endif
1986
1987 /* Return information about state of tcp endpoint in API format. */
1988 void tcp_get_info(struct sock *sk, struct tcp_info *info)
1989 {
1990         struct tcp_sock *tp = tcp_sk(sk);
1991         const struct inet_connection_sock *icsk = inet_csk(sk);
1992         u32 now = tcp_time_stamp;
1993
1994         memset(info, 0, sizeof(*info));
1995
1996         info->tcpi_state = sk->sk_state;
1997         info->tcpi_ca_state = icsk->icsk_ca_state;
1998         info->tcpi_retransmits = icsk->icsk_retransmits;
1999         info->tcpi_probes = icsk->icsk_probes_out;
2000         info->tcpi_backoff = icsk->icsk_backoff;
2001
2002         if (tp->rx_opt.tstamp_ok)
2003                 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2004         if (tp->rx_opt.sack_ok)
2005                 info->tcpi_options |= TCPI_OPT_SACK;
2006         if (tp->rx_opt.wscale_ok) {
2007                 info->tcpi_options |= TCPI_OPT_WSCALE;
2008                 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2009                 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2010         }
2011
2012         if (tp->ecn_flags&TCP_ECN_OK)
2013                 info->tcpi_options |= TCPI_OPT_ECN;
2014
2015         info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2016         info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2017         info->tcpi_snd_mss = tp->mss_cache;
2018         info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2019
2020         info->tcpi_unacked = tp->packets_out;
2021         info->tcpi_sacked = tp->sacked_out;
2022         info->tcpi_lost = tp->lost_out;
2023         info->tcpi_retrans = tp->retrans_out;
2024         info->tcpi_fackets = tp->fackets_out;
2025
2026         info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2027         info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2028         info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2029
2030         info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2031         info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2032         info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2033         info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2034         info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2035         info->tcpi_snd_cwnd = tp->snd_cwnd;
2036         info->tcpi_advmss = tp->advmss;
2037         info->tcpi_reordering = tp->reordering;
2038
2039         info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2040         info->tcpi_rcv_space = tp->rcvq_space.space;
2041
2042         info->tcpi_total_retrans = tp->total_retrans;
2043 }
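
/*
 * Illustrative userspace sketch (not part of this file) showing how the
 * structure filled in above is consumed via getsockopt(TCP_INFO); units
 * follow the conversions above, e.g. tcpi_rtt is in microseconds.
 * Assumes the glibc definition of struct tcp_info.
 */
#if 0	/* example only, never compiled */
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <stdio.h>

static void dump_tcp_info(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("rtt %u us, cwnd %u, total retrans %u\n",
		       info.tcpi_rtt, info.tcpi_snd_cwnd,
		       info.tcpi_total_retrans);
}
#endif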
2044
2045 EXPORT_SYMBOL_GPL(tcp_get_info);
2046
2047 static int do_tcp_getsockopt(struct sock *sk, int level,
2048                 int optname, char __user *optval, int __user *optlen)
2049 {
2050         struct inet_connection_sock *icsk = inet_csk(sk);
2051         struct tcp_sock *tp = tcp_sk(sk);
2052         int val, len;
2053
2054         if (get_user(len, optlen))
2055                 return -EFAULT;
2056
2057         len = min_t(unsigned int, len, sizeof(int));
2058
2059         if (len < 0)
2060                 return -EINVAL;
2061
2062         switch (optname) {
2063         case TCP_MAXSEG:
2064                 val = tp->mss_cache;
2065                 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2066                         val = tp->rx_opt.user_mss;
2067                 break;
2068         case TCP_NODELAY:
2069                 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2070                 break;
2071         case TCP_CORK:
2072                 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2073                 break;
2074         case TCP_KEEPIDLE:
2075                 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2076                 break;
2077         case TCP_KEEPINTVL:
2078                 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2079                 break;
2080         case TCP_KEEPCNT:
2081                 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2082                 break;
2083         case TCP_SYNCNT:
2084                 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2085                 break;
2086         case TCP_LINGER2:
2087                 val = tp->linger2;
2088                 if (val >= 0)
2089                         val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2090                 break;
2091         case TCP_DEFER_ACCEPT:
2092                 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2093                         ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2094                 break;
2095         case TCP_WINDOW_CLAMP:
2096                 val = tp->window_clamp;
2097                 break;
2098         case TCP_INFO: {
2099                 struct tcp_info info;
2100
2101                 if (get_user(len, optlen))
2102                         return -EFAULT;
2103
2104                 tcp_get_info(sk, &info);
2105
2106                 len = min_t(unsigned int, len, sizeof(info));
2107                 if (put_user(len, optlen))
2108                         return -EFAULT;
2109                 if (copy_to_user(optval, &info, len))
2110                         return -EFAULT;
2111                 return 0;
2112         }
2113         case TCP_QUICKACK:
2114                 val = !icsk->icsk_ack.pingpong;
2115                 break;
2116
2117         case TCP_CONGESTION:
2118                 if (get_user(len, optlen))
2119                         return -EFAULT;
2120                 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2121                 if (put_user(len, optlen))
2122                         return -EFAULT;
2123                 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2124                         return -EFAULT;
2125                 return 0;
2126         default:
2127                 return -ENOPROTOOPT;
2128         }
2129
2130         if (put_user(len, optlen))
2131                 return -EFAULT;
2132         if (copy_to_user(optval, &val, len))
2133                 return -EFAULT;
2134         return 0;
2135 }
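
/*
 * Illustrative userspace sketch (not part of this file) of the
 * TCP_CONGESTION case above, reading back the congestion control algorithm
 * name as a string.  Assumes the system headers expose TCP_CONGESTION.
 */
#if 0	/* example only, never compiled */
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <stdio.h>

static void show_cc(int fd)
{
	char name[16];			/* TCP_CA_NAME_MAX */
	socklen_t len = sizeof(name);

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("congestion control: %.*s\n", (int)len, name);
}
#endif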
2136
2137 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2138                    int __user *optlen)
2139 {
2140         struct inet_connection_sock *icsk = inet_csk(sk);
2141
2142         if (level != SOL_TCP)
2143                 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2144                                                      optval, optlen);
2145         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2146 }
2147
2148 #ifdef CONFIG_COMPAT
2149 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2150                           char __user *optval, int __user *optlen)
2151 {
2152         if (level != SOL_TCP)
2153                 return inet_csk_compat_getsockopt(sk, level, optname,
2154                                                   optval, optlen);
2155         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2156 }
2157
2158 EXPORT_SYMBOL(compat_tcp_getsockopt);
2159 #endif
2160
2161 struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2162 {
2163         struct sk_buff *segs = ERR_PTR(-EINVAL);
2164         struct tcphdr *th;
2165         unsigned thlen;
2166         unsigned int seq;
2167         __be32 delta;
2168         unsigned int oldlen;
2169         unsigned int len;
2170
2171         if (!pskb_may_pull(skb, sizeof(*th)))
2172                 goto out;
2173
2174         th = tcp_hdr(skb);
2175         thlen = th->doff * 4;
2176         if (thlen < sizeof(*th))
2177                 goto out;
2178
2179         if (!pskb_may_pull(skb, thlen))
2180                 goto out;
2181
2182         oldlen = (u16)~skb->len;
2183         __skb_pull(skb, thlen);
2184
2185         if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2186                 /* Packet is from an untrusted source, reset gso_segs. */
2187                 int type = skb_shinfo(skb)->gso_type;
2188                 int mss;
2189
2190                 if (unlikely(type &
2191                              ~(SKB_GSO_TCPV4 |
2192                                SKB_GSO_DODGY |
2193                                SKB_GSO_TCP_ECN |
2194                                SKB_GSO_TCPV6 |
2195                                0) ||
2196                              !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2197                         goto out;
2198
2199                 mss = skb_shinfo(skb)->gso_size;
2200                 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
2201
2202                 segs = NULL;
2203                 goto out;
2204         }
2205
2206         segs = skb_segment(skb, features);
2207         if (IS_ERR(segs))
2208                 goto out;
2209
2210         len = skb_shinfo(skb)->gso_size;
2211         delta = htonl(oldlen + (thlen + len));
2212
2213         skb = segs;
2214         th = tcp_hdr(skb);
2215         seq = ntohl(th->seq);
2216
2217         do {
2218                 th->fin = th->psh = 0;
2219
2220                 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2221                                        (__force u32)delta));
2222                 if (skb->ip_summed != CHECKSUM_PARTIAL)
2223                         th->check =
2224                              csum_fold(csum_partial(skb_transport_header(skb),
2225                                                     thlen, skb->csum));
2226
2227                 seq += len;
2228                 skb = skb->next;
2229                 th = tcp_hdr(skb);
2230
2231                 th->seq = htonl(seq);
2232                 th->cwr = 0;
2233         } while (skb->next);
2234
2235         delta = htonl(oldlen + (skb->tail - skb->transport_header) +
2236                       skb->data_len);
2237         th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2238                                 (__force u32)delta));
2239         if (skb->ip_summed != CHECKSUM_PARTIAL)
2240                 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2241                                                    thlen, skb->csum));
2242
2243 out:
2244         return segs;
2245 }
2246 EXPORT_SYMBOL(tcp_tso_segment);
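
/*
 * A worked sketch of the checksum fixup above: the TCP pseudo-header
 * checksum covers the segment length, and the one's complement sum can be
 * updated incrementally (RFC 1624).  With oldlen == (u16)~skb->len folded
 * in as the negated old length,
 *
 *	delta = htonl(oldlen + (thlen + len))
 *
 * is "new length minus old length" in one's complement arithmetic, so
 * adding delta into th->check re-targets each segment's checksum for its
 * shorter per-segment length without recomputing it from scratch; the
 * final, possibly shorter segment gets its own delta from its actual tail.
 */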
2247
2248 #ifdef CONFIG_TCP_MD5SIG
2249 static unsigned long tcp_md5sig_users;
2250 static struct tcp_md5sig_pool **tcp_md5sig_pool;
2251 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2252
2253 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2254 {
2255         int cpu;
2256         for_each_possible_cpu(cpu) {
2257                 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2258                 if (p) {
2259                         if (p->md5_desc.tfm)
2260                                 crypto_free_hash(p->md5_desc.tfm);
2261                         kfree(p);
2262                         p = NULL;
2263                 }
2264         }
2265         free_percpu(pool);
2266 }
2267
2268 void tcp_free_md5sig_pool(void)
2269 {
2270         struct tcp_md5sig_pool **pool = NULL;
2271
2272         spin_lock_bh(&tcp_md5sig_pool_lock);
2273         if (--tcp_md5sig_users == 0) {
2274                 pool = tcp_md5sig_pool;
2275                 tcp_md5sig_pool = NULL;
2276         }
2277         spin_unlock_bh(&tcp_md5sig_pool_lock);
2278         if (pool)
2279                 __tcp_free_md5sig_pool(pool);
2280 }
2281
2282 EXPORT_SYMBOL(tcp_free_md5sig_pool);
2283
2284 static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2285 {
2286         int cpu;
2287         struct tcp_md5sig_pool **pool;
2288
2289         pool = alloc_percpu(struct tcp_md5sig_pool *);
2290         if (!pool)
2291                 return NULL;
2292
2293         for_each_possible_cpu(cpu) {
2294                 struct tcp_md5sig_pool *p;
2295                 struct crypto_hash *hash;
2296
2297                 p = kzalloc(sizeof(*p), GFP_KERNEL);
2298                 if (!p)
2299                         goto out_free;
2300                 *per_cpu_ptr(pool, cpu) = p;
2301
2302                 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2303                 if (!hash || IS_ERR(hash))
2304                         goto out_free;
2305
2306                 p->md5_desc.tfm = hash;
2307         }
2308         return pool;
2309 out_free:
2310         __tcp_free_md5sig_pool(pool);
2311         return NULL;
2312 }
2313
2314 struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2315 {
2316         struct tcp_md5sig_pool **pool;
2317         int alloc = 0;
2318
2319 retry:
2320         spin_lock_bh(&tcp_md5sig_pool_lock);
2321         pool = tcp_md5sig_pool;
2322         if (tcp_md5sig_users++ == 0) {
2323                 alloc = 1;
2324                 spin_unlock_bh(&tcp_md5sig_pool_lock);
2325         } else if (!pool) {
2326                 tcp_md5sig_users--;
2327                 spin_unlock_bh(&tcp_md5sig_pool_lock);
2328                 cpu_relax();
2329                 goto retry;
2330         } else
2331                 spin_unlock_bh(&tcp_md5sig_pool_lock);
2332
2333         if (alloc) {
2334                 /* We cannot hold the spinlock here because this may sleep. */
2335                 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
2336                 spin_lock_bh(&tcp_md5sig_pool_lock);
2337                 if (!p) {
2338                         tcp_md5sig_users--;
2339                         spin_unlock_bh(&tcp_md5sig_pool_lock);
2340                         return NULL;
2341                 }
2342                 pool = tcp_md5sig_pool;
2343                 if (pool) {
2344                         /* oops, it has already been assigned. */
2345                         spin_unlock_bh(&tcp_md5sig_pool_lock);
2346                         __tcp_free_md5sig_pool(p);
2347                 } else {
2348                         tcp_md5sig_pool = pool = p;
2349                         spin_unlock_bh(&tcp_md5sig_pool_lock);
2350                 }
2351         }
2352         return pool;
2353 }
2354
2355 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
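
/*
 * The intended pairing, per the reference counting above: each
 * tcp_alloc_md5sig_pool() is balanced by a tcp_free_md5sig_pool(), and
 * __tcp_get_md5sig_pool() takes an extra reference that
 * __tcp_put_md5sig_pool() drops, so the per-cpu crypto state persists
 * while any MD5-signed socket still needs it.
 */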
2356
2357 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2358 {
2359         struct tcp_md5sig_pool **p;
2360         spin_lock_bh(&tcp_md5sig_pool_lock);
2361         p = tcp_md5sig_pool;
2362         if (p)
2363                 tcp_md5sig_users++;
2364         spin_unlock_bh(&tcp_md5sig_pool_lock);
2365         return (p ? *per_cpu_ptr(p, cpu) : NULL);
2366 }
2367
2368 EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2369
2370 void __tcp_put_md5sig_pool(void)
2371 {
2372         tcp_free_md5sig_pool();
2373 }
2374
2375 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2376 #endif
2377
2378 extern void __skb_cb_too_small_for_tcp(int, int);
2379 extern struct tcp_congestion_ops tcp_reno;
2380
2381 static __initdata unsigned long thash_entries;
2382 static int __init set_thash_entries(char *str)
2383 {
2384         if (!str)
2385                 return 0;
2386         thash_entries = simple_strtoul(str, &str, 0);
2387         return 1;
2388 }
2389 __setup("thash_entries=", set_thash_entries);
2390
2391 void __init tcp_init(void)
2392 {
2393         struct sk_buff *skb = NULL;
2394         unsigned long limit;
2395         int order, i, max_share;
2396
2397         if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2398                 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2399                                            sizeof(skb->cb));
2400
2401         tcp_hashinfo.bind_bucket_cachep =
2402                 kmem_cache_create("tcp_bind_bucket",
2403                                   sizeof(struct inet_bind_bucket), 0,
2404                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
2405
2406         /* Size and allocate the main established and bind bucket
2407          * hash tables.
2408          *
2409          * The methodology is similar to that of the buffer cache.
2410          */
2411         tcp_hashinfo.ehash =
2412                 alloc_large_system_hash("TCP established",
2413                                         sizeof(struct inet_ehash_bucket),
2414                                         thash_entries,
2415                                         (num_physpages >= 128 * 1024) ?
2416                                         13 : 15,
2417                                         0,
2418                                         &tcp_hashinfo.ehash_size,
2419                                         NULL,
2420                                         0);
2421         tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2422         for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
2423                 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2424                 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2425                 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
2426         }
2427
2428         tcp_hashinfo.bhash =
2429                 alloc_large_system_hash("TCP bind",
2430                                         sizeof(struct inet_bind_hashbucket),
2431                                         tcp_hashinfo.ehash_size,
2432                                         (num_physpages >= 128 * 1024) ?
2433                                         13 : 15,
2434                                         0,
2435                                         &tcp_hashinfo.bhash_size,
2436                                         NULL,
2437                                         64 * 1024);
2438         tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2439         for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2440                 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2441                 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2442         }
2443
2444         /* Try to be a bit smarter and adjust defaults depending
2445          * on available memory.
2446          */
2447         for (order = 0; ((1 << order) << PAGE_SHIFT) <
2448                         (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2449                         order++)
2450                 ;
2451         if (order >= 4) {
2452                 sysctl_local_port_range[0] = 32768;
2453                 sysctl_local_port_range[1] = 61000;
2454                 tcp_death_row.sysctl_max_tw_buckets = 180000;
2455                 sysctl_tcp_max_orphans = 4096 << (order - 4);
2456                 sysctl_max_syn_backlog = 1024;
2457         } else if (order < 3) {
2458                 sysctl_local_port_range[0] = 1024 * (3 - order);
2459                 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
2460                 sysctl_tcp_max_orphans >>= (3 - order);
2461                 sysctl_max_syn_backlog = 128;
2462         }
2463
2464         /* Set the pressure threshold to be a fraction of global memory that
2465          * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
2466          * memory, with a floor of 128 pages.
2467          */
2468         limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2469         limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2470         limit = max(limit, 128UL);
2471         sysctl_tcp_mem[0] = limit / 4 * 3;
2472         sysctl_tcp_mem[1] = limit;
2473         sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
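        /* A worked example of the sizing above, assuming 4 KiB pages and
         * 256 MB of memory (nr_all_pages == 65536): the first line gives
         * min(65536, 65536) >> 8 == 256, the second (256 * 256) >> 1 ==
         * 32768 pages (128 MB, the 1/2 maximum), so sysctl_tcp_mem ends
         * up as { 24576, 32768, 49152 } pages.
         */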
2474
2475         /* Set per-socket limits to no more than 1/128 the pressure threshold */
2476         limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2477         max_share = min(4UL*1024*1024, limit);
2478
2479         sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2480         sysctl_tcp_wmem[1] = 16*1024;
2481         sysctl_tcp_wmem[2] = max(64*1024, max_share);
2482
2483         sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2484         sysctl_tcp_rmem[1] = 87380;
2485         sysctl_tcp_rmem[2] = max(87380, max_share);
2486
2487         printk(KERN_INFO "TCP: Hash tables configured "
2488                "(established %d bind %d)\n",
2489                tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
2490
2491         tcp_register_congestion_control(&tcp_reno);
2492 }
2493
2494 EXPORT_SYMBOL(tcp_close);
2495 EXPORT_SYMBOL(tcp_disconnect);
2496 EXPORT_SYMBOL(tcp_getsockopt);
2497 EXPORT_SYMBOL(tcp_ioctl);
2498 EXPORT_SYMBOL(tcp_poll);
2499 EXPORT_SYMBOL(tcp_read_sock);
2500 EXPORT_SYMBOL(tcp_recvmsg);
2501 EXPORT_SYMBOL(tcp_sendmsg);
2502 EXPORT_SYMBOL(tcp_sendpage);
2503 EXPORT_SYMBOL(tcp_setsockopt);
2504 EXPORT_SYMBOL(tcp_shutdown);
2505 EXPORT_SYMBOL(tcp_statistics);