int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- int err = 0;
+ int err;
int skb_len;
+ unsigned long flags;
+ struct sk_buff_head *list = &sk->sk_receive_queue;
/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
*/
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned)sk->sk_rcvbuf) {
- err = -ENOMEM;
- goto out;
+ atomic_inc(&sk->sk_drops);
+ return -ENOMEM;
}
err = sk_filter(sk, skb);
if (err)
- goto out;
+ return err;
if (!sk_rmem_schedule(sk, skb->truesize)) {
- err = -ENOBUFS;
- goto out;
+ atomic_inc(&sk->sk_drops);
+ return -ENOBUFS;
}
skb->dev = NULL;
skb_set_owner_r(skb, sk);
/* Cache the SKB length before we tack it onto the receive
 * queue.  Once it is added it no longer belongs to us and
 * may be freed by other threads of control pulling packets
 * from the queue.
 */
skb_len = skb->len;
- skb_queue_tail(&sk->sk_receive_queue, skb);
+ spin_lock_irqsave(&list->lock, flags);
+ skb->dropcount = atomic_read(&sk->sk_drops);
+ __skb_queue_tail(list, skb);
+ spin_unlock_irqrestore(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb_len);
-out:
- return err;
+ return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
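The dropcount stamped under the queue lock above is a snapshot of sk->sk_drops taken at enqueue time; elsewhere in this series the recvmsg() path forwards it to applications as a SOL_SOCKET/SO_RXQ_OVFL control message. A hypothetical userspace consumer (the helper name and the fallback #define are illustrative, not part of the patch):

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40	/* value from asm-generic/socket.h */
#endif

/* Receive one datagram; report the cumulative drop count, if any. */
static ssize_t recv_with_drops(int fd, void *buf, size_t len, uint32_t *drops)
{
	union {			/* aligned cmsg buffer, per cmsg(3) */
		char buf[CMSG_SPACE(sizeof(uint32_t))];
		struct cmsghdr align;
	} u;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= u.buf,
		.msg_controllen	= sizeof(u.buf),
	};
	struct cmsghdr *cm;
	int one = 1;
	ssize_t n;

	/* ask for the SO_RXQ_OVFL cmsg (normally done once at setup) */
	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return n;

	*drops = 0;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SO_RXQ_OVFL)
			memcpy(drops, CMSG_DATA(cm), sizeof(*drops));
	return n;
}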
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
- } else
- sk_add_backlog(sk, skb);
+ } else if (sk_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+
bh_unlock_sock(sk);
out:
sock_put(sk);
}
EXPORT_SYMBOL(sk_receive_skb);
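sk_receive_skb() can now see sk_add_backlog() fail: the helper stopped queueing unconditionally and instead refuses work once the backlog holds too many bytes. A sketch of the bounded variant, assuming the old unconditional append was kept as __sk_add_backlog() (as in this series); the exact limit expression in include/net/sock.h may differ:

/* Caller holds the per-socket spinlock. */
static inline int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* Refuse new skbs once the accumulated truesize reaches the
	 * limit, so a flood cannot grow the backlog without bound
	 * while the socket owner is busy in sk_backlog_rcv().
	 */
	if (sk->sk_backlog.len >= sk->sk_backlog.limit)
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

The counter is reset in __release_sock() further down, once the owner has drained the queue.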
+void sk_reset_txq(struct sock *sk)
+{
+ sk_tx_queue_clear(sk);
+}
+EXPORT_SYMBOL(sk_reset_txq);
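sk_reset_txq() exists only to export the clear operation to code built outside this file. The accessors themselves are static inlines in include/net/sock.h; roughly (a sketch, assuming -1 encodes "no cached queue"):

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk ? sk->sk_tx_queue_mapping : -1;
}

The cached mapping must be invalidated whenever the route changes, which is why __sk_dst_check() below gains a sk_tx_queue_clear() call next to the sk_dst_cache reset.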
+
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
struct dst_entry *dst = sk->sk_dst_cache;
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk_tx_queue_clear(sk);
sk->sk_dst_cache = NULL;
dst_release(dst);
return NULL;
if (copy_from_user(devname, optval, optlen))
goto out;
- if (devname[0] == '\0') {
- index = 0;
- } else {
- struct net_device *dev = dev_get_by_name(net, devname);
+ index = 0;
+ if (devname[0] != '\0') {
+ struct net_device *dev;
+ rcu_read_lock();
+ dev = dev_get_by_name_rcu(net, devname);
+ if (dev)
+ index = dev->ifindex;
+ rcu_read_unlock();
ret = -ENODEV;
if (!dev)
goto out;
-
- index = dev->ifindex;
- dev_put(dev);
}
lock_sock(sk);
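The conversion above trades the dev_get_by_name()/dev_put() refcount round trip for an RCU lookup: dev_get_by_name_rcu() takes no reference, so the device may only be dereferenced inside the read-side critical section. The ifindex is copied out before rcu_read_unlock(), and past that point the pointer is only compared against NULL. The same pattern in isolation (name_to_ifindex() is a hypothetical helper, not part of the patch):

static int name_to_ifindex(struct net *net, const char *name)
{
	struct net_device *dev;
	int ifindex = 0;	/* 0 == no such device, as above */

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		ifindex = dev->ifindex;	/* valid only inside the section */
	rcu_read_unlock();

	return ifindex;
}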
/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */
int sock_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
+ char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int val;
sk->sk_reuse = valbool;
break;
case SO_TYPE:
+ case SO_PROTOCOL:
+ case SO_DOMAIN:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
/* We implement the SO_SNDLOWAT etc to
not be settable (1003.1g 5.3) */
+ case SO_RXQ_OVFL:
+ if (valbool)
+ sock_set_flag(sk, SOCK_RXQ_OVFL);
+ else
+ sock_reset_flag(sk, SOCK_RXQ_OVFL);
+ break;
default:
ret = -ENOPROTOOPT;
break;
struct timeval tm;
} v;
- unsigned int lv = sizeof(int);
+ int lv = sizeof(int);
int len;
if (get_user(len, optlen))
v.val = sk->sk_type;
break;
+ case SO_PROTOCOL:
+ v.val = sk->sk_protocol;
+ break;
+
+ case SO_DOMAIN:
+ v.val = sk->sk_family;
+ break;
+
case SO_ERROR:
v.val = -sock_error(sk);
if (v.val == 0)
v.val = sk->sk_mark;
break;
+ case SO_RXQ_OVFL:
+ v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
+ break;
+
default:
return -ENOPROTOOPT;
}
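With SO_PROTOCOL and SO_DOMAIN readable (and still rejected on the set side), an application can recover how a socket was created from the file descriptor alone. A hypothetical query; the fallback #defines carry the asm-generic/socket.h values and are not part of the patch:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_PROTOCOL
#define SO_PROTOCOL 38
#define SO_DOMAIN   39
#endif

static int describe_socket(int fd)
{
	int domain, protocol;
	socklen_t len = sizeof(int);

	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len) < 0)
		return -1;
	len = sizeof(int);
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len) < 0)
		return -1;

	/* e.g. domain == AF_INET, protocol == IPPROTO_TCP for a TCP socket */
	printf("domain %d, protocol %d\n", domain, protocol);
	return 0;
}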
void *sptr = nsk->sk_security;
#endif
BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
- sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+ sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
+ sizeof(osk->sk_tx_queue_mapping));
memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
if (!try_module_get(prot->owner))
goto out_free_sec;
+ sk_tx_queue_clear(sk);
}
return sk;
sk->sk_prot = sk->sk_prot_creator = prot;
sock_lock_init(sk);
sock_net_set(sk, get_net(net));
+ atomic_set(&sk->sk_wmem_alloc, 1);
}
return sk;
if (sk->sk_destruct)
sk->sk_destruct(sk);
- filter = rcu_dereference(sk->sk_filter);
+ filter = rcu_dereference_check(sk->sk_filter,
+ atomic_read(&sk->sk_wmem_alloc) == 0);
if (filter) {
sk_filter_uncharge(sk, filter);
rcu_assign_pointer(sk->sk_filter, NULL);
sock_lock_init(newsk);
bh_lock_sock(newsk);
newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+ newsk->sk_backlog.len = 0;
atomic_set(&newsk->sk_rmem_alloc, 0);
/*
 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
 */
if (newsk->sk_prot->sockets_allocated)
percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+
+ if (sock_flag(newsk, SOCK_TIMESTAMP) ||
+ sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
+ net_enable_timestamp();
}
out:
return newsk;
void __init sk_init(void)
{
- if (num_physpages <= 4096) {
+ if (totalram_pages <= 4096) {
sysctl_wmem_max = 32767;
sysctl_rmem_max = 32767;
sysctl_wmem_default = 32767;
sysctl_rmem_default = 32767;
- } else if (num_physpages >= 131072) {
+ } else if (totalram_pages >= 131072) {
sysctl_wmem_max = 131071;
sysctl_rmem_max = 131071;
}
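These boot-time defaults surface as the net.core.{r,w}mem_default and net.core.{r,w}mem_max sysctls; SO_SNDBUF/SO_RCVBUF requests are clamped to the max, and the kernel books twice the requested value to cover skb overhead. A hypothetical userspace probe of that behavior:

#include <sys/socket.h>

/* Request a send buffer and return what the kernel actually granted;
 * expect roughly 2 * min(bytes, net.core.wmem_max).
 */
static int request_sndbuf(int fd, int bytes)
{
	int actual;
	socklen_t len = sizeof(actual);

	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bytes, sizeof(bytes));
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &actual, &len);
	return actual;
}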
void sock_wfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
- int res;
+ unsigned int len = skb->truesize;
- /* In case it might be waiting for more memory. */
- res = atomic_sub_return(skb->truesize, &sk->sk_wmem_alloc);
- if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
+ if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
+ /*
+ * Keep a reference on sk_wmem_alloc, this will be released
+ * after sk_write_space() call
+ */
+ atomic_sub(len - 1, &sk->sk_wmem_alloc);
sk->sk_write_space(sk);
+ len = 1;
+ }
/*
- * if sk_wmem_alloc reached 0, we are last user and should
- * free this sock, as sk_free() call could not do it.
+ * if sk_wmem_alloc reaches 0, we must finish what sk_free()
+ * could not do because of in-flight packets
*/
- if (res == 0)
+ if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
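The trick reads best next to its counterpart: sk_wmem_alloc now doubles as a count of bytes in flight plus one guard reference, seeded by the atomic_set(..., 1) added to sk_alloc() above. sk_free() drops only that guard; whoever brings the counter to zero, sk_free() or the last sock_wfree(), performs the real teardown. A sketch close to the sk_free() in this series:

void sk_free(struct sock *sk)
{
	/*
	 * If the guard reference is not the last one, packets are
	 * still queued on a tx path; the final sock_wfree() will
	 * observe zero and call __sk_free() for us.
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}

This is also why sock_wfree() subtracts len - 1 first: keeping one unit alive across the sk_write_space() callback guarantees the sock cannot be freed out from under the callback.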
bh_lock_sock(sk);
} while ((skb = sk->sk_backlog.head) != NULL);
+
+ /*
+ * Doing the zeroing here guarantees we cannot loop forever
+ * while a wild producer attempts to flood us.
+ */
+ sk->sk_backlog.len = 0;
}
EXPORT_SYMBOL(sock_no_shutdown);
int sock_no_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
+ char __user *optval, unsigned int optlen)
{
return -EOPNOTSUPP;
}
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
/*
 * Before updating sk_refcnt, we must commit prior changes to memory
 * (Documentation/RCU/rculist_nulls.txt for details)
 */
smp_wmb();
atomic_set(&sk->sk_refcnt, 1);
- atomic_set(&sk->sk_wmem_alloc, 1);
atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
/*
 * Set socket options on an inet socket.
*/
int sock_common_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
+ char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
+ char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
-static int sock_inuse_init_net(struct net *net)
+static int __net_init sock_inuse_init_net(struct net *net)
{
net->core.inuse = alloc_percpu(struct prot_inuse);
return net->core.inuse ? 0 : -ENOMEM;
}
-static void sock_inuse_exit_net(struct net *net)
+static void __net_exit sock_inuse_exit_net(struct net *net)
{
free_percpu(net->core.inuse);
}
}
if (prot->rsk_prot != NULL) {
- static const char mask[] = "request_sock_%s";
-
- prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+ prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
if (prot->rsk_prot->slab_name == NULL)
goto out_free_sock_slab;
- sprintf(prot->rsk_prot->slab_name, mask, prot->name);
prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
prot->rsk_prot->obj_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
}
if (prot->twsk_prot != NULL) {
- static const char mask[] = "tw_sock_%s";
-
- prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+ prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
if (prot->twsk_prot->twsk_slab_name == NULL)
goto out_free_request_sock_slab;
- sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
prot->twsk_prot->twsk_slab =
kmem_cache_create(prot->twsk_prot->twsk_slab_name,
prot->twsk_prot->twsk_obj_size,
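The kasprintf() conversions above fold the strlen() + kmalloc() + sprintf() sequence into one call that measures the formatted output first and then allocates exactly what it needs, so the hand-written length arithmetic can no longer drift out of sync with the format string. The result is ordinary kmalloc() memory; the out_free_* error paths still kfree() it. Spelled out with a hypothetical helper:

static char *make_slab_name(const char *proto_name)
{
	return kasprintf(GFP_KERNEL, "tw_sock_%s", proto_name);
	/* equivalent to the removed open-coded version:
	 *	static const char mask[] = "tw_sock_%s";
	 *	char *n = kmalloc(strlen(proto_name) + sizeof(mask) - 1,
	 *			  GFP_KERNEL);
	 *	if (n)
	 *		sprintf(n, mask, proto_name);
	 *	return n;
	 */
}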