virtio_net: Check for room in the vq before adding buffer
[linux-2.6.git] drivers/net/virtio_net.c
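The core of this change: add_buf() reports the remaining capacity of the virtqueue, so callers can check for room before adding another buffer. The receive-fill loops keep posting buffers only while the return value shows space for one more (err >= num for max-sized buffers in try_fill_recv_maxbufs(), err > 0 for single-page buffers in try_fill_recv()), and start_xmit() stops the netdev queue once fewer than 2+MAX_SKB_FRAGS slots remain, so the next worst-case packet always fits.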
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_SEND_COMMAND_SG_MAX    2

struct virtnet_info
{
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Receive & send queues. */
        struct sk_buff_head recv;
        struct sk_buff_head send;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Chain pages by the private ptr. */
        struct page *pages;
};

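/* Per-packet metadata, stashed in the skb's control buffer (see
 * skb_vnet_hdr() below): the virtio header we prepend on transmit and
 * parse on receive, plus the number of scatterlist entries used. */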
struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

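/* Spare receive pages are kept on a simple free list, chained through
 * page->private (the "pages" member of struct virtnet_info above). */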
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
        page->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                give_a_page(vi, skb_shinfo(skb)->frags[i].page);
        skb_shinfo(skb)->nr_frags = 0;
        skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p)
                vi->pages = (struct page *)p->private;
        else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
}

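/* Turn a buffer returned by the host into a proper skb: strip the virtio
 * header, reassemble mergeable receive buffers if that feature is in use,
 * and translate the header's checksum/GSO hints into skb metadata. */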
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                        unsigned len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        int err;
        int i;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                goto drop;
        }

        if (vi->mergeable_rx_bufs) {
                unsigned int copy;
                char *p = page_address(skb_shinfo(skb)->frags[0].page);

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;
                len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

                memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
                p += sizeof(hdr->mhdr);

                copy = len;
                if (copy > skb_tailroom(skb))
                        copy = skb_tailroom(skb);

                memcpy(skb_put(skb, copy), p, copy);

                len -= copy;

                if (!len) {
                        give_a_page(vi, skb_shinfo(skb)->frags[0].page);
                        skb_shinfo(skb)->nr_frags--;
                } else {
                        skb_shinfo(skb)->frags[0].page_offset +=
                                sizeof(hdr->mhdr) + copy;
                        skb_shinfo(skb)->frags[0].size = len;
                        skb->data_len += len;
                        skb->len += len;
                }

                while (--hdr->mhdr.num_buffers) {
                        struct sk_buff *nskb;

                        i = skb_shinfo(skb)->nr_frags;
                        if (i >= MAX_SKB_FRAGS) {
                                pr_debug("%s: packet too long %d\n", dev->name,
                                         len);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
                        if (!nskb) {
                                pr_debug("%s: rx error: %d buffers missing\n",
                                         dev->name, hdr->mhdr.num_buffers);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        __skb_unlink(nskb, &vi->recv);
                        vi->num--;

                        skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
                        skb_shinfo(nskb)->nr_frags = 0;
                        kfree_skb(nskb);

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;

                        skb_shinfo(skb)->frags[i].size = len;
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += len;
                        skb->len += len;
                }
        } else {
                len -= sizeof(hdr->hdr);

                if (len <= MAX_PACKET_LEN)
                        trim_pages(vi, skb);

                err = pskb_trim(skb, len);
                if (err) {
                        pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
                                 len, err);
                        dev->stats.rx_dropped++;
                        goto drop;
                }
        }

        skb->truesize += skb->data_len;
        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
drop:
        dev_kfree_skb(skb);
}

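/* Post worst-case-sized receive buffers: the virtio header plus a
 * MAX_PACKET_LEN linear part, plus up to MAX_SKB_FRAGS pages when
 * big_packets is set.  add_buf() returns the capacity left in the ring,
 * so the loop stops as soon as there is no room for another max-sized
 * buffer (err < num) rather than failing an add and unwinding. */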
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        int num, err, i;
        bool oom = false;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);
        do {
                struct skb_vnet_hdr *hdr;

                skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                skb_reserve(skb, NET_IP_ALIGN);
                skb_put(skb, MAX_PACKET_LEN);

                hdr = skb_vnet_hdr(skb);
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

                if (vi->big_packets) {
                        for (i = 0; i < MAX_SKB_FRAGS; i++) {
                                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                                f->page = get_a_page(vi, gfp);
                                if (!f->page)
                                        break;

                                f->page_offset = 0;
                                f->size = PAGE_SIZE;

                                skb->data_len += PAGE_SIZE;
                                skb->len += PAGE_SIZE;

                                skb_shinfo(skb)->nr_frags++;
                        }
                }

                num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        trim_pages(vi, skb);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err >= num);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[1];
        int err;
        bool oom = false;

        if (!vi->mergeable_rx_bufs)
                return try_fill_recv_maxbufs(vi, gfp);

        do {
                skb_frag_t *f;

                skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                skb_reserve(skb, NET_IP_ALIGN);

                f = &skb_shinfo(skb)->frags[0];
                f->page = get_a_page(vi, gfp);
                if (!f->page) {
                        oom = true;
                        kfree_skb(skb);
                        break;
                }

                f->page_offset = 0;
                f->size = PAGE_SIZE;

                skb_shinfo(skb)->nr_frags++;

                sg_init_one(sg, page_address(f->page), PAGE_SIZE);
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI, suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

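/* Workqueue handler to replenish the receive ring with GFP_KERNEL when
 * the GFP_ATOMIC attempt in virtnet_poll() fails.  NAPI is disabled
 * across the refill so we don't race the receive path. */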
static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi;
        bool still_empty;

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        try_fill_recv(vi, GFP_KERNEL);
        still_empty = (vi->num == 0);
        napi_enable(&vi->napi);

        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
                schedule_delayed_work(&vi->refill, HZ/2);
}

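/* NAPI poll: drain received buffers up to the budget, refill once the
 * ring drops below half its high-water mark, and re-enable callbacks
 * only if we can complete without missing a late-arriving buffer. */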
static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        struct sk_buff *skb = NULL;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                __skb_unlink(skb, &vi->recv);
                receive_skb(vi->dev, skb, len);
                vi->num--;
                received++;
        }

        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
                    && napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}

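/* Reclaim skbs the host has finished sending; returns the number of
 * scatterlist slots they had occupied in the send virtqueue. */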
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len, tot_sgs = 0;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                kfree_skb(skb);
        }
        return tot_sgs;
}

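/* Fill in the virtio header from the skb's checksum and GSO state and add
 * the packet to the send queue.  Returns add_buf()'s result: the ring
 * capacity remaining, or a negative value if the packet didn't fit. */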
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        hdr->mhdr.num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
        else
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

        hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
        return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}

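/* Transmit path.  To avoid returning NETDEV_TX_BUSY on a full ring, the
 * queue is stopped as soon as fewer than 2+MAX_SKB_FRAGS slots remain,
 * so the next worst-case packet is guaranteed to fit; TX_BUSY is only
 * reachable on the unexpected path where even this packet didn't fit. */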
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* Put new one in send queue and do transmit */
        __skb_queue_head(&vi->send, skb);
        capacity = xmit_skb(vi, skb);

        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
                netif_stop_queue(dev);
                dev_warn(&dev->dev, "Unexpected full queue\n");
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        vi->svq->vq_ops->disable_cb(vi->svq);
                        netif_start_queue(dev);
                        goto again;
                }
                return NETDEV_TX_BUSY;
        }

        vi->svq->vq_ops->kick(vi->svq);
        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        /* More just got used, free them then recheck. */
                        capacity += free_old_xmit_skbs(vi);
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
                                vi->svq->vq_ops->disable_cb(vi->svq);
                        }
                }
        }

        return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants to re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED */
        if (napi_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
                (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

        vi->cvq->vq_ops->kick(vi->cvq);

        /*
         * Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);

        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}

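/* Push the promiscuous/allmulti flags and the unicast and multicast
 * address lists to the host over the control virtqueue. */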
static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct dev_addr_list *addr;
        struct netdev_hw_addr *ha;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        /* MAC filter - use one buffer for both lists */
        mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
                                 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = dev->uc.count;
        i = 0;
        list_for_each_entry(ha, &dev->uc.list, list)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[dev->uc.count][0];

        mac_data->entries = dev->mc_count;
        addr = dev->mc_list;
        for (i = 0; i < dev->mc_count; i++, addr = addr->next)
                memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .set_tso = ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

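/* Re-read the link state from config space and propagate it to the
 * network core (carrier on/off, queue wake/stop). */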
static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
                return;

        vi->vdev->config->get(vi->vdev,
                              offsetof(struct virtio_net_config, status),
                              &v, sizeof(v));

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}

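/* Device probe: allocate the netdev, map negotiated feature bits onto
 * netdev features, find the virtqueues, and prime the receive ring. */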
static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
        const char *names[] = { "input", "output", "control" };
        int nvqs;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
        INIT_DELAYED_WORK(&vi->refill, refill_work);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto free;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        dev->features |= NETIF_F_HW_VLAN_FILTER;
        }

        /* Initialize our empty receive and send queues. */
        skb_queue_head_init(&vi->recv);
        skb_queue_head_init(&vi->send);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi, GFP_KERNEL);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_status(vi);
        netif_carrier_on(dev);

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
        cancel_delayed_work_sync(&vi->refill);
free_vqs:
        vdev->config->del_vqs(vdev);
free:
        free_netdev(dev);
        return err;
}

static void virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        /* Free our skbs in send and recv queues, if any. */
        while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
                kfree_skb(skb);
                vi->num--;
        }
        __skb_queue_purge(&vi->send);

        BUG_ON(vi->num != 0);

        unregister_netdev(vi->dev);
        cancel_delayed_work_sync(&vi->refill);

        vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
        .probe =        virtnet_probe,
        .remove =       __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");