/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

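/* Worst-case estimate of the ring slots needed to transmit the skb at
 * the head of the Rx queue: one grant copy per XEN_PAGE_SIZE of
 * payload, plus one extra-info slot each for GSO metadata and a
 * queued software hash. E.g. a 3000-byte GSO skb carrying a software
 * hash needs DIV_ROUND_UP(3000, 4096) + 1 + 1 == 3 slots.
 *
 * If too few requests are available, rearm req_event so the frontend
 * notifies us when it posts more, then re-check req_prod to close the
 * race against the frontend posting in the meantime.
 */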
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	struct sk_buff *skb;
	int needed;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return false;

	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
	if (skb_is_gso(skb))
		needed++;
	if (skb->sw_hash)
		needed++;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

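/* Queueing an skb counts against rx_queue_len; once the configured
 * rx_queue_max is exceeded the matching netdev tx queue is stopped,
 * pushing back on the stack until xenvif_rx_dequeue() drains the
 * queue back below the limit and wakes it.
 */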
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

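/* Queued skbs may hold foreign pages, so they cannot sit on the Rx
 * queue forever: xenvif_rx_queue_drop_expired() frees any skb whose
 * XENVIF_RX_CB(skb)->expires deadline has passed, and
 * xenvif_rx_queue_purge() bins everything on teardown.
 */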
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}

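/* Issue the grant copies batched up in queue->rx_copy and propagate
 * any per-op failure into the status field of the matching ring
 * response; queue->rx_copy.idx[] records which response each op
 * belongs to.
 */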
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;
}

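/* Add one grant copy op for @len bytes of @data into the guest page
 * granted by @req, at @offset within that page. If @data sits in a
 * foreign page (one granted to us by another domain), copy grant to
 * grant; otherwise copy from our own gmfn. The batch is flushed once
 * COPY_BATCH_SIZE ops have accumulated.
 */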
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	page = virt_to_page(data);

	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref = req->gref;
	op->dest.domid = queue->vif->domid;
	op->dest.offset = offset;
	op->len = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

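/* Per-packet transmit state, carried across the ring slots that make
 * up one packet: the skb being sent, how much payload remains, which
 * frag (or the linear head) and offset we are copying from, any
 * pending extra-info segments, and how many slots have been filled.
 */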
struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	int frag; /* frag == -1 => skb->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

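/* Dequeue the next skb and reset @pkt for it. Note the stats are
 * tx_* here: what the guest receives is a transmit from the
 * backend's point of view. GSO and hash extra-info segments are
 * prepared now so they can be emitted right after the first data
 * slot.
 */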
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	skb = xenvif_rx_dequeue(queue);

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

	pkt->skb = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
}

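/* Every request consumed for this packet produced exactly one
 * response, so the new response producer index is simply the request
 * consumer index.
 */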
static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	int notify;

	/* Complete any outstanding copy ops for this skb. */
	xenvif_rx_copy_flush(queue);

	/* Push responses and notify. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	dev_kfree_skb(pkt->skb);
}

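/* Return the next contiguous chunk of payload. chunk_len is bounded
 * three ways: by what is left in the current frag, by the space left
 * in the destination page (XEN_PAGE_SIZE - offset), and by the
 * source page boundary, since a single grant copy cannot cross a
 * page. E.g., given ample room in the destination slot, a frag with
 * 100 bytes left starting 60 bytes before a page boundary yields a
 * 60-byte chunk and then a 40-byte one.
 */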
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	struct sk_buff *skb = pkt->skb;
	void *frag_data;
	size_t frag_len, chunk_len;

	if (pkt->frag == -1) {
		frag_data = skb->data;
		frag_len = skb_headlen(skb);
	} else {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	chunk_len = min(frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min(chunk_len,
			XEN_PAGE_SIZE - xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;

	/* Advance to next frag? */
	if (frag_len == chunk_len) {
		pkt->frag++;
		pkt->frag_offset = 0;
	}

	*data = frag_data;
	*len = chunk_len;
}

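/* Fill one data slot: keep pulling chunks until the destination page
 * is full or the packet is exhausted. Only the first slot of a
 * packet carries the checksum flags and advertises any extra-info
 * segments; rsp->status holds the byte count on success.
 */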
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = 0;
	unsigned int flags;

	do {
		size_t len;
		void *data;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;

	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}

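/* Extra-info segments occupy whole ring slots of their own. Emit the
 * next pending extra, chaining further ones with
 * XEN_NETIF_EXTRA_FLAG_MORE.
 */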
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}

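/* Transmit one skb to the guest, consuming one ring slot per
 * iteration. The slot order for, say, a two-page GSO packet is: data
 * slot 0 (with XEN_NETRXF_extra_info set), the GSO extra slot, then
 * the remaining data slot.
 */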
void xenvif_rx_skb(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

	xenvif_rx_next_skb(queue, &pkt);

	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

	xenvif_rx_complete(queue, &pkt);
}

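/* Process at most RX_BATCH_SIZE skbs per call so a busy queue cannot
 * monopolise the kthread; anything left over is picked up on the
 * next pass round the kthread loop.
 */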
#define RX_BATCH_SIZE 64

void xenvif_rx_action(struct xenvif_queue *queue)
{
	unsigned int work_done = 0;

	while (xenvif_rx_ring_slots_available(queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}
}

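/* A queue is deemed stalled when the frontend has left no unconsumed
 * Rx requests on the ring for longer than vif->stall_timeout, and
 * ready again as soon as at least one request appears.
 */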
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled &&
		prod - cons < 1 &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		kthread_should_stop() ||
		queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when an skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

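/* Per-queue kthread: waits for work, transmits queued skbs to the
 * guest, maintains stall detection and drops expired skbs. If
 * stall_timeout is zero, stall detection is disabled and the carrier
 * is raised from the start.
 */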
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* The frontend has been found to be rogue; disable it
		 * in kthread context. Currently this is only set when
		 * netback discovers the frontend has sent a malformed
		 * packet, but we cannot disable the interface in
		 * softirq context, so we defer it here, if this
		 * thread is associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while, it's probably not responsive; drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}