/*
 * Xenbus code for netif backend
 *
 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>

struct backend_info {
        struct xenbus_device *dev;
        struct xenvif *vif;

        /* This is the state that will be reflected in xenstore when any
         * active hotplug script completes.
         */
        enum xenbus_state state;

        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;

        const char *hotplug_script;
};

static int connect_data_rings(struct backend_info *be,
                              struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);

#ifdef CONFIG_DEBUG_FS
struct dentry *xen_netback_dbg_root = NULL;

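/* Dump the state of one queue's shared tx/rx rings. The values printed
 * in parentheses are deltas against rsp_prod, i.e. a rough measure of
 * how much work is still outstanding on the ring.
 */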
static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
        struct xenvif_queue *queue = m->private;
        struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
        struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
        struct netdev_queue *dev_queue;

        if (tx_ring->sring) {
                struct xen_netif_tx_sring *sring = tx_ring->sring;

                seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
                           tx_ring->nr_ents);
                seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
                           sring->req_prod,
                           sring->req_prod - sring->rsp_prod,
                           tx_ring->req_cons,
                           tx_ring->req_cons - sring->rsp_prod,
                           sring->req_event,
                           sring->req_event - sring->rsp_prod);
                seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
                           sring->rsp_prod,
                           tx_ring->rsp_prod_pvt,
                           tx_ring->rsp_prod_pvt - sring->rsp_prod,
                           sring->rsp_event,
                           sring->rsp_event - sring->rsp_prod);
                seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
                           queue->pending_prod,
                           queue->pending_cons,
                           nr_pending_reqs(queue));
                seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
                           queue->dealloc_prod,
                           queue->dealloc_cons,
                           queue->dealloc_prod - queue->dealloc_cons);
        }

        if (rx_ring->sring) {
                struct xen_netif_rx_sring *sring = rx_ring->sring;

                seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
                seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
                           sring->req_prod,
                           sring->req_prod - sring->rsp_prod,
                           rx_ring->req_cons,
                           rx_ring->req_cons - sring->rsp_prod,
                           sring->req_event,
                           sring->req_event - sring->rsp_prod);
                seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
                           sring->rsp_prod,
                           rx_ring->rsp_prod_pvt,
                           rx_ring->rsp_prod_pvt - sring->rsp_prod,
                           sring->rsp_event,
                           sring->rsp_event - sring->rsp_prod);
        }

        seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
                   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
                   "remaining: %lu, expires: %lu, now: %lu\n",
                   queue->napi.state, queue->napi.weight,
                   skb_queue_len(&queue->tx_queue),
                   timer_pending(&queue->credit_timeout),
                   queue->credit_bytes,
                   queue->credit_usec,
                   queue->remaining_credit,
                   queue->credit_timeout.expires,
                   jiffies);

        dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);

        seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
                   queue->rx_queue_len, queue->rx_queue_max,
                   skb_queue_len(&queue->rx_queue),
                   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");

        return 0;
}

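/* Writing the string "kick" to an io_ring_qN file fakes an event-channel
 * interrupt on that queue, which can help when debugging a stalled ring.
 * For example (the path is illustrative and assumes the usual debugfs
 * mount point and a vif named vif1.0):
 *
 *   echo kick > /sys/kernel/debug/xen-netback/vif1.0/io_ring_q0
 */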
#define XENVIF_KICK_STR "kick"
#define BUFFER_SIZE 32

static ssize_t
xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
                     loff_t *ppos)
{
        struct xenvif_queue *queue =
                ((struct seq_file *)filp->private_data)->private;
        int len;
        char write[BUFFER_SIZE];

        /* don't allow partial writes and check the length */
        if (*ppos != 0)
                return 0;
        if (count >= sizeof(write))
                return -ENOSPC;

        len = simple_write_to_buffer(write,
                                     sizeof(write) - 1,
                                     ppos,
                                     buf,
                                     count);
        if (len < 0)
                return len;

        write[len] = '\0';

        if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
                xenvif_interrupt(0, (void *)queue);
        else {
                pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
                        queue->id);
                count = -EINVAL;
        }
        return count;
}

static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
{
        int ret;
        void *queue = NULL;

        if (inode->i_private)
                queue = inode->i_private;
        ret = single_open(filp, xenvif_read_io_ring, queue);
        filp->f_mode |= FMODE_PWRITE;
        return ret;
}

static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
        .owner = THIS_MODULE,
        .open = xenvif_io_ring_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = xenvif_write_io_ring,
};

static int xenvif_ctrl_show(struct seq_file *m, void *v)
{
        struct xenvif *vif = m->private;

        xenvif_dump_hash_info(vif, m);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);

static void xenvif_debugfs_addif(struct xenvif *vif)
{
        struct dentry *pfile;
        int i;

        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
                return;

        vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
                                                  xen_netback_dbg_root);
        if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
                for (i = 0; i < vif->num_queues; ++i) {
                        char filename[sizeof("io_ring_q") + 4];

                        snprintf(filename, sizeof(filename), "io_ring_q%d", i);
                        pfile = debugfs_create_file(filename,
                                                    0600,
                                                    vif->xenvif_dbg_root,
                                                    &vif->queues[i],
                                                    &xenvif_dbg_io_ring_ops_fops);
                        if (IS_ERR_OR_NULL(pfile))
                                pr_warn("Creation of io_ring file returned %ld!\n",
                                        PTR_ERR(pfile));
                }

                if (vif->ctrl_irq) {
                        pfile = debugfs_create_file("ctrl",
                                                    0400,
                                                    vif->xenvif_dbg_root,
                                                    vif,
                                                    &xenvif_ctrl_fops);
                        if (IS_ERR_OR_NULL(pfile))
                                pr_warn("Creation of ctrl file returned %ld!\n",
                                        PTR_ERR(pfile));
                }
        } else
                netdev_warn(vif->dev,
                            "Creation of vif debugfs dir returned %ld!\n",
                            PTR_ERR(vif->xenvif_dbg_root));
}

static void xenvif_debugfs_delif(struct xenvif *vif)
{
        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
                return;

        debugfs_remove_recursive(vif->xenvif_dbg_root);
        vif->xenvif_dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */

static int netback_remove(struct xenbus_device *dev)
{
        struct backend_info *be = dev_get_drvdata(&dev->dev);

        set_backend_state(be, XenbusStateClosed);

        unregister_hotplug_status_watch(be);
        if (be->vif) {
                kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
                xen_unregister_watchers(be->vif);
                xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
                xenvif_free(be->vif);
                be->vif = NULL;
        }
        kfree(be->hotplug_script);
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
        return 0;
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and switch to Initialising.
 */
static int netback_probe(struct xenbus_device *dev,
                         const struct xenbus_device_id *id)
{
        const char *message;
        struct xenbus_transaction xbt;
        int err;
        int sg;
        const char *script;
        struct backend_info *be = kzalloc(sizeof(struct backend_info),
                                          GFP_KERNEL);
        if (!be) {
                xenbus_dev_fatal(dev, -ENOMEM,
                                 "allocating backend structure");
                return -ENOMEM;
        }

        be->dev = dev;
        dev_set_drvdata(&dev->dev, be);

        be->state = XenbusStateInitialising;
        err = xenbus_switch_state(dev, XenbusStateInitialising);
        if (err)
                goto fail;

        sg = 1;

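        /* Advertise the features this backend supports. The whole set is
         * written in a single xenstore transaction; xenbus_transaction_end()
         * returns -EAGAIN if another transaction raced with ours, in which
         * case we simply retry.
         */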
        do {
                err = xenbus_transaction_start(&xbt);
                if (err) {
                        xenbus_dev_fatal(dev, err, "starting transaction");
                        goto fail;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
                if (err) {
                        message = "writing feature-sg";
                        goto abort_transaction;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
                                    "%d", sg);
                if (err) {
                        message = "writing feature-gso-tcpv4";
                        goto abort_transaction;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
                                    "%d", sg);
                if (err) {
                        message = "writing feature-gso-tcpv6";
                        goto abort_transaction;
                }

                /* We support partial checksum setup for IPv6 packets */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-ipv6-csum-offload",
                                    "%d", 1);
                if (err) {
                        message = "writing feature-ipv6-csum-offload";
                        goto abort_transaction;
                }

                /* We support rx-copy path. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-rx-copy", "%d", 1);
                if (err) {
                        message = "writing feature-rx-copy";
                        goto abort_transaction;
                }

                /*
                 * We don't support rx-flip path (except old guests who don't
                 * grok this feature flag).
                 */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-rx-flip", "%d", 0);
                if (err) {
                        message = "writing feature-rx-flip";
                        goto abort_transaction;
                }

                /* We support dynamic multicast-control. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-multicast-control", "%d", 1);
                if (err) {
                        message = "writing feature-multicast-control";
                        goto abort_transaction;
                }

                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-dynamic-multicast-control",
                                    "%d", 1);
                if (err) {
                        message = "writing feature-dynamic-multicast-control";
                        goto abort_transaction;
                }

                err = xenbus_transaction_end(xbt, 0);
        } while (err == -EAGAIN);

        if (err) {
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto fail;
        }

        /*
         * Split event channels support, this is optional so it is not
         * put inside the above loop.
         */
        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "feature-split-event-channels",
                            "%u", separate_tx_rx_irq);
        if (err)
                pr_debug("Error writing feature-split-event-channels\n");

        /* Multi-queue support: This is an optional feature. */
        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "multi-queue-max-queues", "%u", xenvif_max_queues);
        if (err)
                pr_debug("Error writing multi-queue-max-queues\n");

        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "feature-ctrl-ring",
                            "%u", true);
        if (err)
                pr_debug("Error writing feature-ctrl-ring\n");

        script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
        if (IS_ERR(script)) {
                err = PTR_ERR(script);
                xenbus_dev_fatal(dev, err, "reading script");
                goto fail;
        }

        be->hotplug_script = script;

        /* This kicks hotplug scripts, so do it immediately. */
        err = backend_create_xenvif(be);
        if (err)
                goto fail;

        return 0;

abort_transaction:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, err, "%s", message);
fail:
        pr_debug("failed\n");
        netback_remove(dev);
        return err;
}


/*
 * Handle the creation of the hotplug script environment. We add the script
 * and vif variables to the environment, for the benefit of the vif-* hotplug
 * scripts.
 */
static int netback_uevent(struct xenbus_device *xdev,
                          struct kobj_uevent_env *env)
{
        struct backend_info *be = dev_get_drvdata(&xdev->dev);

        if (!be)
                return 0;

        if (add_uevent_var(env, "script=%s", be->hotplug_script))
                return -ENOMEM;

        if (!be->vif)
                return 0;

        return add_uevent_var(env, "vif=%s", be->vif->dev->name);
}

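/* The vif device is created as soon as the backend learns the frontend's
 * domid and handle; creating it this early lets the KOBJ_ONLINE uevent
 * kick the hotplug scripts well before the frontend connects.
 */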
static int backend_create_xenvif(struct backend_info *be)
{
        int err;
        long handle;
        struct xenbus_device *dev = be->dev;
        struct xenvif *vif;

        if (be->vif != NULL)
                return 0;

        err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading handle");
                return (err < 0) ? err : -EINVAL;
        }

        vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
        if (IS_ERR(vif)) {
                err = PTR_ERR(vif);
                xenbus_dev_fatal(dev, err, "creating interface");
                return err;
        }
        be->vif = vif;

        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
        return 0;
}

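/* Tear down the data and control rings. num_queues is cleared before the
 * per-queue state is freed so that any handler still running sees a
 * consistent (empty) vif, and synchronize_net() ensures those handlers
 * have finished before xenvif_deinit_queue() runs.
 */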
static void backend_disconnect(struct backend_info *be)
{
        struct xenvif *vif = be->vif;

        if (vif) {
                unsigned int num_queues = vif->num_queues;
                unsigned int queue_index;

                xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
                xenvif_disconnect_data(vif);

                /* At this point some of the handlers may still be active
                 * so we need to have additional synchronization here.
                 */
                vif->num_queues = 0;
                synchronize_net();

                for (queue_index = 0; queue_index < num_queues; ++queue_index)
                        xenvif_deinit_queue(&vif->queues[queue_index]);

                vfree(vif->queues);
                vif->queues = NULL;

                xenvif_disconnect_ctrl(vif);
        }
}

static void backend_connect(struct backend_info *be)
{
        if (be->vif)
                connect(be);
}

static inline void backend_switch_state(struct backend_info *be,
                                        enum xenbus_state state)
{
        struct xenbus_device *dev = be->dev;

        pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
        be->state = state;

        /* If we are waiting for a hotplug script then defer the
         * actual xenbus state change.
         */
        if (!be->have_hotplug_status_watch)
                xenbus_switch_state(dev, state);
}

/* Handle backend state transitions:
 *
 * The backend state starts in Initialising and the following transitions are
 * allowed.
 *
 * Initialising -> InitWait -> Connected
 *          \
 *           \        ^    \         |
 *            \       |     \        |
 *             \      |      \       |
 *              \     |       \      |
 *               \    |        \     |
 *                \   |         \    |
 *                 V  |          V   V
 *
 *                  Closed  <-> Closing
 *
 * The state argument specifies the eventual state of the backend and the
 * function transitions to that state via the shortest path.
 */
static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state)
{
        while (be->state != state) {
                switch (be->state) {
                case XenbusStateInitialising:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateConnected:
                        case XenbusStateClosing:
                                backend_switch_state(be, XenbusStateInitWait);
                                break;
                        case XenbusStateClosed:
                                backend_switch_state(be, XenbusStateClosed);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateClosed:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateConnected:
                                backend_switch_state(be, XenbusStateInitWait);
                                break;
                        case XenbusStateClosing:
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateInitWait:
                        switch (state) {
                        case XenbusStateConnected:
                                backend_connect(be);
                                backend_switch_state(be, XenbusStateConnected);
                                break;
                        case XenbusStateClosing:
                        case XenbusStateClosed:
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateConnected:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateClosing:
                        case XenbusStateClosed:
                                backend_disconnect(be);
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateClosing:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateConnected:
                        case XenbusStateClosed:
                                backend_switch_state(be, XenbusStateClosed);
                                break;
                        default:
                                BUG();
                        }
                        break;
                default:
                        BUG();
                }
        }
}

/**
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
                             enum xenbus_state frontend_state)
{
        struct backend_info *be = dev_get_drvdata(&dev->dev);

        pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));

        be->frontend_state = frontend_state;

        switch (frontend_state) {
        case XenbusStateInitialising:
                set_backend_state(be, XenbusStateInitWait);
                break;

        case XenbusStateInitialised:
                break;

        case XenbusStateConnected:
                set_backend_state(be, XenbusStateConnected);
                break;

        case XenbusStateClosing:
                set_backend_state(be, XenbusStateClosing);
                break;

        case XenbusStateClosed:
                set_backend_state(be, XenbusStateClosed);
                if (xenbus_dev_is_online(dev))
                        break;
                /* fall through if not online */
        case XenbusStateUnknown:
                set_backend_state(be, XenbusStateClosed);
                device_unregister(&dev->dev);
                break;

        default:
                xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
                                 frontend_state);
                break;
        }
}

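/* The optional xenstore "rate" key holds a credit limit in the form
 * "<bytes>,<usec>"; e.g. "1000000,20000" replenishes the vif with
 * 1000000 bytes of credit every 20000 microseconds.
 */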
static void xen_net_read_rate(struct xenbus_device *dev,
                              unsigned long *bytes, unsigned long *usec)
{
        char *s, *e;
        unsigned long b, u;
        char *ratestr;

        /* Default to unlimited bandwidth. */
        *bytes = ~0UL;
        *usec = 0;

        ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
        if (IS_ERR(ratestr))
                return;

        s = ratestr;
        b = simple_strtoul(s, &e, 10);
        if ((s == e) || (*e != ','))
                goto fail;

        s = e + 1;
        u = simple_strtoul(s, &e, 10);
        if ((s == e) || (*e != '\0'))
                goto fail;

        *bytes = b;
        *usec = u;

        kfree(ratestr);
        return;

 fail:
        pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
        kfree(ratestr);
}

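/* The "mac" key holds the frontend's MAC address as six colon-separated
 * hex octets, e.g. "00:16:3e:12:34:56".
 */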
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
        char *s, *e, *macstr;
        int i;

        macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
        if (IS_ERR(macstr))
                return PTR_ERR(macstr);

        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = simple_strtoul(s, &e, 16);
                if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
                        kfree(macstr);
                        return -ENOENT;
                }
                s = e+1;
        }

        kfree(macstr);
        return 0;
}

static void xen_net_rate_changed(struct xenbus_watch *watch,
                                 const char *path, const char *token)
{
        struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
        struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
        unsigned long credit_bytes;
        unsigned long credit_usec;
        unsigned int queue_index;

        xen_net_read_rate(dev, &credit_bytes, &credit_usec);
        for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
                struct xenvif_queue *queue = &vif->queues[queue_index];

                queue->credit_bytes = credit_bytes;
                queue->credit_usec = credit_usec;
                if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
                    queue->remaining_credit > queue->credit_bytes) {
                        queue->remaining_credit = queue->credit_bytes;
                }
        }
}

static int xen_register_credit_watch(struct xenbus_device *dev,
                                     struct xenvif *vif)
{
        int err = 0;
        char *node;
        unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");

        if (vif->credit_watch.node)
                return -EADDRINUSE;

        node = kmalloc(maxlen, GFP_KERNEL);
        if (!node)
                return -ENOMEM;
        snprintf(node, maxlen, "%s/rate", dev->nodename);
        vif->credit_watch.node = node;
        vif->credit_watch.callback = xen_net_rate_changed;
        err = register_xenbus_watch(&vif->credit_watch);
        if (err) {
                pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
                kfree(node);
                vif->credit_watch.node = NULL;
                vif->credit_watch.callback = NULL;
        }
        return err;
}

static void xen_unregister_credit_watch(struct xenvif *vif)
{
        if (vif->credit_watch.node) {
                unregister_xenbus_watch(&vif->credit_watch);
                kfree(vif->credit_watch.node);
                vif->credit_watch.node = NULL;
        }
}

static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
                                   const char *path, const char *token)
{
        struct xenvif *vif = container_of(watch, struct xenvif,
                                          mcast_ctrl_watch);
        struct xenbus_device *dev = xenvif_to_xenbus_device(vif);

        vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
                                        "request-multicast-control", 0);
}

static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
                                         struct xenvif *vif)
{
        int err = 0;
        char *node;
        unsigned maxlen = strlen(dev->otherend) +
                sizeof("/request-multicast-control");

        if (vif->mcast_ctrl_watch.node) {
                pr_err_ratelimited("Watch is already registered\n");
                return -EADDRINUSE;
        }

        node = kmalloc(maxlen, GFP_KERNEL);
        if (!node) {
                pr_err("Failed to allocate memory for watch\n");
                return -ENOMEM;
        }
        snprintf(node, maxlen, "%s/request-multicast-control",
                 dev->otherend);
        vif->mcast_ctrl_watch.node = node;
        vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
        err = register_xenbus_watch(&vif->mcast_ctrl_watch);
        if (err) {
                pr_err("Failed to set watcher %s\n",
                       vif->mcast_ctrl_watch.node);
                kfree(node);
                vif->mcast_ctrl_watch.node = NULL;
                vif->mcast_ctrl_watch.callback = NULL;
        }
        return err;
}

static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
{
        if (vif->mcast_ctrl_watch.node) {
                unregister_xenbus_watch(&vif->mcast_ctrl_watch);
                kfree(vif->mcast_ctrl_watch.node);
                vif->mcast_ctrl_watch.node = NULL;
        }
}

static void xen_register_watchers(struct xenbus_device *dev,
                                  struct xenvif *vif)
{
        xen_register_credit_watch(dev, vif);
        xen_register_mcast_ctrl_watch(dev, vif);
}

static void xen_unregister_watchers(struct xenvif *vif)
{
        xen_unregister_mcast_ctrl_watch(vif);
        xen_unregister_credit_watch(vif);
}

static void unregister_hotplug_status_watch(struct backend_info *be)
{
        if (be->have_hotplug_status_watch) {
                unregister_xenbus_watch(&be->hotplug_status_watch);
                kfree(be->hotplug_status_watch.node);
        }
        be->have_hotplug_status_watch = 0;
}

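/* The toolstack's hotplug script writes "connected" to the backend's
 * hotplug-status node once it has finished setting up the vif. Only then
 * do we complete any xenbus state change that backend_switch_state()
 * deferred while the script was running.
 */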
static void hotplug_status_changed(struct xenbus_watch *watch,
                                   const char *path,
                                   const char *token)
{
        struct backend_info *be = container_of(watch,
                                               struct backend_info,
                                               hotplug_status_watch);
        char *str;
        unsigned int len;

        str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
        if (IS_ERR(str))
                return;
        if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
                /* Complete any pending state change */
                xenbus_switch_state(be->dev, be->state);

                /* Not interested in this watch anymore. */
                unregister_hotplug_status_watch(be);
        }
        kfree(str);
}

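/* The control ring is optional: a frontend advertises one by writing
 * ctrl-ring-ref and event-channel-ctrl to its xenstore directory, so a
 * failure to read ctrl-ring-ref simply means there is no control ring.
 */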
Paul Durrant4e15ee22016-05-13 09:37:26 +0100885static int connect_ctrl_ring(struct backend_info *be)
886{
887 struct xenbus_device *dev = be->dev;
888 struct xenvif *vif = be->vif;
889 unsigned int val;
890 grant_ref_t ring_ref;
891 unsigned int evtchn;
892 int err;
893
Jan Beulich6c27f992016-11-08 00:45:53 -0700894 err = xenbus_scanf(XBT_NIL, dev->otherend,
895 "ctrl-ring-ref", "%u", &val);
896 if (err < 0)
Paul Durrant4e15ee22016-05-13 09:37:26 +0100897 goto done; /* The frontend does not have a control ring */
898
899 ring_ref = val;
900
Jan Beulich6c27f992016-11-08 00:45:53 -0700901 err = xenbus_scanf(XBT_NIL, dev->otherend,
902 "event-channel-ctrl", "%u", &val);
903 if (err < 0) {
Paul Durrant4e15ee22016-05-13 09:37:26 +0100904 xenbus_dev_fatal(dev, err,
905 "reading %s/event-channel-ctrl",
906 dev->otherend);
907 goto fail;
908 }
909
910 evtchn = val;
911
912 err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
913 if (err) {
914 xenbus_dev_fatal(dev, err,
915 "mapping shared-frame %u port %u",
916 ring_ref, evtchn);
917 goto fail;
918 }
919
920done:
921 return 0;
922
923fail:
924 return err;
925}
926
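/* Called once the frontend has switched to Connected: read the frontend's
 * parameters from xenstore, allocate and initialise the per-queue state,
 * map the control and data rings, and finally turn the carrier on.
 */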
Ian Campbellf942dc22011-03-15 00:06:18 +0000927static void connect(struct backend_info *be)
928{
929 int err;
930 struct xenbus_device *dev = be->dev;
Wei Liue9ce7cb2014-06-04 10:30:42 +0100931 unsigned long credit_bytes, credit_usec;
932 unsigned int queue_index;
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100933 unsigned int requested_num_queues;
Wei Liue9ce7cb2014-06-04 10:30:42 +0100934 struct xenvif_queue *queue;
Ian Campbellf942dc22011-03-15 00:06:18 +0000935
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100936 /* Check whether the frontend requested multiple queues
937 * and read the number requested.
938 */
Juergen Grossf95842e2016-10-31 14:58:41 +0100939 requested_num_queues = xenbus_read_unsigned(dev->otherend,
940 "multi-queue-num-queues", 1);
941 if (requested_num_queues > xenvif_max_queues) {
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100942 /* buggy or malicious guest */
Arnd Bergmann0f06ac32016-11-10 09:55:42 +0100943 xenbus_dev_fatal(dev, -EINVAL,
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100944 "guest requested %u queues, exceeding the maximum of %u.",
945 requested_num_queues, xenvif_max_queues);
946 return;
947 }
948
Ian Campbellf942dc22011-03-15 00:06:18 +0000949 err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
950 if (err) {
951 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
952 return;
953 }
954
Wei Liue9ce7cb2014-06-04 10:30:42 +0100955 xen_net_read_rate(dev, &credit_bytes, &credit_usec);
Palik, Imre12b322a2015-06-19 14:21:51 +0200956 xen_unregister_watchers(be->vif);
Palik, Imreedafc132015-03-19 11:05:42 +0100957 xen_register_watchers(dev, be->vif);
Wei Liue9ce7cb2014-06-04 10:30:42 +0100958 read_xenbus_vif_flags(be);
959
Paul Durrant4e15ee22016-05-13 09:37:26 +0100960 err = connect_ctrl_ring(be);
961 if (err) {
962 xenbus_dev_fatal(dev, err, "connecting control ring");
963 return;
964 }
965
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100966 /* Use the number of queues requested by the frontend */
Kees Cookfad953c2018-06-12 14:27:37 -0700967 be->vif->queues = vzalloc(array_size(requested_num_queues,
968 sizeof(struct xenvif_queue)));
Insu Yun833b8f12015-10-15 18:02:28 +0000969 if (!be->vif->queues) {
970 xenbus_dev_fatal(dev, -ENOMEM,
971 "allocating queues");
972 return;
973 }
974
Wei Liuf7b50c42014-06-23 10:50:17 +0100975 be->vif->num_queues = requested_num_queues;
David Vrabelecf08d22014-10-22 14:08:55 +0100976 be->vif->stalled_queues = requested_num_queues;
Wei Liue9ce7cb2014-06-04 10:30:42 +0100977
978 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
979 queue = &be->vif->queues[queue_index];
980 queue->vif = be->vif;
981 queue->id = queue_index;
982 snprintf(queue->name, sizeof(queue->name), "%s-q%u",
983 be->vif->dev->name, queue->id);
984
985 err = xenvif_init_queue(queue);
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100986 if (err) {
987 /* xenvif_init_queue() cleans up after itself on
988 * failure, but we need to clean up any previously
989 * initialised queues. Set num_queues to i so that
990 * earlier queues can be destroyed using the regular
991 * disconnect logic.
992 */
Wei Liuf7b50c42014-06-23 10:50:17 +0100993 be->vif->num_queues = queue_index;
Wei Liue9ce7cb2014-06-04 10:30:42 +0100994 goto err;
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100995 }
Wei Liue9ce7cb2014-06-04 10:30:42 +0100996
Ross Lagerwallce0e5c52015-05-27 11:44:32 +0100997 queue->credit_bytes = credit_bytes;
Wei Liue9ce7cb2014-06-04 10:30:42 +0100998 queue->remaining_credit = credit_bytes;
Palik, Imre07ff8902015-01-06 16:44:44 +0100999 queue->credit_usec = credit_usec;
Wei Liue9ce7cb2014-06-04 10:30:42 +01001000
Paul Durrant4e15ee22016-05-13 09:37:26 +01001001 err = connect_data_rings(be, queue);
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001002 if (err) {
Paul Durrant4e15ee22016-05-13 09:37:26 +01001003 /* connect_data_rings() cleans up after itself on
1004 * failure, but we need to clean up after
1005 * xenvif_init_queue() here, and also clean up any
1006 * previously initialised queues.
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001007 */
1008 xenvif_deinit_queue(queue);
Wei Liuf7b50c42014-06-23 10:50:17 +01001009 be->vif->num_queues = queue_index;
Wei Liue9ce7cb2014-06-04 10:30:42 +01001010 goto err;
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001011 }
Wei Liue9ce7cb2014-06-04 10:30:42 +01001012 }
1013
Wei Liu628fa762014-08-12 11:59:30 +01001014#ifdef CONFIG_DEBUG_FS
1015 xenvif_debugfs_addif(be->vif);
1016#endif /* CONFIG_DEBUG_FS */
1017
Wei Liuf7b50c42014-06-23 10:50:17 +01001018 /* Initialisation completed, tell core driver the number of
1019 * active queues.
1020 */
1021 rtnl_lock();
1022 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
1023 netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
1024 rtnl_unlock();
1025
Wei Liue9ce7cb2014-06-04 10:30:42 +01001026 xenvif_carrier_on(be->vif);
Ian Campbellf942dc22011-03-15 00:06:18 +00001027
1028 unregister_hotplug_status_watch(be);
1029 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
1030 hotplug_status_changed,
1031 "%s/%s", dev->nodename, "hotplug-status");
Paul Durrantea732df2013-09-26 12:09:52 +01001032 if (!err)
Ian Campbellf942dc22011-03-15 00:06:18 +00001033 be->have_hotplug_status_watch = 1;
Ian Campbellf942dc22011-03-15 00:06:18 +00001034
Wei Liue9ce7cb2014-06-04 10:30:42 +01001035 netif_tx_wake_all_queues(be->vif->dev);
1036
1037 return;
1038
1039err:
Wei Liuf7b50c42014-06-23 10:50:17 +01001040 if (be->vif->num_queues > 0)
Paul Durrant4e15ee22016-05-13 09:37:26 +01001041 xenvif_disconnect_data(be->vif); /* Clean up existing queues */
Igor Druzhinin9a6cdf52017-01-17 20:49:37 +00001042 for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
1043 xenvif_deinit_queue(&be->vif->queues[queue_index]);
Wei Liue9ce7cb2014-06-04 10:30:42 +01001044 vfree(be->vif->queues);
1045 be->vif->queues = NULL;
Wei Liuf7b50c42014-06-23 10:50:17 +01001046 be->vif->num_queues = 0;
Paul Durrant4e15ee22016-05-13 09:37:26 +01001047 xenvif_disconnect_ctrl(be->vif);
Wei Liue9ce7cb2014-06-04 10:30:42 +01001048 return;
Ian Campbellf942dc22011-03-15 00:06:18 +00001049}
1050
1051
Paul Durrant4e15ee22016-05-13 09:37:26 +01001052static int connect_data_rings(struct backend_info *be,
1053 struct xenvif_queue *queue)
Ian Campbellf942dc22011-03-15 00:06:18 +00001054{
Ian Campbellf942dc22011-03-15 00:06:18 +00001055 struct xenbus_device *dev = be->dev;
Wei Liuf7b50c42014-06-23 10:50:17 +01001056 unsigned int num_queues = queue->vif->num_queues;
Ian Campbellf942dc22011-03-15 00:06:18 +00001057 unsigned long tx_ring_ref, rx_ring_ref;
Wei Liue9ce7cb2014-06-04 10:30:42 +01001058 unsigned int tx_evtchn, rx_evtchn;
Ian Campbellf942dc22011-03-15 00:06:18 +00001059 int err;
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001060 char *xspath;
1061 size_t xspathsize;
1062 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
Ian Campbellf942dc22011-03-15 00:06:18 +00001063
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001064 /* If the frontend requested 1 queue, or we have fallen back
1065 * to single queue due to lack of frontend support for multi-
1066 * queue, expect the remaining XenStore keys in the toplevel
1067 * directory. Otherwise, expect them in a subdirectory called
1068 * queue-N.
1069 */
1070 if (num_queues == 1) {
1071 xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
1072 if (!xspath) {
1073 xenbus_dev_fatal(dev, -ENOMEM,
1074 "reading ring references");
1075 return -ENOMEM;
1076 }
1077 strcpy(xspath, dev->otherend);
1078 } else {
1079 xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
1080 xspath = kzalloc(xspathsize, GFP_KERNEL);
1081 if (!xspath) {
1082 xenbus_dev_fatal(dev, -ENOMEM,
1083 "reading ring references");
1084 return -ENOMEM;
1085 }
1086 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
1087 queue->id);
1088 }
1089
1090 err = xenbus_gather(XBT_NIL, xspath,
Ian Campbellf942dc22011-03-15 00:06:18 +00001091 "tx-ring-ref", "%lu", &tx_ring_ref,
Wei Liue1f00a692013-05-22 06:34:45 +00001092 "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
Ian Campbellf942dc22011-03-15 00:06:18 +00001093 if (err) {
1094 xenbus_dev_fatal(dev, err,
Wei Liue1f00a692013-05-22 06:34:45 +00001095 "reading %s/ring-ref",
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001096 xspath);
1097 goto err;
Ian Campbellf942dc22011-03-15 00:06:18 +00001098 }
1099
Wei Liue1f00a692013-05-22 06:34:45 +00001100 /* Try split event channels first, then single event channel. */
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001101 err = xenbus_gather(XBT_NIL, xspath,
Wei Liue1f00a692013-05-22 06:34:45 +00001102 "event-channel-tx", "%u", &tx_evtchn,
1103 "event-channel-rx", "%u", &rx_evtchn, NULL);
1104 if (err < 0) {
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001105 err = xenbus_scanf(XBT_NIL, xspath,
Wei Liue1f00a692013-05-22 06:34:45 +00001106 "event-channel", "%u", &tx_evtchn);
1107 if (err < 0) {
1108 xenbus_dev_fatal(dev, err,
1109 "reading %s/event-channel(-tx/rx)",
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001110 xspath);
1111 goto err;
Wei Liue1f00a692013-05-22 06:34:45 +00001112 }
1113 rx_evtchn = tx_evtchn;
1114 }
1115
Wei Liue9ce7cb2014-06-04 10:30:42 +01001116 /* Map the shared frame, irq etc. */
Paul Durrant4e15ee22016-05-13 09:37:26 +01001117 err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
1118 tx_evtchn, rx_evtchn);
Wei Liue9ce7cb2014-06-04 10:30:42 +01001119 if (err) {
1120 xenbus_dev_fatal(dev, err,
1121 "mapping shared-frames %lu/%lu port tx %u rx %u",
1122 tx_ring_ref, rx_ring_ref,
1123 tx_evtchn, rx_evtchn);
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001124 goto err;
Wei Liue9ce7cb2014-06-04 10:30:42 +01001125 }
1126
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +01001127 err = 0;
1128err: /* Regular return falls through with err == 0 */
1129 kfree(xspath);
1130 return err;
Wei Liue9ce7cb2014-06-04 10:30:42 +01001131}
1132
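/* Read the optional feature flags advertised by the frontend. rx-copy is
 * the only receive mode this backend supports, so the function fails with
 * -EOPNOTSUPP if the frontend does not request it; the remaining keys
 * merely tune the vif (GSO, checksum offload, stall detection).
 */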
static int read_xenbus_vif_flags(struct backend_info *be)
{
        struct xenvif *vif = be->vif;
        struct xenbus_device *dev = be->dev;
        unsigned int rx_copy;
        int err;

        err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
                           &rx_copy);
        if (err == -ENOENT) {
                err = 0;
                rx_copy = 0;
        }
        if (err < 0) {
                xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
                                 dev->otherend);
                return err;
        }
        if (!rx_copy)
                return -EOPNOTSUPP;

        if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
                /* - Reduce drain timeout to poll more frequently for
                 *   Rx requests.
                 * - Disable Rx stall detection.
                 */
                be->vif->drain_timeout = msecs_to_jiffies(30);
                be->vif->stall_timeout = 0;
        }

        vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);

        vif->gso_mask = 0;

        if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
                vif->gso_mask |= GSO_BIT(TCPV4);

        if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
                vif->gso_mask |= GSO_BIT(TCPV6);

        vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
                                             "feature-no-csum-offload", 0);

        vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
                                                "feature-ipv6-csum-offload", 0);

        return 0;
}

static const struct xenbus_device_id netback_ids[] = {
        { "vif" },
        { "" }
};

static struct xenbus_driver netback_driver = {
        .ids = netback_ids,
        .probe = netback_probe,
        .remove = netback_remove,
        .uevent = netback_uevent,
        .otherend_changed = frontend_changed,
};

int xenvif_xenbus_init(void)
{
        return xenbus_register_backend(&netback_driver);
}

void xenvif_xenbus_fini(void)
{
        return xenbus_unregister_driver(&netback_driver);
}