// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Xenbus code for netif backend
 *
 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
 * Copyright (C) 2005 XenSource Ltd
 */

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>

static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state);

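/*
 * Debugfs support: each vif gets a directory under the driver's
 * debugfs root (xen_netback_dbg_root) containing one "io_ring_qN" file
 * per queue, dumping the shared-ring state and accepting a "kick"
 * command, plus a read-only "ctrl" file with the hash configuration
 * when a control ring is connected.
 */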
#ifdef CONFIG_DEBUG_FS
struct dentry *xen_netback_dbg_root = NULL;

static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
	struct xenvif_queue *queue = m->private;
	struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
	struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
	struct netdev_queue *dev_queue;

	if (tx_ring->sring) {
		struct xen_netif_tx_sring *sring = tx_ring->sring;

		seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
			   tx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   tx_ring->req_cons,
			   tx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
			   sring->rsp_prod,
			   tx_ring->rsp_prod_pvt,
			   tx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
		seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
			   queue->pending_prod,
			   queue->pending_cons,
			   nr_pending_reqs(queue));
		seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
			   queue->dealloc_prod,
			   queue->dealloc_cons,
			   queue->dealloc_prod - queue->dealloc_cons);
	}

	if (rx_ring->sring) {
		struct xen_netif_rx_sring *sring = rx_ring->sring;

		seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   rx_ring->req_cons,
			   rx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
			   sring->rsp_prod,
			   rx_ring->rsp_prod_pvt,
			   rx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
	}

	seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
		   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
		   "remaining: %lu, expires: %lu, now: %lu\n",
		   queue->napi.state, queue->napi.weight,
		   skb_queue_len(&queue->tx_queue),
		   timer_pending(&queue->credit_timeout),
		   queue->credit_bytes,
		   queue->credit_usec,
		   queue->remaining_credit,
		   queue->credit_timeout.expires,
		   jiffies);

	dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);

	seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
		   queue->rx_queue_len, queue->rx_queue_max,
		   skb_queue_len(&queue->rx_queue),
		   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");

	return 0;
}

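/*
 * Writing "kick" to an io_ring_qN file calls the queue's interrupt
 * handler directly, which can be used to prod a stalled queue while
 * debugging.
 */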
#define XENVIF_KICK_STR "kick"
#define BUFFER_SIZE 32

static ssize_t
xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
		     loff_t *ppos)
{
	struct xenvif_queue *queue =
		((struct seq_file *)filp->private_data)->private;
	int len;
	char write[BUFFER_SIZE];

	/* don't allow partial writes and check the length */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(write))
		return -ENOSPC;

	len = simple_write_to_buffer(write,
				     sizeof(write) - 1,
				     ppos,
				     buf,
				     count);
	if (len < 0)
		return len;

	write[len] = '\0';

	if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1)) {
		xenvif_interrupt(0, (void *)queue);
	} else {
		pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
			queue->id);
		count = -EINVAL;
	}
	return count;
}

static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
{
	int ret;
	void *queue = NULL;

	if (inode->i_private)
		queue = inode->i_private;
	ret = single_open(filp, xenvif_read_io_ring, queue);
	filp->f_mode |= FMODE_PWRITE;
	return ret;
}

static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
	.owner = THIS_MODULE,
	.open = xenvif_io_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = xenvif_write_io_ring,
};

static int xenvif_ctrl_show(struct seq_file *m, void *v)
{
	struct xenvif *vif = m->private;

	xenvif_dump_hash_info(vif, m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);

static void xenvif_debugfs_addif(struct xenvif *vif)
{
	struct dentry *pfile;
	int i;

	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		return;

	vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
						  xen_netback_dbg_root);
	if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
		for (i = 0; i < vif->num_queues; ++i) {
			char filename[sizeof("io_ring_q") + 4];

			snprintf(filename, sizeof(filename), "io_ring_q%d", i);
			pfile = debugfs_create_file(filename,
						    0600,
						    vif->xenvif_dbg_root,
						    &vif->queues[i],
						    &xenvif_dbg_io_ring_ops_fops);
			if (IS_ERR_OR_NULL(pfile))
				pr_warn("Creation of io_ring file returned %ld!\n",
					PTR_ERR(pfile));
		}

		if (vif->ctrl_irq) {
			pfile = debugfs_create_file("ctrl",
						    0400,
						    vif->xenvif_dbg_root,
						    vif,
						    &xenvif_ctrl_fops);
			if (IS_ERR_OR_NULL(pfile))
				pr_warn("Creation of ctrl file returned %ld!\n",
					PTR_ERR(pfile));
		}
	} else {
		netdev_warn(vif->dev,
			    "Creation of vif debugfs dir returned %ld!\n",
			    PTR_ERR(vif->xenvif_dbg_root));
	}
}

static void xenvif_debugfs_delif(struct xenvif *vif)
{
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		return;

	debugfs_remove_recursive(vif->xenvif_dbg_root);
	vif->xenvif_dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */

static int netback_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	set_backend_state(be, XenbusStateClosed);

	unregister_hotplug_status_watch(be);
	if (be->vif) {
		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		xen_unregister_watchers(be->vif);
		xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
		xenvif_free(be->vif);
		be->vif = NULL;
	}
	kfree(be->hotplug_script);
	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}

/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and switch to InitWait.
 */
static int netback_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	int sg;
	const char *script;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}

	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->state = XenbusStateInitialising;
	err = xenbus_switch_state(dev, XenbusStateInitialising);
	if (err)
		goto fail;

	sg = 1;

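	/*
	 * Advertise the features this backend supports.  The writes are
	 * grouped into one xenbus transaction so the frontend sees them
	 * atomically; xenbus_transaction_end() returns -EAGAIN when the
	 * transaction raced with another xenstore update, in which case
	 * the whole batch is retried.
	 */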
	do {
		err = xenbus_transaction_start(&xbt);
		if (err) {
			xenbus_dev_fatal(dev, err, "starting transaction");
			goto fail;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
		if (err) {
			message = "writing feature-sg";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv4";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv6";
			goto abort_transaction;
		}

		/* We support partial checksum setup for IPv6 packets */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-ipv6-csum-offload",
				    "%d", 1);
		if (err) {
			message = "writing feature-ipv6-csum-offload";
			goto abort_transaction;
		}

		/* We support rx-copy path. */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-copy", "%d", 1);
		if (err) {
			message = "writing feature-rx-copy";
			goto abort_transaction;
		}

		/*
		 * We don't support rx-flip path (except old guests who don't
		 * grok this feature flag).
		 */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-flip", "%d", 0);
		if (err) {
			message = "writing feature-rx-flip";
			goto abort_transaction;
		}

		/* We support dynamic multicast-control. */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-multicast-control", "%d", 1);
		if (err) {
			message = "writing feature-multicast-control";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename,
				    "feature-dynamic-multicast-control",
				    "%d", 1);
		if (err) {
			message = "writing feature-dynamic-multicast-control";
			goto abort_transaction;
		}

		err = xenbus_transaction_end(xbt, 0);
	} while (err == -EAGAIN);

	if (err) {
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto fail;
	}

	/*
	 * Split event channels support, this is optional so it is not
	 * put inside the above loop.
	 */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-split-event-channels",
			    "%u", separate_tx_rx_irq);
	if (err)
		pr_debug("Error writing feature-split-event-channels\n");

	/* Multi-queue support: This is an optional feature. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenvif_max_queues);
	if (err)
		pr_debug("Error writing multi-queue-max-queues\n");

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-ctrl-ring",
			    "%u", true);
	if (err)
		pr_debug("Error writing feature-ctrl-ring\n");

	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
	if (IS_ERR(script)) {
		err = PTR_ERR(script);
		xenbus_dev_fatal(dev, err, "reading script");
		goto fail;
	}

	be->hotplug_script = script;

	/* This kicks hotplug scripts, so do it immediately. */
	err = backend_create_xenvif(be);
	if (err)
		goto fail;

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
fail:
	pr_debug("failed\n");
	netback_remove(dev);
	return err;
}

/*
 * Handle the creation of the hotplug script environment.  We add the script
 * and vif variables to the environment, for the benefit of the vif-* hotplug
 * scripts.
 */
static int netback_uevent(struct xenbus_device *xdev,
			  struct kobj_uevent_env *env)
{
	struct backend_info *be = dev_get_drvdata(&xdev->dev);

	if (!be)
		return 0;

	if (add_uevent_var(env, "script=%s", be->hotplug_script))
		return -ENOMEM;

	if (!be->vif)
		return 0;

	return add_uevent_var(env, "vif=%s", be->vif->dev->name);
}

static int backend_create_xenvif(struct backend_info *be)
{
	int err;
	long handle;
	struct xenbus_device *dev = be->dev;
	struct xenvif *vif;

	if (be->vif != NULL)
		return 0;

	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading handle");
		return (err < 0) ? err : -EINVAL;
	}

	vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
	if (IS_ERR(vif)) {
		err = PTR_ERR(vif);
		xenbus_dev_fatal(dev, err, "creating interface");
		return err;
	}
	be->vif = vif;
	vif->be = be;

	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
	return 0;
}

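/*
 * Tear down a connected interface: data rings first, then the queues
 * themselves, and finally the control ring.  Clearing num_queues
 * before freeing the queues (with a synchronize_net() in between)
 * ensures that any handler still running sees no queues rather than
 * freed memory.
 */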
static void backend_disconnect(struct backend_info *be)
{
	struct xenvif *vif = be->vif;

	if (vif) {
		unsigned int num_queues = vif->num_queues;
		unsigned int queue_index;

		xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
		xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
		xenvif_disconnect_data(vif);

		/* At this point some of the handlers may still be active
		 * so we need to have additional synchronization here.
		 */
		vif->num_queues = 0;
		synchronize_net();

		for (queue_index = 0; queue_index < num_queues; ++queue_index)
			xenvif_deinit_queue(&vif->queues[queue_index]);

		vfree(vif->queues);
		vif->queues = NULL;

		xenvif_disconnect_ctrl(vif);
	}
}

static void backend_connect(struct backend_info *be)
{
	if (be->vif)
		connect(be);
}

static inline void backend_switch_state(struct backend_info *be,
					enum xenbus_state state)
{
	struct xenbus_device *dev = be->dev;

	pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
	be->state = state;

	/* If we are waiting for a hotplug script then defer the
	 * actual xenbus state change.
	 */
	if (!be->have_hotplug_status_watch)
		xenbus_switch_state(dev, state);
}

/* Handle backend state transitions:
 *
 * The backend state starts in Initialising and the following transitions are
 * allowed.
 *
 * Initialising -> InitWait -> Connected
 *          \
 *           \        ^    \         |
 *            \       |     \        |
 *             \      |      \       |
 *              \     |       \      |
 *               \    |        \     |
 *                \   |         \    |
 *                 V  |          V   V
 *
 *                  Closed  <-> Closing
 *
 * The state argument specifies the eventual state of the backend and the
 * function transitions to that state via the shortest path.  For example,
 * Connected -> Closed is walked as Connected -> Closing -> Closed, one
 * step per loop iteration.
 */
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state)
{
	while (be->state != state) {
		switch (be->state) {
		case XenbusStateInitialising:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateInitWait:
			switch (state) {
			case XenbusStateConnected:
				backend_connect(be);
				backend_switch_state(be, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				backend_disconnect(be);
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		default:
			BUG();
		}
	}
}

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));

	be->frontend_state = frontend_state;

	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(be, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		set_backend_state(be, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(be, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(be, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through - if not online */
	case XenbusStateUnknown:
		set_backend_state(be, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
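/*
 * Parse the "rate" key under the backend node.  The toolstack writes
 * it as "<bytes>,<usec>": a credit of <bytes> replenished every <usec>
 * microseconds.  An absent or malformed key leaves the rate unlimited.
 */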
static void xen_net_read_rate(struct xenbus_device *dev,
			      unsigned long *bytes, unsigned long *usec)
{
	char *s, *e;
	unsigned long b, u;
	char *ratestr;

	/* Default to unlimited bandwidth. */
	*bytes = ~0UL;
	*usec = 0;

	ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
	if (IS_ERR(ratestr))
		return;

	s = ratestr;
	b = simple_strtoul(s, &e, 10);
	if ((s == e) || (*e != ','))
		goto fail;

	s = e + 1;
	u = simple_strtoul(s, &e, 10);
	if ((s == e) || (*e != '\0'))
		goto fail;

	*bytes = b;
	*usec = u;

	kfree(ratestr);
	return;

 fail:
	pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
	kfree(ratestr);
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN - 1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e + 1;
	}

	kfree(macstr);
	return 0;
}

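/*
 * Watch callback for the "rate" key: re-read the credit parameters on
 * every change and apply them to all queues, clamping accumulated
 * credit down to the new budget when no replenish timer is pending.
 */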
static void xen_net_rate_changed(struct xenbus_watch *watch,
				 const char *path, const char *token)
{
	struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned int queue_index;

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
		struct xenvif_queue *queue = &vif->queues[queue_index];

		queue->credit_bytes = credit_bytes;
		queue->credit_usec = credit_usec;
		if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
		    queue->remaining_credit > queue->credit_bytes) {
			queue->remaining_credit = queue->credit_bytes;
		}
	}
}

static int xen_register_credit_watch(struct xenbus_device *dev,
				     struct xenvif *vif)
{
	int err = 0;
	char *node;
	unsigned int maxlen = strlen(dev->nodename) + sizeof("/rate");

	if (vif->credit_watch.node)
		return -EADDRINUSE;

	node = kmalloc(maxlen, GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	snprintf(node, maxlen, "%s/rate", dev->nodename);
	vif->credit_watch.node = node;
	vif->credit_watch.callback = xen_net_rate_changed;
	err = register_xenbus_watch(&vif->credit_watch);
	if (err) {
		pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
		kfree(node);
		vif->credit_watch.node = NULL;
		vif->credit_watch.callback = NULL;
	}
	return err;
}

static void xen_unregister_credit_watch(struct xenvif *vif)
{
	if (vif->credit_watch.node) {
		unregister_xenbus_watch(&vif->credit_watch);
		kfree(vif->credit_watch.node);
		vif->credit_watch.node = NULL;
	}
}

static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
				   const char *path, const char *token)
{
	struct xenvif *vif = container_of(watch, struct xenvif,
					  mcast_ctrl_watch);
	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);

	vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
					"request-multicast-control", 0);
}

static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
					 struct xenvif *vif)
{
	int err = 0;
	char *node;
	unsigned int maxlen = strlen(dev->otherend) +
		sizeof("/request-multicast-control");

	if (vif->mcast_ctrl_watch.node) {
		pr_err_ratelimited("Watch is already registered\n");
		return -EADDRINUSE;
	}

	node = kmalloc(maxlen, GFP_KERNEL);
	if (!node) {
		pr_err("Failed to allocate memory for watch\n");
		return -ENOMEM;
	}
	snprintf(node, maxlen, "%s/request-multicast-control",
		 dev->otherend);
	vif->mcast_ctrl_watch.node = node;
	vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
	err = register_xenbus_watch(&vif->mcast_ctrl_watch);
	if (err) {
		pr_err("Failed to set watcher %s\n",
		       vif->mcast_ctrl_watch.node);
		kfree(node);
		vif->mcast_ctrl_watch.node = NULL;
		vif->mcast_ctrl_watch.callback = NULL;
	}
	return err;
}

static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
{
	if (vif->mcast_ctrl_watch.node) {
		unregister_xenbus_watch(&vif->mcast_ctrl_watch);
		kfree(vif->mcast_ctrl_watch.node);
		vif->mcast_ctrl_watch.node = NULL;
	}
}

static void xen_register_watchers(struct xenbus_device *dev,
				  struct xenvif *vif)
{
	xen_register_credit_watch(dev, vif);
	xen_register_mcast_ctrl_watch(dev, vif);
}

static void xen_unregister_watchers(struct xenvif *vif)
{
	xen_unregister_mcast_ctrl_watch(vif);
	xen_unregister_credit_watch(vif);
}

static void unregister_hotplug_status_watch(struct backend_info *be)
{
	if (be->have_hotplug_status_watch) {
		unregister_xenbus_watch(&be->hotplug_status_watch);
		kfree(be->hotplug_status_watch.node);
	}
	be->have_hotplug_status_watch = 0;
}

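/*
 * backend_switch_state() defers the xenbus state change while a
 * hotplug script is still running.  Once the script writes "connected"
 * to the hotplug-status node this watch fires, publishes the pending
 * state and unregisters itself.
 */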
static void hotplug_status_changed(struct xenbus_watch *watch,
				   const char *path,
				   const char *token)
{
	struct backend_info *be = container_of(watch,
					       struct backend_info,
					       hotplug_status_watch);
	char *str;
	unsigned int len;

	str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
	if (IS_ERR(str))
		return;
	if (len == sizeof("connected") - 1 && !memcmp(str, "connected", len)) {
		/* Complete any pending state change */
		xenbus_switch_state(be->dev, be->state);

		/* Not interested in this watch anymore. */
		unregister_hotplug_status_watch(be);
	}
	kfree(str);
}

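/*
 * The control ring is optional: if the frontend wrote no ctrl-ring-ref
 * key, connect_ctrl_ring() succeeds without mapping anything.  If the
 * key is present, the matching event channel is mandatory.
 */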
static int connect_ctrl_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xenvif *vif = be->vif;
	unsigned int val;
	grant_ref_t ring_ref;
	unsigned int evtchn;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "ctrl-ring-ref", "%u", &val);
	if (err < 0)
		goto done; /* The frontend does not have a control ring */

	ring_ref = val;

	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "event-channel-ctrl", "%u", &val);
	if (err < 0) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/event-channel-ctrl",
				 dev->otherend);
		goto fail;
	}

	evtchn = val;

	err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frame %u port %u",
				 ring_ref, evtchn);
		goto fail;
	}

done:
	return 0;

fail:
	return err;
}

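/*
 * Bring the interface up once the frontend is ready: read the MAC
 * address and rate limit, (re)register the xenstore watches, connect
 * the optional control ring, then allocate and connect one
 * xenvif_queue per requested queue.  The hotplug-status watch
 * registered at the end defers the backend's Connected state change
 * until the hotplug script has completed.
 */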
static void connect(struct backend_info *be)
{
	int err;
	struct xenbus_device *dev = be->dev;
	unsigned long credit_bytes, credit_usec;
	unsigned int queue_index;
	unsigned int requested_num_queues;
	struct xenvif_queue *queue;

	/* Check whether the frontend requested multiple queues
	 * and read the number requested.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
					"multi-queue-num-queues", 1);
	if (requested_num_queues > xenvif_max_queues) {
		/* buggy or malicious guest */
		xenbus_dev_fatal(dev, -EINVAL,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenvif_max_queues);
		return;
	}

	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		return;
	}

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	xen_unregister_watchers(be->vif);
	xen_register_watchers(dev, be->vif);
	read_xenbus_vif_flags(be);

	err = connect_ctrl_ring(be);
	if (err) {
		xenbus_dev_fatal(dev, err, "connecting control ring");
		return;
	}

	/* Use the number of queues requested by the frontend */
	be->vif->queues = vzalloc(array_size(requested_num_queues,
					     sizeof(struct xenvif_queue)));
	if (!be->vif->queues) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating queues");
		return;
	}

	be->vif->num_queues = requested_num_queues;
	be->vif->stalled_queues = requested_num_queues;

	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
		queue = &be->vif->queues[queue_index];
		queue->vif = be->vif;
		queue->id = queue_index;
		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
			 be->vif->dev->name, queue->id);

		err = xenvif_init_queue(queue);
		if (err) {
			/* xenvif_init_queue() cleans up after itself on
			 * failure, but we need to clean up any previously
			 * initialised queues. Set num_queues to i so that
			 * earlier queues can be destroyed using the regular
			 * disconnect logic.
			 */
			be->vif->num_queues = queue_index;
			goto err;
		}

		queue->credit_bytes = credit_bytes;
		queue->remaining_credit = credit_bytes;
		queue->credit_usec = credit_usec;

		err = connect_data_rings(be, queue);
		if (err) {
			/* connect_data_rings() cleans up after itself on
			 * failure, but we need to clean up after
			 * xenvif_init_queue() here, and also clean up any
			 * previously initialised queues.
			 */
			xenvif_deinit_queue(queue);
			be->vif->num_queues = queue_index;
			goto err;
		}
	}

#ifdef CONFIG_DEBUG_FS
	xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */

	/* Initialisation completed, tell core driver the number of
	 * active queues.
	 */
	rtnl_lock();
	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
	rtnl_unlock();

	xenvif_carrier_on(be->vif);

	unregister_hotplug_status_watch(be);
	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
				   hotplug_status_changed,
				   "%s/%s", dev->nodename, "hotplug-status");
	if (!err)
		be->have_hotplug_status_watch = 1;

	netif_tx_wake_all_queues(be->vif->dev);

	return;

err:
	if (be->vif->num_queues > 0)
		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
	for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
		xenvif_deinit_queue(&be->vif->queues[queue_index]);
	vfree(be->vif->queues);
	be->vif->queues = NULL;
	be->vif->num_queues = 0;
	xenvif_disconnect_ctrl(be->vif);
}

static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue)
{
	struct xenbus_device *dev = be->dev;
	unsigned int num_queues = queue->vif->num_queues;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn;
	int err;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */

	/* If the frontend requested 1 queue, or we have fallen back
	 * to single queue due to lack of frontend support for multi-
	 * queue, expect the remaining XenStore keys in the toplevel
	 * directory. Otherwise, expect them in a subdirectory called
	 * queue-N.
	 */
	if (num_queues == 1) {
		xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		strcpy(xspath, dev->otherend);
	} else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kzalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
			 queue->id);
	}

	err = xenbus_gather(XBT_NIL, xspath,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref",
				 xspath);
		goto err;
	}

	/* Try split event channels first, then single event channel. */
	err = xenbus_gather(XBT_NIL, xspath,
			    "event-channel-tx", "%u", &tx_evtchn,
			    "event-channel-rx", "%u", &rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, xspath,
				   "event-channel", "%u", &tx_evtchn);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "reading %s/event-channel(-tx/rx)",
					 xspath);
			goto err;
		}
		rx_evtchn = tx_evtchn;
	}

	/* Map the shared frame, irq etc. */
	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
				  tx_evtchn, rx_evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port tx %u rx %u",
				 tx_ring_ref, rx_ring_ref,
				 tx_evtchn, rx_evtchn);
		goto err;
	}

	err = 0;
err: /* Regular return falls through with err == 0 */
	kfree(xspath);
	return err;
}

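/*
 * Read the feature flags advertised by the frontend.  request-rx-copy
 * is mandatory (the legacy rx-flip mode is not supported); the other
 * keys, such as feature-rx-notify, feature-sg and the GSO/checksum
 * flags, are optional.
 */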
static int read_xenbus_vif_flags(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned int rx_copy;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
		/* - Reduce drain timeout to poll more frequently for
		 *   Rx requests.
		 * - Disable Rx stall detection.
		 */
		be->vif->drain_timeout = msecs_to_jiffies(30);
		be->vif->stall_timeout = 0;
	}

	vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);

	vif->gso_mask = 0;

	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
		vif->gso_mask |= GSO_BIT(TCPV6);

	vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
					     "feature-no-csum-offload", 0);

	vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
						"feature-ipv6-csum-offload", 0);

	return 0;
}

static const struct xenbus_device_id netback_ids[] = {
	{ "vif" },
	{ "" }
};

static struct xenbus_driver netback_driver = {
	.ids = netback_ids,
	.probe = netback_probe,
	.remove = netback_remove,
	.uevent = netback_uevent,
	.otherend_changed = frontend_changed,
};

int xenvif_xenbus_init(void)
{
	return xenbus_register_backend(&netback_driver);
}

void xenvif_xenbus_fini(void)
{
	return xenbus_unregister_driver(&netback_driver);
}