/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/socket.h>

#include <net/sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#include "pvcalls-front.h"

#define PVCALLS_INVALID_ID UINT_MAX
#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
#define PVCALLS_FRONT_MAX_SPIN 5000

struct pvcalls_bedata {
	struct xen_pvcalls_front_ring ring;
	grant_ref_t ref;
	int irq;

	struct list_head socket_mappings;
	spinlock_t socket_lock;

	wait_queue_head_t inflight_req;
	struct xen_pvcalls_response rsp[PVCALLS_NR_RSP_PER_RING];
};
/* Only one front/back connection supported. */
static struct xenbus_device *pvcalls_front_dev;
static atomic_t pvcalls_refcount;

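/*
 * pvcalls_refcount counts the callers currently inside the frontend
 * (between pvcalls_enter and pvcalls_exit); pvcalls_front_remove()
 * spins until it drops to zero before tearing the connection down.
 */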
/* first increment refcount, then proceed */
#define pvcalls_enter() {               \
	atomic_inc(&pvcalls_refcount);  \
}

/* first complete other operations, then decrement refcount */
#define pvcalls_exit() {                \
	atomic_dec(&pvcalls_refcount);  \
}

struct sock_mapping {
	bool active_socket;
	struct list_head list;
	struct socket *sock;
	atomic_t refcount;
	union {
		struct {
			int irq;
			grant_ref_t ref;
			struct pvcalls_data_intf *ring;
			struct pvcalls_data data;
			struct mutex in_mutex;
			struct mutex out_mutex;

			wait_queue_head_t inflight_conn_req;
		} active;
		struct {
		/*
		 * Socket status, needs to be 64-bit aligned due to the
		 * test_and_* functions which have this requirement on arm64.
		 */
#define PVCALLS_STATUS_UNINITALIZED  0
#define PVCALLS_STATUS_BIND          1
#define PVCALLS_STATUS_LISTEN        2
			uint8_t status __attribute__((aligned(8)));
		/*
		 * Internal state-machine flags.
		 * Only one accept operation can be inflight for a socket.
		 * Only one poll operation can be inflight for a given socket.
		 * flags needs to be 64-bit aligned due to the test_and_*
		 * functions which have this requirement on arm64.
		 */
#define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
#define PVCALLS_FLAG_POLL_INFLIGHT   1
#define PVCALLS_FLAG_POLL_RET        2
			uint8_t flags __attribute__((aligned(8)));
			uint32_t inflight_req_id;
			struct sock_mapping *accept_map;
			wait_queue_head_t inflight_accept_req;
		} passive;
	};
};

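/*
 * Fetch the sock_mapping stashed in sk_send_head and take both the
 * global refcount and the per-socket refcount. The per-socket count
 * lets pvcalls_front_release() wait for in-flight sendmsg/recvmsg/poll
 * callers to drain before freeing the mapping.
 */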
static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock)
{
	struct sock_mapping *map;

	if (!pvcalls_front_dev ||
		dev_get_drvdata(&pvcalls_front_dev->dev) == NULL)
		return ERR_PTR(-ENOTCONN);

	map = (struct sock_mapping *)sock->sk->sk_send_head;
	if (map == NULL)
		return ERR_PTR(-ENOTSOCK);

	pvcalls_enter();
	atomic_inc(&map->refcount);
	return map;
}

static inline void pvcalls_exit_sock(struct socket *sock)
{
	struct sock_mapping *map;

	map = (struct sock_mapping *)sock->sk->sk_send_head;
	atomic_dec(&map->refcount);
	pvcalls_exit();
}

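/*
 * Pick the response slot for the next request: slots are indexed by
 * req_prod_pvt masked with the ring size, so a slot is reusable only
 * once its previous response has been consumed and its req_id reset to
 * PVCALLS_INVALID_ID. Returns -EAGAIN if the ring is full or the slot
 * is still in use.
 */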
static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
{
	*req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
	if (RING_FULL(&bedata->ring) ||
	    bedata->rsp[*req_id].req_id != PVCALLS_INVALID_ID)
		return -EAGAIN;
	return 0;
}

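/*
 * The two *_todo helpers below tell poll/sendmsg/recvmsg whether they
 * can make progress: there is write work to do when the out ring has
 * free space (or reports an error other than -ENOTCONN), and read work
 * to do when the in ring holds queued bytes or a pending error.
 */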
static bool pvcalls_front_write_todo(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->active.ring;
	RING_IDX cons, prod, size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
	int32_t error;

	error = intf->out_error;
	if (error == -ENOTCONN)
		return false;
	if (error != 0)
		return true;

	cons = intf->out_cons;
	prod = intf->out_prod;
	return !!(size - pvcalls_queued(prod, cons, size));
}

static bool pvcalls_front_read_todo(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->active.ring;
	RING_IDX cons, prod;
	int32_t error;

	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	return (error != 0 ||
		pvcalls_queued(prod, cons,
			       XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) != 0);
}

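/*
 * Interrupt handler for the main command ring. POLL responses flip the
 * passive socket's flag bits directly; all other responses are copied
 * into their rsp[] slot with req_id written last, so that waiters
 * checking READ_ONCE(rsp[req_id].req_id) == req_id (paired with
 * smp_rmb) always observe a complete response.
 */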
static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_bedata *bedata;
	struct xen_pvcalls_response *rsp;
	uint8_t *src, *dst;
	int req_id = 0, more = 0, done = 0;

	if (dev == NULL)
		return IRQ_HANDLED;

	pvcalls_enter();
	bedata = dev_get_drvdata(&dev->dev);
	if (bedata == NULL) {
		pvcalls_exit();
		return IRQ_HANDLED;
	}

again:
	while (RING_HAS_UNCONSUMED_RESPONSES(&bedata->ring)) {
		rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);

		req_id = rsp->req_id;
		if (rsp->cmd == PVCALLS_POLL) {
			struct sock_mapping *map = (struct sock_mapping *)(uintptr_t)
						   rsp->u.poll.id;

			clear_bit(PVCALLS_FLAG_POLL_INFLIGHT,
				  (void *)&map->passive.flags);
			/*
			 * clear INFLIGHT, then set RET. It pairs with
			 * the checks at the beginning of
			 * pvcalls_front_poll_passive.
			 */
			smp_wmb();
			set_bit(PVCALLS_FLAG_POLL_RET,
				(void *)&map->passive.flags);
		} else {
			dst = (uint8_t *)&bedata->rsp[req_id] +
			      sizeof(rsp->req_id);
			src = (uint8_t *)rsp + sizeof(rsp->req_id);
			memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
			/*
			 * First copy the rest of the data, then req_id. It is
			 * paired with the barrier when accessing bedata->rsp.
			 */
			smp_wmb();
			bedata->rsp[req_id].req_id = req_id;
		}

		done = 1;
		bedata->ring.rsp_cons++;
	}

	RING_FINAL_CHECK_FOR_RESPONSES(&bedata->ring, more);
	if (more)
		goto again;
	if (done)
		wake_up(&bedata->inflight_req);
	pvcalls_exit();
	return IRQ_HANDLED;
}

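/*
 * Tear down an active socket: unbind the per-connection interrupt,
 * unlink the mapping, revoke the grants covering the data ring and the
 * indexes page, and free the mapping.
 */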
static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
				   struct sock_mapping *map)
{
	int i;

	unbind_from_irqhandler(map->active.irq, map);

	spin_lock(&bedata->socket_lock);
	if (!list_empty(&map->list))
		list_del_init(&map->list);
	spin_unlock(&bedata->socket_lock);

	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
		gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0);
	gnttab_end_foreign_access(map->active.ref, 0, 0);
	free_page((unsigned long)map->active.ring);

	kfree(map);
}

static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;

	if (map == NULL)
		return IRQ_HANDLED;

	wake_up_interruptible(&map->active.inflight_conn_req);

	return IRQ_HANDLED;
}

int pvcalls_front_socket(struct socket *sock)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map = NULL;
	struct xen_pvcalls_request *req;
	int notify, req_id, ret;

	/*
	 * PVCalls only supports domain AF_INET,
	 * type SOCK_STREAM and protocol 0 sockets for now.
	 *
	 * Check socket type here, AF_INET and protocol checks are done
	 * by the caller.
	 */
	if (sock->type != SOCK_STREAM)
		return -EOPNOTSUPP;

	pvcalls_enter();
	if (!pvcalls_front_dev) {
		pvcalls_exit();
		return -EACCES;
	}
	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		pvcalls_exit();
		return -ENOMEM;
	}

	spin_lock(&bedata->socket_lock);

	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		kfree(map);
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit();
		return ret;
	}

	/*
	 * sock->sk->sk_send_head is not used for ip sockets: reuse the
	 * field to store a pointer to the struct sock_mapping
	 * corresponding to the socket. This way, we can easily get the
	 * struct sock_mapping from the struct socket.
	 */
	sock->sk->sk_send_head = (void *)map;
	list_add_tail(&map->list, &bedata->socket_mappings);

	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	req->cmd = PVCALLS_SOCKET;
	req->u.socket.id = (uintptr_t) map;
	req->u.socket.domain = AF_INET;
	req->u.socket.type = SOCK_STREAM;
	req->u.socket.protocol = IPPROTO_IP;

	bedata->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
	spin_unlock(&bedata->socket_lock);
	if (notify)
		notify_remote_via_irq(bedata->irq);

	wait_event(bedata->inflight_req,
		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);

	/* read req_id, then the content */
	smp_rmb();
	ret = bedata->rsp[req_id].ret;
	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;

	pvcalls_exit();
	return ret;
}

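/*
 * Set up the per-connection resources: one page holding the
 * pvcalls_data_intf indexes plus 2^PVCALLS_RING_ORDER data pages split
 * evenly between the in and out rings, all granted to the backend, and
 * a dedicated event channel for data notifications.
 */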
static int create_active(struct sock_mapping *map, int *evtchn)
{
	void *bytes;
	int ret = -ENOMEM, irq = -1, i;

	*evtchn = -1;
	init_waitqueue_head(&map->active.inflight_conn_req);

	map->active.ring = (struct pvcalls_data_intf *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (map->active.ring == NULL)
		goto out_error;
	map->active.ring->ring_order = PVCALLS_RING_ORDER;
	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 PVCALLS_RING_ORDER);
	if (bytes == NULL)
		goto out_error;
	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
		map->active.ring->ref[i] = gnttab_grant_foreign_access(
			pvcalls_front_dev->otherend_id,
			pfn_to_gfn(virt_to_pfn(bytes) + i), 0);

	map->active.ref = gnttab_grant_foreign_access(
		pvcalls_front_dev->otherend_id,
		pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);

	map->active.data.in = bytes;
	map->active.data.out = bytes +
		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);

	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
	if (ret)
		goto out_error;
	irq = bind_evtchn_to_irqhandler(*evtchn, pvcalls_front_conn_handler,
					0, "pvcalls-frontend", map);
	if (irq < 0) {
		ret = irq;
		goto out_error;
	}

	map->active.irq = irq;
	map->active_socket = true;
	mutex_init(&map->active.in_mutex);
	mutex_init(&map->active.out_mutex);

	return 0;

out_error:
	if (*evtchn >= 0)
		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
	free_page((unsigned long)map->active.ring);
	return ret;
}

int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
				int addr_len, int flags)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map = NULL;
	struct xen_pvcalls_request *req;
	int notify, req_id, ret, evtchn;

	if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
		return -EOPNOTSUPP;

	map = pvcalls_enter_sock(sock);
	if (IS_ERR(map))
		return PTR_ERR(map);

	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	spin_lock(&bedata->socket_lock);
	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit_sock(sock);
		return ret;
	}
	ret = create_active(map, &evtchn);
	if (ret < 0) {
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit_sock(sock);
		return ret;
	}

	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	req->cmd = PVCALLS_CONNECT;
	req->u.connect.id = (uintptr_t)map;
	req->u.connect.len = addr_len;
	req->u.connect.flags = flags;
	req->u.connect.ref = map->active.ref;
	req->u.connect.evtchn = evtchn;
	memcpy(req->u.connect.addr, addr, sizeof(*addr));

	map->sock = sock;

	bedata->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
	spin_unlock(&bedata->socket_lock);

	if (notify)
		notify_remote_via_irq(bedata->irq);

	wait_event(bedata->inflight_req,
		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);

	/* read req_id, then the content */
	smp_rmb();
	ret = bedata->rsp[req_id].ret;
	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
	pvcalls_exit_sock(sock);
	return ret;
}

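/*
 * Copy up to len bytes from the iov iterator into the out data ring.
 * prod and cons are free-running indexes, so the write position is the
 * index masked with the ring size and a single logical write may need
 * two copies when it wraps past the end of the buffer: e.g. with
 * array_size 4096, masked_prod 4000 and 300 bytes to write, the first
 * 96 bytes land at offset 4000 and the remaining 204 at offset 0.
 */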
static int __write_ring(struct pvcalls_data_intf *intf,
			struct pvcalls_data *data,
			struct iov_iter *msg_iter,
			int len)
{
	RING_IDX cons, prod, size, masked_prod, masked_cons;
	RING_IDX array_size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
	int32_t error;

	error = intf->out_error;
	if (error < 0)
		return error;
	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read indexes before continuing */
	virt_mb();

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return -EINVAL;
	if (len > array_size - size)
		len = array_size - size;

	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	if (masked_prod < masked_cons) {
		len = copy_from_iter(data->out + masked_prod, len, msg_iter);
	} else {
		if (len > array_size - masked_prod) {
			int ret = copy_from_iter(data->out + masked_prod,
				       array_size - masked_prod, msg_iter);
			if (ret != array_size - masked_prod) {
				len = ret;
				goto out;
			}
			len = ret + copy_from_iter(data->out, len - ret, msg_iter);
		} else {
			len = copy_from_iter(data->out + masked_prod, len, msg_iter);
		}
	}
out:
	/* write to ring before updating pointer */
	virt_wmb();
	intf->out_prod += len;

	return len;
}

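/*
 * sendmsg never sleeps waiting for ring space: under out_mutex it
 * retries __write_ring() up to PVCALLS_FRONT_MAX_SPIN times while data
 * remains, kicking the backend after every chunk that was copied.
 */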
int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
			  size_t len)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map;
	int sent, tot_sent = 0;
	int count = 0, flags;

	flags = msg->msg_flags;
	if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
		return -EOPNOTSUPP;

	map = pvcalls_enter_sock(sock);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	mutex_lock(&map->active.out_mutex);
	if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
		mutex_unlock(&map->active.out_mutex);
		pvcalls_exit_sock(sock);
		return -EAGAIN;
	}
	if (len > INT_MAX)
		len = INT_MAX;

again:
	count++;
	sent = __write_ring(map->active.ring,
			    &map->active.data, &msg->msg_iter,
			    len);
	if (sent > 0) {
		len -= sent;
		tot_sent += sent;
		notify_remote_via_irq(map->active.irq);
	}
	if (sent >= 0 && len > 0 && count < PVCALLS_FRONT_MAX_SPIN)
		goto again;
	if (sent < 0)
		tot_sent = sent;

	mutex_unlock(&map->active.out_mutex);
	pvcalls_exit_sock(sock);
	return tot_sent;
}

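/*
 * Mirror image of __write_ring(): copy up to len bytes from the in
 * data ring into the iov iterator, handling the same wraparound split,
 * and advance in_cons only when MSG_PEEK is not set.
 */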
static int __read_ring(struct pvcalls_data_intf *intf,
		       struct pvcalls_data *data,
		       struct iov_iter *msg_iter,
		       size_t len, int flags)
{
	RING_IDX cons, prod, size, masked_prod, masked_cons;
	RING_IDX array_size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
	int32_t error;

	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* get pointers before reading from the ring */
	virt_rmb();

	size = pvcalls_queued(prod, cons, array_size);
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	if (size == 0)
		return error ?: size;

	if (len > size)
		len = size;

	if (masked_prod > masked_cons) {
		len = copy_to_iter(data->in + masked_cons, len, msg_iter);
	} else {
		if (len > (array_size - masked_cons)) {
			int ret = copy_to_iter(data->in + masked_cons,
				     array_size - masked_cons, msg_iter);
			if (ret != array_size - masked_cons) {
				len = ret;
				goto out;
			}
			len = ret + copy_to_iter(data->in, len - ret, msg_iter);
		} else {
			len = copy_to_iter(data->in + masked_cons, len, msg_iter);
		}
	}
out:
	/* read data from the ring before increasing the index */
	virt_mb();
	if (!(flags & MSG_PEEK))
		intf->in_cons += len;

	return len;
}

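/*
 * recvmsg blocks interruptibly under in_mutex until data or an error
 * arrives, unless MSG_DONTWAIT is set, in which case an empty ring
 * yields -EAGAIN. A ring error of -ENOTCONN is reported as 0 to signal
 * orderly shutdown.
 */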
int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{
	struct pvcalls_bedata *bedata;
	int ret;
	struct sock_mapping *map;

	if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
		return -EOPNOTSUPP;

	map = pvcalls_enter_sock(sock);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	mutex_lock(&map->active.in_mutex);
	if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
		len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);

	while (!(flags & MSG_DONTWAIT) && !pvcalls_front_read_todo(map)) {
		wait_event_interruptible(map->active.inflight_conn_req,
					 pvcalls_front_read_todo(map));
	}
	ret = __read_ring(map->active.ring, &map->active.data,
			  &msg->msg_iter, len, flags);

	if (ret > 0)
		notify_remote_via_irq(map->active.irq);
	if (ret == 0)
		ret = (flags & MSG_DONTWAIT) ? -EAGAIN : 0;
	if (ret == -ENOTCONN)
		ret = 0;

	mutex_unlock(&map->active.in_mutex);
	pvcalls_exit_sock(sock);
	return ret;
}

int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map = NULL;
	struct xen_pvcalls_request *req;
	int notify, req_id, ret;

	if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
		return -EOPNOTSUPP;

	map = pvcalls_enter_sock(sock);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	spin_lock(&bedata->socket_lock);
	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit_sock(sock);
		return ret;
	}
	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	map->sock = sock;
	req->cmd = PVCALLS_BIND;
	req->u.bind.id = (uintptr_t)map;
	memcpy(req->u.bind.addr, addr, sizeof(*addr));
	req->u.bind.len = addr_len;

	init_waitqueue_head(&map->passive.inflight_accept_req);

	map->active_socket = false;

	bedata->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
	spin_unlock(&bedata->socket_lock);
	if (notify)
		notify_remote_via_irq(bedata->irq);

	wait_event(bedata->inflight_req,
		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);

	/* read req_id, then the content */
	smp_rmb();
	ret = bedata->rsp[req_id].ret;
	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;

	map->passive.status = PVCALLS_STATUS_BIND;
	pvcalls_exit_sock(sock);
	return 0;
}

int pvcalls_front_listen(struct socket *sock, int backlog)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map;
	struct xen_pvcalls_request *req;
	int notify, req_id, ret;

	map = pvcalls_enter_sock(sock);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	if (map->passive.status != PVCALLS_STATUS_BIND) {
		pvcalls_exit_sock(sock);
		return -EOPNOTSUPP;
	}

	spin_lock(&bedata->socket_lock);
	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit_sock(sock);
		return ret;
	}
	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	req->cmd = PVCALLS_LISTEN;
	req->u.listen.id = (uintptr_t) map;
	req->u.listen.backlog = backlog;

	bedata->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
	spin_unlock(&bedata->socket_lock);
	if (notify)
		notify_remote_via_irq(bedata->irq);

	wait_event(bedata->inflight_req,
		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);

	/* read req_id, then the content */
	smp_rmb();
	ret = bedata->rsp[req_id].ret;
	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;

	map->passive.status = PVCALLS_STATUS_LISTEN;
	pvcalls_exit_sock(sock);
	return ret;
}

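/*
 * Only one accept may be in flight per listening socket (the
 * ACCEPT_INFLIGHT bit). A nonblocking accept that cannot complete
 * immediately leaves the request pending and records its req_id in
 * inflight_req_id so that a later accept or poll can pick up the
 * response; the new connection gets its own sock_mapping and data ring
 * via create_active().
 */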
int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map;
	struct sock_mapping *map2 = NULL;
	struct xen_pvcalls_request *req;
	int notify, req_id, ret, evtchn, nonblock;

	map = pvcalls_enter_sock(sock);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	if (map->passive.status != PVCALLS_STATUS_LISTEN) {
		pvcalls_exit_sock(sock);
		return -EINVAL;
	}

	nonblock = flags & SOCK_NONBLOCK;
	/*
	 * Backend only supports 1 inflight accept request, will return
	 * errors for the others
	 */
	if (test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
			     (void *)&map->passive.flags)) {
		req_id = READ_ONCE(map->passive.inflight_req_id);
		if (req_id != PVCALLS_INVALID_ID &&
		    READ_ONCE(bedata->rsp[req_id].req_id) == req_id) {
			map2 = map->passive.accept_map;
			goto received;
		}
		if (nonblock) {
			pvcalls_exit_sock(sock);
			return -EAGAIN;
		}
		if (wait_event_interruptible(map->passive.inflight_accept_req,
			!test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
					  (void *)&map->passive.flags))) {
			pvcalls_exit_sock(sock);
			return -EINTR;
		}
	}

	spin_lock(&bedata->socket_lock);
	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
			  (void *)&map->passive.flags);
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit_sock(sock);
		return ret;
	}
	map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
	if (map2 == NULL) {
		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
			  (void *)&map->passive.flags);
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit_sock(sock);
		return -ENOMEM;
	}
	ret = create_active(map2, &evtchn);
	if (ret < 0) {
		kfree(map2);
		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
			  (void *)&map->passive.flags);
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit_sock(sock);
		return ret;
	}
	list_add_tail(&map2->list, &bedata->socket_mappings);

	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	req->cmd = PVCALLS_ACCEPT;
	req->u.accept.id = (uintptr_t) map;
	req->u.accept.ref = map2->active.ref;
	req->u.accept.id_new = (uintptr_t) map2;
	req->u.accept.evtchn = evtchn;
	map->passive.accept_map = map2;

	bedata->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
	spin_unlock(&bedata->socket_lock);
	if (notify)
		notify_remote_via_irq(bedata->irq);
	/* We could check if we have received a response before returning. */
	if (nonblock) {
		WRITE_ONCE(map->passive.inflight_req_id, req_id);
		pvcalls_exit_sock(sock);
		return -EAGAIN;
	}

	if (wait_event_interruptible(bedata->inflight_req,
		READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
		pvcalls_exit_sock(sock);
		return -EINTR;
	}
	/* read req_id, then the content */
	smp_rmb();

received:
	map2->sock = newsock;
	newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
	if (!newsock->sk) {
		bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
		map->passive.inflight_req_id = PVCALLS_INVALID_ID;
		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
			  (void *)&map->passive.flags);
		pvcalls_front_free_map(bedata, map2);
		pvcalls_exit_sock(sock);
		return -ENOMEM;
	}
	newsock->sk->sk_send_head = (void *)map2;

	ret = bedata->rsp[req_id].ret;
	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
	map->passive.inflight_req_id = PVCALLS_INVALID_ID;

	clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
	wake_up(&map->passive.inflight_accept_req);

	pvcalls_exit_sock(sock);
	return ret;
}

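/*
 * Poll on a listening socket: report EPOLLIN when a pending accept has
 * completed or a previous PVCALLS_POLL request has returned (POLL_RET);
 * otherwise issue at most one PVCALLS_POLL request to the backend
 * (POLL_INFLIGHT) and wait on inflight_req.
 */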
static __poll_t pvcalls_front_poll_passive(struct file *file,
					   struct pvcalls_bedata *bedata,
					   struct sock_mapping *map,
					   poll_table *wait)
{
	int notify, req_id, ret;
	struct xen_pvcalls_request *req;

	if (test_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
		     (void *)&map->passive.flags)) {
		uint32_t req_id = READ_ONCE(map->passive.inflight_req_id);

		if (req_id != PVCALLS_INVALID_ID &&
		    READ_ONCE(bedata->rsp[req_id].req_id) == req_id)
			return EPOLLIN | EPOLLRDNORM;

		poll_wait(file, &map->passive.inflight_accept_req, wait);
		return 0;
	}

	if (test_and_clear_bit(PVCALLS_FLAG_POLL_RET,
			       (void *)&map->passive.flags))
		return EPOLLIN | EPOLLRDNORM;

	/*
	 * First check RET, then INFLIGHT. No barriers necessary to
	 * ensure execution ordering because of the conditional
	 * instructions creating control dependencies.
	 */

	if (test_and_set_bit(PVCALLS_FLAG_POLL_INFLIGHT,
			     (void *)&map->passive.flags)) {
		poll_wait(file, &bedata->inflight_req, wait);
		return 0;
	}

	spin_lock(&bedata->socket_lock);
	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		spin_unlock(&bedata->socket_lock);
		return ret;
	}
	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	req->cmd = PVCALLS_POLL;
	req->u.poll.id = (uintptr_t) map;

	bedata->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
	spin_unlock(&bedata->socket_lock);
	if (notify)
		notify_remote_via_irq(bedata->irq);

	poll_wait(file, &bedata->inflight_req, wait);
	return 0;
}

static __poll_t pvcalls_front_poll_active(struct file *file,
					  struct pvcalls_bedata *bedata,
					  struct sock_mapping *map,
					  poll_table *wait)
{
	__poll_t mask = 0;
	int32_t in_error, out_error;
	struct pvcalls_data_intf *intf = map->active.ring;

	out_error = intf->out_error;
	in_error = intf->in_error;

	poll_wait(file, &map->active.inflight_conn_req, wait);
	if (pvcalls_front_write_todo(map))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (pvcalls_front_read_todo(map))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (in_error != 0 || out_error != 0)
		mask |= EPOLLERR;

	return mask;
}

__poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map;
	__poll_t ret;

	map = pvcalls_enter_sock(sock);
	if (IS_ERR(map))
		return EPOLLNVAL;
	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	if (map->active_socket)
		ret = pvcalls_front_poll_active(file, bedata, map, wait);
	else
		ret = pvcalls_front_poll_passive(file, bedata, map, wait);
	pvcalls_exit_sock(sock);
	return ret;
}

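/*
 * Release sends PVCALLS_RELEASE to the backend and then waits for
 * concurrent users to drain: sk_send_head is cleared first, active
 * readers are kicked via in_error = -EBADF, and we spin until the
 * per-socket refcount drops to 1 (only us) before freeing the mapping.
 */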
int pvcalls_front_release(struct socket *sock)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map;
	int req_id, notify, ret;
	struct xen_pvcalls_request *req;

	if (sock->sk == NULL)
		return 0;

	map = pvcalls_enter_sock(sock);
	if (IS_ERR(map)) {
		if (PTR_ERR(map) == -ENOTCONN)
			return -EIO;
		else
			return 0;
	}
	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	spin_lock(&bedata->socket_lock);
	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit_sock(sock);
		return ret;
	}
	sock->sk->sk_send_head = NULL;

	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	req->cmd = PVCALLS_RELEASE;
	req->u.release.id = (uintptr_t)map;

	bedata->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
	spin_unlock(&bedata->socket_lock);
	if (notify)
		notify_remote_via_irq(bedata->irq);

	wait_event(bedata->inflight_req,
		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);

	if (map->active_socket) {
		/*
		 * Set in_error and wake up inflight_conn_req to force
		 * recvmsg waiters to exit.
		 */
		map->active.ring->in_error = -EBADF;
		wake_up_interruptible(&map->active.inflight_conn_req);

		/*
		 * We need to make sure that sendmsg/recvmsg on this socket have
		 * not started before we've cleared sk_send_head here. The
		 * easiest way to guarantee this is to see that no pvcalls
		 * (other than us) is in progress on this socket.
		 */
		while (atomic_read(&map->refcount) > 1)
			cpu_relax();

		pvcalls_front_free_map(bedata, map);
	} else {
		wake_up(&bedata->inflight_req);
		wake_up(&map->passive.inflight_accept_req);

		while (atomic_read(&map->refcount) > 1)
			cpu_relax();

		spin_lock(&bedata->socket_lock);
		list_del(&map->list);
		spin_unlock(&bedata->socket_lock);
		if (READ_ONCE(map->passive.inflight_req_id) !=
		    PVCALLS_INVALID_ID) {
			pvcalls_front_free_map(bedata,
					       map->passive.accept_map);
		}
		kfree(map);
	}
	WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);

	pvcalls_exit();
	return 0;
}

static const struct xenbus_device_id pvcalls_front_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static int pvcalls_front_remove(struct xenbus_device *dev)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map = NULL, *n;

	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
	dev_set_drvdata(&dev->dev, NULL);
	pvcalls_front_dev = NULL;
	if (bedata->irq >= 0)
		unbind_from_irqhandler(bedata->irq, dev);

	list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
		map->sock->sk->sk_send_head = NULL;
		if (map->active_socket) {
			map->active.ring->in_error = -EBADF;
			wake_up_interruptible(&map->active.inflight_conn_req);
		}
	}

	smp_mb();
	while (atomic_read(&pvcalls_refcount) > 0)
		cpu_relax();
	list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
		if (map->active_socket) {
			/* No need to lock, refcount is 0 */
			pvcalls_front_free_map(bedata, map);
		} else {
			list_del(&map->list);
			kfree(map);
		}
	}
	if (bedata->ref != -1)
		gnttab_end_foreign_access(bedata->ref, 0, 0);
	kfree(bedata->ring.sring);
	kfree(bedata);
	xenbus_switch_state(dev, XenbusStateClosed);
	return 0;
}

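/*
 * Probe negotiates with the backend over xenstore ("versions",
 * "max-page-order", "function-calls"), allocates the command ring and
 * its event channel, and publishes ring-ref and port in a single
 * xenbus transaction.
 */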
static int pvcalls_front_probe(struct xenbus_device *dev,
			       const struct xenbus_device_id *id)
{
	int ret = -ENOMEM, evtchn, i;
	unsigned int max_page_order, function_calls, len;
	char *versions;
	grant_ref_t gref_head = 0;
	struct xenbus_transaction xbt;
	struct pvcalls_bedata *bedata = NULL;
	struct xen_pvcalls_sring *sring;

	if (pvcalls_front_dev != NULL) {
		dev_err(&dev->dev, "only one PV Calls connection supported\n");
		return -EINVAL;
	}

	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
	if (IS_ERR(versions))
		return PTR_ERR(versions);
	if (!len)
		return -EINVAL;
	if (strcmp(versions, "1")) {
		kfree(versions);
		return -EINVAL;
	}
	kfree(versions);
	max_page_order = xenbus_read_unsigned(dev->otherend,
					      "max-page-order", 0);
	if (max_page_order < PVCALLS_RING_ORDER)
		return -ENODEV;
	function_calls = xenbus_read_unsigned(dev->otherend,
					      "function-calls", 0);
	/* See XENBUS_FUNCTIONS_CALLS in pvcalls.h */
	if (function_calls != 1)
		return -ENODEV;
	pr_info("%s max-page-order is %u\n", __func__, max_page_order);

	bedata = kzalloc(sizeof(struct pvcalls_bedata), GFP_KERNEL);
	if (!bedata)
		return -ENOMEM;

	dev_set_drvdata(&dev->dev, bedata);
	pvcalls_front_dev = dev;
	init_waitqueue_head(&bedata->inflight_req);
	INIT_LIST_HEAD(&bedata->socket_mappings);
	spin_lock_init(&bedata->socket_lock);
	bedata->irq = -1;
	bedata->ref = -1;

	for (i = 0; i < PVCALLS_NR_RSP_PER_RING; i++)
		bedata->rsp[i].req_id = PVCALLS_INVALID_ID;

	sring = (struct xen_pvcalls_sring *) __get_free_page(GFP_KERNEL |
							     __GFP_ZERO);
	if (!sring)
		goto error;
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&bedata->ring, sring, XEN_PAGE_SIZE);

	ret = xenbus_alloc_evtchn(dev, &evtchn);
	if (ret)
		goto error;

	bedata->irq = bind_evtchn_to_irqhandler(evtchn,
						pvcalls_front_event_handler,
						0, "pvcalls-frontend", dev);
	if (bedata->irq < 0) {
		ret = bedata->irq;
		goto error;
	}

	ret = gnttab_alloc_grant_references(1, &gref_head);
	if (ret < 0)
		goto error;
	ret = gnttab_claim_grant_reference(&gref_head);
	if (ret < 0)
		goto error;
	bedata->ref = ret;
	gnttab_grant_foreign_access_ref(bedata->ref, dev->otherend_id,
					virt_to_gfn((void *)sring), 0);

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto error;
	}
	ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", bedata->ref);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "port", "%u",
			    evtchn);
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto error;
	}
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
 error:
	pvcalls_front_remove(dev);
	return ret;
}

static void pvcalls_front_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		break;

	case XenbusStateConnected:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's CLOSING state */
		/* fall through */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct xenbus_driver pvcalls_front_driver = {
	.ids = pvcalls_front_ids,
	.probe = pvcalls_front_probe,
	.remove = pvcalls_front_remove,
	.otherend_changed = pvcalls_front_changed,
};

static int __init pvcalls_frontend_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising Xen pvcalls frontend driver\n");

	return xenbus_register_frontend(&pvcalls_front_driver);
}

module_init(pvcalls_frontend_init);

MODULE_DESCRIPTION("Xen PV Calls frontend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");