blob: 82fcf07fa9ea7a08d9a7980c8bd3d25f1cb8c12e [file] [log] [blame]
Alex Williamsoncba33452012-07-31 08:16:22 -06001/*
2 * VFIO core
3 *
4 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
5 * Author: Alex Williamson <alex.williamson@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Derived from original vfio:
12 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
13 * Author: Tom Lyon, pugs@cisco.com
14 */
15
16#include <linux/cdev.h>
17#include <linux/compat.h>
18#include <linux/device.h>
19#include <linux/file.h>
20#include <linux/anon_inodes.h>
21#include <linux/fs.h>
22#include <linux/idr.h>
23#include <linux/iommu.h>
24#include <linux/list.h>
Alex Williamsond1099902013-12-19 10:17:13 -070025#include <linux/miscdevice.h>
Alex Williamsoncba33452012-07-31 08:16:22 -060026#include <linux/module.h>
27#include <linux/mutex.h>
Alex Williamson5f096b12015-10-27 14:53:04 -060028#include <linux/pci.h>
Alex Williamson9587f442013-04-25 16:12:38 -060029#include <linux/rwsem.h>
Alex Williamsoncba33452012-07-31 08:16:22 -060030#include <linux/sched.h>
31#include <linux/slab.h>
Alex Williamson664e9382013-04-30 15:42:28 -060032#include <linux/stat.h>
Alex Williamsoncba33452012-07-31 08:16:22 -060033#include <linux/string.h>
34#include <linux/uaccess.h>
35#include <linux/vfio.h>
36#include <linux/wait.h>
Farhan Ali41be3e22019-04-03 14:22:27 -040037#include <linux/sched/signal.h>
Alex Williamsoncba33452012-07-31 08:16:22 -060038
39#define DRIVER_VERSION "0.3"
40#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
41#define DRIVER_DESC "VFIO - User Level meta-driver"
42
43static struct vfio {
44 struct class *class;
45 struct list_head iommu_drivers_list;
46 struct mutex iommu_drivers_lock;
47 struct list_head group_list;
48 struct idr group_idr;
49 struct mutex group_lock;
50 struct cdev group_cdev;
Alex Williamsond1099902013-12-19 10:17:13 -070051 dev_t group_devt;
Alex Williamsoncba33452012-07-31 08:16:22 -060052 wait_queue_head_t release_q;
53} vfio;
54
55struct vfio_iommu_driver {
56 const struct vfio_iommu_driver_ops *ops;
57 struct list_head vfio_next;
58};
59
60struct vfio_container {
61 struct kref kref;
62 struct list_head group_list;
Alex Williamson9587f442013-04-25 16:12:38 -060063 struct rw_semaphore group_lock;
Alex Williamsoncba33452012-07-31 08:16:22 -060064 struct vfio_iommu_driver *iommu_driver;
65 void *iommu_data;
Alex Williamson03a76b62015-12-21 15:13:33 -070066 bool noiommu;
Alex Williamsoncba33452012-07-31 08:16:22 -060067};
68
Alex Williamson60720a02015-02-06 15:05:06 -070069struct vfio_unbound_dev {
70 struct device *dev;
71 struct list_head unbound_next;
72};
73
Alex Williamsoncba33452012-07-31 08:16:22 -060074struct vfio_group {
75 struct kref kref;
76 int minor;
77 atomic_t container_users;
78 struct iommu_group *iommu_group;
79 struct vfio_container *container;
80 struct list_head device_list;
81 struct mutex device_lock;
82 struct device *dev;
83 struct notifier_block nb;
84 struct list_head vfio_next;
85 struct list_head container_next;
Alex Williamson60720a02015-02-06 15:05:06 -070086 struct list_head unbound_list;
87 struct mutex unbound_lock;
Alex Williamson6d6768c2013-06-25 16:06:54 -060088 atomic_t opened;
Alex Williamson6586b562017-08-17 22:10:20 -060089 wait_queue_head_t container_q;
Alex Williamson03a76b62015-12-21 15:13:33 -070090 bool noiommu;
Jike Songccd46db2016-12-01 13:20:06 +080091 struct kvm *kvm;
92 struct blocking_notifier_head notifier;
Alex Williamsoncba33452012-07-31 08:16:22 -060093};
94
95struct vfio_device {
96 struct kref kref;
97 struct device *dev;
98 const struct vfio_device_ops *ops;
99 struct vfio_group *group;
100 struct list_head group_next;
101 void *device_data;
102};
103
Alex Williamson03a76b62015-12-21 15:13:33 -0700104#ifdef CONFIG_VFIO_NOIOMMU
105static bool noiommu __read_mostly;
106module_param_named(enable_unsafe_noiommu_mode,
107 noiommu, bool, S_IRUGO | S_IWUSR);
108MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
109#endif
110
111/*
112 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
113 * and remove functions, any use cases other than acquiring the first
114 * reference for the purpose of calling vfio_add_group_dev() or removing
115 * that symmetric reference after vfio_del_group_dev() should use the raw
116 * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put()
117 * removes the device from the dummy group and cannot be nested.
118 */
119struct iommu_group *vfio_iommu_group_get(struct device *dev)
120{
121 struct iommu_group *group;
122 int __maybe_unused ret;
123
124 group = iommu_group_get(dev);
125
126#ifdef CONFIG_VFIO_NOIOMMU
127 /*
128 * With noiommu enabled, an IOMMU group will be created for a device
129 * that doesn't already have one and doesn't have an iommu_ops on their
Alex Williamson16ab8a52016-01-27 11:22:25 -0700130 * bus. We set iommudata simply to be able to identify these groups
131 * as special use and for reclamation later.
Alex Williamson03a76b62015-12-21 15:13:33 -0700132 */
133 if (group || !noiommu || iommu_present(dev->bus))
134 return group;
135
136 group = iommu_group_alloc();
137 if (IS_ERR(group))
138 return NULL;
139
140 iommu_group_set_name(group, "vfio-noiommu");
Alex Williamson16ab8a52016-01-27 11:22:25 -0700141 iommu_group_set_iommudata(group, &noiommu, NULL);
Alex Williamson03a76b62015-12-21 15:13:33 -0700142 ret = iommu_group_add_device(group, dev);
Eric Augerd935ad92017-08-11 15:16:06 +0200143 if (ret) {
144 iommu_group_put(group);
Alex Williamson03a76b62015-12-21 15:13:33 -0700145 return NULL;
Eric Augerd935ad92017-08-11 15:16:06 +0200146 }
Alex Williamson03a76b62015-12-21 15:13:33 -0700147
148 /*
149 * Where to taint? At this point we've added an IOMMU group for a
150 * device that is not backed by iommu_ops, therefore any iommu_
151 * callback using iommu_ops can legitimately Oops. So, while we may
152 * be about to give a DMA capable device to a user without IOMMU
153 * protection, which is clearly taint-worthy, let's go ahead and do
154 * it here.
155 */
156 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
157 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
158#endif
159
160 return group;
161}
162EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
163
164void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
165{
166#ifdef CONFIG_VFIO_NOIOMMU
Alex Williamson16ab8a52016-01-27 11:22:25 -0700167 if (iommu_group_get_iommudata(group) == &noiommu)
Alex Williamson03a76b62015-12-21 15:13:33 -0700168 iommu_group_remove_device(dev);
169#endif
170
171 iommu_group_put(group);
172}
173EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
174
175#ifdef CONFIG_VFIO_NOIOMMU
176static void *vfio_noiommu_open(unsigned long arg)
177{
178 if (arg != VFIO_NOIOMMU_IOMMU)
179 return ERR_PTR(-EINVAL);
180 if (!capable(CAP_SYS_RAWIO))
181 return ERR_PTR(-EPERM);
182
183 return NULL;
184}
185
186static void vfio_noiommu_release(void *iommu_data)
187{
188}
189
190static long vfio_noiommu_ioctl(void *iommu_data,
191 unsigned int cmd, unsigned long arg)
192{
193 if (cmd == VFIO_CHECK_EXTENSION)
194 return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;
195
196 return -ENOTTY;
197}
198
Alex Williamson03a76b62015-12-21 15:13:33 -0700199static int vfio_noiommu_attach_group(void *iommu_data,
200 struct iommu_group *iommu_group)
201{
Alex Williamson16ab8a52016-01-27 11:22:25 -0700202 return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
Alex Williamson03a76b62015-12-21 15:13:33 -0700203}
204
205static void vfio_noiommu_detach_group(void *iommu_data,
206 struct iommu_group *iommu_group)
207{
208}
209
210static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
211 .name = "vfio-noiommu",
212 .owner = THIS_MODULE,
213 .open = vfio_noiommu_open,
214 .release = vfio_noiommu_release,
215 .ioctl = vfio_noiommu_ioctl,
216 .attach_group = vfio_noiommu_attach_group,
217 .detach_group = vfio_noiommu_detach_group,
218};
219#endif
220
221
Alex Williamsoncba33452012-07-31 08:16:22 -0600222/**
223 * IOMMU driver registration
224 */
225int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
226{
227 struct vfio_iommu_driver *driver, *tmp;
228
229 driver = kzalloc(sizeof(*driver), GFP_KERNEL);
230 if (!driver)
231 return -ENOMEM;
232
233 driver->ops = ops;
234
235 mutex_lock(&vfio.iommu_drivers_lock);
236
237 /* Check for duplicates */
238 list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
239 if (tmp->ops == ops) {
240 mutex_unlock(&vfio.iommu_drivers_lock);
241 kfree(driver);
242 return -EINVAL;
243 }
244 }
245
246 list_add(&driver->vfio_next, &vfio.iommu_drivers_list);
247
248 mutex_unlock(&vfio.iommu_drivers_lock);
249
250 return 0;
251}
252EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
253
254void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
255{
256 struct vfio_iommu_driver *driver;
257
258 mutex_lock(&vfio.iommu_drivers_lock);
259 list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
260 if (driver->ops == ops) {
261 list_del(&driver->vfio_next);
262 mutex_unlock(&vfio.iommu_drivers_lock);
263 kfree(driver);
264 return;
265 }
266 }
267 mutex_unlock(&vfio.iommu_drivers_lock);
268}
269EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
270
271/**
272 * Group minor allocation/free - both called with vfio.group_lock held
273 */
274static int vfio_alloc_group_minor(struct vfio_group *group)
275{
Alex Williamsond1099902013-12-19 10:17:13 -0700276 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
Alex Williamsoncba33452012-07-31 08:16:22 -0600277}
278
279static void vfio_free_group_minor(int minor)
280{
281 idr_remove(&vfio.group_idr, minor);
282}
283
284static int vfio_iommu_group_notifier(struct notifier_block *nb,
285 unsigned long action, void *data);
286static void vfio_group_get(struct vfio_group *group);
287
288/**
289 * Container objects - containers are created when /dev/vfio/vfio is
290 * opened, but their lifecycle extends until the last user is done, so
291 * it's freed via kref. Must support container/group/device being
292 * closed in any order.
293 */
294static void vfio_container_get(struct vfio_container *container)
295{
296 kref_get(&container->kref);
297}
298
299static void vfio_container_release(struct kref *kref)
300{
301 struct vfio_container *container;
302 container = container_of(kref, struct vfio_container, kref);
303
304 kfree(container);
305}
306
307static void vfio_container_put(struct vfio_container *container)
308{
309 kref_put(&container->kref, vfio_container_release);
310}
311
Jiang Liu9df7b252012-12-07 13:43:50 -0700312static void vfio_group_unlock_and_free(struct vfio_group *group)
313{
314 mutex_unlock(&vfio.group_lock);
315 /*
316 * Unregister outside of lock. A spurious callback is harmless now
317 * that the group is no longer in vfio.group_list.
318 */
319 iommu_group_unregister_notifier(group->iommu_group, &group->nb);
320 kfree(group);
321}
322
Alex Williamsoncba33452012-07-31 08:16:22 -0600323/**
324 * Group objects - create, release, get, put, search
325 */
Alex Williamson16ab8a52016-01-27 11:22:25 -0700326static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
Alex Williamsoncba33452012-07-31 08:16:22 -0600327{
328 struct vfio_group *group, *tmp;
329 struct device *dev;
330 int ret, minor;
331
332 group = kzalloc(sizeof(*group), GFP_KERNEL);
333 if (!group)
334 return ERR_PTR(-ENOMEM);
335
336 kref_init(&group->kref);
337 INIT_LIST_HEAD(&group->device_list);
338 mutex_init(&group->device_lock);
Alex Williamson60720a02015-02-06 15:05:06 -0700339 INIT_LIST_HEAD(&group->unbound_list);
340 mutex_init(&group->unbound_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -0600341 atomic_set(&group->container_users, 0);
Alex Williamson6d6768c2013-06-25 16:06:54 -0600342 atomic_set(&group->opened, 0);
Alex Williamson6586b562017-08-17 22:10:20 -0600343 init_waitqueue_head(&group->container_q);
Alex Williamsoncba33452012-07-31 08:16:22 -0600344 group->iommu_group = iommu_group;
Alex Williamson16ab8a52016-01-27 11:22:25 -0700345#ifdef CONFIG_VFIO_NOIOMMU
346 group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
347#endif
Jike Songccd46db2016-12-01 13:20:06 +0800348 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
Alex Williamsoncba33452012-07-31 08:16:22 -0600349
350 group->nb.notifier_call = vfio_iommu_group_notifier;
351
352 /*
353 * blocking notifiers acquire a rwsem around registering and hold
354 * it around callback. Therefore, need to register outside of
355 * vfio.group_lock to avoid A-B/B-A contention. Our callback won't
356 * do anything unless it can find the group in vfio.group_list, so
357 * no harm in registering early.
358 */
359 ret = iommu_group_register_notifier(iommu_group, &group->nb);
360 if (ret) {
361 kfree(group);
362 return ERR_PTR(ret);
363 }
364
365 mutex_lock(&vfio.group_lock);
366
Alex Williamsoncba33452012-07-31 08:16:22 -0600367 /* Did we race creating this group? */
368 list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
369 if (tmp->iommu_group == iommu_group) {
370 vfio_group_get(tmp);
Jiang Liu9df7b252012-12-07 13:43:50 -0700371 vfio_group_unlock_and_free(group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600372 return tmp;
373 }
374 }
375
Zhen Lei2f51bf42015-03-16 14:08:56 -0600376 minor = vfio_alloc_group_minor(group);
377 if (minor < 0) {
378 vfio_group_unlock_and_free(group);
379 return ERR_PTR(minor);
380 }
381
Alex Williamsond1099902013-12-19 10:17:13 -0700382 dev = device_create(vfio.class, NULL,
383 MKDEV(MAJOR(vfio.group_devt), minor),
Alex Williamson03a76b62015-12-21 15:13:33 -0700384 group, "%s%d", group->noiommu ? "noiommu-" : "",
385 iommu_group_id(iommu_group));
Alex Williamsoncba33452012-07-31 08:16:22 -0600386 if (IS_ERR(dev)) {
387 vfio_free_group_minor(minor);
Jiang Liu9df7b252012-12-07 13:43:50 -0700388 vfio_group_unlock_and_free(group);
Dan Carpenter7b3a10d2017-05-18 10:34:31 +0300389 return ERR_CAST(dev);
Alex Williamsoncba33452012-07-31 08:16:22 -0600390 }
391
392 group->minor = minor;
393 group->dev = dev;
394
395 list_add(&group->vfio_next, &vfio.group_list);
396
397 mutex_unlock(&vfio.group_lock);
398
399 return group;
400}
401
Al Viro6d2cd3c2012-08-17 21:27:32 -0400402/* called with vfio.group_lock held */
Alex Williamsoncba33452012-07-31 08:16:22 -0600403static void vfio_group_release(struct kref *kref)
404{
405 struct vfio_group *group = container_of(kref, struct vfio_group, kref);
Alex Williamson60720a02015-02-06 15:05:06 -0700406 struct vfio_unbound_dev *unbound, *tmp;
Alex Williamson4a688102015-02-06 15:05:06 -0700407 struct iommu_group *iommu_group = group->iommu_group;
Alex Williamsoncba33452012-07-31 08:16:22 -0600408
409 WARN_ON(!list_empty(&group->device_list));
Alex Williamson65b1ade2017-03-21 13:19:09 -0600410 WARN_ON(group->notifier.head);
Alex Williamsoncba33452012-07-31 08:16:22 -0600411
Alex Williamson60720a02015-02-06 15:05:06 -0700412 list_for_each_entry_safe(unbound, tmp,
413 &group->unbound_list, unbound_next) {
414 list_del(&unbound->unbound_next);
415 kfree(unbound);
416 }
417
Alex Williamsond1099902013-12-19 10:17:13 -0700418 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
Alex Williamsoncba33452012-07-31 08:16:22 -0600419 list_del(&group->vfio_next);
420 vfio_free_group_minor(group->minor);
Jiang Liu9df7b252012-12-07 13:43:50 -0700421 vfio_group_unlock_and_free(group);
Alex Williamson4a688102015-02-06 15:05:06 -0700422 iommu_group_put(iommu_group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600423}
424
425static void vfio_group_put(struct vfio_group *group)
426{
Al Viro6d2cd3c2012-08-17 21:27:32 -0400427 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -0600428}
429
Alex Williamson811642d2017-06-19 09:10:32 -0600430struct vfio_group_put_work {
431 struct work_struct work;
432 struct vfio_group *group;
433};
434
435static void vfio_group_put_bg(struct work_struct *work)
436{
437 struct vfio_group_put_work *do_work;
438
439 do_work = container_of(work, struct vfio_group_put_work, work);
440
441 vfio_group_put(do_work->group);
442 kfree(do_work);
443}
444
445static void vfio_group_schedule_put(struct vfio_group *group)
446{
447 struct vfio_group_put_work *do_work;
448
449 do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
450 if (WARN_ON(!do_work))
451 return;
452
453 INIT_WORK(&do_work->work, vfio_group_put_bg);
454 do_work->group = group;
455 schedule_work(&do_work->work);
456}
457
Alex Williamsoncba33452012-07-31 08:16:22 -0600458/* Assume group_lock or group reference is held */
459static void vfio_group_get(struct vfio_group *group)
460{
461 kref_get(&group->kref);
462}
463
464/*
465 * Not really a try as we will sleep for mutex, but we need to make
466 * sure the group pointer is valid under lock and get a reference.
467 */
468static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
469{
470 struct vfio_group *target = group;
471
472 mutex_lock(&vfio.group_lock);
473 list_for_each_entry(group, &vfio.group_list, vfio_next) {
474 if (group == target) {
475 vfio_group_get(group);
476 mutex_unlock(&vfio.group_lock);
477 return group;
478 }
479 }
480 mutex_unlock(&vfio.group_lock);
481
482 return NULL;
483}
484
485static
486struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
487{
488 struct vfio_group *group;
489
490 mutex_lock(&vfio.group_lock);
491 list_for_each_entry(group, &vfio.group_list, vfio_next) {
492 if (group->iommu_group == iommu_group) {
493 vfio_group_get(group);
494 mutex_unlock(&vfio.group_lock);
495 return group;
496 }
497 }
498 mutex_unlock(&vfio.group_lock);
499
500 return NULL;
501}
502
503static struct vfio_group *vfio_group_get_from_minor(int minor)
504{
505 struct vfio_group *group;
506
507 mutex_lock(&vfio.group_lock);
508 group = idr_find(&vfio.group_idr, minor);
509 if (!group) {
510 mutex_unlock(&vfio.group_lock);
511 return NULL;
512 }
513 vfio_group_get(group);
514 mutex_unlock(&vfio.group_lock);
515
516 return group;
517}
518
Kirti Wankhede7ed3ea82016-11-17 02:16:15 +0530519static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
520{
521 struct iommu_group *iommu_group;
522 struct vfio_group *group;
523
524 iommu_group = iommu_group_get(dev);
525 if (!iommu_group)
526 return NULL;
527
528 group = vfio_group_get_from_iommu(iommu_group);
529 iommu_group_put(iommu_group);
530
531 return group;
532}
533
Alex Williamsoncba33452012-07-31 08:16:22 -0600534/**
535 * Device objects - create, release, get, put, search
536 */
537static
538struct vfio_device *vfio_group_create_device(struct vfio_group *group,
539 struct device *dev,
540 const struct vfio_device_ops *ops,
541 void *device_data)
542{
543 struct vfio_device *device;
Alex Williamsoncba33452012-07-31 08:16:22 -0600544
545 device = kzalloc(sizeof(*device), GFP_KERNEL);
546 if (!device)
547 return ERR_PTR(-ENOMEM);
548
549 kref_init(&device->kref);
550 device->dev = dev;
551 device->group = group;
552 device->ops = ops;
553 device->device_data = device_data;
Jean Delvare8283b492014-04-14 12:55:38 +0200554 dev_set_drvdata(dev, device);
Alex Williamsoncba33452012-07-31 08:16:22 -0600555
556 /* No need to get group_lock, caller has group reference */
557 vfio_group_get(group);
558
559 mutex_lock(&group->device_lock);
560 list_add(&device->group_next, &group->device_list);
561 mutex_unlock(&group->device_lock);
562
563 return device;
564}
565
566static void vfio_device_release(struct kref *kref)
567{
568 struct vfio_device *device = container_of(kref,
569 struct vfio_device, kref);
570 struct vfio_group *group = device->group;
571
Alex Williamsoncba33452012-07-31 08:16:22 -0600572 list_del(&device->group_next);
573 mutex_unlock(&group->device_lock);
574
575 dev_set_drvdata(device->dev, NULL);
576
577 kfree(device);
578
579 /* vfio_del_group_dev may be waiting for this device */
580 wake_up(&vfio.release_q);
581}
582
583/* Device reference always implies a group reference */
Vijay Mohan Pandarathil44f50712013-03-11 09:28:44 -0600584void vfio_device_put(struct vfio_device *device)
Alex Williamsoncba33452012-07-31 08:16:22 -0600585{
Al Viro934ad4c2012-08-17 19:49:09 -0400586 struct vfio_group *group = device->group;
Al Viro90b12532012-08-17 21:29:06 -0400587 kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
Al Viro934ad4c2012-08-17 19:49:09 -0400588 vfio_group_put(group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600589}
Vijay Mohan Pandarathil44f50712013-03-11 09:28:44 -0600590EXPORT_SYMBOL_GPL(vfio_device_put);
Alex Williamsoncba33452012-07-31 08:16:22 -0600591
592static void vfio_device_get(struct vfio_device *device)
593{
594 vfio_group_get(device->group);
595 kref_get(&device->kref);
596}
597
598static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
599 struct device *dev)
600{
601 struct vfio_device *device;
602
603 mutex_lock(&group->device_lock);
604 list_for_each_entry(device, &group->device_list, group_next) {
605 if (device->dev == dev) {
606 vfio_device_get(device);
607 mutex_unlock(&group->device_lock);
608 return device;
609 }
610 }
611 mutex_unlock(&group->device_lock);
612 return NULL;
613}
614
615/*
Alex Williamson5f096b12015-10-27 14:53:04 -0600616 * Some drivers, like pci-stub, are only used to prevent other drivers from
617 * claiming a device and are therefore perfectly legitimate for a user owned
618 * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
619 * of the device, but it does prevent the user from having direct access to
620 * the device, which is useful in some circumstances.
621 *
622 * We also assume that we can include PCI interconnect devices, ie. bridges.
623 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
624 * then all of the downstream devices will be part of the same IOMMU group as
625 * the bridge. Thus, if placing the bridge into the user owned IOVA space
626 * breaks anything, it only does so for user owned devices downstream. Note
627 * that error notification via MSI can be affected for platforms that handle
628 * MSI within the same IOVA space as DMA.
Alex Williamsoncba33452012-07-31 08:16:22 -0600629 */
Alex Williamson5f096b12015-10-27 14:53:04 -0600630static const char * const vfio_driver_whitelist[] = { "pci-stub" };
Alex Williamsoncba33452012-07-31 08:16:22 -0600631
Alex Williamson5f096b12015-10-27 14:53:04 -0600632static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
Alex Williamsoncba33452012-07-31 08:16:22 -0600633{
Alex Williamson5f096b12015-10-27 14:53:04 -0600634 if (dev_is_pci(dev)) {
635 struct pci_dev *pdev = to_pci_dev(dev);
636
637 if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
638 return true;
639 }
640
Yisheng Xiee77addf2018-05-21 19:57:45 +0800641 return match_string(vfio_driver_whitelist,
642 ARRAY_SIZE(vfio_driver_whitelist),
643 drv->name) >= 0;
Alex Williamsoncba33452012-07-31 08:16:22 -0600644}
645
646/*
Alex Williamson60720a02015-02-06 15:05:06 -0700647 * A vfio group is viable for use by userspace if all devices are in
648 * one of the following states:
649 * - driver-less
650 * - bound to a vfio driver
651 * - bound to a whitelisted driver
Alex Williamson5f096b12015-10-27 14:53:04 -0600652 * - a PCI interconnect device
Alex Williamson60720a02015-02-06 15:05:06 -0700653 *
654 * We use two methods to determine whether a device is bound to a vfio
655 * driver. The first is to test whether the device exists in the vfio
656 * group. The second is to test if the device exists on the group
657 * unbound_list, indicating it's in the middle of transitioning from
658 * a vfio driver to driver-less.
Alex Williamsoncba33452012-07-31 08:16:22 -0600659 */
660static int vfio_dev_viable(struct device *dev, void *data)
661{
662 struct vfio_group *group = data;
663 struct vfio_device *device;
Mark Rutland6aa7de02017-10-23 14:07:29 -0700664 struct device_driver *drv = READ_ONCE(dev->driver);
Alex Williamson60720a02015-02-06 15:05:06 -0700665 struct vfio_unbound_dev *unbound;
666 int ret = -EINVAL;
Alex Williamsoncba33452012-07-31 08:16:22 -0600667
Alex Williamson60720a02015-02-06 15:05:06 -0700668 mutex_lock(&group->unbound_lock);
669 list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
670 if (dev == unbound->dev) {
671 ret = 0;
672 break;
673 }
674 }
675 mutex_unlock(&group->unbound_lock);
676
Alex Williamson5f096b12015-10-27 14:53:04 -0600677 if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
Alex Williamsoncba33452012-07-31 08:16:22 -0600678 return 0;
679
680 device = vfio_group_get_device(group, dev);
681 if (device) {
682 vfio_device_put(device);
683 return 0;
684 }
685
Alex Williamson60720a02015-02-06 15:05:06 -0700686 return ret;
Alex Williamsoncba33452012-07-31 08:16:22 -0600687}
688
689/**
690 * Async device support
691 */
692static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
693{
694 struct vfio_device *device;
695
696 /* Do we already know about it? We shouldn't */
697 device = vfio_group_get_device(group, dev);
698 if (WARN_ON_ONCE(device)) {
699 vfio_device_put(device);
700 return 0;
701 }
702
703 /* Nothing to do for idle groups */
704 if (!atomic_read(&group->container_users))
705 return 0;
706
707 /* TODO Prevent device auto probing */
Bjorn Helgaasa88a7b32019-03-30 09:41:35 -0500708 dev_WARN(dev, "Device added to live group %d!\n",
709 iommu_group_id(group->iommu_group));
Alex Williamsoncba33452012-07-31 08:16:22 -0600710
711 return 0;
712}
713
Alex Williamsoncba33452012-07-31 08:16:22 -0600714static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
715{
716 /* We don't care what happens when the group isn't in use */
717 if (!atomic_read(&group->container_users))
718 return 0;
719
720 return vfio_dev_viable(dev, group);
721}
722
723static int vfio_iommu_group_notifier(struct notifier_block *nb,
724 unsigned long action, void *data)
725{
726 struct vfio_group *group = container_of(nb, struct vfio_group, nb);
727 struct device *dev = data;
Alex Williamson60720a02015-02-06 15:05:06 -0700728 struct vfio_unbound_dev *unbound;
Alex Williamsoncba33452012-07-31 08:16:22 -0600729
730 /*
Alex Williamsonc6401932013-06-10 16:40:56 -0600731 * Need to go through a group_lock lookup to get a reference or we
732 * risk racing a group being removed. Ignore spurious notifies.
Alex Williamsoncba33452012-07-31 08:16:22 -0600733 */
734 group = vfio_group_try_get(group);
Alex Williamsonc6401932013-06-10 16:40:56 -0600735 if (!group)
Alex Williamsoncba33452012-07-31 08:16:22 -0600736 return NOTIFY_OK;
737
738 switch (action) {
739 case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
740 vfio_group_nb_add_dev(group, dev);
741 break;
742 case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
Alex Williamsonde9c7602013-06-10 16:40:56 -0600743 /*
744 * Nothing to do here. If the device is in use, then the
745 * vfio sub-driver should block the remove callback until
746 * it is unused. If the device is unused or attached to a
747 * stub driver, then it should be released and we don't
748 * care that it will be going away.
749 */
Alex Williamsoncba33452012-07-31 08:16:22 -0600750 break;
751 case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
Bjorn Helgaasa88a7b32019-03-30 09:41:35 -0500752 dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
753 iommu_group_id(group->iommu_group));
Alex Williamsoncba33452012-07-31 08:16:22 -0600754 break;
755 case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
Bjorn Helgaasa88a7b32019-03-30 09:41:35 -0500756 dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
757 iommu_group_id(group->iommu_group), dev->driver->name);
Alex Williamsoncba33452012-07-31 08:16:22 -0600758 BUG_ON(vfio_group_nb_verify(group, dev));
759 break;
760 case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
Bjorn Helgaasa88a7b32019-03-30 09:41:35 -0500761 dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
762 __func__, iommu_group_id(group->iommu_group),
763 dev->driver->name);
Alex Williamsoncba33452012-07-31 08:16:22 -0600764 break;
765 case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
Bjorn Helgaasa88a7b32019-03-30 09:41:35 -0500766 dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
767 iommu_group_id(group->iommu_group));
Alex Williamsoncba33452012-07-31 08:16:22 -0600768 /*
769 * XXX An unbound device in a live group is ok, but we'd
770 * really like to avoid the above BUG_ON by preventing other
771 * drivers from binding to it. Once that occurs, we have to
772 * stop the system to maintain isolation. At a minimum, we'd
773 * want a toggle to disable driver auto probe for this device.
774 */
Alex Williamson60720a02015-02-06 15:05:06 -0700775
776 mutex_lock(&group->unbound_lock);
777 list_for_each_entry(unbound,
778 &group->unbound_list, unbound_next) {
779 if (dev == unbound->dev) {
780 list_del(&unbound->unbound_next);
781 kfree(unbound);
782 break;
783 }
784 }
785 mutex_unlock(&group->unbound_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -0600786 break;
787 }
788
Alex Williamson811642d2017-06-19 09:10:32 -0600789 /*
790 * If we're the last reference to the group, the group will be
791 * released, which includes unregistering the iommu group notifier.
792 * We hold a read-lock on that notifier list, unregistering needs
793 * a write-lock... deadlock. Release our reference asynchronously
794 * to avoid that situation.
795 */
796 vfio_group_schedule_put(group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600797 return NOTIFY_OK;
798}
799
800/**
801 * VFIO driver API
802 */
803int vfio_add_group_dev(struct device *dev,
804 const struct vfio_device_ops *ops, void *device_data)
805{
806 struct iommu_group *iommu_group;
807 struct vfio_group *group;
808 struct vfio_device *device;
809
810 iommu_group = iommu_group_get(dev);
811 if (!iommu_group)
812 return -EINVAL;
813
814 group = vfio_group_get_from_iommu(iommu_group);
815 if (!group) {
Alex Williamson16ab8a52016-01-27 11:22:25 -0700816 group = vfio_create_group(iommu_group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600817 if (IS_ERR(group)) {
818 iommu_group_put(iommu_group);
819 return PTR_ERR(group);
820 }
Alex Williamson4a688102015-02-06 15:05:06 -0700821 } else {
822 /*
823 * A found vfio_group already holds a reference to the
824 * iommu_group. A created vfio_group keeps the reference.
825 */
826 iommu_group_put(iommu_group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600827 }
828
829 device = vfio_group_get_device(group, dev);
830 if (device) {
Bjorn Helgaasa88a7b32019-03-30 09:41:35 -0500831 dev_WARN(dev, "Device already exists on group %d\n",
832 iommu_group_id(iommu_group));
Alex Williamsoncba33452012-07-31 08:16:22 -0600833 vfio_device_put(device);
834 vfio_group_put(group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600835 return -EBUSY;
836 }
837
838 device = vfio_group_create_device(group, dev, ops, device_data);
839 if (IS_ERR(device)) {
840 vfio_group_put(group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600841 return PTR_ERR(device);
842 }
843
844 /*
Alex Williamson4a688102015-02-06 15:05:06 -0700845 * Drop all but the vfio_device reference. The vfio_device holds
846 * a reference to the vfio_group, which holds a reference to the
847 * iommu_group.
Alex Williamsoncba33452012-07-31 08:16:22 -0600848 */
849 vfio_group_put(group);
850
851 return 0;
852}
853EXPORT_SYMBOL_GPL(vfio_add_group_dev);
854
Vijay Mohan Pandarathil44f50712013-03-11 09:28:44 -0600855/**
Alex Williamson20f30012015-06-09 10:08:57 -0600856 * Get a reference to the vfio_device for a device. Even if the
857 * caller thinks they own the device, they could be racing with a
858 * release call path, so we can't trust drvdata for the shortcut.
859 * Go the long way around, from the iommu_group to the vfio_group
860 * to the vfio_device.
Vijay Mohan Pandarathil44f50712013-03-11 09:28:44 -0600861 */
862struct vfio_device *vfio_device_get_from_dev(struct device *dev)
863{
Alex Williamson20f30012015-06-09 10:08:57 -0600864 struct vfio_group *group;
865 struct vfio_device *device;
Vijay Mohan Pandarathil44f50712013-03-11 09:28:44 -0600866
Kirti Wankhede7ed3ea82016-11-17 02:16:15 +0530867 group = vfio_group_get_from_dev(dev);
Alex Williamson20f30012015-06-09 10:08:57 -0600868 if (!group)
869 return NULL;
870
871 device = vfio_group_get_device(group, dev);
872 vfio_group_put(group);
Vijay Mohan Pandarathil44f50712013-03-11 09:28:44 -0600873
874 return device;
875}
876EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
877
Alex Williamson4bc94d52015-07-24 15:14:04 -0600878static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
879 char *buf)
880{
Joerg Roedele324fc82015-11-04 13:53:26 +0100881 struct vfio_device *it, *device = NULL;
Alex Williamson4bc94d52015-07-24 15:14:04 -0600882
883 mutex_lock(&group->device_lock);
Joerg Roedele324fc82015-11-04 13:53:26 +0100884 list_for_each_entry(it, &group->device_list, group_next) {
885 if (!strcmp(dev_name(it->dev), buf)) {
886 device = it;
Alex Williamson4bc94d52015-07-24 15:14:04 -0600887 vfio_device_get(device);
888 break;
889 }
890 }
891 mutex_unlock(&group->device_lock);
892
893 return device;
894}
895
Vijay Mohan Pandarathil44f50712013-03-11 09:28:44 -0600896/*
897 * Caller must hold a reference to the vfio_device
898 */
899void *vfio_device_data(struct vfio_device *device)
900{
901 return device->device_data;
902}
903EXPORT_SYMBOL_GPL(vfio_device_data);
904
Alex Williamsoncba33452012-07-31 08:16:22 -0600905/*
906 * Decrement the device reference count and wait for the device to be
907 * removed. Open file descriptors for the device... */
908void *vfio_del_group_dev(struct device *dev)
909{
Farhan Ali41be3e22019-04-03 14:22:27 -0400910 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Alex Williamsoncba33452012-07-31 08:16:22 -0600911 struct vfio_device *device = dev_get_drvdata(dev);
912 struct vfio_group *group = device->group;
Alex Williamsoncba33452012-07-31 08:16:22 -0600913 void *device_data = device->device_data;
Alex Williamson60720a02015-02-06 15:05:06 -0700914 struct vfio_unbound_dev *unbound;
Alex Williamson13060b62015-02-06 15:05:07 -0700915 unsigned int i = 0;
Alex Williamsondb7d4d72015-05-01 16:31:41 -0600916 bool interrupted = false;
Alex Williamsoncba33452012-07-31 08:16:22 -0600917
Alex Williamsone014e942013-02-14 14:02:13 -0700918 /*
919 * The group exists so long as we have a device reference. Get
920 * a group reference and use it to scan for the device going away.
921 */
922 vfio_group_get(group);
923
Alex Williamson60720a02015-02-06 15:05:06 -0700924 /*
925 * When the device is removed from the group, the group suddenly
926 * becomes non-viable; the device has a driver (until the unbind
927 * completes), but it's not present in the group. This is bad news
928 * for any external users that need to re-acquire a group reference
929 * in order to match and release their existing reference. To
930 * solve this, we track such devices on the unbound_list to bridge
931 * the gap until they're fully unbound.
932 */
933 unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
934 if (unbound) {
935 unbound->dev = dev;
936 mutex_lock(&group->unbound_lock);
937 list_add(&unbound->unbound_next, &group->unbound_list);
938 mutex_unlock(&group->unbound_lock);
939 }
940 WARN_ON(!unbound);
941
Alex Williamsoncba33452012-07-31 08:16:22 -0600942 vfio_device_put(device);
943
Alex Williamson13060b62015-02-06 15:05:07 -0700944 /*
945 * If the device is still present in the group after the above
946 * 'put', then it is in use and we need to request it from the
947 * bus driver. The driver may in turn need to request the
948 * device from the user. We send the request on an arbitrary
949 * interval with counter to allow the driver to take escalating
950 * measures to release the device if it has the ability to do so.
951 */
Farhan Ali41be3e22019-04-03 14:22:27 -0400952 add_wait_queue(&vfio.release_q, &wait);
953
Alex Williamson13060b62015-02-06 15:05:07 -0700954 do {
955 device = vfio_group_get_device(group, dev);
956 if (!device)
957 break;
958
959 if (device->ops->request)
960 device->ops->request(device_data, i++);
961
962 vfio_device_put(device);
963
Alex Williamsondb7d4d72015-05-01 16:31:41 -0600964 if (interrupted) {
Farhan Ali41be3e22019-04-03 14:22:27 -0400965 wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
Alex Williamsondb7d4d72015-05-01 16:31:41 -0600966 } else {
Farhan Ali41be3e22019-04-03 14:22:27 -0400967 wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
968 if (signal_pending(current)) {
Alex Williamsondb7d4d72015-05-01 16:31:41 -0600969 interrupted = true;
970 dev_warn(dev,
971 "Device is currently in use, task"
972 " \"%s\" (%d) "
973 "blocked until device is released",
974 current->comm, task_pid_nr(current));
975 }
976 }
Alex Williamsone014e942013-02-14 14:02:13 -0700977
Farhan Ali41be3e22019-04-03 14:22:27 -0400978 } while (1);
979
980 remove_wait_queue(&vfio.release_q, &wait);
Alex Williamson6586b562017-08-17 22:10:20 -0600981 /*
982 * In order to support multiple devices per group, devices can be
983 * plucked from the group while other devices in the group are still
984 * in use. The container persists with this group and those remaining
985 * devices still attached. If the user creates an isolation violation
986 * by binding this device to another driver while the group is still in
987 * use, that's their fault. However, in the case of removing the last,
988 * or potentially the only, device in the group there can be no other
989 * in-use devices in the group. The user has done their due diligence
990 * and we should lay no claims to those devices. In order to do that,
991 * we need to make sure the group is detached from the container.
992 * Without this stall, we're potentially racing with a user process
993 * that may attempt to immediately bind this device to another driver.
994 */
995 if (list_empty(&group->device_list))
996 wait_event(group->container_q, !group->container);
997
Alex Williamsone014e942013-02-14 14:02:13 -0700998 vfio_group_put(group);
Alex Williamsoncba33452012-07-31 08:16:22 -0600999
Alex Williamsoncba33452012-07-31 08:16:22 -06001000 return device_data;
1001}
1002EXPORT_SYMBOL_GPL(vfio_del_group_dev);
1003
1004/**
1005 * VFIO base fd, /dev/vfio/vfio
1006 */
1007static long vfio_ioctl_check_extension(struct vfio_container *container,
1008 unsigned long arg)
1009{
Alex Williamson0b43c082013-04-29 08:41:36 -06001010 struct vfio_iommu_driver *driver;
Alex Williamsoncba33452012-07-31 08:16:22 -06001011 long ret = 0;
1012
Alex Williamson0b43c082013-04-29 08:41:36 -06001013 down_read(&container->group_lock);
1014
1015 driver = container->iommu_driver;
1016
Alex Williamsoncba33452012-07-31 08:16:22 -06001017 switch (arg) {
1018 /* No base extensions yet */
1019 default:
1020 /*
1021 * If no driver is set, poll all registered drivers for
1022 * extensions and return the first positive result. If
1023 * a driver is already set, further queries will be passed
1024 * only to that driver.
1025 */
1026 if (!driver) {
1027 mutex_lock(&vfio.iommu_drivers_lock);
Alex Williamsonae5515d2015-12-04 08:38:42 -07001028 list_for_each_entry(driver, &vfio.iommu_drivers_list,
1029 vfio_next) {
Alex Williamson03a76b62015-12-21 15:13:33 -07001030
1031#ifdef CONFIG_VFIO_NOIOMMU
1032 if (!list_empty(&container->group_list) &&
1033 (container->noiommu !=
1034 (driver->ops == &vfio_noiommu_ops)))
1035 continue;
1036#endif
1037
Alex Williamsoncba33452012-07-31 08:16:22 -06001038 if (!try_module_get(driver->ops->owner))
1039 continue;
1040
1041 ret = driver->ops->ioctl(NULL,
1042 VFIO_CHECK_EXTENSION,
1043 arg);
1044 module_put(driver->ops->owner);
1045 if (ret > 0)
1046 break;
1047 }
1048 mutex_unlock(&vfio.iommu_drivers_lock);
1049 } else
1050 ret = driver->ops->ioctl(container->iommu_data,
1051 VFIO_CHECK_EXTENSION, arg);
1052 }
1053
Alex Williamson0b43c082013-04-29 08:41:36 -06001054 up_read(&container->group_lock);
1055
Alex Williamsoncba33452012-07-31 08:16:22 -06001056 return ret;
1057}
1058
Alex Williamson9587f442013-04-25 16:12:38 -06001059/* hold write lock on container->group_lock */
Alex Williamsoncba33452012-07-31 08:16:22 -06001060static int __vfio_container_attach_groups(struct vfio_container *container,
1061 struct vfio_iommu_driver *driver,
1062 void *data)
1063{
1064 struct vfio_group *group;
1065 int ret = -ENODEV;
1066
1067 list_for_each_entry(group, &container->group_list, container_next) {
1068 ret = driver->ops->attach_group(data, group->iommu_group);
1069 if (ret)
1070 goto unwind;
1071 }
1072
1073 return ret;
1074
1075unwind:
1076 list_for_each_entry_continue_reverse(group, &container->group_list,
1077 container_next) {
1078 driver->ops->detach_group(data, group->iommu_group);
1079 }
1080
1081 return ret;
1082}
1083
1084static long vfio_ioctl_set_iommu(struct vfio_container *container,
1085 unsigned long arg)
1086{
1087 struct vfio_iommu_driver *driver;
1088 long ret = -ENODEV;
1089
Alex Williamson9587f442013-04-25 16:12:38 -06001090 down_write(&container->group_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -06001091
1092 /*
1093 * The container is designed to be an unprivileged interface while
1094 * the group can be assigned to specific users. Therefore, only by
1095 * adding a group to a container does the user get the privilege of
1096 * enabling the iommu, which may allocate finite resources. There
1097 * is no unset_iommu, but by removing all the groups from a container,
1098 * the container is deprivileged and returns to an unset state.
1099 */
1100 if (list_empty(&container->group_list) || container->iommu_driver) {
Alex Williamson9587f442013-04-25 16:12:38 -06001101 up_write(&container->group_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -06001102 return -EINVAL;
1103 }
1104
1105 mutex_lock(&vfio.iommu_drivers_lock);
Alex Williamsonae5515d2015-12-04 08:38:42 -07001106 list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
Alex Williamsoncba33452012-07-31 08:16:22 -06001107 void *data;
1108
Alex Williamson03a76b62015-12-21 15:13:33 -07001109#ifdef CONFIG_VFIO_NOIOMMU
1110 /*
1111 * Only noiommu containers can use vfio-noiommu and noiommu
1112 * containers can only use vfio-noiommu.
1113 */
1114 if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
1115 continue;
1116#endif
1117
Alex Williamsoncba33452012-07-31 08:16:22 -06001118 if (!try_module_get(driver->ops->owner))
1119 continue;
1120
1121 /*
1122 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
1123 * so test which iommu driver reported support for this
1124 * extension and call open on them. We also pass them the
1125 * magic, allowing a single driver to support multiple
1126 * interfaces if they'd like.
1127 */
1128 if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
1129 module_put(driver->ops->owner);
1130 continue;
1131 }
1132
Alex Williamsoncba33452012-07-31 08:16:22 -06001133 data = driver->ops->open(arg);
1134 if (IS_ERR(data)) {
1135 ret = PTR_ERR(data);
1136 module_put(driver->ops->owner);
Alex Williamson7c435b42016-02-22 16:02:30 -07001137 continue;
Alex Williamsoncba33452012-07-31 08:16:22 -06001138 }
1139
1140 ret = __vfio_container_attach_groups(container, driver, data);
Alex Williamson7c435b42016-02-22 16:02:30 -07001141 if (ret) {
Alex Williamsoncba33452012-07-31 08:16:22 -06001142 driver->ops->release(data);
1143 module_put(driver->ops->owner);
Alex Williamson7c435b42016-02-22 16:02:30 -07001144 continue;
Alex Williamsoncba33452012-07-31 08:16:22 -06001145 }
1146
Alex Williamson7c435b42016-02-22 16:02:30 -07001147 container->iommu_driver = driver;
1148 container->iommu_data = data;
1149 break;
Alex Williamsoncba33452012-07-31 08:16:22 -06001150 }
1151
1152 mutex_unlock(&vfio.iommu_drivers_lock);
Alex Williamson9587f442013-04-25 16:12:38 -06001153 up_write(&container->group_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -06001154
1155 return ret;
1156}
1157
1158static long vfio_fops_unl_ioctl(struct file *filep,
1159 unsigned int cmd, unsigned long arg)
1160{
1161 struct vfio_container *container = filep->private_data;
1162 struct vfio_iommu_driver *driver;
1163 void *data;
1164 long ret = -EINVAL;
1165
1166 if (!container)
1167 return ret;
1168
Alex Williamsoncba33452012-07-31 08:16:22 -06001169 switch (cmd) {
1170 case VFIO_GET_API_VERSION:
1171 ret = VFIO_API_VERSION;
1172 break;
1173 case VFIO_CHECK_EXTENSION:
1174 ret = vfio_ioctl_check_extension(container, arg);
1175 break;
1176 case VFIO_SET_IOMMU:
1177 ret = vfio_ioctl_set_iommu(container, arg);
1178 break;
1179 default:
Alex Williamson0b43c082013-04-29 08:41:36 -06001180 driver = container->iommu_driver;
1181 data = container->iommu_data;
1182
Alex Williamsoncba33452012-07-31 08:16:22 -06001183 if (driver) /* passthrough all unrecognized ioctls */
1184 ret = driver->ops->ioctl(data, cmd, arg);
1185 }
1186
1187 return ret;
1188}
1189
1190#ifdef CONFIG_COMPAT
1191static long vfio_fops_compat_ioctl(struct file *filep,
1192 unsigned int cmd, unsigned long arg)
1193{
1194 arg = (unsigned long)compat_ptr(arg);
1195 return vfio_fops_unl_ioctl(filep, cmd, arg);
1196}
1197#endif /* CONFIG_COMPAT */
1198
1199static int vfio_fops_open(struct inode *inode, struct file *filep)
1200{
1201 struct vfio_container *container;
1202
1203 container = kzalloc(sizeof(*container), GFP_KERNEL);
1204 if (!container)
1205 return -ENOMEM;
1206
1207 INIT_LIST_HEAD(&container->group_list);
Alex Williamson9587f442013-04-25 16:12:38 -06001208 init_rwsem(&container->group_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -06001209 kref_init(&container->kref);
1210
1211 filep->private_data = container;
1212
1213 return 0;
1214}
1215
1216static int vfio_fops_release(struct inode *inode, struct file *filep)
1217{
1218 struct vfio_container *container = filep->private_data;
1219
1220 filep->private_data = NULL;
1221
1222 vfio_container_put(container);
1223
1224 return 0;
1225}
1226
1227/*
1228 * Once an iommu driver is set, we optionally pass read/write/mmap
1229 * on to the driver, allowing management interfaces beyond ioctl.
1230 */
1231static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
1232 size_t count, loff_t *ppos)
1233{
1234 struct vfio_container *container = filep->private_data;
Alex Williamson0b43c082013-04-29 08:41:36 -06001235 struct vfio_iommu_driver *driver;
1236 ssize_t ret = -EINVAL;
Alex Williamsoncba33452012-07-31 08:16:22 -06001237
Alex Williamson0b43c082013-04-29 08:41:36 -06001238 driver = container->iommu_driver;
1239 if (likely(driver && driver->ops->read))
1240 ret = driver->ops->read(container->iommu_data,
1241 buf, count, ppos);
1242
Alex Williamson0b43c082013-04-29 08:41:36 -06001243 return ret;
Alex Williamsoncba33452012-07-31 08:16:22 -06001244}
1245
1246static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
1247 size_t count, loff_t *ppos)
1248{
1249 struct vfio_container *container = filep->private_data;
Alex Williamson0b43c082013-04-29 08:41:36 -06001250 struct vfio_iommu_driver *driver;
1251 ssize_t ret = -EINVAL;
Alex Williamsoncba33452012-07-31 08:16:22 -06001252
Alex Williamson0b43c082013-04-29 08:41:36 -06001253 driver = container->iommu_driver;
1254 if (likely(driver && driver->ops->write))
1255 ret = driver->ops->write(container->iommu_data,
1256 buf, count, ppos);
1257
Alex Williamson0b43c082013-04-29 08:41:36 -06001258 return ret;
Alex Williamsoncba33452012-07-31 08:16:22 -06001259}
1260
1261static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
1262{
1263 struct vfio_container *container = filep->private_data;
Alex Williamson0b43c082013-04-29 08:41:36 -06001264 struct vfio_iommu_driver *driver;
1265 int ret = -EINVAL;
Alex Williamsoncba33452012-07-31 08:16:22 -06001266
Alex Williamson0b43c082013-04-29 08:41:36 -06001267 driver = container->iommu_driver;
1268 if (likely(driver && driver->ops->mmap))
1269 ret = driver->ops->mmap(container->iommu_data, vma);
1270
Alex Williamson0b43c082013-04-29 08:41:36 -06001271 return ret;
Alex Williamsoncba33452012-07-31 08:16:22 -06001272}
1273
1274static const struct file_operations vfio_fops = {
1275 .owner = THIS_MODULE,
1276 .open = vfio_fops_open,
1277 .release = vfio_fops_release,
1278 .read = vfio_fops_read,
1279 .write = vfio_fops_write,
1280 .unlocked_ioctl = vfio_fops_unl_ioctl,
1281#ifdef CONFIG_COMPAT
1282 .compat_ioctl = vfio_fops_compat_ioctl,
1283#endif
1284 .mmap = vfio_fops_mmap,
1285};
1286
1287/**
1288 * VFIO Group fd, /dev/vfio/$GROUP
1289 */
1290static void __vfio_group_unset_container(struct vfio_group *group)
1291{
1292 struct vfio_container *container = group->container;
1293 struct vfio_iommu_driver *driver;
1294
Alex Williamson9587f442013-04-25 16:12:38 -06001295 down_write(&container->group_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -06001296
1297 driver = container->iommu_driver;
1298 if (driver)
1299 driver->ops->detach_group(container->iommu_data,
1300 group->iommu_group);
1301
1302 group->container = NULL;
Alex Williamson6586b562017-08-17 22:10:20 -06001303 wake_up(&group->container_q);
Alex Williamsoncba33452012-07-31 08:16:22 -06001304 list_del(&group->container_next);
1305
1306 /* Detaching the last group deprivileges a container, remove iommu */
1307 if (driver && list_empty(&container->group_list)) {
1308 driver->ops->release(container->iommu_data);
1309 module_put(driver->ops->owner);
1310 container->iommu_driver = NULL;
1311 container->iommu_data = NULL;
1312 }
1313
Alex Williamson9587f442013-04-25 16:12:38 -06001314 up_write(&container->group_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -06001315
1316 vfio_container_put(container);
1317}
1318
1319/*
1320 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
1321 * if there was no container to unset. Since the ioctl is called on
1322 * the group, we know that still exists, therefore the only valid
1323 * transition here is 1->0.
1324 */
1325static int vfio_group_unset_container(struct vfio_group *group)
1326{
1327 int users = atomic_cmpxchg(&group->container_users, 1, 0);
1328
1329 if (!users)
1330 return -EINVAL;
1331 if (users != 1)
1332 return -EBUSY;
1333
1334 __vfio_group_unset_container(group);
1335
1336 return 0;
1337}
1338
1339/*
1340 * When removing container users, anything that removes the last user
1341 * implicitly removes the group from the container. That is, if the
1342 * group file descriptor is closed, as well as any device file descriptors,
1343 * the group is free.
1344 */
1345static void vfio_group_try_dissolve_container(struct vfio_group *group)
1346{
1347 if (0 == atomic_dec_if_positive(&group->container_users))
1348 __vfio_group_unset_container(group);
1349}
1350
1351static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1352{
Al Viro2903ff02012-08-28 12:52:22 -04001353 struct fd f;
Alex Williamsoncba33452012-07-31 08:16:22 -06001354 struct vfio_container *container;
1355 struct vfio_iommu_driver *driver;
Al Viro2903ff02012-08-28 12:52:22 -04001356 int ret = 0;
Alex Williamsoncba33452012-07-31 08:16:22 -06001357
1358 if (atomic_read(&group->container_users))
1359 return -EINVAL;
1360
Alex Williamson03a76b62015-12-21 15:13:33 -07001361 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1362 return -EPERM;
1363
Al Viro2903ff02012-08-28 12:52:22 -04001364 f = fdget(container_fd);
1365 if (!f.file)
Alex Williamsoncba33452012-07-31 08:16:22 -06001366 return -EBADF;
1367
1368 /* Sanity check, is this really our fd? */
Al Viro2903ff02012-08-28 12:52:22 -04001369 if (f.file->f_op != &vfio_fops) {
1370 fdput(f);
Alex Williamsoncba33452012-07-31 08:16:22 -06001371 return -EINVAL;
1372 }
1373
Al Viro2903ff02012-08-28 12:52:22 -04001374 container = f.file->private_data;
Alex Williamsoncba33452012-07-31 08:16:22 -06001375 WARN_ON(!container); /* fget ensures we don't race vfio_release */
1376
Alex Williamson9587f442013-04-25 16:12:38 -06001377 down_write(&container->group_lock);
Alex Williamsoncba33452012-07-31 08:16:22 -06001378
Alex Williamson03a76b62015-12-21 15:13:33 -07001379 /* Real groups and fake groups cannot mix */
1380 if (!list_empty(&container->group_list) &&
1381 container->noiommu != group->noiommu) {
1382 ret = -EPERM;
1383 goto unlock_out;
1384 }
1385
Alex Williamsoncba33452012-07-31 08:16:22 -06001386 driver = container->iommu_driver;
1387 if (driver) {
1388 ret = driver->ops->attach_group(container->iommu_data,
1389 group->iommu_group);
1390 if (ret)
1391 goto unlock_out;
1392 }
1393
1394 group->container = container;
Alex Williamson03a76b62015-12-21 15:13:33 -07001395 container->noiommu = group->noiommu;
Alex Williamsoncba33452012-07-31 08:16:22 -06001396 list_add(&group->container_next, &container->group_list);
1397
1398 /* Get a reference on the container and mark a user within the group */
1399 vfio_container_get(container);
1400 atomic_inc(&group->container_users);
1401
1402unlock_out:
Alex Williamson9587f442013-04-25 16:12:38 -06001403 up_write(&container->group_lock);
Al Viro2903ff02012-08-28 12:52:22 -04001404 fdput(f);
Alex Williamsoncba33452012-07-31 08:16:22 -06001405 return ret;
1406}
1407
1408static bool vfio_group_viable(struct vfio_group *group)
1409{
1410 return (iommu_group_for_each_dev(group->iommu_group,
1411 group, vfio_dev_viable) == 0);
1412}
1413
Kirti Wankhede32f55d82016-11-17 02:16:16 +05301414static int vfio_group_add_container_user(struct vfio_group *group)
1415{
1416 if (!atomic_inc_not_zero(&group->container_users))
1417 return -EINVAL;
1418
1419 if (group->noiommu) {
1420 atomic_dec(&group->container_users);
1421 return -EPERM;
1422 }
1423 if (!group->container->iommu_driver || !vfio_group_viable(group)) {
1424 atomic_dec(&group->container_users);
1425 return -EINVAL;
1426 }
1427
1428 return 0;
1429}
1430
Alex Williamsoncba33452012-07-31 08:16:22 -06001431static const struct file_operations vfio_device_fops;
1432
1433static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1434{
1435 struct vfio_device *device;
1436 struct file *filep;
Alex Williamson4bc94d52015-07-24 15:14:04 -06001437 int ret;
Alex Williamsoncba33452012-07-31 08:16:22 -06001438
1439 if (0 == atomic_read(&group->container_users) ||
1440 !group->container->iommu_driver || !vfio_group_viable(group))
1441 return -EINVAL;
1442
Alex Williamson03a76b62015-12-21 15:13:33 -07001443 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1444 return -EPERM;
1445
Alex Williamson4bc94d52015-07-24 15:14:04 -06001446 device = vfio_device_get_from_name(group, buf);
1447 if (!device)
1448 return -ENODEV;
Alex Williamsoncba33452012-07-31 08:16:22 -06001449
Alex Williamson4bc94d52015-07-24 15:14:04 -06001450 ret = device->ops->open(device->device_data);
1451 if (ret) {
1452 vfio_device_put(device);
1453 return ret;
Alex Williamsoncba33452012-07-31 08:16:22 -06001454 }
Alex Williamson4bc94d52015-07-24 15:14:04 -06001455
1456 /*
1457 * We can't use anon_inode_getfd() because we need to modify
1458 * the f_mode flags directly to allow more than just ioctls
1459 */
1460 ret = get_unused_fd_flags(O_CLOEXEC);
1461 if (ret < 0) {
1462 device->ops->release(device->device_data);
1463 vfio_device_put(device);
1464 return ret;
1465 }
1466
1467 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
1468 device, O_RDWR);
1469 if (IS_ERR(filep)) {
1470 put_unused_fd(ret);
1471 ret = PTR_ERR(filep);
1472 device->ops->release(device->device_data);
1473 vfio_device_put(device);
1474 return ret;
1475 }
1476
1477 /*
1478 * TODO: add an anon_inode interface to do this.
1479 * Appears to be missing by lack of need rather than
1480 * explicitly prevented. Now there's need.
1481 */
1482 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1483
1484 atomic_inc(&group->container_users);
1485
1486 fd_install(ret, filep);
Alex Williamsoncba33452012-07-31 08:16:22 -06001487
Alex Williamson03a76b62015-12-21 15:13:33 -07001488 if (group->noiommu)
1489 dev_warn(device->dev, "vfio-noiommu device opened by user "
1490 "(%s:%d)\n", current->comm, task_pid_nr(current));
1491
Alex Williamsoncba33452012-07-31 08:16:22 -06001492 return ret;
1493}
1494
1495static long vfio_group_fops_unl_ioctl(struct file *filep,
1496 unsigned int cmd, unsigned long arg)
1497{
1498 struct vfio_group *group = filep->private_data;
1499 long ret = -ENOTTY;
1500
1501 switch (cmd) {
1502 case VFIO_GROUP_GET_STATUS:
1503 {
1504 struct vfio_group_status status;
1505 unsigned long minsz;
1506
1507 minsz = offsetofend(struct vfio_group_status, flags);
1508
1509 if (copy_from_user(&status, (void __user *)arg, minsz))
1510 return -EFAULT;
1511
1512 if (status.argsz < minsz)
1513 return -EINVAL;
1514
1515 status.flags = 0;
1516
1517 if (vfio_group_viable(group))
1518 status.flags |= VFIO_GROUP_FLAGS_VIABLE;
1519
1520 if (group->container)
1521 status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;
1522
1523 if (copy_to_user((void __user *)arg, &status, minsz))
1524 return -EFAULT;
1525
1526 ret = 0;
1527 break;
1528 }
1529 case VFIO_GROUP_SET_CONTAINER:
1530 {
1531 int fd;
1532
1533 if (get_user(fd, (int __user *)arg))
1534 return -EFAULT;
1535
1536 if (fd < 0)
1537 return -EINVAL;
1538
1539 ret = vfio_group_set_container(group, fd);
1540 break;
1541 }
1542 case VFIO_GROUP_UNSET_CONTAINER:
1543 ret = vfio_group_unset_container(group);
1544 break;
1545 case VFIO_GROUP_GET_DEVICE_FD:
1546 {
1547 char *buf;
1548
1549 buf = strndup_user((const char __user *)arg, PAGE_SIZE);
1550 if (IS_ERR(buf))
1551 return PTR_ERR(buf);
1552
1553 ret = vfio_group_get_device_fd(group, buf);
1554 kfree(buf);
1555 break;
1556 }
1557 }
1558
1559 return ret;
1560}
1561
1562#ifdef CONFIG_COMPAT
1563static long vfio_group_fops_compat_ioctl(struct file *filep,
1564 unsigned int cmd, unsigned long arg)
1565{
1566 arg = (unsigned long)compat_ptr(arg);
1567 return vfio_group_fops_unl_ioctl(filep, cmd, arg);
1568}
1569#endif /* CONFIG_COMPAT */
1570
1571static int vfio_group_fops_open(struct inode *inode, struct file *filep)
1572{
1573 struct vfio_group *group;
Alex Williamson6d6768c2013-06-25 16:06:54 -06001574 int opened;
Alex Williamsoncba33452012-07-31 08:16:22 -06001575
1576 group = vfio_group_get_from_minor(iminor(inode));
1577 if (!group)
1578 return -ENODEV;
1579
Alex Williamson03a76b62015-12-21 15:13:33 -07001580 if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
1581 vfio_group_put(group);
1582 return -EPERM;
1583 }
1584
Alex Williamson6d6768c2013-06-25 16:06:54 -06001585 /* Do we need multiple instances of the group open? Seems not. */
1586 opened = atomic_cmpxchg(&group->opened, 0, 1);
1587 if (opened) {
1588 vfio_group_put(group);
1589 return -EBUSY;
1590 }
1591
1592 /* Is something still in use from a previous open? */
Alex Williamsoncba33452012-07-31 08:16:22 -06001593 if (group->container) {
Alex Williamson6d6768c2013-06-25 16:06:54 -06001594 atomic_dec(&group->opened);
Alex Williamsoncba33452012-07-31 08:16:22 -06001595 vfio_group_put(group);
1596 return -EBUSY;
1597 }
1598
Alex Williamson65b1ade2017-03-21 13:19:09 -06001599 /* Warn if previous user didn't cleanup and re-init to drop them */
1600 if (WARN_ON(group->notifier.head))
1601 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
1602
Alex Williamsoncba33452012-07-31 08:16:22 -06001603 filep->private_data = group;
1604
1605 return 0;
1606}
1607
1608static int vfio_group_fops_release(struct inode *inode, struct file *filep)
1609{
1610 struct vfio_group *group = filep->private_data;
1611
1612 filep->private_data = NULL;
1613
1614 vfio_group_try_dissolve_container(group);
1615
Alex Williamson6d6768c2013-06-25 16:06:54 -06001616 atomic_dec(&group->opened);
1617
Alex Williamsoncba33452012-07-31 08:16:22 -06001618 vfio_group_put(group);
1619
1620 return 0;
1621}
1622
1623static const struct file_operations vfio_group_fops = {
1624 .owner = THIS_MODULE,
1625 .unlocked_ioctl = vfio_group_fops_unl_ioctl,
1626#ifdef CONFIG_COMPAT
1627 .compat_ioctl = vfio_group_fops_compat_ioctl,
1628#endif
1629 .open = vfio_group_fops_open,
1630 .release = vfio_group_fops_release,
1631};
1632
1633/**
1634 * VFIO Device fd
1635 */
1636static int vfio_device_fops_release(struct inode *inode, struct file *filep)
1637{
1638 struct vfio_device *device = filep->private_data;
1639
1640 device->ops->release(device->device_data);
1641
1642 vfio_group_try_dissolve_container(device->group);
1643
1644 vfio_device_put(device);
1645
1646 return 0;
1647}
1648
1649static long vfio_device_fops_unl_ioctl(struct file *filep,
1650 unsigned int cmd, unsigned long arg)
1651{
1652 struct vfio_device *device = filep->private_data;
1653
1654 if (unlikely(!device->ops->ioctl))
1655 return -EINVAL;
1656
1657 return device->ops->ioctl(device->device_data, cmd, arg);
1658}
1659
1660static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
1661 size_t count, loff_t *ppos)
1662{
1663 struct vfio_device *device = filep->private_data;
1664
1665 if (unlikely(!device->ops->read))
1666 return -EINVAL;
1667
1668 return device->ops->read(device->device_data, buf, count, ppos);
1669}
1670
1671static ssize_t vfio_device_fops_write(struct file *filep,
1672 const char __user *buf,
1673 size_t count, loff_t *ppos)
1674{
1675 struct vfio_device *device = filep->private_data;
1676
1677 if (unlikely(!device->ops->write))
1678 return -EINVAL;
1679
1680 return device->ops->write(device->device_data, buf, count, ppos);
1681}
1682
1683static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
1684{
1685 struct vfio_device *device = filep->private_data;
1686
1687 if (unlikely(!device->ops->mmap))
1688 return -EINVAL;
1689
1690 return device->ops->mmap(device->device_data, vma);
1691}
1692
1693#ifdef CONFIG_COMPAT
1694static long vfio_device_fops_compat_ioctl(struct file *filep,
1695 unsigned int cmd, unsigned long arg)
1696{
1697 arg = (unsigned long)compat_ptr(arg);
1698 return vfio_device_fops_unl_ioctl(filep, cmd, arg);
1699}
1700#endif /* CONFIG_COMPAT */
1701
1702static const struct file_operations vfio_device_fops = {
1703 .owner = THIS_MODULE,
1704 .release = vfio_device_fops_release,
1705 .read = vfio_device_fops_read,
1706 .write = vfio_device_fops_write,
1707 .unlocked_ioctl = vfio_device_fops_unl_ioctl,
1708#ifdef CONFIG_COMPAT
1709 .compat_ioctl = vfio_device_fops_compat_ioctl,
1710#endif
1711 .mmap = vfio_device_fops_mmap,
1712};
1713
1714/**
Alexey Kardashevskiy6cdd9782013-08-05 10:52:36 -06001715 * External user API, exported by symbols to be linked dynamically.
1716 *
1717 * The protocol includes:
1718 * 1. do normal VFIO init operation:
1719 * - opening a new container;
1720 * - attaching group(s) to it;
1721 * - setting an IOMMU driver for a container.
1722 * When IOMMU is set for a container, all groups in it are
1723 * considered ready to use by an external user.
1724 *
1725 * 2. User space passes a group fd to an external user.
1726 * The external user calls vfio_group_get_external_user()
1727 * to verify that:
1728 * - the group is initialized;
1729 * - IOMMU is set for it.
1730 * If both checks passed, vfio_group_get_external_user()
1731 * increments the container user counter to prevent
1732 * the VFIO group from disposal before KVM exits.
1733 *
1734 * 3. The external user calls vfio_external_user_iommu_id()
1735 * to know an IOMMU ID.
1736 *
1737 * 4. When the external KVM finishes, it calls
1738 * vfio_group_put_external_user() to release the VFIO group.
1739 * This call decrements the container user counter.
1740 */
1741struct vfio_group *vfio_group_get_external_user(struct file *filep)
1742{
1743 struct vfio_group *group = filep->private_data;
Kirti Wankhede32f55d82016-11-17 02:16:16 +05301744 int ret;
Alexey Kardashevskiy6cdd9782013-08-05 10:52:36 -06001745
1746 if (filep->f_op != &vfio_group_fops)
1747 return ERR_PTR(-EINVAL);
1748
Kirti Wankhede32f55d82016-11-17 02:16:16 +05301749 ret = vfio_group_add_container_user(group);
1750 if (ret)
1751 return ERR_PTR(ret);
Alexey Kardashevskiy6cdd9782013-08-05 10:52:36 -06001752
1753 vfio_group_get(group);
1754
1755 return group;
1756}
1757EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
1758
1759void vfio_group_put_external_user(struct vfio_group *group)
1760{
Alexey Kardashevskiy6cdd9782013-08-05 10:52:36 -06001761 vfio_group_try_dissolve_container(group);
Ilya Lesokhind370c912016-07-14 16:50:19 +03001762 vfio_group_put(group);
Alexey Kardashevskiy6cdd9782013-08-05 10:52:36 -06001763}
1764EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
1765
Alex Williamson5d6dee82017-06-28 13:50:05 -06001766bool vfio_external_group_match_file(struct vfio_group *test_group,
1767 struct file *filep)
1768{
1769 struct vfio_group *group = filep->private_data;
1770
1771 return (filep->f_op == &vfio_group_fops) && (group == test_group);
1772}
1773EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
1774
Alexey Kardashevskiy6cdd9782013-08-05 10:52:36 -06001775int vfio_external_user_iommu_id(struct vfio_group *group)
1776{
1777 return iommu_group_id(group->iommu_group);
1778}
1779EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
1780
Alex Williamson88d7ab82014-02-26 11:38:39 -07001781long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
1782{
1783 return vfio_ioctl_check_extension(group->container, arg);
1784}
1785EXPORT_SYMBOL_GPL(vfio_external_check_extension);
1786
Alexey Kardashevskiy6cdd9782013-08-05 10:52:36 -06001787/**
Alex Williamsond7a8d5e2016-02-22 16:02:33 -07001788 * Sub-module support
1789 */
1790/*
1791 * Helper for managing a buffer of info chain capabilities, allocate or
1792 * reallocate a buffer with additional @size, filling in @id and @version
1793 * of the capability. A pointer to the new capability is returned.
1794 *
1795 * NB. The chain is based at the head of the buffer, so new entries are
1796 * added to the tail, vfio_info_cap_shift() should be called to fixup the
1797 * next offsets prior to copying to the user buffer.
1798 */
1799struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
1800 size_t size, u16 id, u16 version)
1801{
1802 void *buf;
1803 struct vfio_info_cap_header *header, *tmp;
1804
1805 buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
1806 if (!buf) {
1807 kfree(caps->buf);
1808 caps->size = 0;
1809 return ERR_PTR(-ENOMEM);
1810 }
1811
1812 caps->buf = buf;
1813 header = buf + caps->size;
1814
1815 /* Eventually copied to user buffer, zero */
1816 memset(header, 0, size);
1817
1818 header->id = id;
1819 header->version = version;
1820
1821 /* Add to the end of the capability chain */
Eric Auger5ba6de92016-11-21 07:21:02 +01001822 for (tmp = buf; tmp->next; tmp = buf + tmp->next)
Alex Williamsond7a8d5e2016-02-22 16:02:33 -07001823 ; /* nothing */
1824
1825 tmp->next = caps->size;
1826 caps->size += size;
1827
1828 return header;
1829}
1830EXPORT_SYMBOL_GPL(vfio_info_cap_add);
1831
1832void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
1833{
1834 struct vfio_info_cap_header *tmp;
Eric Auger5ba6de92016-11-21 07:21:02 +01001835 void *buf = (void *)caps->buf;
Alex Williamsond7a8d5e2016-02-22 16:02:33 -07001836
Eric Auger5ba6de92016-11-21 07:21:02 +01001837 for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
Alex Williamsond7a8d5e2016-02-22 16:02:33 -07001838 tmp->next += offset;
1839}
Kirti Wankhedeb3c0a862016-11-17 02:16:25 +05301840EXPORT_SYMBOL(vfio_info_cap_shift);
Alex Williamsond7a8d5e2016-02-22 16:02:33 -07001841
Alex Williamsondda01f72017-12-12 12:59:39 -07001842int vfio_info_add_capability(struct vfio_info_cap *caps,
1843 struct vfio_info_cap_header *cap, size_t size)
Kirti Wankhedeb3c0a862016-11-17 02:16:25 +05301844{
1845 struct vfio_info_cap_header *header;
Kirti Wankhedeb3c0a862016-11-17 02:16:25 +05301846
Alex Williamsondda01f72017-12-12 12:59:39 -07001847 header = vfio_info_cap_add(caps, size, cap->id, cap->version);
Kirti Wankhedeb3c0a862016-11-17 02:16:25 +05301848 if (IS_ERR(header))
1849 return PTR_ERR(header);
1850
Alex Williamsondda01f72017-12-12 12:59:39 -07001851 memcpy(header + 1, cap + 1, size - sizeof(*header));
1852
Kirti Wankhedeb3c0a862016-11-17 02:16:25 +05301853 return 0;
1854}
Kirti Wankhedeb3c0a862016-11-17 02:16:25 +05301855EXPORT_SYMBOL(vfio_info_add_capability);
Kirti Wankhede21690372016-11-17 02:16:17 +05301856
Kirti Wankhedec747f082016-11-17 02:16:27 +05301857int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
1858 int max_irq_type, size_t *data_size)
1859{
1860 unsigned long minsz;
1861 size_t size;
1862
1863 minsz = offsetofend(struct vfio_irq_set, count);
1864
1865 if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
1866 (hdr->count >= (U32_MAX - hdr->start)) ||
1867 (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
1868 VFIO_IRQ_SET_ACTION_TYPE_MASK)))
1869 return -EINVAL;
1870
1871 if (data_size)
1872 *data_size = 0;
1873
1874 if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
1875 return -EINVAL;
1876
1877 switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
1878 case VFIO_IRQ_SET_DATA_NONE:
1879 size = 0;
1880 break;
1881 case VFIO_IRQ_SET_DATA_BOOL:
1882 size = sizeof(uint8_t);
1883 break;
1884 case VFIO_IRQ_SET_DATA_EVENTFD:
1885 size = sizeof(int32_t);
1886 break;
1887 default:
1888 return -EINVAL;
1889 }
1890
1891 if (size) {
1892 if (hdr->argsz - minsz < hdr->count * size)
1893 return -EINVAL;
1894
1895 if (!data_size)
1896 return -EINVAL;
1897
1898 *data_size = hdr->count * size;
1899 }
1900
1901 return 0;
1902}
1903EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
1904
Kirti Wankhede21690372016-11-17 02:16:17 +05301905/*
1906 * Pin a set of guest PFNs and return their associated host PFNs for local
1907 * domain only.
1908 * @dev [in] : device
Changbin Dud9d84782017-02-06 15:03:37 +08001909 * @user_pfn [in]: array of user/guest PFNs to be pinned.
Kirti Wankhede21690372016-11-17 02:16:17 +05301910 * @npage [in] : count of elements in user_pfn array. This count should not
1911 * be greater VFIO_PIN_PAGES_MAX_ENTRIES.
1912 * @prot [in] : protection flags
1913 * @phys_pfn[out]: array of host PFNs
1914 * Return error or number of pages pinned.
1915 */
1916int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
1917 int prot, unsigned long *phys_pfn)
1918{
1919 struct vfio_container *container;
1920 struct vfio_group *group;
1921 struct vfio_iommu_driver *driver;
1922 int ret;
1923
1924 if (!dev || !user_pfn || !phys_pfn || !npage)
1925 return -EINVAL;
1926
1927 if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
1928 return -E2BIG;
1929
1930 group = vfio_group_get_from_dev(dev);
Christophe JAILLETd2564592016-11-30 08:06:12 +01001931 if (!group)
1932 return -ENODEV;
Kirti Wankhede21690372016-11-17 02:16:17 +05301933
1934 ret = vfio_group_add_container_user(group);
1935 if (ret)
1936 goto err_pin_pages;
1937
1938 container = group->container;
Kirti Wankhede21690372016-11-17 02:16:17 +05301939 driver = container->iommu_driver;
1940 if (likely(driver && driver->ops->pin_pages))
1941 ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
1942 npage, prot, phys_pfn);
1943 else
1944 ret = -ENOTTY;
1945
Kirti Wankhede21690372016-11-17 02:16:17 +05301946 vfio_group_try_dissolve_container(group);
1947
1948err_pin_pages:
1949 vfio_group_put(group);
1950 return ret;
1951}
1952EXPORT_SYMBOL(vfio_pin_pages);
1953
1954/*
1955 * Unpin set of host PFNs for local domain only.
1956 * @dev [in] : device
1957 * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest
1958 * PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
1959 * @npage [in] : count of elements in user_pfn array. This count should not
1960 * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
1961 * Return error or number of pages unpinned.
1962 */
1963int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
1964{
1965 struct vfio_container *container;
1966 struct vfio_group *group;
1967 struct vfio_iommu_driver *driver;
1968 int ret;
1969
1970 if (!dev || !user_pfn || !npage)
1971 return -EINVAL;
1972
1973 if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
1974 return -E2BIG;
1975
1976 group = vfio_group_get_from_dev(dev);
Christophe JAILLETd2564592016-11-30 08:06:12 +01001977 if (!group)
1978 return -ENODEV;
Kirti Wankhede21690372016-11-17 02:16:17 +05301979
1980 ret = vfio_group_add_container_user(group);
1981 if (ret)
1982 goto err_unpin_pages;
1983
1984 container = group->container;
Kirti Wankhede21690372016-11-17 02:16:17 +05301985 driver = container->iommu_driver;
1986 if (likely(driver && driver->ops->unpin_pages))
1987 ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
1988 npage);
1989 else
1990 ret = -ENOTTY;
1991
Kirti Wankhede21690372016-11-17 02:16:17 +05301992 vfio_group_try_dissolve_container(group);
1993
1994err_unpin_pages:
1995 vfio_group_put(group);
1996 return ret;
1997}
1998EXPORT_SYMBOL(vfio_unpin_pages);
1999
Jike Song22195cb2016-12-01 13:20:05 +08002000static int vfio_register_iommu_notifier(struct vfio_group *group,
2001 unsigned long *events,
2002 struct notifier_block *nb)
Kirti Wankhedec086de812016-11-17 10:28:26 +05302003{
2004 struct vfio_container *container;
Kirti Wankhedec086de812016-11-17 10:28:26 +05302005 struct vfio_iommu_driver *driver;
2006 int ret;
2007
Kirti Wankhedec086de812016-11-17 10:28:26 +05302008 ret = vfio_group_add_container_user(group);
2009 if (ret)
Jike Song22195cb2016-12-01 13:20:05 +08002010 return -EINVAL;
Kirti Wankhedec086de812016-11-17 10:28:26 +05302011
2012 container = group->container;
Kirti Wankhedec086de812016-11-17 10:28:26 +05302013 driver = container->iommu_driver;
2014 if (likely(driver && driver->ops->register_notifier))
Jike Song22195cb2016-12-01 13:20:05 +08002015 ret = driver->ops->register_notifier(container->iommu_data,
2016 events, nb);
Kirti Wankhedec086de812016-11-17 10:28:26 +05302017 else
2018 ret = -ENOTTY;
2019
Kirti Wankhedec086de812016-11-17 10:28:26 +05302020 vfio_group_try_dissolve_container(group);
2021
Kirti Wankhedec086de812016-11-17 10:28:26 +05302022 return ret;
2023}
Kirti Wankhedec086de812016-11-17 10:28:26 +05302024
Jike Song22195cb2016-12-01 13:20:05 +08002025static int vfio_unregister_iommu_notifier(struct vfio_group *group,
2026 struct notifier_block *nb)
Kirti Wankhedec086de812016-11-17 10:28:26 +05302027{
2028 struct vfio_container *container;
Kirti Wankhedec086de812016-11-17 10:28:26 +05302029 struct vfio_iommu_driver *driver;
2030 int ret;
2031
Kirti Wankhedec086de812016-11-17 10:28:26 +05302032 ret = vfio_group_add_container_user(group);
2033 if (ret)
Jike Song22195cb2016-12-01 13:20:05 +08002034 return -EINVAL;
Kirti Wankhedec086de812016-11-17 10:28:26 +05302035
2036 container = group->container;
Kirti Wankhedec086de812016-11-17 10:28:26 +05302037 driver = container->iommu_driver;
2038 if (likely(driver && driver->ops->unregister_notifier))
2039 ret = driver->ops->unregister_notifier(container->iommu_data,
2040 nb);
2041 else
2042 ret = -ENOTTY;
2043
Kirti Wankhedec086de812016-11-17 10:28:26 +05302044 vfio_group_try_dissolve_container(group);
2045
Jike Song22195cb2016-12-01 13:20:05 +08002046 return ret;
2047}
2048
Jike Songccd46db2016-12-01 13:20:06 +08002049void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
2050{
2051 group->kvm = kvm;
2052 blocking_notifier_call_chain(&group->notifier,
2053 VFIO_GROUP_NOTIFY_SET_KVM, kvm);
2054}
2055EXPORT_SYMBOL_GPL(vfio_group_set_kvm);
2056
2057static int vfio_register_group_notifier(struct vfio_group *group,
2058 unsigned long *events,
2059 struct notifier_block *nb)
2060{
Jike Songccd46db2016-12-01 13:20:06 +08002061 int ret;
2062 bool set_kvm = false;
2063
2064 if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
2065 set_kvm = true;
2066
2067 /* clear known events */
2068 *events &= ~VFIO_GROUP_NOTIFY_SET_KVM;
2069
2070 /* refuse to continue if still events remaining */
2071 if (*events)
2072 return -EINVAL;
2073
2074 ret = vfio_group_add_container_user(group);
2075 if (ret)
2076 return -EINVAL;
2077
Jike Songccd46db2016-12-01 13:20:06 +08002078 ret = blocking_notifier_chain_register(&group->notifier, nb);
2079
2080 /*
2081 * The attaching of kvm and vfio_group might already happen, so
2082 * here we replay once upon registration.
2083 */
2084 if (!ret && set_kvm && group->kvm)
2085 blocking_notifier_call_chain(&group->notifier,
2086 VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
2087
Jike Songccd46db2016-12-01 13:20:06 +08002088 vfio_group_try_dissolve_container(group);
2089
2090 return ret;
2091}
2092
2093static int vfio_unregister_group_notifier(struct vfio_group *group,
2094 struct notifier_block *nb)
2095{
Jike Songccd46db2016-12-01 13:20:06 +08002096 int ret;
2097
2098 ret = vfio_group_add_container_user(group);
2099 if (ret)
2100 return -EINVAL;
2101
Jike Songccd46db2016-12-01 13:20:06 +08002102 ret = blocking_notifier_chain_unregister(&group->notifier, nb);
2103
Jike Songccd46db2016-12-01 13:20:06 +08002104 vfio_group_try_dissolve_container(group);
2105
2106 return ret;
2107}
2108
Jike Song22195cb2016-12-01 13:20:05 +08002109int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
2110 unsigned long *events, struct notifier_block *nb)
2111{
2112 struct vfio_group *group;
2113 int ret;
2114
2115 if (!dev || !nb || !events || (*events == 0))
2116 return -EINVAL;
2117
2118 group = vfio_group_get_from_dev(dev);
2119 if (!group)
2120 return -ENODEV;
2121
2122 switch (type) {
2123 case VFIO_IOMMU_NOTIFY:
2124 ret = vfio_register_iommu_notifier(group, events, nb);
2125 break;
Jike Songccd46db2016-12-01 13:20:06 +08002126 case VFIO_GROUP_NOTIFY:
2127 ret = vfio_register_group_notifier(group, events, nb);
2128 break;
Jike Song22195cb2016-12-01 13:20:05 +08002129 default:
2130 ret = -EINVAL;
2131 }
2132
2133 vfio_group_put(group);
2134 return ret;
2135}
2136EXPORT_SYMBOL(vfio_register_notifier);
2137
2138int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
2139 struct notifier_block *nb)
2140{
2141 struct vfio_group *group;
2142 int ret;
2143
2144 if (!dev || !nb)
2145 return -EINVAL;
2146
2147 group = vfio_group_get_from_dev(dev);
2148 if (!group)
2149 return -ENODEV;
2150
2151 switch (type) {
2152 case VFIO_IOMMU_NOTIFY:
2153 ret = vfio_unregister_iommu_notifier(group, nb);
2154 break;
Jike Songccd46db2016-12-01 13:20:06 +08002155 case VFIO_GROUP_NOTIFY:
2156 ret = vfio_unregister_group_notifier(group, nb);
2157 break;
Jike Song22195cb2016-12-01 13:20:05 +08002158 default:
2159 ret = -EINVAL;
2160 }
2161
Kirti Wankhedec086de812016-11-17 10:28:26 +05302162 vfio_group_put(group);
2163 return ret;
2164}
2165EXPORT_SYMBOL(vfio_unregister_notifier);
2166
Alex Williamsond7a8d5e2016-02-22 16:02:33 -07002167/**
Alex Williamsoncba33452012-07-31 08:16:22 -06002168 * Module/class support
2169 */
2170static char *vfio_devnode(struct device *dev, umode_t *mode)
2171{
2172 return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
2173}
2174
Alex Williamsond1099902013-12-19 10:17:13 -07002175static struct miscdevice vfio_dev = {
2176 .minor = VFIO_MINOR,
2177 .name = "vfio",
2178 .fops = &vfio_fops,
2179 .nodename = "vfio/vfio",
2180 .mode = S_IRUGO | S_IWUGO,
2181};
2182
Alex Williamsoncba33452012-07-31 08:16:22 -06002183static int __init vfio_init(void)
2184{
2185 int ret;
2186
2187 idr_init(&vfio.group_idr);
2188 mutex_init(&vfio.group_lock);
2189 mutex_init(&vfio.iommu_drivers_lock);
2190 INIT_LIST_HEAD(&vfio.group_list);
2191 INIT_LIST_HEAD(&vfio.iommu_drivers_list);
2192 init_waitqueue_head(&vfio.release_q);
2193
Alex Williamsond1099902013-12-19 10:17:13 -07002194 ret = misc_register(&vfio_dev);
2195 if (ret) {
2196 pr_err("vfio: misc device register failed\n");
2197 return ret;
2198 }
2199
2200 /* /dev/vfio/$GROUP */
Alex Williamsoncba33452012-07-31 08:16:22 -06002201 vfio.class = class_create(THIS_MODULE, "vfio");
2202 if (IS_ERR(vfio.class)) {
2203 ret = PTR_ERR(vfio.class);
2204 goto err_class;
2205 }
2206
2207 vfio.class->devnode = vfio_devnode;
2208
Chengguang Xu8bcb64a2019-02-12 13:59:29 +08002209 ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
Alex Williamsoncba33452012-07-31 08:16:22 -06002210 if (ret)
Alex Williamsond1099902013-12-19 10:17:13 -07002211 goto err_alloc_chrdev;
Alex Williamsoncba33452012-07-31 08:16:22 -06002212
Alex Williamsoncba33452012-07-31 08:16:22 -06002213 cdev_init(&vfio.group_cdev, &vfio_group_fops);
Chengguang Xu8bcb64a2019-02-12 13:59:29 +08002214 ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
Alex Williamsoncba33452012-07-31 08:16:22 -06002215 if (ret)
Alex Williamsond1099902013-12-19 10:17:13 -07002216 goto err_cdev_add;
Alex Williamsoncba33452012-07-31 08:16:22 -06002217
2218 pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
2219
Alex Williamson03a76b62015-12-21 15:13:33 -07002220#ifdef CONFIG_VFIO_NOIOMMU
2221 vfio_register_iommu_driver(&vfio_noiommu_ops);
2222#endif
Alex Williamsoncba33452012-07-31 08:16:22 -06002223 return 0;
2224
Alex Williamsond1099902013-12-19 10:17:13 -07002225err_cdev_add:
Chengguang Xu8bcb64a2019-02-12 13:59:29 +08002226 unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
Alex Williamsond1099902013-12-19 10:17:13 -07002227err_alloc_chrdev:
Alex Williamsoncba33452012-07-31 08:16:22 -06002228 class_destroy(vfio.class);
2229 vfio.class = NULL;
2230err_class:
Alex Williamsond1099902013-12-19 10:17:13 -07002231 misc_deregister(&vfio_dev);
Alex Williamsoncba33452012-07-31 08:16:22 -06002232 return ret;
2233}
2234
2235static void __exit vfio_cleanup(void)
2236{
2237 WARN_ON(!list_empty(&vfio.group_list));
2238
Alex Williamson03a76b62015-12-21 15:13:33 -07002239#ifdef CONFIG_VFIO_NOIOMMU
2240 vfio_unregister_iommu_driver(&vfio_noiommu_ops);
2241#endif
Alex Williamsoncba33452012-07-31 08:16:22 -06002242 idr_destroy(&vfio.group_idr);
2243 cdev_del(&vfio.group_cdev);
Chengguang Xu8bcb64a2019-02-12 13:59:29 +08002244 unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
Alex Williamsoncba33452012-07-31 08:16:22 -06002245 class_destroy(vfio.class);
2246 vfio.class = NULL;
Alex Williamsond1099902013-12-19 10:17:13 -07002247 misc_deregister(&vfio_dev);
Alex Williamsoncba33452012-07-31 08:16:22 -06002248}
2249
2250module_init(vfio_init);
2251module_exit(vfio_cleanup);
2252
2253MODULE_VERSION(DRIVER_VERSION);
2254MODULE_LICENSE("GPL v2");
2255MODULE_AUTHOR(DRIVER_AUTHOR);
2256MODULE_DESCRIPTION(DRIVER_DESC);
Alex Williamsond1099902013-12-19 10:17:13 -07002257MODULE_ALIAS_MISCDEV(VFIO_MINOR);
2258MODULE_ALIAS("devname:vfio/vfio");
Alex Williamson0ca582f2017-02-08 13:13:26 -07002259MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");