author	Jason Gunthorpe <jgg@nvidia.com>	2022-11-29 16:29:29 -0400
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-11-30 20:16:49 -0400
commit	2ff4bed7fee72ba1abfcff5f11ae8f8e570353f2 (patch)
tree	e932c21eef44174131e88a1f8cc5ffc5175589fb /drivers/iommu
parent	658234de0d2ed3a1b86d793f4772e38a2e039b35 (diff)
iommufd: File descriptor, context, kconfig and makefiles
This is the basic infrastructure of a new miscdevice to hold the iommufd
IOCTL API. It provides:

 - A miscdevice to create file descriptors to run the IOCTL interface over

 - A table based ioctl dispatch and centralized extendable pre-validation
   step

 - An xarray mapping userspace IDs to kernel objects. The design has
   multiple inter-related objects held within a single IOMMUFD fd

 - A simple usage count to build a graph of object relations and protect
   against hostile userspace racing ioctls

The only IOCTL provided in this patch is the generic 'destroy any object
by handle' operation.

Link: https://lore.kernel.org/r/6-v6-a196d26f289e+11787-iommufd_jgg@nvidia.com
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Lixiao Yang <lixiao.yang@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
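As a usage sketch of the API surface added here: IOMMU_DESTROY is the only
command, so the minimal userspace exercise below (assuming the uapi
definitions from <linux/iommufd.h> introduced earlier in this series, and a
hypothetical object ID of 1) is expected to fail with ENOENT, since nothing
in this patch can create objects yet:

/*
 * Illustrative only: drive IOMMU_DESTROY against /dev/iommu. The object
 * ID is hypothetical; this patch provides no ioctl that creates objects,
 * so the call should fail with ENOENT.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iommufd.h>

int main(void)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),	/* first __u32; read by the dispatcher */
		.id = 1,		/* hypothetical object handle */
	};
	int fd = open("/dev/iommu", O_RDWR);

	if (fd < 0) {
		perror("open /dev/iommu");
		return 1;
	}
	if (ioctl(fd, IOMMU_DESTROY, &cmd))
		perror("IOMMU_DESTROY");	/* ENOENT expected here */
	close(fd);
	return 0;
}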
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/Kconfig	1
-rw-r--r--	drivers/iommu/Makefile	2
-rw-r--r--	drivers/iommu/iommufd/Kconfig	12
-rw-r--r--	drivers/iommu/iommufd/Makefile	5
-rw-r--r--	drivers/iommu/iommufd/iommufd_private.h	109
-rw-r--r--	drivers/iommu/iommufd/main.c	344
6 files changed, 472 insertions(+), 1 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dc5f7a156ff5..319966cde5cf 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -188,6 +188,7 @@ config MSM_IOMMU
source "drivers/iommu/amd/Kconfig"
source "drivers/iommu/intel/Kconfig"
+source "drivers/iommu/iommufd/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 7fbf6a337662..f461d0651385 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += amd/ intel/ arm/
+obj-y += amd/ intel/ arm/ iommufd/
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
new file mode 100644
index 000000000000..164812084a67
--- /dev/null
+++ b/drivers/iommu/iommufd/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config IOMMUFD
+ tristate "IOMMU Userspace API"
+ select INTERVAL_TREE
+ select INTERVAL_TREE_SPAN_ITER
+ select IOMMU_API
+ default n
+ help
+ Provides /dev/iommu, the user API to control the IOMMU subsystem as
+ it relates to managing IO page tables that point at user space memory.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile
new file mode 100644
index 000000000000..a07a8cffe937
--- /dev/null
+++ b/drivers/iommu/iommufd/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+iommufd-y := \
+ main.o
+
+obj-$(CONFIG_IOMMUFD) += iommufd.o
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
new file mode 100644
index 000000000000..bb720bc11317
--- /dev/null
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __IOMMUFD_PRIVATE_H
+#define __IOMMUFD_PRIVATE_H
+
+#include <linux/rwsem.h>
+#include <linux/xarray.h>
+#include <linux/refcount.h>
+#include <linux/uaccess.h>
+
+struct iommufd_ctx {
+ struct file *file;
+ struct xarray objects;
+};
+
+struct iommufd_ucmd {
+ struct iommufd_ctx *ictx;
+ void __user *ubuffer;
+ u32 user_size;
+ void *cmd;
+};
+
+/* Copy the response in ucmd->cmd back to userspace. */
+static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,
+ size_t cmd_len)
+{
+ if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
+ min_t(size_t, ucmd->user_size, cmd_len)))
+ return -EFAULT;
+ return 0;
+}
+
+enum iommufd_object_type {
+ IOMMUFD_OBJ_NONE,
+ IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE,
+};
+
+/* Base struct for all objects with a userspace ID handle. */
+struct iommufd_object {
+ struct rw_semaphore destroy_rwsem;
+ refcount_t users;
+ enum iommufd_object_type type;
+ unsigned int id;
+};
+
+static inline bool iommufd_lock_obj(struct iommufd_object *obj)
+{
+ if (!down_read_trylock(&obj->destroy_rwsem))
+ return false;
+ if (!refcount_inc_not_zero(&obj->users)) {
+ up_read(&obj->destroy_rwsem);
+ return false;
+ }
+ return true;
+}
+
+struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
+ enum iommufd_object_type type);
+static inline void iommufd_put_object(struct iommufd_object *obj)
+{
+ refcount_dec(&obj->users);
+ up_read(&obj->destroy_rwsem);
+}
+
+/**
+ * iommufd_ref_to_users() - Switch from destroy_rwsem to users refcount
+ * protection
+ * @obj - Object to release
+ *
+ * Objects have two refcount protections (destroy_rwsem and the refcount_t
+ * users). Holding either of these will prevent the object from being destroyed.
+ *
+ * Depending on the use case, one protection or the other is appropriate. In
+ * most cases references are being protected by the destroy_rwsem. This allows
+ * orderly destruction of the object because iommufd_object_destroy_user() will
+ * wait for it to become unlocked. However, as a rwsem, it cannot be held across
+ * a system call return. So cases that have longer term needs must switch
+ * to the weaker users refcount_t.
+ *
+ * With users protection iommufd_object_destroy_user() will return false,
+ * refusing to destroy the object, causing -EBUSY to userspace.
+ */
+static inline void iommufd_ref_to_users(struct iommufd_object *obj)
+{
+ up_read(&obj->destroy_rwsem);
+ /* iommufd_lock_obj() obtains users as well */
+}
+void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
+void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj);
+void iommufd_object_finalize(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj);
+bool iommufd_object_destroy_user(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj);
+struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
+ size_t size,
+ enum iommufd_object_type type);
+
+#define iommufd_object_alloc(ictx, ptr, type) \
+ container_of(_iommufd_object_alloc( \
+ ictx, \
+ sizeof(*(ptr)) + BUILD_BUG_ON_ZERO( \
+ offsetof(typeof(*(ptr)), \
+ obj) != 0), \
+ type), \
+ typeof(*(ptr)), obj)
+
+#endif
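The iommufd_object_alloc() macro above enforces, via BUILD_BUG_ON_ZERO, that
struct iommufd_object is the first member of the containing type. A minimal
sketch of the create/abort/finalize pattern a later object type would follow;
struct iommufd_example, IOMMUFD_OBJ_EXAMPLE and example_create() are invented
for illustration and are not part of this patch:

/* Hypothetical consumer of the helpers above; these names do not exist
 * in this patch.
 */
struct iommufd_example {
	struct iommufd_object obj;	/* must be first; checked at build time */
	u32 flags;
};

static int example_create(struct iommufd_ucmd *ucmd)
{
	struct iommufd_example *ex;

	ex = iommufd_object_alloc(ucmd->ictx, ex, IOMMUFD_OBJ_EXAMPLE);
	if (IS_ERR(ex))
		return PTR_ERR(ex);
	ex->flags = 0;
	/*
	 * Any failure between alloc and finalize must unwind with
	 * iommufd_object_abort(); once finalized the ID is live in the
	 * xarray and userspace can race a destroy against this thread.
	 */
	iommufd_object_finalize(ucmd->ictx, &ex->obj);
	return 0;
}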
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
new file mode 100644
index 000000000000..dfbc68b97506
--- /dev/null
+++ b/drivers/iommu/iommufd/main.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Intel Corporation
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
+ *
+ * iommufd provides control over the IOMMU HW objects created by IOMMU kernel
+ * drivers. IOMMU HW objects revolve around IO page tables that map incoming DMA
+ * addresses (IOVA) to CPU addresses.
+ */
+#define pr_fmt(fmt) "iommufd: " fmt
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/bug.h>
+#include <uapi/linux/iommufd.h>
+#include <linux/iommufd.h>
+
+#include "iommufd_private.h"
+
+struct iommufd_object_ops {
+ void (*destroy)(struct iommufd_object *obj);
+};
+static const struct iommufd_object_ops iommufd_object_ops[];
+
+struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
+ size_t size,
+ enum iommufd_object_type type)
+{
+ struct iommufd_object *obj;
+ int rc;
+
+ obj = kzalloc(size, GFP_KERNEL_ACCOUNT);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+ obj->type = type;
+ init_rwsem(&obj->destroy_rwsem);
+ refcount_set(&obj->users, 1);
+
+ /*
+ * Reserve an ID in the xarray but do not publish the pointer yet since
+ * the caller hasn't initialized it yet. Once the pointer is published
+ * in the xarray and visible to other threads we can't reliably destroy
+ * it anymore, so the caller must complete all errorable operations
+ * before calling iommufd_object_finalize().
+ */
+ rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY,
+ xa_limit_32b, GFP_KERNEL_ACCOUNT);
+ if (rc)
+ goto out_free;
+ return obj;
+out_free:
+ kfree(obj);
+ return ERR_PTR(rc);
+}
+
+/*
+ * Allow concurrent access to the object.
+ *
+ * Once another thread can see the object pointer it can prevent object
+ * destruction. Except for special kernel-only objects there is no in-kernel way
+ * to reliably destroy a single object. Thus all APIs that are creating objects
+ * must use iommufd_object_abort() to handle their errors and only call
+ * iommufd_object_finalize() once object creation cannot fail.
+ */
+void iommufd_object_finalize(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj)
+{
+ void *old;
+
+ old = xa_store(&ictx->objects, obj->id, obj, GFP_KERNEL);
+ /* obj->id was returned from xa_alloc() so the xa_store() cannot fail */
+ WARN_ON(old);
+}
+
+/* Undo _iommufd_object_alloc() if iommufd_object_finalize() was not called */
+void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
+{
+ void *old;
+
+ old = xa_erase(&ictx->objects, obj->id);
+ WARN_ON(old);
+ kfree(obj);
+}
+
+/*
+ * Abort an object that has been fully initialized and needs destroy, but has
+ * not been finalized.
+ */
+void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj)
+{
+ iommufd_object_ops[obj->type].destroy(obj);
+ iommufd_object_abort(ictx, obj);
+}
+
+struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
+ enum iommufd_object_type type)
+{
+ struct iommufd_object *obj;
+
+ xa_lock(&ictx->objects);
+ obj = xa_load(&ictx->objects, id);
+ if (!obj || (type != IOMMUFD_OBJ_ANY && obj->type != type) ||
+ !iommufd_lock_obj(obj))
+ obj = ERR_PTR(-ENOENT);
+ xa_unlock(&ictx->objects);
+ return obj;
+}
+
+/*
+ * The caller holds a users refcount and wants to destroy the object. Returns
+ * true if the object was destroyed. In all cases the caller no longer has a
+ * reference on obj.
+ */
+bool iommufd_object_destroy_user(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj)
+{
+ /*
+ * The purpose of the destroy_rwsem is to ensure deterministic
+ * destruction of objects used by external drivers and destroyed by this
+ * function. Any temporary increment of the refcount must hold the read
+ * side of this, such as during ioctl execution.
+ */
+ down_write(&obj->destroy_rwsem);
+ xa_lock(&ictx->objects);
+ refcount_dec(&obj->users);
+ if (!refcount_dec_if_one(&obj->users)) {
+ xa_unlock(&ictx->objects);
+ up_write(&obj->destroy_rwsem);
+ return false;
+ }
+ __xa_erase(&ictx->objects, obj->id);
+ xa_unlock(&ictx->objects);
+ up_write(&obj->destroy_rwsem);
+
+ iommufd_object_ops[obj->type].destroy(obj);
+ kfree(obj);
+ return true;
+}
+
+static int iommufd_destroy(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_destroy *cmd = ucmd->cmd;
+ struct iommufd_object *obj;
+
+ obj = iommufd_get_object(ucmd->ictx, cmd->id, IOMMUFD_OBJ_ANY);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+ iommufd_ref_to_users(obj);
+ /* See iommufd_ref_to_users() */
+ if (!iommufd_object_destroy_user(ucmd->ictx, obj))
+ return -EBUSY;
+ return 0;
+}
+
+static int iommufd_fops_open(struct inode *inode, struct file *filp)
+{
+ struct iommufd_ctx *ictx;
+
+ ictx = kzalloc(sizeof(*ictx), GFP_KERNEL_ACCOUNT);
+ if (!ictx)
+ return -ENOMEM;
+
+ xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
+ ictx->file = filp;
+ filp->private_data = ictx;
+ return 0;
+}
+
+static int iommufd_fops_release(struct inode *inode, struct file *filp)
+{
+ struct iommufd_ctx *ictx = filp->private_data;
+ struct iommufd_object *obj;
+
+ /*
+ * The objects in the xarray form a graph of "users" counts, and we have
+ * to destroy them in a depth first manner. Leaf objects will reduce the
+ * users count of interior objects when they are destroyed.
+ *
+ * Repeatedly destroying all the "1 users" leaf objects will progress
+ * until the entire list is destroyed. If this can't progress then there
+ * is some bug related to object refcounting.
+ */
+ while (!xa_empty(&ictx->objects)) {
+ unsigned int destroyed = 0;
+ unsigned long index;
+
+ xa_for_each(&ictx->objects, index, obj) {
+ if (!refcount_dec_if_one(&obj->users))
+ continue;
+ destroyed++;
+ xa_erase(&ictx->objects, index);
+ iommufd_object_ops[obj->type].destroy(obj);
+ kfree(obj);
+ }
+ /* Bug related to users refcount */
+ if (WARN_ON(!destroyed))
+ break;
+ }
+ kfree(ictx);
+ return 0;
+}
+
+union ucmd_buffer {
+ struct iommu_destroy destroy;
+};
+
+struct iommufd_ioctl_op {
+ unsigned int size;
+ unsigned int min_size;
+ unsigned int ioctl_num;
+ int (*execute)(struct iommufd_ucmd *ucmd);
+};
+
+#define IOCTL_OP(_ioctl, _fn, _struct, _last) \
+ [_IOC_NR(_ioctl) - IOMMUFD_CMD_BASE] = { \
+ .size = sizeof(_struct) + \
+ BUILD_BUG_ON_ZERO(sizeof(union ucmd_buffer) < \
+ sizeof(_struct)), \
+ .min_size = offsetofend(_struct, _last), \
+ .ioctl_num = _ioctl, \
+ .execute = _fn, \
+ }
+static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
+ IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
+};
+
+static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ const struct iommufd_ioctl_op *op;
+ struct iommufd_ucmd ucmd = {};
+ union ucmd_buffer buf;
+ unsigned int nr;
+ int ret;
+
+ ucmd.ictx = filp->private_data;
+ ucmd.ubuffer = (void __user *)arg;
+ ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
+ if (ret)
+ return ret;
+
+ nr = _IOC_NR(cmd);
+ if (nr < IOMMUFD_CMD_BASE ||
+ (nr - IOMMUFD_CMD_BASE) >= ARRAY_SIZE(iommufd_ioctl_ops))
+ return -ENOIOCTLCMD;
+ op = &iommufd_ioctl_ops[nr - IOMMUFD_CMD_BASE];
+ if (op->ioctl_num != cmd)
+ return -ENOIOCTLCMD;
+ if (ucmd.user_size < op->min_size)
+ return -EINVAL;
+
+ ucmd.cmd = &buf;
+ ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer,
+ ucmd.user_size);
+ if (ret)
+ return ret;
+ ret = op->execute(&ucmd);
+ return ret;
+}
+
+static const struct file_operations iommufd_fops = {
+ .owner = THIS_MODULE,
+ .open = iommufd_fops_open,
+ .release = iommufd_fops_release,
+ .unlocked_ioctl = iommufd_fops_ioctl,
+};
+
+/**
+ * iommufd_ctx_get - Get a context reference
+ * @ictx: Context to get
+ *
+ * The caller must already hold a valid reference to ictx.
+ */
+void iommufd_ctx_get(struct iommufd_ctx *ictx)
+{
+ get_file(ictx->file);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_get, IOMMUFD);
+
+/**
+ * iommufd_ctx_from_file - Acquires a reference to the iommufd context
+ * @file: File to obtain the reference from
+ *
+ * Returns a pointer to the iommufd_ctx, otherwise ERR_PTR. The struct file
+ * remains owned by the caller and the caller must still do fput. On success
+ * the caller is responsible to call iommufd_ctx_put().
+ */
+struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
+{
+ struct iommufd_ctx *ictx;
+
+ if (file->f_op != &iommufd_fops)
+ return ERR_PTR(-EBADFD);
+ ictx = file->private_data;
+ iommufd_ctx_get(ictx);
+ return ictx;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_file, IOMMUFD);
+
+/**
+ * iommufd_ctx_put - Put back a reference
+ * @ictx: Context to put back
+ */
+void iommufd_ctx_put(struct iommufd_ctx *ictx)
+{
+ fput(ictx->file);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD);
+
+static const struct iommufd_object_ops iommufd_object_ops[] = {
+};
+
+static struct miscdevice iommu_misc_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "iommu",
+ .fops = &iommufd_fops,
+ .nodename = "iommu",
+ .mode = 0660,
+};
+
+static int __init iommufd_init(void)
+{
+ int ret;
+
+ ret = misc_register(&iommu_misc_dev);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static void __exit iommufd_exit(void)
+{
+ misc_deregister(&iommu_misc_dev);
+}
+
+module_init(iommufd_init);
+module_exit(iommufd_exit);
+
+MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices");
+MODULE_LICENSE("GPL");
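Finally, the dispatch table in main.c is built to be extended: each new
command adds its struct to union ucmd_buffer (sizing the on-stack copy
buffer) and one IOCTL_OP() entry naming the struct's last member, which sets
min_size so that older, shorter userspace structs keep working via the
copy_struct_from_user() zero-fill. A sketch with an invented IOMMU_EXAMPLE
command follows; none of these identifiers exist in this patch:

/* Hypothetical follow-on: IOMMU_EXAMPLE, struct iommu_example and
 * iommufd_example() are invented here to show the IOCTL_OP() recipe.
 */
union ucmd_buffer {
	struct iommu_destroy destroy;
	struct iommu_example example;	/* sizes the on-stack copy buffer */
};

static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
	IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
	/*
	 * min_size becomes offsetofend(struct iommu_example, last_field);
	 * a shorter struct from older userspace is accepted and its tail
	 * is zero-filled by copy_struct_from_user().
	 */
	IOCTL_OP(IOMMU_EXAMPLE, iommufd_example, struct iommu_example,
		 last_field),
};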