diff options
author | Vasant Hegde <vasant.hegde@amd.com> | 2024-10-30 06:35:53 +0000 |
---|---|---|
committer | Joerg Roedel <jroedel@suse.de> | 2024-10-30 11:06:45 +0100 |
commit | e843aedbeb82b17a5fe6172449bff133fc8b68a1 (patch) | |
tree | 71f364bb71e8c6e837de48314809a7daa2bc5972 | |
parent | 4b18ef8491b06e353e8801705092cc292582cb7a (diff) |
iommu/amd: Convert dev_data lock from spinlock to mutex
Currently in attach device path it takes dev_data->spinlock. But as per
design attach device path can sleep. Also if device is PRI capable then
it adds device to IOMMU fault handler queue which takes mutex. Hence
currently PRI enablement is done outside dev_data lock.
Convert dev_data lock from spinlock to mutex so that it follows the
design and also PRI enablement can be done properly.
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20241030063556.6104-10-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r-- | drivers/iommu/amd/amd_iommu_types.h | 2 | ||||
-rw-r--r-- | drivers/iommu/amd/iommu.c | 14 |
2 files changed, 8 insertions, 8 deletions
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index f44de5f31625..fdb0357e0bb9 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -836,7 +836,7 @@ struct devid_map { */ struct iommu_dev_data { /*Protect against attach/detach races */ - spinlock_t lock; + struct mutex mutex; struct list_head list; /* For domain->dev_list */ struct llist_node dev_data_list; /* For global dev_data_list */ diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 4426e68de808..599aae889be8 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -210,7 +210,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) if (!dev_data) return NULL; - spin_lock_init(&dev_data->lock); + mutex_init(&dev_data->mutex); dev_data->devid = devid; ratelimit_default_init(&dev_data->rs); @@ -2092,7 +2092,7 @@ static int attach_device(struct device *dev, struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); int ret = 0; - spin_lock(&dev_data->lock); + mutex_lock(&dev_data->mutex); if (dev_data->domain != NULL) { ret = -EBUSY; @@ -2118,7 +2118,7 @@ static int attach_device(struct device *dev, } out: - spin_unlock(&dev_data->lock); + mutex_unlock(&dev_data->mutex); return ret; } @@ -2134,7 +2134,7 @@ static void detach_device(struct device *dev) bool ppr = dev_data->ppr; unsigned long flags; - spin_lock(&dev_data->lock); + mutex_lock(&dev_data->mutex); /* * First check if the device is still attached. 
It might already @@ -2172,7 +2172,7 @@ static void detach_device(struct device *dev) pdom_detach_iommu(iommu, domain); out: - spin_unlock(&dev_data->lock); + mutex_unlock(&dev_data->mutex); /* Remove IOPF handler */ if (ppr) @@ -2470,9 +2470,9 @@ static int blocked_domain_attach_device(struct iommu_domain *domain, detach_device(dev); /* Clear DTE and flush the entry */ - spin_lock(&dev_data->lock); + mutex_lock(&dev_data->mutex); dev_update_dte(dev_data, false); - spin_unlock(&dev_data->lock); + mutex_unlock(&dev_data->mutex); return 0; } |