author		Jason Gunthorpe <jgg@nvidia.com>	2024-08-29 21:06:12 -0300
committer	Joerg Roedel <jroedel@suse.de>		2024-09-04 11:37:42 +0200
commit		7a41dcb52f9de6079621fc31c3b84c7fc290934b (patch)
tree		221eab9b13186a570d4425f7ef8581a683b55304 /drivers/iommu
parent		b0a6c883bcd42eeb0850135e529b34b64d57673c (diff)
iommu/amd: Set the pgsize_bitmap correctly
When using io_pgtable the correct pgsize_bitmap is stored in the cfg;
both v1_alloc_pgtable() and v2_alloc_pgtable() set it correctly.

This fixes a bug where the v2 pgtable had the wrong pgsize, as
protection_domain_init_v2() would set it and then
do_iommu_domain_alloc() immediately reset it.

Remove the confusing ops.pgsize_bitmap, since it is not used if the
driver sets domain.pgsize_bitmap.

Fixes: 134288158a41 ("iommu/amd: Add domain_alloc_user based domain allocation")
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/3-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
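The ordering problem the patch fixes is easiest to see in isolation. The
following is a minimal userspace sketch, not kernel code: the struct
definitions are simplified stand-ins and the bitmap values are
illustrative, but the assignment order mirrors what the commit message
describes.

#include <stdio.h>
#include <stdint.h>

#define AMD_IOMMU_PGSIZES	(~0xFFFULL)	/* stand-in for the v1 bitmap */
#define AMD_IOMMU_PGSIZES_V2	((1ULL << 12) | (1ULL << 21) | (1ULL << 30)) /* 4K, 2M, 1G */

struct io_pgtable_cfg { uint64_t pgsize_bitmap; };
struct iommu_domain   { uint64_t pgsize_bitmap; };

int main(void)
{
	/* The io_pgtable allocation records the supported page sizes in the cfg. */
	struct io_pgtable_cfg cfg = { .pgsize_bitmap = AMD_IOMMU_PGSIZES_V2 };
	struct iommu_domain dom = { 0 };

	/* Old flow: protection_domain_init_v2() set the v2 bitmap ... */
	dom.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
	/* ... and do_iommu_domain_alloc() then overwrote it with the
	 * per-ops default, which describes the v1 page sizes. */
	dom.pgsize_bitmap = AMD_IOMMU_PGSIZES;
	printf("old flow, v2 domain: %#llx (wrong: v1 sizes)\n",
	       (unsigned long long)dom.pgsize_bitmap);

	/* New flow: the domain inherits the bitmap from the io_pgtable cfg,
	 * so it always matches the page table actually in use. */
	dom.pgsize_bitmap = cfg.pgsize_bitmap;
	printf("new flow, v2 domain: %#llx (matches the page table)\n",
	       (unsigned long long)dom.pgsize_bitmap);
	return 0;
}

Since both v1_alloc_pgtable() and v2_alloc_pgtable() fill the cfg with
the correct sizes, taking the bitmap from the cfg keeps the v1 path
unchanged while fixing the v2 path, which is why the per-ops default
can be dropped.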
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd/iommu.c	28
1 file changed, 4 insertions, 24 deletions
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 70213f94e24c..c90a53a0385d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2264,26 +2264,11 @@ void protection_domain_free(struct protection_domain *domain)
 	kfree(domain);
 }
 
-static int protection_domain_init_v1(struct protection_domain *domain)
-{
-	domain->pd_mode = PD_MODE_V1;
-	return 0;
-}
-
-static int protection_domain_init_v2(struct protection_domain *pdom)
-{
-	pdom->pd_mode = PD_MODE_V2;
-	pdom->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
-	return 0;
-}
-
 struct protection_domain *protection_domain_alloc(unsigned int type)
 {
 	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
 	int pgtable;
-	int ret;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
@@ -2319,18 +2304,14 @@ struct protection_domain *protection_domain_alloc(unsigned int type)
 
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
-		ret = protection_domain_init_v1(domain);
+		domain->pd_mode = PD_MODE_V1;
 		break;
 	case AMD_IOMMU_V2:
-		ret = protection_domain_init_v2(domain);
+		domain->pd_mode = PD_MODE_V2;
 		break;
 	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	if (ret)
 		goto out_err;
+	}
 
 	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
 	if (!pgtbl_ops)
@@ -2383,10 +2364,10 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	domain->domain.geometry.aperture_start = 0;
 	domain->domain.geometry.aperture_end = dma_max_address();
 	domain->domain.geometry.force_aperture = true;
+	domain->domain.pgsize_bitmap = domain->iop.iop.cfg.pgsize_bitmap;
 
 	if (iommu) {
 		domain->domain.type = type;
-		domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
 		domain->domain.ops = iommu->iommu.ops->default_domain_ops;
 
 		if (dirty_tracking)
@@ -2869,7 +2850,6 @@ const struct iommu_ops amd_iommu_ops = {
 	.device_group = amd_iommu_device_group,
 	.get_resv_regions = amd_iommu_get_resv_regions,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
-	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
 	.def_domain_type = amd_iommu_def_domain_type,
 	.dev_enable_feat = amd_iommu_dev_enable_feature,
 	.dev_disable_feat = amd_iommu_dev_disable_feature,