| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-14 12:08:34 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-14 12:08:34 -0700 |
| commit | 531d29b0b674036347a04c08c0898ff1aa522180 (patch) | |
| tree | 26b25c969544e8c0d9ea9c20a69639e98f2ad089 /drivers/iommu/amd/iommu.c | |
| parent | 79db2b74aa146384dc8a962495f43941e5a91ee6 (diff) | |
| parent | 7e3c3883c381aeda903778d7e99fc4cd523be610 (diff) | |
Merge tag 'iommu-updates-v5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
- ARM-SMMU updates from Will:
  - Continued SVM enablement, where the page table is shared with the CPU
  - Groundwork to support an SMMU integrated with the Adreno GPU
  - Allow disabling of MSI-based polling on the kernel command line
  - Minor driver fixes and cleanups (octal permissions, error messages, ...)
- Secure Nested Paging (SNP) support for the AMD IOMMU. The IOMMU will
  fault when a device attempts DMA to memory owned by a guest. This needs
  new fault types as well as a rewrite of the IOMMU memory semaphore used
  for command completions (a sketch of the new scheme follows this list).
- Allow broken Intel IOMMUs (wrong address widths reported) to still be
used for interrupt remapping.
- IOMMU UAPI updates to support vSVA, where the IOMMU can access address
  spaces of processes running in a VM (see the argsz sketch after this
  list).
- Support for the MT8167 IOMMU in the Mediatek IOMMU driver.
- Device-tree updates for the Renesas driver to support r8a7742.
- Several smaller fixes and cleanups all over the place.
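
The completion-wait rewrite called out in the AMD bullet above replaces the old zero/one write-back semaphore with a monotonically increasing sequence number: each wait queues a COMPLETION_WAIT command that stores its own value, and the waiter spins until exactly that value appears, so overlapping waits can no longer satisfy each other. A minimal user-space sketch of the scheme, assuming a simulated hardware store (`fake_iommu`, `main()`, and the timeout constant are illustrative stand-ins, not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

#define LOOP_TIMEOUT 100000

/* Stand-in for the per-IOMMU state: a write-back semaphore the
 * hardware updates on COMPLETION_WAIT, plus a CPU-side sequence
 * counter. Field names mirror the kernel's; the rest is a model. */
struct fake_iommu {
	volatile uint64_t *cmd_sem;  /* written by "hardware" */
	uint64_t cmd_sem_val;        /* last sequence number issued */
};

/* New scheme: spin until the exact sequence number for this wait
 * lands, instead of spinning for any non-zero value. */
static int wait_on_sem(struct fake_iommu *iommu, uint64_t data)
{
	int i = 0;

	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT)
		i++;  /* the kernel udelay(1)s here */

	return (i >= LOOP_TIMEOUT) ? -1 : 0;
}

int main(void)
{
	uint64_t sem_page = 0;  /* kernel: a dedicated 4K page */
	struct fake_iommu iommu = { .cmd_sem = &sem_page };

	uint64_t data = ++iommu.cmd_sem_val;  /* fresh value per wait */

	/* A real COMPLETION_WAIT command makes the IOMMU store 'data'
	 * to cmd_sem; simulate that hardware store directly. */
	*iommu.cmd_sem = data;

	printf("wait_on_sem: %s\n",
	       wait_on_sem(&iommu, data) ? "timeout" : "ok");
	return 0;
}
```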
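
The IOMMU UAPI bullet refers to the argsz convention visible in the shortlog below ("iommu/uapi: Add argsz for user filled data"): userspace records the size of the structure it actually filled, and the kernel copies no more than the smaller of that and its own view, rejecting anything that omits mandatory fields. A hedged sketch of that copy pattern, with a made-up struct layout standing in for the real UAPI structs:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for an IOMMU UAPI struct, not the real
 * layout. By convention the first field is argsz, which userspace
 * sets to sizeof() its view of the struct. */
struct fake_uapi_arg {
	uint32_t argsz;
	uint32_t version;
	uint64_t payload;   /* imagine this field was added later */
};

#define FAKE_UAPI_MINSZ offsetof(struct fake_uapi_arg, payload)

/* Kernel-side pattern: peek at argsz, bound it, and zero-fill the
 * tail, so older userspace (smaller argsz) keeps working while a
 * newer, larger user struct simply has its extra tail ignored. */
static int copy_uapi_arg(struct fake_uapi_arg *dst, const void *user_buf)
{
	uint32_t argsz;

	memcpy(&argsz, user_buf, sizeof(argsz));
	if (argsz < FAKE_UAPI_MINSZ)
		return -1;  /* mandatory fields missing */

	memset(dst, 0, sizeof(*dst));
	memcpy(dst, user_buf,
	       argsz < sizeof(*dst) ? argsz : sizeof(*dst));
	return 0;
}

int main(void)
{
	struct fake_uapi_arg user = {
		.argsz = sizeof(user), .version = 1, .payload = 0xabcd,
	};
	struct fake_uapi_arg kcopy = { 0 };

	printf("copy: %d, payload: 0x%llx\n",
	       copy_uapi_arg(&kcopy, &user),
	       (unsigned long long)kcopy.payload);
	return 0;
}
```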
* tag 'iommu-updates-v5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (57 commits)
iommu/vt-d: Gracefully handle DMAR units with no supported address widths
iommu/vt-d: Check UAPI data processed by IOMMU core
iommu/uapi: Handle data and argsz filled by users
iommu/uapi: Rename uapi functions
iommu/uapi: Use named union for user data
iommu/uapi: Add argsz for user filled data
docs: IOMMU user API
iommu/qcom: add missing put_device() call in qcom_iommu_of_xlate()
iommu/arm-smmu-v3: Add SVA device feature
iommu/arm-smmu-v3: Check for SVA features
iommu/arm-smmu-v3: Seize private ASID
iommu/arm-smmu-v3: Share process page tables
iommu/arm-smmu-v3: Move definitions to a header
iommu/io-pgtable-arm: Move some definitions to a header
iommu/arm-smmu-v3: Ensure queue is read after updating prod pointer
iommu/amd: Re-purpose Exclusion range registers to support SNP CWWB
iommu/amd: Add support for RMP_PAGE_FAULT and RMP_HW_ERR
iommu/amd: Use 4K page for completion wait write-back semaphore
iommu/tegra-smmu: Allow to group clients in same swgroup
iommu/tegra-smmu: Fix iova->phys translation
...
Diffstat (limited to 'drivers/iommu/amd/iommu.c')
| -rw-r--r-- | drivers/iommu/amd/iommu.c | 90 |
1 file changed, 78 insertions, 12 deletions
```diff
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 9e231caa5012..4b1b02c80f55 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -486,6 +486,67 @@ static void dump_command(unsigned long phys_addr)
 		pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
 }
 
+static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
+{
+	struct iommu_dev_data *dev_data = NULL;
+	int devid, vmg_tag, flags;
+	struct pci_dev *pdev;
+	u64 spa;
+
+	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+	vmg_tag = (event[1]) & 0xFFFF;
+	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+	spa     = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
+
+	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+					   devid & 0xff);
+	if (pdev)
+		dev_data = dev_iommu_priv_get(&pdev->dev);
+
+	if (dev_data && __ratelimit(&dev_data->rs)) {
+		pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+			vmg_tag, spa, flags);
+	} else {
+		pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+				   PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+				   vmg_tag, spa, flags);
+	}
+
+	if (pdev)
+		pci_dev_put(pdev);
+}
+
+static void amd_iommu_report_rmp_fault(volatile u32 *event)
+{
+	struct iommu_dev_data *dev_data = NULL;
+	int devid, flags_rmp, vmg_tag, flags;
+	struct pci_dev *pdev;
+	u64 gpa;
+
+	devid     = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+	flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
+	vmg_tag   = (event[1]) & 0xFFFF;
+	flags     = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+	gpa       = ((u64)event[3] << 32) | event[2];
+
+	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+					   devid & 0xff);
+	if (pdev)
+		dev_data = dev_iommu_priv_get(&pdev->dev);
+
+	if (dev_data && __ratelimit(&dev_data->rs)) {
+		pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+			vmg_tag, gpa, flags_rmp, flags);
+	} else {
+		pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+				   PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+				   vmg_tag, gpa, flags_rmp, flags);
+	}
+
+	if (pdev)
+		pci_dev_put(pdev);
+}
+
 static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
 					u64 address, int flags)
 {
@@ -578,6 +639,12 @@ retry:
 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 			pasid, address, flags);
 		break;
+	case EVENT_TYPE_RMP_FAULT:
+		amd_iommu_report_rmp_fault(event);
+		break;
+	case EVENT_TYPE_RMP_HW_ERR:
+		amd_iommu_report_rmp_hw_error(event);
+		break;
 	case EVENT_TYPE_INV_PPR_REQ:
 		pasid = PPR_PASID(*((u64 *)__evt));
 		tag = event[1] & 0x03FF;
@@ -807,11 +874,11 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
  *
  ****************************************************************************/
 
-static int wait_on_sem(volatile u64 *sem)
+static int wait_on_sem(struct amd_iommu *iommu, u64 data)
 {
 	int i = 0;
 
-	while (*sem == 0 && i < LOOP_TIMEOUT) {
+	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
 		udelay(1);
 		i += 1;
 	}
@@ -842,16 +909,16 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 }
 
-static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+static void build_completion_wait(struct iommu_cmd *cmd,
+				  struct amd_iommu *iommu,
+				  u64 data)
 {
-	u64 paddr = iommu_virt_to_phys((void *)address);
-
-	WARN_ON(address & 0x7ULL);
+	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
 	cmd->data[1] = upper_32_bits(paddr);
-	cmd->data[2] = 1;
+	cmd->data[2] = data;
 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
 
@@ -1060,22 +1127,21 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	struct iommu_cmd cmd;
 	unsigned long flags;
 	int ret;
+	u64 data;
 
 	if (!iommu->need_sync)
 		return 0;
-
-	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
-
 	raw_spin_lock_irqsave(&iommu->lock, flags);
 
-	iommu->cmd_sem = 0;
+	data = ++iommu->cmd_sem_val;
+	build_completion_wait(&cmd, iommu, data);
 
 	ret = __iommu_queue_command_sync(iommu, &cmd, false);
 	if (ret)
 		goto out_unlock;
 
-	ret = wait_on_sem(&iommu->cmd_sem);
+	ret = wait_on_sem(iommu, data);
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
```
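
One detail this diffstat-limited view hides: iommu->cmd_sem turns from a plain u64 member into a pointer (hence the *iommu->cmd_sem dereference in the new wait_on_sem), because the "Use 4K page for completion wait write-back semaphore" commit moves the write-back target into its own 4K page as SNP requires; the allocation itself lands outside iommu.c. A rough sketch of what that allocation side plausibly looks like, with a hypothetical struct and helper as stand-ins:

```c
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical model of the relevant struct amd_iommu fields after
 * this series: the semaphore is a pointer to a dedicated, naturally
 * aligned 4K page rather than a field embedded in the struct. */
struct fake_amd_iommu {
	volatile uint64_t *cmd_sem;  /* hardware writes sequence values here */
	uint64_t cmd_sem_val;        /* CPU-side sequence counter */
};

/* Stand-in for the init-time allocation; the kernel would pull a
 * zeroed page from the page allocator, while aligned_alloc() models
 * the "own 4K page" requirement in user space. */
static int alloc_cmd_sem(struct fake_amd_iommu *iommu)
{
	iommu->cmd_sem = aligned_alloc(4096, 4096);
	if (!iommu->cmd_sem)
		return -1;

	*iommu->cmd_sem = 0;
	iommu->cmd_sem_val = 0;
	return 0;
}

int main(void)
{
	struct fake_amd_iommu iommu;

	if (alloc_cmd_sem(&iommu))
		return 1;
	free((void *)iommu.cmd_sem);
	return 0;
}
```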