Diffstat (limited to 'drivers/iommu/io-pgfault.c')
-rw-r--r--  drivers/iommu/io-pgfault.c | 121
1 file changed, 83 insertions(+), 38 deletions(-)
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 81e9cc6e3164..4674e618797c 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -115,6 +115,59 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
 	return group;
 }
 
+static struct iommu_attach_handle *find_fault_handler(struct device *dev,
+						     struct iopf_fault *evt)
+{
+	struct iommu_fault *fault = &evt->fault;
+	struct iommu_attach_handle *attach_handle;
+
+	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
+		attach_handle = iommu_attach_handle_get(dev->iommu_group,
+				fault->prm.pasid, 0);
+		if (IS_ERR(attach_handle)) {
+			const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+			if (!ops->user_pasid_table)
+				return NULL;
+			/*
+			 * The iommu driver for this device supports user-
+			 * managed PASID table. Therefore page faults for
+			 * any PASID should go through the NESTING domain
+			 * attached to the device RID.
+			 */
+			attach_handle = iommu_attach_handle_get(
+					dev->iommu_group, IOMMU_NO_PASID,
+					IOMMU_DOMAIN_NESTED);
+			if (IS_ERR(attach_handle))
+				return NULL;
+		}
+	} else {
+		attach_handle = iommu_attach_handle_get(dev->iommu_group,
+				IOMMU_NO_PASID, 0);
+
+		if (IS_ERR(attach_handle))
+			return NULL;
+	}
+
+	if (!attach_handle->domain->iopf_handler)
+		return NULL;
+
+	return attach_handle;
+}
+
+static void iopf_error_response(struct device *dev, struct iopf_fault *evt)
+{
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
+	struct iommu_fault *fault = &evt->fault;
+	struct iommu_page_response resp = {
+		.pasid = fault->prm.pasid,
+		.grpid = fault->prm.grpid,
+		.code = IOMMU_PAGE_RESP_INVALID
+	};
+
+	ops->page_response(dev, evt, &resp);
+}
+
 /**
  * iommu_report_device_fault() - Report fault event to device driver
  * @dev: the device
@@ -153,24 +206,39 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
  * handling framework should guarantee that the iommu domain could only be
  * freed after the device has stopped generating page faults (or the iommu
  * hardware has been set to block the page faults) and the pending page faults
- * have been flushed.
+ * have been flushed. In case no page fault handler is attached or no iopf params
+ * are setup, then the ops->page_response() is called to complete the evt.
+ *
+ * Returns 0 on success, or an error in case of a bad/failed iopf setup.
  */
-void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
+	struct iommu_attach_handle *attach_handle;
 	struct iommu_fault *fault = &evt->fault;
 	struct iommu_fault_param *iopf_param;
 	struct iopf_group abort_group = {};
 	struct iopf_group *group;
 
+	attach_handle = find_fault_handler(dev, evt);
+	if (!attach_handle)
+		goto err_bad_iopf;
+
+	/*
+	 * Something has gone wrong if a fault capable domain is attached but no
+	 * iopf_param is setup
+	 */
 	iopf_param = iopf_get_dev_fault_param(dev);
 	if (WARN_ON(!iopf_param))
-		return;
+		goto err_bad_iopf;
 
 	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
-		report_partial_fault(iopf_param, fault);
+		int ret;
+
+		ret = report_partial_fault(iopf_param, fault);
 		iopf_put_dev_fault_param(iopf_param);
 		/* A request that is not the last does not need to be ack'd */
-		return;
+
+		return ret;
 	}
 
 	/*
@@ -185,38 +253,7 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 	if (group == &abort_group)
 		goto err_abort;
 
-	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
-		group->attach_handle = iommu_attach_handle_get(dev->iommu_group,
-							       fault->prm.pasid,
-							       0);
-		if (IS_ERR(group->attach_handle)) {
-			const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-			if (!ops->user_pasid_table)
-				goto err_abort;
-
-			/*
-			 * The iommu driver for this device supports user-
-			 * managed PASID table. Therefore page faults for
-			 * any PASID should go through the NESTING domain
-			 * attached to the device RID.
-			 */
-			group->attach_handle =
-				iommu_attach_handle_get(dev->iommu_group,
-							IOMMU_NO_PASID,
-							IOMMU_DOMAIN_NESTED);
-			if (IS_ERR(group->attach_handle))
-				goto err_abort;
-		}
-	} else {
-		group->attach_handle =
-			iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
-		if (IS_ERR(group->attach_handle))
-			goto err_abort;
-	}
-
-	if (!group->attach_handle->domain->iopf_handler)
-		goto err_abort;
+	group->attach_handle = attach_handle;
 
 	/*
 	 * On success iopf_handler must call iopf_group_response() and
@@ -225,7 +262,7 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 	if (group->attach_handle->domain->iopf_handler(group))
 		goto err_abort;
 
-	return;
+	return 0;
 
 err_abort:
 	dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n",
@@ -235,6 +272,14 @@ err_abort:
 		__iopf_free_group(group);
 	else
 		iopf_free_group(group);
+
+	return 0;
+
+err_bad_iopf:
+	if (fault->type == IOMMU_FAULT_PAGE_REQ)
+		iopf_error_response(dev, evt);
+
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
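
For context, the sketch below (not part of the patch) shows how a fault-capable IOMMU driver's page-request handler could forward one event to the core and honour the new int return value of iommu_report_device_fault(). The hw_handle_page_request() helper and its parameters are assumptions made up for illustration; only the core symbols (struct iopf_fault, the IOMMU_FAULT_* constants and iommu_report_device_fault()) come from the kernel.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical driver-side handler for one decoded hardware page request. */
static void hw_handle_page_request(struct device *dev,
				   u32 pasid, u32 grpid, u64 addr,
				   bool pasid_valid, bool last, bool write)
{
	struct iopf_fault evt = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.pasid = pasid,
				.grpid = grpid,
				.addr = addr,
				.perm = write ? IOMMU_FAULT_PERM_WRITE :
						IOMMU_FAULT_PERM_READ,
			},
		},
	};
	int ret;

	if (pasid_valid)
		evt.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	if (last)
		evt.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	/*
	 * With this patch the core reports whether the fault could be
	 * dispatched. On the err_bad_iopf path (-EINVAL) the core has
	 * already completed page-request faults with an
	 * IOMMU_PAGE_RESP_INVALID response via ops->page_response(),
	 * so the driver only needs to log or account the failure.
	 */
	ret = iommu_report_device_fault(dev, &evt);
	if (ret)
		dev_dbg_ratelimited(dev, "page request not dispatched: %d\n", ret);
}

Whether a driver needs to react to the return value at all is a design choice: because err_bad_iopf already answers page-request faults with IOMMU_PAGE_RESP_INVALID, the returned error is mainly useful for statistics or debug logging.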
