Mirror of https://github.com/torvalds/linux.git, synced 2026-04-18 06:44:00 -04:00.
Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd
Pull iommufd updates from Jason Gunthorpe:
"Several fixes:
- Add missing static const
- Correct type 1 emulation for VFIO_CHECK_EXTENSION when no-iommu is
turned on
- Fix selftest memory leak and syzkaller splat
- Fix missed -EFAULT in fault reporting write() fops
- Fix a race where map/unmap with the internal IOVA allocator can
unmap things it should not"
* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd:
iommufd: Fix a race with concurrent allocation and unmap
iommufd/selftest: Remove MOCK_IOMMUPT_AMDV1 format
iommufd: Fix return value of iommufd_fault_fops_write()
iommufd: update outdated comment for renamed iommufd_hw_pagetable_alloc()
iommufd/selftest: Fix page leaks in mock_viommu_{init,destroy}
iommufd: vfio compatibility extension check for noiommu mode
iommufd: Constify struct dma_buf_attach_ops
This commit is contained in:
@@ -866,7 +866,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev, ioasid_t pasid,
|
||||
{
|
||||
/*
|
||||
* iommufd_hw_pagetable_attach() is called by
|
||||
* iommufd_hw_pagetable_alloc() in immediate attachment mode, same as
|
||||
* iommufd_hwpt_paging_alloc() in immediate attachment mode, same as
|
||||
* iommufd_device_do_attach(). So if we are in this mode then we prefer
|
||||
* to use the immediate_attach path as it supports drivers that can't
|
||||
* directly allocate a domain.
|
||||
|
||||
@@ -187,9 +187,10 @@ static ssize_t iommufd_fault_fops_write(struct file *filep, const char __user *b
|
||||
|
||||
mutex_lock(&fault->mutex);
|
||||
while (count > done) {
|
||||
rc = copy_from_user(&response, buf + done, response_size);
|
||||
if (rc)
|
||||
if (copy_from_user(&response, buf + done, response_size)) {
|
||||
rc = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
static_assert((int)IOMMUFD_PAGE_RESP_SUCCESS ==
|
||||
(int)IOMMU_PAGE_RESP_SUCCESS);
|
||||
|
||||
@@ -814,6 +814,16 @@ again:
|
||||
unmapped_bytes += area_last - area_first + 1;
|
||||
|
||||
down_write(&iopt->iova_rwsem);
|
||||
|
||||
/*
|
||||
* After releasing the iova_rwsem concurrent allocation could
|
||||
* place new areas at IOVAs we have already unmapped. Keep
|
||||
* moving the start of the search forward to ignore the area
|
||||
* already unmapped.
|
||||
*/
|
||||
if (area_last >= last)
|
||||
break;
|
||||
start = area_last + 1;
|
||||
}
|
||||
|
||||
out_unlock_iova:
|
||||
|
||||
@@ -36,7 +36,6 @@ enum {
|
||||
enum {
|
||||
MOCK_IOMMUPT_DEFAULT = 0,
|
||||
MOCK_IOMMUPT_HUGE,
|
||||
MOCK_IOMMUPT_AMDV1,
|
||||
};
|
||||
|
||||
/* These values are true for MOCK_IOMMUPT_DEFAULT */
|
||||
|
||||
@@ -1450,7 +1450,7 @@ static void iopt_revoke_notify(struct dma_buf_attachment *attach)
|
||||
pages->dmabuf.phys.len = 0;
|
||||
}
|
||||
|
||||
static struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = {
|
||||
static const struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = {
|
||||
.allow_peer2peer = true,
|
||||
.invalidate_mappings = iopt_revoke_notify,
|
||||
};
|
||||
|
||||
@@ -421,19 +421,6 @@ static const struct iommu_dirty_ops amdv1_mock_dirty_ops = {
|
||||
.set_dirty_tracking = mock_domain_set_dirty_tracking,
|
||||
};
|
||||
|
||||
static const struct iommu_domain_ops amdv1_ops = {
|
||||
IOMMU_PT_DOMAIN_OPS(amdv1),
|
||||
.free = mock_domain_free,
|
||||
.attach_dev = mock_domain_nop_attach,
|
||||
.set_dev_pasid = mock_domain_set_dev_pasid_nop,
|
||||
.iotlb_sync = &mock_iotlb_sync,
|
||||
};
|
||||
|
||||
static const struct iommu_dirty_ops amdv1_dirty_ops = {
|
||||
IOMMU_PT_DIRTY_OPS(amdv1),
|
||||
.set_dirty_tracking = mock_domain_set_dirty_tracking,
|
||||
};
|
||||
|
||||
static struct mock_iommu_domain *
|
||||
mock_domain_alloc_pgtable(struct device *dev,
|
||||
const struct iommu_hwpt_selftest *user_cfg, u32 flags)
|
||||
@@ -477,24 +464,6 @@ mock_domain_alloc_pgtable(struct device *dev,
|
||||
mock->domain.dirty_ops = &amdv1_mock_dirty_ops;
|
||||
break;
|
||||
}
|
||||
|
||||
case MOCK_IOMMUPT_AMDV1: {
|
||||
struct pt_iommu_amdv1_cfg cfg = {};
|
||||
|
||||
cfg.common.hw_max_vasz_lg2 = 64;
|
||||
cfg.common.hw_max_oasz_lg2 = 52;
|
||||
cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
|
||||
BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
|
||||
BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
|
||||
cfg.starting_level = 2;
|
||||
mock->domain.ops = &amdv1_ops;
|
||||
rc = pt_iommu_amdv1_init(&mock->amdv1, &cfg, GFP_KERNEL);
|
||||
if (rc)
|
||||
goto err_free;
|
||||
if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
|
||||
mock->domain.dirty_ops = &amdv1_dirty_ops;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
rc = -EOPNOTSUPP;
|
||||
goto err_free;
|
||||
@@ -636,7 +605,7 @@ static void mock_viommu_destroy(struct iommufd_viommu *viommu)
|
||||
if (mock_viommu->mmap_offset)
|
||||
iommufd_viommu_destroy_mmap(&mock_viommu->core,
|
||||
mock_viommu->mmap_offset);
|
||||
free_page((unsigned long)mock_viommu->page);
|
||||
free_pages((unsigned long)mock_viommu->page, 1);
|
||||
mutex_destroy(&mock_viommu->queue_mutex);
|
||||
|
||||
/* iommufd core frees mock_viommu and viommu */
|
||||
@@ -870,7 +839,7 @@ err_destroy_mmap:
|
||||
iommufd_viommu_destroy_mmap(&mock_viommu->core,
|
||||
mock_viommu->mmap_offset);
|
||||
err_free_page:
|
||||
free_page((unsigned long)mock_viommu->page);
|
||||
free_pages((unsigned long)mock_viommu->page, 1);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
@@ -283,7 +283,7 @@ static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx,
|
||||
case VFIO_TYPE1_IOMMU:
|
||||
case VFIO_TYPE1v2_IOMMU:
|
||||
case VFIO_UNMAP_ALL:
|
||||
return 1;
|
||||
return !ictx->no_iommu_mode;
|
||||
|
||||
case VFIO_NOIOMMU_IOMMU:
|
||||
return IS_ENABLED(CONFIG_VFIO_NOIOMMU);
|
||||
|
||||
Reference in New Issue
Block a user