
Commit 864ad04

Merge tag 'dma-mapping-6.9-2024-03-24' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
 "This has a set of swiotlb alignment fixes for sometimes very long
  standing bugs from Will. We've been discussing them for a while and
  they should be solid now"

* tag 'dma-mapping-6.9-2024-03-24' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: Reinstate page-alignment for mappings >= PAGE_SIZE
  iommu/dma: Force swiotlb_max_mapping_size on an untrusted device
  swiotlb: Fix alignment checks when both allocation and DMA masks are present
  swiotlb: Honour dma_alloc_coherent() alignment in swiotlb_alloc()
  swiotlb: Enforce page alignment in swiotlb_alloc()
  swiotlb: Fix double-allocation of slots due to broken alignment handling
2 parents 7029324 + 14cebf6 · commit 864ad04

2 files changed, 42 insertions(+), 12 deletions(-)


drivers/iommu/dma-iommu.c

Lines changed: 9 additions & 0 deletions
@@ -1711,6 +1711,14 @@ static size_t iommu_dma_opt_mapping_size(void)
 	return iova_rcache_range();
 }
 
+static size_t iommu_dma_max_mapping_size(struct device *dev)
+{
+	if (dev_is_untrusted(dev))
+		return swiotlb_max_mapping_size(dev);
+
+	return SIZE_MAX;
+}
+
 static const struct dma_map_ops iommu_dma_ops = {
 	.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
 	.alloc = iommu_dma_alloc,
@@ -1733,6 +1741,7 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.unmap_resource = iommu_dma_unmap_resource,
 	.get_merge_boundary = iommu_dma_get_merge_boundary,
 	.opt_mapping_size = iommu_dma_opt_mapping_size,
+	.max_mapping_size = iommu_dma_max_mapping_size,
 };
 
 /*
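With this hook wired up, dma_max_mapping_size() reports the swiotlb limit for untrusted devices behind the IOMMU instead of SIZE_MAX. A minimal sketch of how a driver might consult it before building one large mapping; the helper name is hypothetical and not part of this change:

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

/* Hypothetical helper: cap a requested transfer length by what the DMA
 * layer reports as mappable in a single call for this device. */
static size_t example_cap_transfer_len(struct device *dev, size_t len)
{
	/* For an untrusted device this now resolves to
	 * swiotlb_max_mapping_size() via iommu_dma_max_mapping_size(). */
	return min_t(size_t, len, dma_max_mapping_size(dev));
}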

kernel/dma/swiotlb.c

Lines changed: 33 additions & 12 deletions
@@ -1003,8 +1003,7 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	dma_addr_t tbl_dma_addr =
 		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
 	unsigned long max_slots = get_max_slots(boundary_mask);
-	unsigned int iotlb_align_mask =
-		dma_get_min_align_mask(dev) | alloc_align_mask;
+	unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
 	unsigned int nslots = nr_slots(alloc_size), stride;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
 	unsigned int index, slots_checked, count = 0, i;
@@ -1016,18 +1015,29 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	BUG_ON(area_index >= pool->nareas);
 
 	/*
-	 * For allocations of PAGE_SIZE or larger only look for page aligned
-	 * allocations.
+	 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
+	 * page-aligned in the absence of any other alignment requirements.
+	 * 'alloc_align_mask' was later introduced to specify the alignment
+	 * explicitly, however this is passed as zero for streaming mappings
+	 * and so we preserve the old behaviour there in case any drivers are
+	 * relying on it.
 	 */
-	if (alloc_size >= PAGE_SIZE)
-		iotlb_align_mask |= ~PAGE_MASK;
-	iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
+	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
+		alloc_align_mask = PAGE_SIZE - 1;
+
+	/*
+	 * Ensure that the allocation is at least slot-aligned and update
+	 * 'iotlb_align_mask' to ignore bits that will be preserved when
+	 * offsetting into the allocation.
+	 */
+	alloc_align_mask |= (IO_TLB_SIZE - 1);
+	iotlb_align_mask &= ~alloc_align_mask;
 
 	/*
 	 * For mappings with an alignment requirement don't bother looping to
 	 * unaligned slots once we found an aligned one.
 	 */
-	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
+	stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
 
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > pool->area_nslabs - area->used))
@@ -1037,11 +1047,14 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	index = area->index;
 
 	for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
+		phys_addr_t tlb_addr;
+
 		slot_index = slot_base + index;
+		tlb_addr = slot_addr(tbl_dma_addr, slot_index);
 
-		if (orig_addr &&
-		    (slot_addr(tbl_dma_addr, slot_index) &
-		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
+		if ((tlb_addr & alloc_align_mask) ||
+		    (orig_addr && (tlb_addr & iotlb_align_mask) !=
+				  (orig_addr & iotlb_align_mask))) {
 			index = wrap_area_index(pool, index + 1);
 			slots_checked++;
 			continue;
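Taken together, these hunks keep large streaming mappings page-aligned when no explicit alignment is requested, while an explicit alloc_align_mask now feeds into both the slot check and the search stride. A standalone sketch of that arithmetic, not kernel code; 2 KiB slots and 4 KiB pages are assumed, matching the common defaults:

/* Illustration only: mask handling for a streaming mapping >= PAGE_SIZE
 * with no driver-supplied min-align mask. */
#include <stdio.h>

#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1 << IO_TLB_SHIFT)
#define PAGE_SIZE	4096

int main(void)
{
	unsigned int alloc_align_mask = 0;	/* streaming mapping */
	unsigned int iotlb_align_mask = 0;	/* dma_get_min_align_mask() == 0 */
	unsigned long alloc_size = 8192;	/* >= PAGE_SIZE */
	unsigned long stride;

	/* Preserve the historical page alignment for large streaming maps. */
	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
		alloc_align_mask = PAGE_SIZE - 1;

	/* Slot-align the allocation and drop bits preserved by the offset. */
	alloc_align_mask |= IO_TLB_SIZE - 1;
	iotlb_align_mask &= ~alloc_align_mask;

	/* Equivalent of get_max_slots(max(alloc_align_mask, iotlb_align_mask))
	 * for the power-of-two-minus-one masks used here. */
	stride = ((alloc_align_mask > iotlb_align_mask ?
		   alloc_align_mask : iotlb_align_mask) >> IO_TLB_SHIFT) + 1;

	/* Prints alloc_align_mask=0xfff stride=2: check every second 2 KiB
	 * slot, i.e. only page-aligned candidates. */
	printf("alloc_align_mask=0x%x stride=%lu\n", alloc_align_mask, stride);
	return 0;
}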
@@ -1677,16 +1690,24 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	struct io_tlb_pool *pool;
 	phys_addr_t tlb_addr;
+	unsigned int align;
 	int index;
 
 	if (!mem)
 		return NULL;
 
-	index = swiotlb_find_slots(dev, 0, size, 0, &pool);
+	align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
+	index = swiotlb_find_slots(dev, 0, size, align, &pool);
 	if (index == -1)
 		return NULL;
 
 	tlb_addr = slot_addr(pool->start, index);
+	if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
+		dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
+			      &tlb_addr);
+		swiotlb_release_slots(dev, tlb_addr);
+		return NULL;
+	}
 
 	return pfn_to_page(PFN_DOWN(tlb_addr));
 }
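The new 'align' mask asks swiotlb_find_slots() for the natural alignment of the allocation size, which is what dma_alloc_coherent() callers expect. A standalone sketch of that computation, not kernel code; 4 KiB pages and a simplified get_order() stand-in are assumed:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Simplified stand-in for the kernel's get_order(): smallest 'order'
 * such that (PAGE_SIZE << order) >= size. */
static unsigned int example_get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 8192;			/* two pages */
	unsigned int order = example_get_order(size);	/* 1 */
	unsigned long align = (1UL << (order + PAGE_SHIFT)) - 1;

	/* Prints 0x1fff: the allocation must start on an 8 KiB boundary,
	 * i.e. the natural alignment for an 8 KiB buffer. */
	printf("align mask = 0x%lx\n", align);
	return 0;
}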
