hw/arm/smmuv3: Enforce invalidation on a power of two range

As of today, the driver can invalidate a number of pages that is
not a power of 2. However IOTLB unmap notifications and internal
IOTLB invalidations work with masks leading to erroneous
invalidations.

In case the range is not a power of 2, split invalidations into
power of 2 invalidations.

When looking for a single page entry in the vSMMU internal IOTLB,
let's make sure that if the entry is not found using a
g_hash_table_remove() we iterate over all the entries to find a
potential range that overlaps it.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20210309102742.30442-6-eric.auger@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 6d9cd115b9 (parent a4b6e1be2c), authored by Eric Auger on 2021-03-09 11:27:40 +01:00 and committed by Peter Maydell.
2 changed files with 38 additions and 16 deletions.

Changed file 1 of 2 — the hunk below modifies smmu_iotlb_inv_iova (note: the diff's +/- markers were stripped, so removed and added lines appear interleaved).

@ -151,22 +151,28 @@ inline void
smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
uint8_t tg, uint64_t num_pages, uint8_t ttl)
{
/* if tg is not set we use 4KB range invalidation */
uint8_t granule = tg ? tg * 2 + 10 : 12;
if (ttl && (num_pages == 1) && (asid >= 0)) {
SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl);
g_hash_table_remove(s->iotlb, &key);
} else {
/* if tg is not set we use 4KB range invalidation */
uint8_t granule = tg ? tg * 2 + 10 : 12;
SMMUIOTLBPageInvInfo info = {
.asid = asid, .iova = iova,
.mask = (num_pages * 1 << granule) - 1};
g_hash_table_foreach_remove(s->iotlb,
smmu_hash_remove_by_asid_iova,
&info);
if (g_hash_table_remove(s->iotlb, &key)) {
return;
}
/*
* if the entry is not found, let's see if it does not
* belong to a larger IOTLB entry
*/
}
SMMUIOTLBPageInvInfo info = {
.asid = asid, .iova = iova,
.mask = (num_pages * 1 << granule) - 1};
g_hash_table_foreach_remove(s->iotlb,
smmu_hash_remove_by_asid_iova,
&info);
}
inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)

Changed file 2 of 2 — the hunks below modify smmuv3_s1_range_inval (same caveat: +/- markers stripped, removed and added lines interleaved, and lines between the two hunks are omitted).

@ -861,7 +861,8 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
/*
 * NOTE(review): diff-hunk residue — the +/- markers were stripped, so
 * pre-patch and post-patch lines appear interleaved below and the text is
 * not valid C as-is (e.g. num_pages is declared twice).
 */
uint16_t vmid = CMD_VMID(cmd);
bool leaf = CMD_LEAF(cmd);
uint8_t tg = CMD_TG(cmd);
/* pre-patch declaration, removed by this commit */
hwaddr num_pages = 1;
/* post-patch: num_pages widened to uint64_t, split-loop cursors added */
uint64_t first_page = 0, last_page;
uint64_t num_pages = 1;
int asid = -1;
if (tg) {
/* hunk gap: the body of "if (tg)" (num_pages decode, etc.) is not shown */
@ -874,9 +875,24 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
if (type == SMMU_CMD_TLBI_NH_VA) {
asid = CMD_ASID(cmd);
}
/* pre-patch single-shot invalidation, replaced by the split loop below */
trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
/* Split the request into power-of-2 range invalidations */
last_page = num_pages - 1;
while (num_pages) {
/* page shift encoded by tg */
uint8_t granule = tg * 2 + 10;
uint64_t mask, count;
/*
 * presumably the largest power-of-2-aligned span starting at
 * first_page and not exceeding last_page — TODO confirm against
 * dma_aligned_pow2_mask()
 */
mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
count = mask + 1;
trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
/* advance cursors past the chunk just invalidated */
num_pages -= count;
first_page += count;
addr += count * BIT_ULL(granule);
}
}
static int smmuv3_cmdq_consume(SMMUv3State *s)