iommu/tegra: smmu: Fix unsleepable memory allocation at alloc_pdir()

alloc_pdir() is called from smmu_iommu_domain_init() with a spinlock
held, so the memory allocations in alloc_pdir() had to be atomic.
Instead of converting them to atomic allocations, this patch releases
the lock, performs the allocations, re-takes the lock, and then checks
whether another caller raced in the meantime. This avoids having to
introduce a mutex or preallocation.
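
The shape of the fix, as a minimal self-contained sketch (the struct
and names here are placeholders for illustration, not the driver
code): GFP_KERNEL allocations may sleep, so the spinlock is dropped
around them, and the raced case is detected after the lock is
re-taken.

	#include <linux/spinlock.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/errno.h>

	struct foo {			/* placeholder type */
		spinlock_t lock;
		struct page *page;	/* set at most once, under lock */
	};

	/* Called with f->lock held; returns with it held again. */
	static int foo_alloc_page(struct foo *f, unsigned long *flags)
	{
		struct page *page;

		spin_unlock_irqrestore(&f->lock, *flags);
		page = alloc_page(GFP_KERNEL);	/* may sleep here */
		spin_lock_irqsave(&f->lock, *flags);

		if (f->page) {			/* raced: someone else won */
			if (page)
				__free_page(page);
			return -EAGAIN;
		}
		if (!page)
			return -ENOMEM;
		f->page = page;
		return 0;
	}

In the patch below, the raced case is reported as -EAGAIN, which
smmu_iommu_domain_init() treats as "keep scanning for another free AS"
rather than as a hard failure.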

Signed-off-by: Hiroshi DOYU <hdoyu@nvidia.com>
Reported-by: Chris Wright <chrisw@sous-sol.org>
Cc: Chris Wright <chrisw@sous-sol.org>
Acked-by: Stephen Warren <swarren@wwwdotorg.org>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

@@ -555,28 +555,39 @@ static inline void put_signature(struct smmu_as *as,
 /*
  * Caller must lock/unlock as
  */
-static int alloc_pdir(struct smmu_as *as)
+static int alloc_pdir(struct smmu_as *as, unsigned long *flags)
 {
 	unsigned long *pdir;
-	int pdn;
+	int pdn, err = 0;
 	u32 val;
 	struct smmu_device *smmu = as->smmu;
+	struct page *page;
+	unsigned int *cnt;
 
-	as->pte_count = devm_kzalloc(smmu->dev,
-		     sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
-	if (!as->pte_count) {
-		dev_err(smmu->dev,
-			"failed to allocate smmu_device PTE cunters\n");
-		return -ENOMEM;
+	/*
+	 * do the allocation outside the as->lock
+	 */
+	spin_unlock_irqrestore(&as->lock, *flags);
+	cnt = devm_kzalloc(smmu->dev,
+			   sizeof(cnt[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+	page = alloc_page(GFP_KERNEL | __GFP_DMA);
+	spin_lock_irqsave(&as->lock, *flags);
+
+	if (as->pdir_page) {
+		/* We raced, free the redundant */
+		err = -EAGAIN;
+		goto err_out;
 	}
-	as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
-	if (!as->pdir_page) {
-		dev_err(smmu->dev,
-			"failed to allocate smmu_device page directory\n");
-		devm_kfree(smmu->dev, as->pte_count);
-		as->pte_count = NULL;
-		return -ENOMEM;
+
+	if (!page || !cnt) {
+		dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
+		err = -ENOMEM;
+		goto err_out;
 	}
+
+	as->pdir_page = page;
+	as->pte_count = cnt;
+
 	SetPageReserved(as->pdir_page);
 	pdir = page_address(as->pdir_page);
 
@@ -593,6 +604,12 @@ static int alloc_pdir(struct smmu_as *as)
 	FLUSH_SMMU_REGS(as->smmu);
 
 	return 0;
+
+err_out:
+	devm_kfree(smmu->dev, cnt);
+	if (page)
+		__free_page(page);
+	return err;
 }
 
 static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
@@ -784,29 +801,29 @@ out:
 static int smmu_iommu_domain_init(struct iommu_domain *domain)
 {
-	int i;
+	int i, err = -ENODEV;
 	unsigned long flags;
 	struct smmu_as *as;
 	struct smmu_device *smmu = smmu_handle;
 
 	/* Look for a free AS with lock held */
 	for (i = 0; i < smmu->num_as; i++) {
-		struct smmu_as *tmp = &smmu->as[i];
-
-		spin_lock_irqsave(&tmp->lock, flags);
-		if (!tmp->pdir_page) {
-			as = tmp;
-			goto found;
+		as = &smmu->as[i];
+		spin_lock_irqsave(&as->lock, flags);
+		if (!as->pdir_page) {
+			err = alloc_pdir(as, &flags);
+			if (!err)
+				goto found;
 		}
-		spin_unlock_irqrestore(&tmp->lock, flags);
+		spin_unlock_irqrestore(&as->lock, flags);
+		if (err != -EAGAIN)
+			break;
 	}
-	dev_err(smmu->dev, "no free AS\n");
-	return -ENODEV;
+	if (i == smmu->num_as)
+		dev_err(smmu->dev, "no free AS\n");
+	return err;
 
 found:
-	if (alloc_pdir(as) < 0)
-		goto err_alloc_pdir;
-
 	spin_lock(&smmu->lock);
 
 	/* Update PDIR register */
@@ -822,10 +839,6 @@ found:
 	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
 
 	return 0;
-
-err_alloc_pdir:
-	spin_unlock_irqrestore(&as->lock, flags);
-	return -ENODEV;
 }
 
 static void smmu_iommu_domain_destroy(struct iommu_domain *domain)