dmar: Allocate queued invalidation structure using numa locality info

Allocate queued invalidation descriptor structures using numa locality info.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
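For context, a minimal sketch (not part of this commit) contrasting the two allocation styles. It assumes only a "node" argument standing in for iommu->node, the NUMA node the DMAR unit was enumerated on: get_zeroed_page() hands back a page from whichever node the calling CPU happens to be on, while alloc_pages_node() lets the caller place the zeroed descriptor page on the IOMMU's own node.

/*
 * Illustrative sketch only, not from the commit: old vs. new allocation
 * of the queued-invalidation descriptor page.  "node" stands in for
 * iommu->node; the real code lives in dmar_enable_qi().
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *qi_desc_alloc_cpu_local(void)
{
	/* Old path: zeroed page from the node local to the calling CPU. */
	return (void *)get_zeroed_page(GFP_ATOMIC);
}

static void *qi_desc_alloc_iommu_local(int node)
{
	/* New path: zeroed order-0 page placed on the IOMMU's NUMA node. */
	struct page *desc_page;

	desc_page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	return desc_page ? page_address(desc_page) : NULL;
}

Either pointer can still be released with free_page((unsigned long)addr), which is why the error paths later in dmar_enable_qi() are unchanged.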
Author:    Suresh Siddha
Committer: David Woodhouse
Date:      2009-10-02 11:01:22 -0700
Commit:    751cafe3ae (parent ee34b32d8c)
1 changed file with 6 additions and 2 deletions

@@ -1040,6 +1040,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
 	struct q_inval *qi;
+	struct page *desc_page;
 
 	if (!ecap_qis(iommu->ecap))
 		return -ENOENT;
@@ -1056,13 +1057,16 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	qi = iommu->qi;
 
-	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
-	if (!qi->desc) {
+
+	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
+	if (!desc_page) {
 		kfree(qi);
 		iommu->qi = 0;
 		return -ENOMEM;
 	}
 
+	qi->desc = page_address(desc_page);
+
 	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
 	if (!qi->desc_status) {
 		free_page((unsigned long) qi->desc);