x86, AMD IOMMU: flush domain TLB when there is more than one page to flush

This patch changes the domain TLB flushing behavior of the driver. When there
is more than one page to flush, it flushes the whole domain TLB instead of each
page individually. This way we send only a single command to the IOMMU in every
case, which is faster to execute.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: robert.richter@amd.com
Cc: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 999ba417cc
parent 5f6a59d8ad
Author: Joerg Roedel <joerg.roedel@amd.com>
Date:   2008-07-03 19:35:08 +02:00
Committer: Ingo Molnar <mingo@elte.hu>

2 files changed, 12 insertions(+), 4 deletions(-)


@@ -140,16 +140,22 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 			     u64 address, size_t size)
 {
-	int i;
+	int s = 0;
 	unsigned pages = to_pages(address, size);
 
 	address &= PAGE_MASK;
 
-	for (i = 0; i < pages; ++i) {
-		iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 0);
-		address += PAGE_SIZE;
+	if (pages > 1) {
+		/*
+		 * If we have to flush more than one page, flush all
+		 * TLB entries for this domain
+		 */
+		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+		s = 1;
 	}
 
+	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
+
 	return 0;
 }
 


@@ -93,6 +93,8 @@
 #define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
 #define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
 
+#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL
+
 /* macros and definitions for device table entries */
 #define DEV_ENTRY_VALID		0x00
 #define DEV_ENTRY_TRANSLATION	0x01
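
For context, the last argument of iommu_queue_inv_iommu_pages() ends up as the S (size) bit
of the INVALIDATE_IOMMU_PAGES command; with that bit set and an all-ones address field (the
new CMD_INV_IOMMU_ALL_PAGES_ADDRESS, page-masked by the callee), the IOMMU treats the request
as "invalidate every TLB entry of this domain". The standalone sketch below only mirrors the
decision logic of the hunk above; it is illustrative rather than driver code, assumes 4 KiB
pages, uses a simplified to_pages(), and stubs out the queueing function to print the command
it would submit.

/*
 * Illustrative userspace sketch of the new iommu_flush_pages() decision.
 * Not the real driver: iommu_queue_inv_iommu_pages() is a stub and the
 * struct amd_iommu argument is dropped.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* With the S bit set, an all-ones address means "all pages of the domain" */
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* Number of pages touched by [address, address + size), rounded up */
static unsigned to_pages(uint64_t address, size_t size)
{
	return (unsigned)(((address & ~PAGE_MASK) + size + PAGE_SIZE - 1)
			  >> PAGE_SHIFT);
}

/*
 * Stub: the real function builds an INVALIDATE_IOMMU_PAGES command,
 * page-masks the address and sets the S/PDE bits before queueing it.
 */
static int iommu_queue_inv_iommu_pages(uint64_t address, uint16_t domid,
				       int pde, int s)
{
	printf("INV_IOMMU_PAGES domid=%u addr=0x%llx pde=%d s=%d\n",
	       (unsigned)domid, (unsigned long long)address, pde, s);
	return 0;
}

/* Mirrors the new iommu_flush_pages() logic from the first hunk */
static int iommu_flush_pages(uint16_t domid, uint64_t address, size_t size)
{
	int s = 0;
	unsigned pages = to_pages(address, size);

	address &= PAGE_MASK;

	if (pages > 1) {
		/* more than one page: flush the whole domain TLB instead */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	return iommu_queue_inv_iommu_pages(address, domid, 0, s);
}

int main(void)
{
	iommu_flush_pages(42, 0x100000, PAGE_SIZE);	/* single page     */
	iommu_flush_pages(42, 0x100000, 8 * PAGE_SIZE);	/* domain-wide	   */
	return 0;
}

Built as an ordinary C program, the first call invalidates just the page it names, while the
second issues one domain-wide invalidation instead of queueing eight per-page commands, which
is the behavior the patch introduces.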