path: root/arch/powerpc/kernel/iommu.c
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--  arch/powerpc/kernel/iommu.c  92
1 file changed, 50 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ea1ba89f9c90..1bfa706b96e7 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -32,6 +32,7 @@
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
+#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
@@ -458,6 +459,42 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
+static void iommu_table_clear(struct iommu_table *tbl)
+{
+ if (!is_kdump_kernel()) {
+ /* Clear the table in case firmware left allocations in it */
+ ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+ return;
+ }
+
+#ifdef CONFIG_CRASH_DUMP
+ if (ppc_md.tce_get) {
+ unsigned long index, tceval, tcecount = 0;
+
+ /* Reserve the existing mappings left by the first kernel. */
+ for (index = 0; index < tbl->it_size; index++) {
+ tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
+ /*
+ * Freed TCE entry contains 0x7fffffffffffffff on JS20
+ */
+ if (tceval && (tceval != 0x7fffffffffffffffUL)) {
+ __set_bit(index, tbl->it_map);
+ tcecount++;
+ }
+ }
+
+ if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
+ printk(KERN_WARNING "TCE table is full; freeing ");
+ printk(KERN_WARNING "%d entries for the kdump boot\n",
+ KDUMP_MIN_TCE_ENTRIES);
+ for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
+ index < tbl->it_size; index++)
+ __clear_bit(index, tbl->it_map);
+ }
+ }
+#endif
+}
+
/*
* Build a iommu_table structure. This contains a bit map which
* is used to manage allocation of the tce space.
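This hunk lifts the kdump-specific TCE handling out of iommu_init_table() into
a new helper, iommu_table_clear(), trading the old compile-time
CONFIG_CRASH_DUMP split for a runtime is_kdump_kernel() test (hence the new
<linux/crash_dump.h> include above). The reservation logic itself is a plain
bitmap pattern: mark every entry the first kernel left mapped as allocated,
and if that leaves fewer than KDUMP_MIN_TCE_ENTRIES free, forcibly free a
tail window. A minimal userspace sketch of that pattern, with hypothetical
table size, sentinel value and free minimum standing in for the kernel's
bitops and constants:

#include <stdio.h>
#include <limits.h>

#define TBL_SIZE	64			/* hypothetical it_size */
#define FREED_SENTINEL	0x7fffffffffffffffUL	/* "freed" pattern, as on JS20 */
#define MIN_FREE	16			/* stand-in for KDUMP_MIN_TCE_ENTRIES */
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long it_map[TBL_SIZE / BITS_PER_LONG + 1];

static void set_bit(unsigned long n)
{
	it_map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static void clear_bit(unsigned long n)
{
	it_map[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG));
}

/* Stand-in for ppc_md.tce_get(): pretend most entries are still mapped. */
static unsigned long tce_get(unsigned long index)
{
	return (index < 56) ? 0x1000UL + index : FREED_SENTINEL;
}

int main(void)
{
	unsigned long index, tceval, tcecount = 0;

	/* Reserve every entry the first kernel left mapped. */
	for (index = 0; index < TBL_SIZE; index++) {
		tceval = tce_get(index);
		if (tceval && tceval != FREED_SENTINEL) {
			set_bit(index);
			tcecount++;
		}
	}

	/* Too few free entries for the kdump kernel: free a tail window. */
	if (TBL_SIZE - tcecount < MIN_FREE) {
		printf("table nearly full; freeing %d tail entries\n",
		       MIN_FREE);
		for (index = TBL_SIZE - MIN_FREE; index < TBL_SIZE; index++)
			clear_bit(index);
	}
	printf("%lu of %d entries reserved\n", tcecount, TBL_SIZE);
	return 0;
}

Note that, exactly as in the kernel code above, freeing the tail window may
overlap entries that were just reserved; the kdump kernel accepts that risk
because it only needs enough free entries to drive I/O for the dump.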
@@ -484,38 +521,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
tbl->it_largehint = tbl->it_halfpoint;
spin_lock_init(&tbl->it_lock);
-#ifdef CONFIG_CRASH_DUMP
- if (ppc_md.tce_get) {
- unsigned long index;
- unsigned long tceval;
- unsigned long tcecount = 0;
-
- /*
- * Reserve the existing mappings left by the first kernel.
- */
- for (index = 0; index < tbl->it_size; index++) {
- tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
- /*
- * Freed TCE entry contains 0x7fffffffffffffff on JS20
- */
- if (tceval && (tceval != 0x7fffffffffffffffUL)) {
- __set_bit(index, tbl->it_map);
- tcecount++;
- }
- }
- if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
- printk(KERN_WARNING "TCE table is full; ");
- printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
- KDUMP_MIN_TCE_ENTRIES);
- for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
- index < tbl->it_size; index++)
- __clear_bit(index, tbl->it_map);
- }
- }
-#else
- /* Clear the hardware table in case firmware left allocations in it */
- ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
-#endif
+ iommu_table_clear(tbl);
if (!welcomed) {
printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
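With the helper in place, iommu_init_table() shrinks to a single
unconditional iommu_table_clear() call, and the kdump versus regular boot
decision moves to runtime. For reference, is_kdump_kernel() in kernels of
this vintage reduces to checking whether the crashed kernel passed an ELF
core header address via the elfcorehdr= command-line parameter; roughly (a
paraphrase of <linux/crash_dump.h>, not the exact header text):

/* A kernel is a kdump (capture) kernel iff the crashed kernel handed it
 * an ELF core header through the elfcorehdr= command-line parameter. */
extern unsigned long long elfcorehdr_addr;

static inline int is_kdump_kernel(void)
{
	return elfcorehdr_addr != 0;
}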
@@ -559,21 +565,23 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
}
/* Creates TCEs for a user provided buffer. The user buffer must be
- * contiguous real kernel storage (not vmalloc). The address of the buffer
- * passed here is the kernel (virtual) address of the buffer. The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
+ * contiguous real kernel storage (not vmalloc). The address passed here
+ * comprises a page address and offset into that page. The dma_addr_t
+ * returned will point to the same byte within the page as was passed in.
*/
-dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
- void *vaddr, size_t size, unsigned long mask,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
+ struct page *page, unsigned long offset, size_t size,
+ unsigned long mask, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
+ void *vaddr;
unsigned long uaddr;
unsigned int npages, align;
BUG_ON(direction == DMA_NONE);
+ vaddr = page_address(page) + offset;
uaddr = (unsigned long)vaddr;
npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
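The conversion from iommu_map_single() to iommu_map_page() only changes how
the virtual address is obtained: it is reconstructed as page_address(page) +
offset, so callers no longer need a kernel virtual address up front. The page
count then follows from the usual span arithmetic; the iommu-helper's
iommu_num_pages() computes roughly the following (a paraphrase, not the exact
helper):

/* Number of IOMMU pages spanned by [addr, addr + len), accounting for
 * the offset of addr within its first IOMMU page. */
static unsigned long num_io_pages(unsigned long addr, unsigned long len,
				  unsigned long io_page_size)
{
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return (size + io_page_size - 1) / io_page_size; /* DIV_ROUND_UP */
}

For example, a 0x2000-byte buffer starting 0x100 bytes into a 4K IOMMU page
spans three pages: 0x100 + 0x2000 = 0x2100, and DIV_ROUND_UP(0x2100, 0x1000)
is 3.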
@@ -599,9 +607,9 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
return dma_handle;
}
-void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
unsigned int npages;
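The unmap side is renamed symmetrically. A hypothetical sketch of how a
dma_mapping_ops backend would route its map_page/unmap_page hooks to this
pair (in the spirit of arch/powerpc/kernel/dma-iommu.c; device_to_mask() and
the dev->archdata.dma_data table pointer are assumptions here, not shown in
this diff):

static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	/* dev->archdata.dma_data is assumed to hold the device's table. */
	return iommu_map_page(dev, dev->archdata.dma_data, page, offset,
			      size, device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size,
				 enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	iommu_unmap_page(dev->archdata.dma_data, dma_handle, size,
			 direction, attrs);
}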