[IA64] SN-specific version of dma_get_required_mask()
John Keller [Mon, 24 Nov 2008 22:47:17 +0000 (16:47 -0600)]
Create a platform-specific version of dma_get_required_mask()
for ia64 SN Altix. All SN Altix platforms support 64-bit DMA
addressing regardless of the size of system memory.
Create an ia64 machvec entry for dma_get_required_mask, with the
SN version unconditionally returning DMA_64BIT_MASK.

Signed-off-by: John Keller <jpk@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

Documentation/DMA-API.txt
arch/ia64/include/asm/dma-mapping.h
arch/ia64/include/asm/machvec.h
arch/ia64/include/asm/machvec_init.h
arch/ia64/include/asm/machvec_sn2.h
arch/ia64/pci/pci.c
arch/ia64/sn/pci/pci_dma.c

diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index b462bb1..5244169 100644
@@ -170,16 +170,15 @@ Returns: 0 if successful and a negative error if not.
 u64
 dma_get_required_mask(struct device *dev)
 
-After setting the mask with dma_set_mask(), this API returns the
-actual mask (within that already set) that the platform actually
-requires to operate efficiently.  Usually this means the returned mask
+This API returns the mask that the platform requires to
+operate efficiently.  Usually this means the returned mask
 is the minimum required to cover all of memory.  Examining the
 required mask gives drivers with variable descriptor sizes the
 opportunity to use smaller descriptors as necessary.
 
 Requesting the required mask does not alter the current mask.  If you
-wish to take advantage of it, you should issue another dma_set_mask()
-call to lower the mask again.
+wish to take advantage of it, you should issue a dma_set_mask()
+call to set the mask to the value returned.
 
 
 Part Id - Streaming DMA mappings
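
For context, a minimal sketch of the usage pattern the updated text
describes; the foo_* driver name and descriptor helpers are hypothetical:

    #include <linux/dma-mapping.h>

    /* Hypothetical probe: query the mask the platform needs, then
     * commit to a (possibly smaller) descriptor format with
     * dma_set_mask().  Querying alone does not change the mask.
     */
    static int foo_probe(struct device *dev)
    {
            u64 required = dma_get_required_mask(dev);

            if (required <= DMA_32BIT_MASK) {
                    /* all of memory is below 4GB: 32-bit descriptors */
                    if (dma_set_mask(dev, DMA_32BIT_MASK))
                            return -EIO;
                    foo_use_32bit_descriptors(dev);
            } else {
                    if (dma_set_mask(dev, DMA_64BIT_MASK))
                            return -EIO;
                    foo_use_64bit_descriptors(dev);
            }
            return 0;
    }
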
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index bbab7e2..1f912d9 100644
@@ -9,6 +9,8 @@
 #include <linux/scatterlist.h>
 #include <asm/swiotlb.h>
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 struct dma_mapping_ops {
        int             (*mapping_error)(struct device *dev,
                                         dma_addr_t dma_addr);
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 59c17e4..fe87b21 100644
@@ -62,6 +62,7 @@ typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t
 typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
 typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef u64 ia64_mv_dma_get_required_mask (struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -159,6 +160,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
 #  define platform_dma_mapping_error           ia64_mv.dma_mapping_error
 #  define platform_dma_supported       ia64_mv.dma_supported
+#  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
 #  define platform_irq_to_vector       ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem  ia64_mv.pci_get_legacy_mem
@@ -213,6 +215,7 @@ struct ia64_machine_vector {
        ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
        ia64_mv_dma_mapping_error *dma_mapping_error;
        ia64_mv_dma_supported *dma_supported;
+       ia64_mv_dma_get_required_mask *dma_get_required_mask;
        ia64_mv_irq_to_vector *irq_to_vector;
        ia64_mv_local_vector_to_irq *local_vector_to_irq;
        ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -263,6 +266,7 @@ struct ia64_machine_vector {
        platform_dma_sync_sg_for_device,        \
        platform_dma_mapping_error,                     \
        platform_dma_supported,                 \
+       platform_dma_get_required_mask,         \
        platform_irq_to_vector,                 \
        platform_local_vector_to_irq,           \
        platform_pci_get_legacy_mem,            \
@@ -366,6 +370,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 #ifndef platform_dma_supported
 # define  platform_dma_supported       swiotlb_dma_supported
 #endif
+#ifndef platform_dma_get_required_mask
+# define  platform_dma_get_required_mask       ia64_dma_get_required_mask
+#endif
 #ifndef platform_irq_to_vector
 # define platform_irq_to_vector                __ia64_irq_to_vector
 #endif
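
These machvec hooks follow the standard ia64 pattern: a
CONFIG_IA64_GENERIC kernel dispatches through the ia64_mv vector at
runtime, while a platform-built kernel binds the hook at compile time,
and the #ifndef fallback above supplies ia64_dma_get_required_mask for
platforms that define nothing.  Simplified sketch of the effect:

    /* Simplified view of the dispatch after this patch. */
    #ifdef CONFIG_IA64_GENERIC
    /* resolved at runtime through the machine vector */
    # define platform_dma_get_required_mask  ia64_mv.dma_get_required_mask
    #else
    /* e.g. an sn2-only kernel: resolved at compile time via machvec_sn2.h */
    # define platform_dma_get_required_mask  sn_dma_get_required_mask
    #endif
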
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index ef964b2..37a4698 100644
@@ -3,6 +3,7 @@
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
 extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index 781308e..f1a6e0d 100644
@@ -67,6 +67,7 @@ extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
 extern ia64_mv_dma_sync_sg_for_device  sn_dma_sync_sg_for_device;
 extern ia64_mv_dma_mapping_error       sn_dma_mapping_error;
 extern ia64_mv_dma_supported           sn_dma_supported;
+extern ia64_mv_dma_get_required_mask   sn_dma_get_required_mask;
 extern ia64_mv_migrate_t               sn_migrate;
 extern ia64_mv_kernel_launch_event_t   sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t         sn_setup_msi_irq;
@@ -123,6 +124,7 @@ extern ia64_mv_pci_fixup_bus_t              sn_pci_fixup_bus;
 #define platform_dma_sync_sg_for_device        sn_dma_sync_sg_for_device
 #define platform_dma_mapping_error             sn_dma_mapping_error
 #define platform_dma_supported         sn_dma_supported
+#define platform_dma_get_required_mask sn_dma_get_required_mask
 #define platform_migrate               sn_migrate
 #define platform_kernel_launch_event    sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 211fcfd..61f1af5 100644
@@ -19,6 +19,7 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/bootmem.h>
 
 #include <asm/machvec.h>
 #include <asm/page.h>
@@ -748,6 +749,32 @@ static void __init set_pci_cacheline_size(void)
        pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
+u64 ia64_dma_get_required_mask(struct device *dev)
+{
+       u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
+       u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
+       u64 mask;
+
+       if (!high_totalram) {
+               /* convert to mask just covering totalram */
+               low_totalram = (1 << (fls(low_totalram) - 1));
+               low_totalram += low_totalram - 1;
+               mask = low_totalram;
+       } else {
+               high_totalram = (1 << (fls(high_totalram) - 1));
+               high_totalram += high_totalram - 1;
+               mask = (((u64)high_totalram) << 32) + 0xffffffff;
+       }
+       return mask;
+}
+EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
+
+u64 dma_get_required_mask(struct device *dev)
+{
+       return platform_dma_get_required_mask(dev);
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+
 static int __init pcibios_init(void)
 {
        set_pci_cacheline_size();
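
Worked example of the fallback, assuming a hypothetical 16KB PAGE_SIZE
(PAGE_SHIFT == 14) and 3GB of RAM (max_pfn == 0x30000): low_totalram =
0x2ffff << 14 = 0xbfffc000 and high_totalram = 0x2ffff >> 18 = 0, so
fls() yields 32, low_totalram is rounded to 0x80000000, and the function
returns 0x80000000 + 0x7fffffff = 0xffffffff -- the smallest all-ones
mask covering every RAM address.  The same effect, stated more directly
(sketch only; needs <linux/bitops.h> for fls64()):

    /* Smallest (2^n - 1) mask covering the highest RAM byte address. */
    static u64 mask_covering_ram(u64 top_addr)
    {
            if (top_addr & (1ULL << 63))    /* avoid undefined 1ULL << 64 */
                    return ~0ULL;
            return (1ULL << fls64(top_addr)) - 1;
    }
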
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 53ebb64..863f501 100644
@@ -356,6 +356,12 @@ int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 }
 EXPORT_SYMBOL(sn_dma_mapping_error);
 
+u64 sn_dma_get_required_mask(struct device *dev)
+{
+       return DMA_64BIT_MASK;
+}
+EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
+
 char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
        if (!SN_PCIBUS_BUSSOFT(bus))
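
The SN version never needs the computation above: all SN Altix hardware
can DMA to any address, so the required mask is unconditionally
DMA_64BIT_MASK (0xffffffffffffffffULL, i.e. DMA_BIT_MASK(64)).  A driver
following the DMA-API.txt advice therefore always ends up with the full
64-bit mask on SN; hypothetical sketch:

    static int foo_sn_probe(struct device *dev)
    {
            /* On SN this always returns DMA_64BIT_MASK. */
            u64 required = dma_get_required_mask(dev);

            if (dma_set_mask(dev, required))
                    return -EIO;
            return 0;       /* free to use 64-bit descriptors */
    }
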