diff options
Diffstat (limited to 'arch/x86/include/asm/dma-mapping.h')
| -rw-r--r-- | arch/x86/include/asm/dma-mapping.h | 168 | 
1 file changed, 3 insertions, 165 deletions
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index f82fdc412c6..1c3f9435f1c 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -6,6 +6,7 @@   * Documentation/DMA-API.txt for documentation.   */ +#include <linux/kmemcheck.h>  #include <linux/scatterlist.h>  #include <linux/dma-debug.h>  #include <linux/dma-attrs.h> @@ -32,6 +33,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)  #endif  } +#include <asm-generic/dma-mapping-common.h> +  /* Make sure we keep the same behaviour */  static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)  { @@ -52,171 +55,6 @@ extern int dma_set_mask(struct device *dev, u64 mask);  extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,  					dma_addr_t *dma_addr, gfp_t flag); -static inline dma_addr_t -dma_map_single(struct device *hwdev, void *ptr, size_t size, -	       enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); -	dma_addr_t addr; - -	BUG_ON(!valid_dma_direction(dir)); -	addr = ops->map_page(hwdev, virt_to_page(ptr), -			     (unsigned long)ptr & ~PAGE_MASK, size, -			     dir, NULL); -	debug_dma_map_page(hwdev, virt_to_page(ptr), -			   (unsigned long)ptr & ~PAGE_MASK, size, -			   dir, addr, true); -	return addr; -} - -static inline void -dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, -		 enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(dev); - -	BUG_ON(!valid_dma_direction(dir)); -	if (ops->unmap_page) -		ops->unmap_page(dev, addr, size, dir, NULL); -	debug_dma_unmap_page(dev, addr, size, dir, true); -} - -static inline int -dma_map_sg(struct device *hwdev, struct scatterlist *sg, -	   int nents, enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); -	int ents; - -	BUG_ON(!valid_dma_direction(dir)); -	ents = ops->map_sg(hwdev, sg, nents, dir, NULL); -	debug_dma_map_sg(hwdev, sg, nents, ents, 
dir); - -	return ents; -} - -static inline void -dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, -	     enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); - -	BUG_ON(!valid_dma_direction(dir)); -	debug_dma_unmap_sg(hwdev, sg, nents, dir); -	if (ops->unmap_sg) -		ops->unmap_sg(hwdev, sg, nents, dir, NULL); -} - -static inline void -dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, -			size_t size, enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); - -	BUG_ON(!valid_dma_direction(dir)); -	if (ops->sync_single_for_cpu) -		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir); -	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir); -	flush_write_buffers(); -} - -static inline void -dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, -			   size_t size, enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); - -	BUG_ON(!valid_dma_direction(dir)); -	if (ops->sync_single_for_device) -		ops->sync_single_for_device(hwdev, dma_handle, size, dir); -	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir); -	flush_write_buffers(); -} - -static inline void -dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, -			      unsigned long offset, size_t size, -			      enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); - -	BUG_ON(!valid_dma_direction(dir)); -	if (ops->sync_single_range_for_cpu) -		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, -					       size, dir); -	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle, -					    offset, size, dir); -	flush_write_buffers(); -} - -static inline void -dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, -				 unsigned long offset, size_t size, -				 enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); - -	BUG_ON(!valid_dma_direction(dir)); -	if 
(ops->sync_single_range_for_device) -		ops->sync_single_range_for_device(hwdev, dma_handle, -						  offset, size, dir); -	debug_dma_sync_single_range_for_device(hwdev, dma_handle, -					       offset, size, dir); -	flush_write_buffers(); -} - -static inline void -dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, -		    int nelems, enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); - -	BUG_ON(!valid_dma_direction(dir)); -	if (ops->sync_sg_for_cpu) -		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir); -	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir); -	flush_write_buffers(); -} - -static inline void -dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, -		       int nelems, enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(hwdev); - -	BUG_ON(!valid_dma_direction(dir)); -	if (ops->sync_sg_for_device) -		ops->sync_sg_for_device(hwdev, sg, nelems, dir); -	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir); - -	flush_write_buffers(); -} - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, -				      size_t offset, size_t size, -				      enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(dev); -	dma_addr_t addr; - -	BUG_ON(!valid_dma_direction(dir)); -	addr = ops->map_page(dev, page, offset, size, dir, NULL); -	debug_dma_map_page(dev, page, offset, size, dir, addr, false); - -	return addr; -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, -				  size_t size, enum dma_data_direction dir) -{ -	struct dma_map_ops *ops = get_dma_ops(dev); - -	BUG_ON(!valid_dma_direction(dir)); -	if (ops->unmap_page) -		ops->unmap_page(dev, addr, size, dir, NULL); -	debug_dma_unmap_page(dev, addr, size, dir, false); -} -  static inline void  dma_cache_sync(struct device *dev, void *vaddr, size_t size,  	enum dma_data_direction dir)  |