#define dma_map_single(dev, va, size, dir) virt_to_phys(va)
#define dma_map_page(dev, page, off, size, dir) (page_to_pa(page) + off)
-#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
-#define dma_unmap_page(dev, addr, size, dir) do { } while (0)
-#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
+#define dma_unmap_single(dev, addr, size, dir) ((void)0)
+#define dma_unmap_page(dev, addr, size, dir) ((void)0)
+#define dma_unmap_sg(dev, sg, nents, dir) ((void)0)
#define dma_mapping_error(addr) (0)
int dma_set_mask(struct device *dev, u64 mask);
-#define dma_sync_single_for_cpu(dev, addr, size, dir) do { } while (0)
-#define dma_sync_single_for_device(dev, addr, size, dir) do { } while (0)
-#define dma_sync_single_range(dev, addr, off, size, dir) do { } while (0)
-#define dma_sync_sg_for_cpu(dev, sg, nents, dir) do { } while (0)
-#define dma_sync_sg_for_device(dev, sg, nents, dir) do { } while (0)
-#define dma_cache_sync(dev, va, size, dir) do { } while (0)
+#define dma_sync_single_for_cpu(dev, addr, size, dir) ((void)0)
+#define dma_sync_single_for_device(dev, addr, size, dir) ((void)0)
+#define dma_sync_single_range(dev, addr, off, size, dir) ((void)0)
+#define dma_sync_sg_for_cpu(dev, sg, nents, dir) ((void)0)
+#define dma_sync_sg_for_device(dev, sg, nents, dir) ((void)0)
+#define dma_cache_sync(dev, va, size, dir) ((void)0)
+#define dma_sync_single_range_for_cpu(dev, addr, offset, size, dir) ((void)0)
+#define dma_sync_single_range_for_device(dev, addr, offset, size, dir) ((void)0)
#define dma_get_cache_alignment() L1_CACHE_BYTES
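
Both the old and the new macro bodies are deliberate no-ops; the difference is purely syntactic. do { } while (0) is a statement, so it is legal only where a statement may appear, whereas ((void)0) is a void-typed expression and therefore also composes with expression operators. A minimal standalone sketch of that distinction (the NOP_* names are illustrative, not from the patch):

#define NOP_STMT do { } while (0)	/* statement form */
#define NOP_EXPR ((void)0)		/* expression form */

static int coherent = 1;

void demo(void)
{
	/* Either form works as a plain statement: */
	NOP_STMT;
	NOP_EXPR;

	/* Only the expression form is legal in expression context: */
	coherent ? NOP_EXPR : NOP_EXPR;		/* compiles */
	/* coherent ? NOP_STMT : NOP_STMT;	   syntax error */
}

The expression form also mirrors the void return type of the real dma_unmap_*/dma_sync_* functions on architectures that implement them, which is presumably the motivation here: on this platform DMA mappings are direct (dma_map_single is just virt_to_phys) and coherent, so the unmap and sync hooks have nothing to do.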
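
For context, a hedged driver-side sketch of how a map/unmap pair reduces under the definitions above. It assumes the usual kernel environment (<linux/dma-mapping.h>, a valid struct device); example_xfer, buf and len are illustrative names, not from the patch:

#include <linux/dma-mapping.h>

static void example_xfer(struct device *dev, void *buf, size_t len)
{
	/* Expands to virt_to_phys(buf): a pure address translation.
	 * dev, len and the direction argument are ignored. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... handle would be programmed into the device here ... */
	(void)handle;	/* the no-op unmap below never evaluates it */

	/* Expands to ((void)0); nothing to tear down on a direct map. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}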