/* Changed from asm-m68k version, Lineo Inc.  May 2001 */

#ifndef _ASM_BFIN_PCI_H
#define _ASM_BFIN_PCI_H

#include <asm/scatterlist.h>

/*
 * Written by Wout Klaren.
 */

/* Added by Chang Junxiao */
#define PCIBIOS_MIN_IO 0x00001000
#define PCIBIOS_MIN_MEM 0x10000000

#define PCI_DMA_BUS_IS_PHYS (1)

struct pci_ops;

/*
 * Structure with hardware dependent information and functions of the
 * PCI bus.
 */
struct pci_bus_info {
        /*
         * Resources of the PCI bus.
         */
        struct resource mem_space;
        struct resource io_space;

        /*
         * System dependent functions.
         */
        struct pci_ops *bfin_pci_ops;
        void (*fixup) (int pci_modify);
        void (*conf_device) (unsigned char bus, unsigned char device_fn);
};
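
/*
 * Illustrative sketch (not part of the original header): a board port could
 * describe its PCI windows and config-access hooks with a pci_bus_info
 * instance along these lines.  my_board_pci_ops, my_board_fixup and
 * my_board_conf_device are hypothetical board-specific symbols used only
 * for the example; the window bounds simply echo the PCIBIOS_MIN_* values
 * defined above.
 *
 *      static struct pci_bus_info my_board_pci_bus = {
 *              .mem_space = { .start = 0x10000000, .end = 0x1fffffff },
 *              .io_space  = { .start = 0x00001000, .end = 0x0000ffff },
 *              .bfin_pci_ops = &my_board_pci_ops,
 *              .fixup = my_board_fixup,
 *              .conf_device = my_board_conf_device,
 *      };
 */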

#define pcibios_assign_all_busses() 0

static inline void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}

static inline void pcibios_penalize_isa_irq(int irq)
{
        /* We don't do dynamic PCI IRQ allocation */
}

static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
                                        size_t size, int direction)
{
        if (direction == PCI_DMA_NONE)
                BUG();

        /* return virt_to_bus(ptr); */
        return (dma_addr_t) ptr;
}

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
                                    size_t size, int direction)
{
        if (direction == PCI_DMA_NONE)
                BUG();

        /* Nothing to do */
}
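
/*
 * Illustrative sketch (an assumption, not part of the original interface):
 * a driver pairs the two calls above around a single DMA transfer.  pdev,
 * buf and len are hypothetical driver variables.
 *
 *      dma_addr_t handle;
 *
 *      handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *      ... start the transfer and wait for it to complete ...
 *      pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
 *
 * On this port the mapping is a no-op and the handle is simply the buffer
 * address, but portable drivers must still keep the two calls balanced.
 */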

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
                             int nents, int direction)
{
        if (direction == PCI_DMA_NONE)
                BUG();

        return nents;
}
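
/*
 * Illustrative sketch (an assumption, not part of the original interface):
 * after a successful mapping, each used entry is consumed through
 * sg_dma_address()/sg_dma_length(), and the list is later released with
 * pci_unmap_sg() (declared below) using the original nents.  pdev, sglist,
 * nents and program_device() are hypothetical driver names.
 *
 *      int i, count;
 *
 *      count = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 *      for (i = 0; i < count; i++)
 *              program_device(sg_dma_address(&sglist[i]),
 *                             sg_dma_length(&sglist[i]));
 *      ...
 *      pci_unmap_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 */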

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
                                int nents, int direction)
{
        if (direction == PCI_DMA_NONE)
                BUG();

        /* Nothing to do */
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, the
 * device again owns the buffer.
 */
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
                                       dma_addr_t dma_handle, size_t size,
                                       int direction)
{
        if (direction == PCI_DMA_NONE)
                BUG();

        /* Nothing to do */
}
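
/*
 * Illustrative sketch (an assumption, not part of the original interface):
 * a driver that keeps one mapping alive across several transfers syncs it
 * before every CPU access.  pdev, handle, buf, len and process_data() are
 * hypothetical driver names.
 *
 *      pci_dma_sync_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
 *      process_data(buf);
 *      ... hand the dma address back to the device for the next transfer ...
 *
 * Only when the buffer is finally retired does the driver call
 * pci_unmap_single().
 */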

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
                                   struct scatterlist *sg, int nelems,
                                   int direction)
{
        if (direction == PCI_DMA_NONE)
                BUG();

        /* Nothing to do */
}
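
/*
 * Illustrative sketch (an assumption, not part of the original interface):
 * the scatter-gather analogue of the single-buffer sync above, called with
 * the same sglist and nents that were passed to pci_map_sg().
 *
 *      pci_dma_sync_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 *      ... the CPU may now safely read the data behind sglist ...
 */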

#endif /* _ASM_BFIN_PCI_H */