x86, AMD IOMMU: add address allocation and deallocation functions
arch/x86/kernel/amd_iommu.c
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/amd_iommu_types.h>

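/* sets the command type into bits [31:28] of the second command word */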
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

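/*
 * calculates the number of pages needed to map a buffer of 'size' bytes
 * starting at 'addr', taking the offset into the first page into account
 */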
#define to_pages(addr, size) \
         (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

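/* one entry in the command ring buffer: four 32 bit words per command */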
struct command {
        u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e);

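/*
 * Writes the command to the IOMMUs command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held;
 * returns -ENOMEM when the command ring is full.
 */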
static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
{
        u32 tail, head;
        u8 *target;

        tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        target = (iommu->cmd_buf + tail);
        memcpy_toio(target, cmd, sizeof(*cmd));
        tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
        head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        if (tail == head)
                return -ENOMEM;
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        return 0;
}

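/*
 * General queuing function for commands. Takes iommu->lock and calls
 * the internal __iommu_queue_command() function.
 */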
static int iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command(iommu, cmd);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}

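/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed all previous commands. It queues a COMPLETION_WAIT command
 * which instructs the hardware to store the value 1 to 'ready', then
 * busy-waits until that write becomes visible.
 */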
static int iommu_completion_wait(struct amd_iommu *iommu)
{
        int ret;
        struct command cmd;
        volatile u64 ready = 0;
        unsigned long ready_phys = virt_to_phys(&ready);

        memset(&cmd, 0, sizeof(cmd));
        cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
        cmd.data[1] = HIGH_U32(ready_phys);
        cmd.data[2] = 1; /* value written to 'ready' */
        CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

        iommu->need_sync = 0;

        ret = iommu_queue_command(iommu, &cmd);

        if (ret)
                return ret;

        while (!ready)
                cpu_relax();

        return 0;
}

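/* Command send function for invalidating a device table entry */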
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
        struct command cmd;

        BUG_ON(iommu == NULL);

        memset(&cmd, 0, sizeof(cmd));
        CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
        cmd.data[0] = devid;

        iommu->need_sync = 1;

        return iommu_queue_command(iommu, &cmd);
}

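/*
 * Generic command send function for invalidating IOMMU TLB entries.
 * The 's' flag requests a range (size) invalidation, 'pde' also flushes
 * cached page directory entries.
 */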
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
                u64 address, u16 domid, int pde, int s)
{
        struct command cmd;

        memset(&cmd, 0, sizeof(cmd));
        address &= PAGE_MASK;
        CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
        cmd.data[1] |= domid;
        cmd.data[2] = LOW_U32(address);
        cmd.data[3] = HIGH_U32(address);
        if (s)
                cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
        if (pde)
                cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

        iommu->need_sync = 1;

        return iommu_queue_command(iommu, &cmd);
}

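/*
 * TLB invalidation helper for the mapping code. It flushes the given
 * address range for a domain one page at a time.
 */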
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
                u64 address, size_t size)
{
        int i;
        unsigned pages = to_pages(address, size);

        address &= PAGE_MASK;

        for (i = 0; i < pages; ++i) {
                iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 0);
                address += PAGE_SIZE;
        }

        return 0;
}

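/*
 * Generic mapping function. It maps one page identified by its bus
 * address to the given physical address in a protection domain's
 * three-level page table, allocating missing page table levels on
 * demand.
 */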
static int iommu_map(struct protection_domain *dom,
                     unsigned long bus_addr,
                     unsigned long phys_addr,
                     int prot)
{
        u64 __pte, *pte, *page;

        bus_addr  = PAGE_ALIGN(bus_addr);
        phys_addr = PAGE_ALIGN(phys_addr);

        /* only support 512GB address spaces for now */
        if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
                return -EINVAL;

        pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

        if (!IOMMU_PTE_PRESENT(*pte)) {
                page = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                *pte = IOMMU_L2_PDE(virt_to_phys(page));
        }

        pte = IOMMU_PTE_PAGE(*pte);
        pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

        if (!IOMMU_PTE_PRESENT(*pte)) {
                page = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                *pte = IOMMU_L1_PDE(virt_to_phys(page));
        }

        pte = IOMMU_PTE_PAGE(*pte);
        pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

        if (IOMMU_PTE_PRESENT(*pte))
                return -EBUSY;

        __pte = phys_addr | IOMMU_PTE_P;
        if (prot & IOMMU_PROT_IR)
                __pte |= IOMMU_PTE_IR;
        if (prot & IOMMU_PROT_IW)
                __pte |= IOMMU_PTE_IW;

        *pte = __pte;

        return 0;
}

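/*
 * This function checks whether a given unity mapping entry covers at
 * least one device handled by this IOMMU.
 */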
static int iommu_for_unity_map(struct amd_iommu *iommu,
                               struct unity_map_entry *entry)
{
        u16 bdf, i;

        for (i = entry->devid_start; i <= entry->devid_end; ++i) {
                bdf = amd_iommu_alias_table[i];
                if (amd_iommu_rlookup_table[bdf] == iommu)
                        return 1;
        }

        return 0;
}

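/*
 * Initializes the unity mappings relevant for a specific IOMMU by
 * applying all matching entries to its default domain.
 */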
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
        struct unity_map_entry *entry;
        int ret;

        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
                if (!iommu_for_unity_map(iommu, entry))
                        continue;
                ret = dma_ops_unity_map(iommu->default_dom, entry);
                if (ret)
                        return ret;
        }

        return 0;
}

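/*
 * This function actually applies a unity mapping to the page table of
 * a dma_ops domain. Every page in the range is identity mapped; pages
 * inside the aperture are also marked as allocated in the address
 * allocator bitmap so they are never handed out for DMA.
 */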
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e)
{
        u64 addr;
        int ret;

        for (addr = e->address_start; addr < e->address_end;
             addr += PAGE_SIZE) {
                ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
                if (ret)
                        return ret;
                /*
                 * if unity mapping is in aperture range mark the page
                 * as allocated in the aperture
                 */
                if (addr < dma_dom->aperture_size)
                        __set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
        }

        return 0;
}

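/*
 * Initializes the unity mappings required for a specific device by
 * applying every unity map entry whose device range covers its devid.
 */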
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
                                          u16 devid)
{
        struct unity_map_entry *e;
        int ret;

        list_for_each_entry(e, &amd_iommu_unity_map, list) {
                if (!(devid >= e->devid_start && devid <= e->devid_end))
                        continue;
                ret = dma_ops_unity_map(dma_dom, e);
                if (ret)
                        return ret;
        }

        return 0;
}

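/*
 * Converts a DMA mask to the number of pages addressable below that
 * mask, rounding a partial last page up to a full one.
 */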
static unsigned long dma_mask_to_pages(unsigned long mask)
{
        return (mask >> PAGE_SHIFT) +
                (PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
}

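/*
 * The address allocator core function. Allocates 'pages' contiguous
 * pages of IO virtual address space from the aperture bitmap, honoring
 * the device's DMA mask and segment boundary. The search starts at
 * dom->next_bit and retries once from the beginning; on failure
 * bad_dma_address is returned.
 */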
static unsigned long dma_ops_alloc_addresses(struct device *dev,
                                             struct dma_ops_domain *dom,
                                             unsigned int pages)
{
        unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
        unsigned long address;
        unsigned long size = dom->aperture_size >> PAGE_SHIFT;
        unsigned long boundary_size;

        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                        PAGE_SIZE) >> PAGE_SHIFT;
        limit = limit < size ? limit : size;

        if (dom->next_bit >= limit)
                dom->next_bit = 0;

        address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
                        0, boundary_size, 0);
        if (address == -1)
                address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
                                0, boundary_size, 0);

        if (likely(address != -1)) {
                set_bit_string(dom->bitmap, address, pages);
                dom->next_bit = address + pages;
                address <<= PAGE_SHIFT;
        } else
                address = bad_dma_address;

        WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

        return address;
}

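/*
 * The address free function. Clears the allocated range in the aperture
 * bitmap again; expected to be paired with a successful
 * dma_ops_alloc_addresses() call.
 */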
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
                                   unsigned long address,
                                   unsigned int pages)
{
        address >>= PAGE_SHIFT;
        iommu_area_free(dom->bitmap, address, pages);
}