arch/x86/kernel/pci-calgary_64.c
1 /*
2  * Derived from arch/powerpc/kernel/iommu.c
3  *
4  * Copyright IBM Corporation, 2006-2007
5  * Copyright (C) 2006  Jon Mason <jdmason@kudzu.us>
6  *
7  * Author: Jon Mason <jdmason@kudzu.us>
8  * Author: Muli Ben-Yehuda <muli@il.ibm.com>
 9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
23  */
24
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/types.h>
28 #include <linux/slab.h>
29 #include <linux/mm.h>
30 #include <linux/spinlock.h>
31 #include <linux/string.h>
32 #include <linux/crash_dump.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/pci_ids.h>
36 #include <linux/pci.h>
37 #include <linux/delay.h>
38 #include <linux/scatterlist.h>
39 #include <linux/iommu-helper.h>
40
41 #include <asm/iommu.h>
42 #include <asm/calgary.h>
43 #include <asm/tce.h>
44 #include <asm/pci-direct.h>
45 #include <asm/system.h>
46 #include <asm/dma.h>
47 #include <asm/rio.h>
48 #include <asm/bios_ebda.h>
49
50 #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
51 int use_calgary __read_mostly = 1;
52 #else
53 int use_calgary __read_mostly = 0;
 54 #endif /* CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT */
55
56 #define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
57 #define PCI_DEVICE_ID_IBM_CALIOC2 0x0308
58
59 /* register offsets inside the host bridge space */
60 #define CALGARY_CONFIG_REG      0x0108
61 #define PHB_CSR_OFFSET          0x0110 /* Channel Status */
62 #define PHB_PLSSR_OFFSET        0x0120
63 #define PHB_CONFIG_RW_OFFSET    0x0160
64 #define PHB_IOBASE_BAR_LOW      0x0170
65 #define PHB_IOBASE_BAR_HIGH     0x0180
66 #define PHB_MEM_1_LOW           0x0190
67 #define PHB_MEM_1_HIGH          0x01A0
68 #define PHB_IO_ADDR_SIZE        0x01B0
69 #define PHB_MEM_1_SIZE          0x01C0
70 #define PHB_MEM_ST_OFFSET       0x01D0
71 #define PHB_AER_OFFSET          0x0200
72 #define PHB_CONFIG_0_HIGH       0x0220
73 #define PHB_CONFIG_0_LOW        0x0230
74 #define PHB_CONFIG_0_END        0x0240
75 #define PHB_MEM_2_LOW           0x02B0
76 #define PHB_MEM_2_HIGH          0x02C0
77 #define PHB_MEM_2_SIZE_HIGH     0x02D0
78 #define PHB_MEM_2_SIZE_LOW      0x02E0
79 #define PHB_DOSHOLE_OFFSET      0x08E0
80
81 /* CalIOC2 specific */
82 #define PHB_SAVIOR_L2           0x0DB0
83 #define PHB_PAGE_MIG_CTRL       0x0DA8
84 #define PHB_PAGE_MIG_DEBUG      0x0DA0
85 #define PHB_ROOT_COMPLEX_STATUS 0x0CB0
86
87 /* PHB_CONFIG_RW */
88 #define PHB_TCE_ENABLE          0x20000000
89 #define PHB_SLOT_DISABLE        0x1C000000
90 #define PHB_DAC_DISABLE         0x01000000
91 #define PHB_MEM2_ENABLE         0x00400000
92 #define PHB_MCSR_ENABLE         0x00100000
93 /* TAR (Table Address Register) */
94 #define TAR_SW_BITS             0x0000ffffffff800fUL
95 #define TAR_VALID               0x0000000000000008UL
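/*
 * Rough TAR layout under software control, inferred from TAR_SW_BITS and
 * from calgary_setup_tar() below: TCE table base address in bits 47:15,
 * the valid bit at bit 3, and the table size code in bits 2:0.
 */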
96 /* CSR (Channel/DMA Status Register) */
97 #define CSR_AGENT_MASK          0xffe0ffff
98 /* CCR (Calgary Configuration Register) */
99 #define CCR_2SEC_TIMEOUT        0x000000000000000EUL
 100 /* PMCR/PMDR (Page Migration Control/Debug Registers) */
101 #define PMR_SOFTSTOP            0x80000000
102 #define PMR_SOFTSTOPFAULT       0x40000000
103 #define PMR_HARDSTOP            0x20000000
104
105 #define MAX_NUM_OF_PHBS         8 /* how many PHBs in total? */
106 #define MAX_NUM_CHASSIS         8 /* max number of chassis */
107 /* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
108 #define MAX_PHB_BUS_NUM         (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
109 #define PHBS_PER_CALGARY        4
110
111 /* register offsets in Calgary's internal register space */
112 static const unsigned long tar_offsets[] = {
113         0x0580 /* TAR0 */,
114         0x0588 /* TAR1 */,
115         0x0590 /* TAR2 */,
116         0x0598 /* TAR3 */
117 };
118
119 static const unsigned long split_queue_offsets[] = {
120         0x4870 /* SPLIT QUEUE 0 */,
121         0x5870 /* SPLIT QUEUE 1 */,
122         0x6870 /* SPLIT QUEUE 2 */,
123         0x7870 /* SPLIT QUEUE 3 */
124 };
125
126 static const unsigned long phb_offsets[] = {
127         0x8000 /* PHB0 */,
128         0x9000 /* PHB1 */,
129         0xA000 /* PHB2 */,
130         0xB000 /* PHB3 */
131 };
132
133 /* PHB debug registers */
134
135 static const unsigned long phb_debug_offsets[] = {
136         0x4000  /* PHB 0 DEBUG */,
137         0x5000  /* PHB 1 DEBUG */,
138         0x6000  /* PHB 2 DEBUG */,
139         0x7000  /* PHB 3 DEBUG */
140 };
141
142 /*
143  * STUFF register for each debug PHB,
144  * byte 1 = start bus number, byte 2 = end bus number
145  */
146
147 #define PHB_DEBUG_STUFF_OFFSET  0x0020
148
149 #define EMERGENCY_PAGES 32 /* = 128KB */
150
151 unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
152 static int translate_empty_slots __read_mostly = 0;
153 static int calgary_detected __read_mostly = 0;
154
155 static struct rio_table_hdr     *rio_table_hdr __initdata;
156 static struct scal_detail       *scal_devs[MAX_NUMNODES] __initdata;
157 static struct rio_detail        *rio_devs[MAX_NUMNODES * 4] __initdata;
158
159 struct calgary_bus_info {
160         void *tce_space;
161         unsigned char translation_disabled;
162         signed char phbid;
163         void __iomem *bbar;
164 };
165
166 static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
167 static void calgary_tce_cache_blast(struct iommu_table *tbl);
168 static void calgary_dump_error_regs(struct iommu_table *tbl);
169 static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
170 static void calioc2_tce_cache_blast(struct iommu_table *tbl);
171 static void calioc2_dump_error_regs(struct iommu_table *tbl);
172 static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
173 static void get_tce_space_from_tar(void);
174
175 static struct cal_chipset_ops calgary_chip_ops = {
176         .handle_quirks = calgary_handle_quirks,
177         .tce_cache_blast = calgary_tce_cache_blast,
178         .dump_error_regs = calgary_dump_error_regs
179 };
180
181 static struct cal_chipset_ops calioc2_chip_ops = {
182         .handle_quirks = calioc2_handle_quirks,
183         .tce_cache_blast = calioc2_tce_cache_blast,
184         .dump_error_regs = calioc2_dump_error_regs
185 };
186
187 static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
188
189 /* enable this to stress test the chip's TCE cache */
190 #ifdef CONFIG_IOMMU_DEBUG
191 static int debugging = 1;
192
193 static inline unsigned long verify_bit_range(unsigned long* bitmap,
194         int expected, unsigned long start, unsigned long end)
195 {
196         unsigned long idx = start;
197
198         BUG_ON(start >= end);
199
200         while (idx < end) {
201                 if (!!test_bit(idx, bitmap) != expected)
202                         return idx;
203                 ++idx;
204         }
205
206         /* all bits have the expected value */
207         return ~0UL;
208 }
209 #else /* debugging is disabled */
210 static int debugging;
211
212 static inline unsigned long verify_bit_range(unsigned long* bitmap,
213         int expected, unsigned long start, unsigned long end)
214 {
215         return ~0UL;
216 }
217
218 #endif /* CONFIG_IOMMU_DEBUG */
219
220 static inline int translation_enabled(struct iommu_table *tbl)
221 {
222         /* only PHBs with translation enabled have an IOMMU table */
223         return (tbl != NULL);
224 }
225
226 static void iommu_range_reserve(struct iommu_table *tbl,
227         unsigned long start_addr, unsigned int npages)
228 {
229         unsigned long index;
230         unsigned long end;
231         unsigned long badbit;
232         unsigned long flags;
233
234         index = start_addr >> PAGE_SHIFT;
235
236         /* bail out if we're asked to reserve a region we don't cover */
237         if (index >= tbl->it_size)
238                 return;
239
240         end = index + npages;
241         if (end > tbl->it_size) /* don't go off the table */
242                 end = tbl->it_size;
243
244         spin_lock_irqsave(&tbl->it_lock, flags);
245
246         badbit = verify_bit_range(tbl->it_map, 0, index, end);
247         if (badbit != ~0UL) {
248                 if (printk_ratelimit())
249                         printk(KERN_ERR "Calgary: entry already allocated at "
250                                "0x%lx tbl %p dma 0x%lx npages %u\n",
251                                badbit, tbl, start_addr, npages);
252         }
253
254         iommu_area_reserve(tbl->it_map, index, npages);
255
256         spin_unlock_irqrestore(&tbl->it_lock, flags);
257 }
258
259 static unsigned long iommu_range_alloc(struct device *dev,
260                                        struct iommu_table *tbl,
261                                        unsigned int npages)
262 {
263         unsigned long flags;
264         unsigned long offset;
265         unsigned long boundary_size;
266
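        /*
         * Allocations must not cross the device's DMA segment boundary;
         * express that boundary in pages for iommu_area_alloc() below.
         */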
267         boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
268                               PAGE_SIZE) >> PAGE_SHIFT;
269
270         BUG_ON(npages == 0);
271
272         spin_lock_irqsave(&tbl->it_lock, flags);
273
274         offset = iommu_area_alloc(tbl->it_map, tbl->it_size, tbl->it_hint,
275                                   npages, 0, boundary_size, 0);
276         if (offset == ~0UL) {
277                 tbl->chip_ops->tce_cache_blast(tbl);
278
279                 offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
280                                           npages, 0, boundary_size, 0);
281                 if (offset == ~0UL) {
282                         printk(KERN_WARNING "Calgary: IOMMU full.\n");
283                         spin_unlock_irqrestore(&tbl->it_lock, flags);
284                         if (panic_on_overflow)
285                                 panic("Calgary: fix the allocator.\n");
286                         else
287                                 return bad_dma_address;
288                 }
289         }
290
291         tbl->it_hint = offset + npages;
292         BUG_ON(tbl->it_hint > tbl->it_size);
293
294         spin_unlock_irqrestore(&tbl->it_lock, flags);
295
296         return offset;
297 }
298
299 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
300                               void *vaddr, unsigned int npages, int direction)
301 {
302         unsigned long entry;
303         dma_addr_t ret = bad_dma_address;
304
305         entry = iommu_range_alloc(dev, tbl, npages);
306
307         if (unlikely(entry == bad_dma_address))
308                 goto error;
309
310         /* set the return dma address */
311         ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
312
313         /* put the TCEs in the HW table */
314         tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
315                   direction);
316
317         return ret;
318
319 error:
320         printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
321                "iommu %p\n", npages, tbl);
322         return bad_dma_address;
323 }
324
325 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
326         unsigned int npages)
327 {
328         unsigned long entry;
329         unsigned long badbit;
330         unsigned long badend;
331         unsigned long flags;
332
333         /* were we called with bad_dma_address? */
334         badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
335         if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
336                 WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
337                        "address 0x%Lx\n", dma_addr);
338                 return;
339         }
340
341         entry = dma_addr >> PAGE_SHIFT;
342
343         BUG_ON(entry + npages > tbl->it_size);
344
345         tce_free(tbl, entry, npages);
346
347         spin_lock_irqsave(&tbl->it_lock, flags);
348
349         badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages);
350         if (badbit != ~0UL) {
351                 if (printk_ratelimit())
352                         printk(KERN_ERR "Calgary: bit is off at 0x%lx "
353                                "tbl %p dma 0x%Lx entry 0x%lx npages %u\n",
354                                badbit, tbl, dma_addr, entry, npages);
355         }
356
357         iommu_area_free(tbl->it_map, entry, npages);
358
359         spin_unlock_irqrestore(&tbl->it_lock, flags);
360 }
361
362 static inline struct iommu_table *find_iommu_table(struct device *dev)
363 {
364         struct pci_dev *pdev;
365         struct pci_bus *pbus;
366         struct iommu_table *tbl;
367
368         pdev = to_pci_dev(dev);
369
370         pbus = pdev->bus;
371
372         /* is the device behind a bridge? Look for the root bus */
373         while (pbus->parent)
374                 pbus = pbus->parent;
375
376         tbl = pci_iommu(pbus);
377
378         BUG_ON(tbl && (tbl->it_busno != pbus->number));
379
380         return tbl;
381 }
382
383 static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
 384                              int nelems, enum dma_data_direction dir,
385                              struct dma_attrs *attrs)
386 {
387         struct iommu_table *tbl = find_iommu_table(dev);
388         struct scatterlist *s;
389         int i;
390
391         if (!translation_enabled(tbl))
392                 return;
393
394         for_each_sg(sglist, s, nelems, i) {
395                 unsigned int npages;
396                 dma_addr_t dma = s->dma_address;
397                 unsigned int dmalen = s->dma_length;
398
399                 if (dmalen == 0)
400                         break;
401
402                 npages = iommu_num_pages(dma, dmalen, PAGE_SIZE);
403                 iommu_free(tbl, dma, npages);
404         }
405 }
406
407 static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
408                           int nelems, enum dma_data_direction dir,
409                           struct dma_attrs *attrs)
410 {
411         struct iommu_table *tbl = find_iommu_table(dev);
412         struct scatterlist *s;
413         unsigned long vaddr;
414         unsigned int npages;
415         unsigned long entry;
416         int i;
417
418         for_each_sg(sg, s, nelems, i) {
419                 BUG_ON(!sg_page(s));
420
421                 vaddr = (unsigned long) sg_virt(s);
422                 npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
423
424                 entry = iommu_range_alloc(dev, tbl, npages);
425                 if (entry == bad_dma_address) {
426                         /* makes sure unmap knows to stop */
427                         s->dma_length = 0;
428                         goto error;
429                 }
430
431                 s->dma_address = (entry << PAGE_SHIFT) | s->offset;
432
433                 /* insert into HW table */
434                 tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
435
436                 s->dma_length = s->length;
437         }
438
439         return nelems;
440 error:
441         calgary_unmap_sg(dev, sg, nelems, dir, NULL);
442         for_each_sg(sg, s, nelems, i) {
 443                 s->dma_address = bad_dma_address;
 444                 s->dma_length = 0;
445         }
446         return 0;
447 }
448
449 static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
450                                    unsigned long offset, size_t size,
451                                    enum dma_data_direction dir,
452                                    struct dma_attrs *attrs)
453 {
454         void *vaddr = page_address(page) + offset;
455         unsigned long uaddr;
456         unsigned int npages;
457         struct iommu_table *tbl = find_iommu_table(dev);
458
459         uaddr = (unsigned long)vaddr;
460         npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
461
462         return iommu_alloc(dev, tbl, vaddr, npages, dir);
463 }
464
465 static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
466                                size_t size, enum dma_data_direction dir,
467                                struct dma_attrs *attrs)
468 {
469         struct iommu_table *tbl = find_iommu_table(dev);
470         unsigned int npages;
471
472         npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
473         iommu_free(tbl, dma_addr, npages);
474 }
475
476 static void* calgary_alloc_coherent(struct device *dev, size_t size,
477         dma_addr_t *dma_handle, gfp_t flag)
478 {
479         void *ret = NULL;
480         dma_addr_t mapping;
481         unsigned int npages, order;
482         struct iommu_table *tbl = find_iommu_table(dev);
483
484         size = PAGE_ALIGN(size); /* size rounded up to full pages */
485         npages = size >> PAGE_SHIFT;
486         order = get_order(size);
487
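        /*
         * All DMA goes through the IOMMU, so there is no need to restrict
         * the allocation to a low-memory zone; strip the zone modifiers.
         */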
488         flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
489
490         /* alloc enough pages (and possibly more) */
491         ret = (void *)__get_free_pages(flag, order);
492         if (!ret)
493                 goto error;
494         memset(ret, 0, size);
495
496         /* set up tces to cover the allocated range */
497         mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
498         if (mapping == bad_dma_address)
499                 goto free;
500         *dma_handle = mapping;
501         return ret;
502 free:
503         free_pages((unsigned long)ret, get_order(size));
504         ret = NULL;
505 error:
506         return ret;
507 }
508
509 static void calgary_free_coherent(struct device *dev, size_t size,
510                                   void *vaddr, dma_addr_t dma_handle)
511 {
512         unsigned int npages;
513         struct iommu_table *tbl = find_iommu_table(dev);
514
515         size = PAGE_ALIGN(size);
516         npages = size >> PAGE_SHIFT;
517
518         iommu_free(tbl, dma_handle, npages);
519         free_pages((unsigned long)vaddr, get_order(size));
520 }
521
522 static struct dma_map_ops calgary_dma_ops = {
523         .alloc_coherent = calgary_alloc_coherent,
524         .free_coherent = calgary_free_coherent,
525         .map_sg = calgary_map_sg,
526         .unmap_sg = calgary_unmap_sg,
527         .map_page = calgary_map_page,
528         .unmap_page = calgary_unmap_page,
529 };
530
531 static inline void __iomem * busno_to_bbar(unsigned char num)
532 {
533         return bus_info[num].bbar;
534 }
535
536 static inline int busno_to_phbid(unsigned char num)
537 {
538         return bus_info[num].phbid;
539 }
540
541 static inline unsigned long split_queue_offset(unsigned char num)
542 {
543         size_t idx = busno_to_phbid(num);
544
545         return split_queue_offsets[idx];
546 }
547
548 static inline unsigned long tar_offset(unsigned char num)
549 {
550         size_t idx = busno_to_phbid(num);
551
552         return tar_offsets[idx];
553 }
554
555 static inline unsigned long phb_offset(unsigned char num)
556 {
557         size_t idx = busno_to_phbid(num);
558
559         return phb_offsets[idx];
560 }
561
562 static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
563 {
564         unsigned long target = ((unsigned long)bar) | offset;
565         return (void __iomem*)target;
566 }
567
568 static inline int is_calioc2(unsigned short device)
569 {
570         return (device == PCI_DEVICE_ID_IBM_CALIOC2);
571 }
572
573 static inline int is_calgary(unsigned short device)
574 {
575         return (device == PCI_DEVICE_ID_IBM_CALGARY);
576 }
577
578 static inline int is_cal_pci_dev(unsigned short device)
579 {
580         return (is_calgary(device) || is_calioc2(device));
581 }
582
583 static void calgary_tce_cache_blast(struct iommu_table *tbl)
584 {
585         u64 val;
586         u32 aer;
587         int i = 0;
588         void __iomem *bbar = tbl->bbar;
589         void __iomem *target;
590
591         /* disable arbitration on the bus */
592         target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
593         aer = readl(target);
594         writel(0, target);
595
596         /* read plssr to ensure it got there */
597         target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
598         val = readl(target);
599
600         /* poll split queues until all DMA activity is done */
601         target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
602         do {
603                 val = readq(target);
604                 i++;
605         } while ((val & 0xff) != 0xff && i < 100);
606         if (i == 100)
607                 printk(KERN_WARNING "Calgary: PCI bus not quiesced, "
608                        "continuing anyway\n");
609
610         /* invalidate TCE cache */
611         target = calgary_reg(bbar, tar_offset(tbl->it_busno));
612         writeq(tbl->tar_val, target);
613
614         /* enable arbitration */
615         target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
616         writel(aer, target);
617         (void)readl(target); /* flush */
618 }
619
620 static void calioc2_tce_cache_blast(struct iommu_table *tbl)
621 {
622         void __iomem *bbar = tbl->bbar;
623         void __iomem *target;
624         u64 val64;
625         u32 val;
626         int i = 0;
627         int count = 1;
628         unsigned char bus = tbl->it_busno;
629
630 begin:
631         printk(KERN_DEBUG "Calgary: CalIOC2 bus 0x%x entering tce cache blast "
632                "sequence - count %d\n", bus, count);
633
634         /* 1. using the Page Migration Control reg set SoftStop */
635         target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
636         val = be32_to_cpu(readl(target));
637         printk(KERN_DEBUG "1a. read 0x%x [LE] from %p\n", val, target);
638         val |= PMR_SOFTSTOP;
639         printk(KERN_DEBUG "1b. writing 0x%x [LE] to %p\n", val, target);
640         writel(cpu_to_be32(val), target);
641
642         /* 2. poll split queues until all DMA activity is done */
643         printk(KERN_DEBUG "2a. starting to poll split queues\n");
644         target = calgary_reg(bbar, split_queue_offset(bus));
645         do {
646                 val64 = readq(target);
647                 i++;
648         } while ((val64 & 0xff) != 0xff && i < 100);
649         if (i == 100)
650                 printk(KERN_WARNING "CalIOC2: PCI bus not quiesced, "
651                        "continuing anyway\n");
652
653         /* 3. poll Page Migration DEBUG for SoftStopFault */
654         target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
655         val = be32_to_cpu(readl(target));
656         printk(KERN_DEBUG "3. read 0x%x [LE] from %p\n", val, target);
657
658         /* 4. if SoftStopFault - goto (1) */
659         if (val & PMR_SOFTSTOPFAULT) {
660                 if (++count < 100)
661                         goto begin;
662                 else {
663                         printk(KERN_WARNING "CalIOC2: too many SoftStopFaults, "
664                                "aborting TCE cache flush sequence!\n");
665                         return; /* pray for the best */
666                 }
667         }
668
669         /* 5. Slam into HardStop by reading PHB_PAGE_MIG_CTRL */
670         target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
671         printk(KERN_DEBUG "5a. slamming into HardStop by reading %p\n", target);
672         val = be32_to_cpu(readl(target));
673         printk(KERN_DEBUG "5b. read 0x%x [LE] from %p\n", val, target);
674         target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
675         val = be32_to_cpu(readl(target));
676         printk(KERN_DEBUG "5c. read 0x%x [LE] from %p (debug)\n", val, target);
677
678         /* 6. invalidate TCE cache */
679         printk(KERN_DEBUG "6. invalidating TCE cache\n");
680         target = calgary_reg(bbar, tar_offset(bus));
681         writeq(tbl->tar_val, target);
682
683         /* 7. Re-read PMCR */
684         printk(KERN_DEBUG "7a. Re-reading PMCR\n");
685         target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
686         val = be32_to_cpu(readl(target));
687         printk(KERN_DEBUG "7b. read 0x%x [LE] from %p\n", val, target);
688
689         /* 8. Remove HardStop */
690         printk(KERN_DEBUG "8a. removing HardStop from PMCR\n");
691         target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
692         val = 0;
693         printk(KERN_DEBUG "8b. writing 0x%x [LE] to %p\n", val, target);
694         writel(cpu_to_be32(val), target);
695         val = be32_to_cpu(readl(target));
696         printk(KERN_DEBUG "8c. read 0x%x [LE] from %p\n", val, target);
697 }
698
699 static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
700         u64 limit)
701 {
702         unsigned int numpages;
703
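        /*
         * Round the limit up to the next 1MB boundary; the peripheral
         * memory region registers appear to have 1MB granularity.
         */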
704         limit = limit | 0xfffff;
705         limit++;
706
707         numpages = ((limit - start) >> PAGE_SHIFT);
708         iommu_range_reserve(pci_iommu(dev->bus), start, numpages);
709 }
710
711 static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
712 {
713         void __iomem *target;
714         u64 low, high, sizelow;
715         u64 start, limit;
716         struct iommu_table *tbl = pci_iommu(dev->bus);
717         unsigned char busnum = dev->bus->number;
718         void __iomem *bbar = tbl->bbar;
719
720         /* peripheral MEM_1 region */
721         target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW);
722         low = be32_to_cpu(readl(target));
723         target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH);
724         high = be32_to_cpu(readl(target));
725         target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE);
726         sizelow = be32_to_cpu(readl(target));
727
728         start = (high << 32) | low;
729         limit = sizelow;
730
731         calgary_reserve_mem_region(dev, start, limit);
732 }
733
734 static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
735 {
736         void __iomem *target;
737         u32 val32;
738         u64 low, high, sizelow, sizehigh;
739         u64 start, limit;
740         struct iommu_table *tbl = pci_iommu(dev->bus);
741         unsigned char busnum = dev->bus->number;
742         void __iomem *bbar = tbl->bbar;
743
744         /* is it enabled? */
745         target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
746         val32 = be32_to_cpu(readl(target));
747         if (!(val32 & PHB_MEM2_ENABLE))
748                 return;
749
750         target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW);
751         low = be32_to_cpu(readl(target));
752         target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH);
753         high = be32_to_cpu(readl(target));
754         target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW);
755         sizelow = be32_to_cpu(readl(target));
756         target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH);
757         sizehigh = be32_to_cpu(readl(target));
758
759         start = (high << 32) | low;
760         limit = (sizehigh << 32) | sizelow;
761
762         calgary_reserve_mem_region(dev, start, limit);
763 }
764
765 /*
766  * some regions of the IO address space do not get translated, so we
767  * must not give devices IO addresses in those regions. The regions
768  * are the 640KB-1MB region and the two PCI peripheral memory holes.
769  * Reserve all of them in the IOMMU bitmap to avoid giving them out
770  * later.
771  */
772 static void __init calgary_reserve_regions(struct pci_dev *dev)
773 {
774         unsigned int npages;
775         u64 start;
776         struct iommu_table *tbl = pci_iommu(dev->bus);
777
778         /* reserve EMERGENCY_PAGES from bad_dma_address and up */
779         iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
780
781         /* avoid the BIOS/VGA first 640KB-1MB region */
782         /* for CalIOC2 - avoid the entire first MB */
783         if (is_calgary(dev->device)) {
784                 start = (640 * 1024);
785                 npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
786         } else { /* calioc2 */
787                 start = 0;
788                 npages = (1 * 1024 * 1024) >> PAGE_SHIFT;
789         }
790         iommu_range_reserve(tbl, start, npages);
791
792         /* reserve the two PCI peripheral memory regions in IO space */
793         calgary_reserve_peripheral_mem_1(dev);
794         calgary_reserve_peripheral_mem_2(dev);
795 }
796
797 static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
798 {
799         u64 val64;
800         u64 table_phys;
801         void __iomem *target;
802         int ret;
803         struct iommu_table *tbl;
804
805         /* build TCE tables for each PHB */
806         ret = build_tce_table(dev, bbar);
807         if (ret)
808                 return ret;
809
810         tbl = pci_iommu(dev->bus);
811         tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
812
813         if (is_kdump_kernel())
814                 calgary_init_bitmap_from_tce_table(tbl);
815         else
816                 tce_free(tbl, 0, tbl->it_size);
817
818         if (is_calgary(dev->device))
819                 tbl->chip_ops = &calgary_chip_ops;
820         else if (is_calioc2(dev->device))
821                 tbl->chip_ops = &calioc2_chip_ops;
822         else
823                 BUG();
824
825         calgary_reserve_regions(dev);
826
827         /* set TARs for each PHB */
828         target = calgary_reg(bbar, tar_offset(dev->bus->number));
829         val64 = be64_to_cpu(readq(target));
830
831         /* zero out all TAR bits under sw control */
832         val64 &= ~TAR_SW_BITS;
833         table_phys = (u64)__pa(tbl->it_base);
834
835         val64 |= table_phys;
836
837         BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
838         val64 |= (u64) specified_table_size;
839
840         tbl->tar_val = cpu_to_be64(val64);
841
842         writeq(tbl->tar_val, target);
843         readq(target); /* flush */
844
845         return 0;
846 }
847
848 static void __init calgary_free_bus(struct pci_dev *dev)
849 {
850         u64 val64;
851         struct iommu_table *tbl = pci_iommu(dev->bus);
852         void __iomem *target;
853         unsigned int bitmapsz;
854
855         target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
856         val64 = be64_to_cpu(readq(target));
857         val64 &= ~TAR_SW_BITS;
858         writeq(cpu_to_be64(val64), target);
859         readq(target); /* flush */
860
861         bitmapsz = tbl->it_size / BITS_PER_BYTE;
862         free_pages((unsigned long)tbl->it_map, get_order(bitmapsz));
863         tbl->it_map = NULL;
864
865         kfree(tbl);
 866
867         set_pci_iommu(dev->bus, NULL);
868
869         /* Can't free bootmem allocated memory after system is up :-( */
870         bus_info[dev->bus->number].tce_space = NULL;
871 }
872
873 static void calgary_dump_error_regs(struct iommu_table *tbl)
874 {
875         void __iomem *bbar = tbl->bbar;
876         void __iomem *target;
877         u32 csr, plssr;
878
879         target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
880         csr = be32_to_cpu(readl(target));
881
882         target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
883         plssr = be32_to_cpu(readl(target));
884
885         /* If no error, the agent ID in the CSR is not valid */
886         printk(KERN_EMERG "Calgary: DMA error on Calgary PHB 0x%x, "
887                "0x%08x@CSR 0x%08x@PLSSR\n", tbl->it_busno, csr, plssr);
888 }
889
890 static void calioc2_dump_error_regs(struct iommu_table *tbl)
891 {
892         void __iomem *bbar = tbl->bbar;
893         u32 csr, csmr, plssr, mck, rcstat;
894         void __iomem *target;
895         unsigned long phboff = phb_offset(tbl->it_busno);
896         unsigned long erroff;
897         u32 errregs[7];
898         int i;
899
900         /* dump CSR */
901         target = calgary_reg(bbar, phboff | PHB_CSR_OFFSET);
902         csr = be32_to_cpu(readl(target));
903         /* dump PLSSR */
904         target = calgary_reg(bbar, phboff | PHB_PLSSR_OFFSET);
905         plssr = be32_to_cpu(readl(target));
906         /* dump CSMR */
907         target = calgary_reg(bbar, phboff | 0x290);
908         csmr = be32_to_cpu(readl(target));
909         /* dump mck */
910         target = calgary_reg(bbar, phboff | 0x800);
911         mck = be32_to_cpu(readl(target));
912
913         printk(KERN_EMERG "Calgary: DMA error on CalIOC2 PHB 0x%x\n",
914                tbl->it_busno);
915
916         printk(KERN_EMERG "Calgary: 0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
917                csr, plssr, csmr, mck);
918
919         /* dump rest of error regs */
920         printk(KERN_EMERG "Calgary: ");
921         for (i = 0; i < ARRAY_SIZE(errregs); i++) {
922                 /* err regs are at 0x810 - 0x870 */
923                 erroff = (0x810 + (i * 0x10));
924                 target = calgary_reg(bbar, phboff | erroff);
925                 errregs[i] = be32_to_cpu(readl(target));
926                 printk("0x%08x@0x%lx ", errregs[i], erroff);
927         }
928         printk("\n");
929
930         /* root complex status */
931         target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS);
932         rcstat = be32_to_cpu(readl(target));
933         printk(KERN_EMERG "Calgary: 0x%08x@0x%x\n", rcstat,
934                PHB_ROOT_COMPLEX_STATUS);
935 }
936
937 static void calgary_watchdog(unsigned long data)
938 {
939         struct pci_dev *dev = (struct pci_dev *)data;
940         struct iommu_table *tbl = pci_iommu(dev->bus);
941         void __iomem *bbar = tbl->bbar;
942         u32 val32;
943         void __iomem *target;
944
945         target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
946         val32 = be32_to_cpu(readl(target));
947
948         /* If no error, the agent ID in the CSR is not valid */
949         if (val32 & CSR_AGENT_MASK) {
950                 tbl->chip_ops->dump_error_regs(tbl);
951
952                 /* reset error */
953                 writel(0, target);
954
955                 /* Disable bus that caused the error */
956                 target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
957                                      PHB_CONFIG_RW_OFFSET);
958                 val32 = be32_to_cpu(readl(target));
959                 val32 |= PHB_SLOT_DISABLE;
960                 writel(cpu_to_be32(val32), target);
961                 readl(target); /* flush */
962         } else {
963                 /* Reset the timer */
964                 mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
965         }
966 }
967
968 static void __init calgary_set_split_completion_timeout(void __iomem *bbar,
969         unsigned char busnum, unsigned long timeout)
970 {
971         u64 val64;
972         void __iomem *target;
973         unsigned int phb_shift = ~0; /* silence gcc */
974         u64 mask;
975
976         switch (busno_to_phbid(busnum)) {
977         case 0: phb_shift = (63 - 19);
978                 break;
979         case 1: phb_shift = (63 - 23);
980                 break;
981         case 2: phb_shift = (63 - 27);
982                 break;
983         case 3: phb_shift = (63 - 35);
984                 break;
985         default:
986                 BUG_ON(busno_to_phbid(busnum));
987         }
988
989         target = calgary_reg(bbar, CALGARY_CONFIG_REG);
990         val64 = be64_to_cpu(readq(target));
991
992         /* zero out this PHB's timer bits */
993         mask = ~(0xFUL << phb_shift);
994         val64 &= mask;
995         val64 |= (timeout << phb_shift);
996         writeq(cpu_to_be64(val64), target);
997         readq(target); /* flush */
998 }
999
1000 static void __init calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
1001 {
1002         unsigned char busnum = dev->bus->number;
1003         void __iomem *bbar = tbl->bbar;
1004         void __iomem *target;
1005         u32 val;
1006
1007         /*
1008          * CalIOC2 designers recommend setting bit 8 in 0xnDB0 to 1
1009          */
1010         target = calgary_reg(bbar, phb_offset(busnum) | PHB_SAVIOR_L2);
1011         val = cpu_to_be32(readl(target));
1012         val |= 0x00800000;
1013         writel(cpu_to_be32(val), target);
1014 }
1015
1016 static void __init calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
1017 {
1018         unsigned char busnum = dev->bus->number;
1019
1020         /*
1021          * Give split completion a longer timeout on bus 1 for aic94xx
1022          * http://bugzilla.kernel.org/show_bug.cgi?id=7180
1023          */
1024         if (is_calgary(dev->device) && (busnum == 1))
1025                 calgary_set_split_completion_timeout(tbl->bbar, busnum,
1026                                                      CCR_2SEC_TIMEOUT);
1027 }
1028
1029 static void __init calgary_enable_translation(struct pci_dev *dev)
1030 {
1031         u32 val32;
1032         unsigned char busnum;
1033         void __iomem *target;
1034         void __iomem *bbar;
1035         struct iommu_table *tbl;
1036
1037         busnum = dev->bus->number;
1038         tbl = pci_iommu(dev->bus);
1039         bbar = tbl->bbar;
1040
1041         /* enable TCE in PHB Config Register */
1042         target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
1043         val32 = be32_to_cpu(readl(target));
1044         val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;
1045
1046         printk(KERN_INFO "Calgary: enabling translation on %s PHB %#x\n",
1047                (dev->device == PCI_DEVICE_ID_IBM_CALGARY) ?
1048                "Calgary" : "CalIOC2", busnum);
1049         printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
1050                "bus.\n");
1051
1052         writel(cpu_to_be32(val32), target);
1053         readl(target); /* flush */
1054
1055         init_timer(&tbl->watchdog_timer);
1056         tbl->watchdog_timer.function = &calgary_watchdog;
1057         tbl->watchdog_timer.data = (unsigned long)dev;
1058         mod_timer(&tbl->watchdog_timer, jiffies);
1059 }
1060
1061 static void __init calgary_disable_translation(struct pci_dev *dev)
1062 {
1063         u32 val32;
1064         unsigned char busnum;
1065         void __iomem *target;
1066         void __iomem *bbar;
1067         struct iommu_table *tbl;
1068
1069         busnum = dev->bus->number;
1070         tbl = pci_iommu(dev->bus);
1071         bbar = tbl->bbar;
1072
1073         /* disable TCE in PHB Config Register */
1074         target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
1075         val32 = be32_to_cpu(readl(target));
1076         val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);
1077
1078         printk(KERN_INFO "Calgary: disabling translation on PHB %#x!\n", busnum);
1079         writel(cpu_to_be32(val32), target);
1080         readl(target); /* flush */
1081
1082         del_timer_sync(&tbl->watchdog_timer);
1083 }
1084
 1085 static void __init calgary_init_one_nontranslated(struct pci_dev *dev)
1086 {
1087         pci_dev_get(dev);
1088         set_pci_iommu(dev->bus, NULL);
1089
1090         /* is the device behind a bridge? */
1091         if (dev->bus->parent)
1092                 dev->bus->parent->self = dev;
1093         else
1094                 dev->bus->self = dev;
1095 }
1096
1097 static int __init calgary_init_one(struct pci_dev *dev)
1098 {
1099         void __iomem *bbar;
1100         struct iommu_table *tbl;
1101         int ret;
1102
1103         BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
1104
1105         bbar = busno_to_bbar(dev->bus->number);
1106         ret = calgary_setup_tar(dev, bbar);
1107         if (ret)
1108                 goto done;
1109
1110         pci_dev_get(dev);
1111
1112         if (dev->bus->parent) {
1113                 if (dev->bus->parent->self)
1114                         printk(KERN_WARNING "Calgary: IEEEE, dev %p has "
1115                                "bus->parent->self!\n", dev);
1116                 dev->bus->parent->self = dev;
1117         } else
1118                 dev->bus->self = dev;
1119
1120         tbl = pci_iommu(dev->bus);
1121         tbl->chip_ops->handle_quirks(tbl, dev);
1122
1123         calgary_enable_translation(dev);
1124
1125         return 0;
1126
1127 done:
1128         return ret;
1129 }
1130
1131 static int __init calgary_locate_bbars(void)
1132 {
1133         int ret;
1134         int rioidx, phb, bus;
1135         void __iomem *bbar;
1136         void __iomem *target;
1137         unsigned long offset;
1138         u8 start_bus, end_bus;
1139         u32 val;
1140
1141         ret = -ENODATA;
1142         for (rioidx = 0; rioidx < rio_table_hdr->num_rio_dev; rioidx++) {
1143                 struct rio_detail *rio = rio_devs[rioidx];
1144
1145                 if ((rio->type != COMPAT_CALGARY) && (rio->type != ALT_CALGARY))
1146                         continue;
1147
1148                 /* map entire 1MB of Calgary config space */
1149                 bbar = ioremap_nocache(rio->BBAR, 1024 * 1024);
1150                 if (!bbar)
1151                         goto error;
1152
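                /*
                 * For each PHB behind this Calgary, read its STUFF debug
                 * register to find the range of PCI bus numbers it serves,
                 * then record the bbar/phbid mapping for those buses.
                 */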
1153                 for (phb = 0; phb < PHBS_PER_CALGARY; phb++) {
1154                         offset = phb_debug_offsets[phb] | PHB_DEBUG_STUFF_OFFSET;
1155                         target = calgary_reg(bbar, offset);
1156
1157                         val = be32_to_cpu(readl(target));
1158
1159                         start_bus = (u8)((val & 0x00FF0000) >> 16);
1160                         end_bus = (u8)((val & 0x0000FF00) >> 8);
1161
1162                         if (end_bus) {
1163                                 for (bus = start_bus; bus <= end_bus; bus++) {
1164                                         bus_info[bus].bbar = bbar;
1165                                         bus_info[bus].phbid = phb;
1166                                 }
1167                         } else {
1168                                 bus_info[start_bus].bbar = bbar;
1169                                 bus_info[start_bus].phbid = phb;
1170                         }
1171                 }
1172         }
1173
1174         return 0;
1175
1176 error:
1177         /* scan bus_info and iounmap any bbars we previously ioremap'd */
1178         for (bus = 0; bus < ARRAY_SIZE(bus_info); bus++)
1179                 if (bus_info[bus].bbar)
1180                         iounmap(bus_info[bus].bbar);
1181
1182         return ret;
1183 }
1184
1185 static int __init calgary_init(void)
1186 {
1187         int ret;
1188         struct pci_dev *dev = NULL;
1189         struct calgary_bus_info *info;
1190
1191         ret = calgary_locate_bbars();
1192         if (ret)
1193                 return ret;
1194
1195         /* Purely for kdump kernel case */
1196         if (is_kdump_kernel())
1197                 get_tce_space_from_tar();
1198
1199         do {
1200                 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
1201                 if (!dev)
1202                         break;
1203                 if (!is_cal_pci_dev(dev->device))
1204                         continue;
1205
1206                 info = &bus_info[dev->bus->number];
1207                 if (info->translation_disabled) {
 1208                         calgary_init_one_nontranslated(dev);
1209                         continue;
1210                 }
1211
1212                 if (!info->tce_space && !translate_empty_slots)
1213                         continue;
1214
1215                 ret = calgary_init_one(dev);
1216                 if (ret)
1217                         goto error;
1218         } while (1);
1219
1220         dev = NULL;
1221         for_each_pci_dev(dev) {
1222                 struct iommu_table *tbl;
1223
1224                 tbl = find_iommu_table(&dev->dev);
1225
1226                 if (translation_enabled(tbl))
1227                         dev->dev.archdata.dma_ops = &calgary_dma_ops;
1228         }
1229
1230         return ret;
1231
1232 error:
1233         do {
1234                 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
1235                 if (!dev)
1236                         break;
1237                 if (!is_cal_pci_dev(dev->device))
1238                         continue;
1239
1240                 info = &bus_info[dev->bus->number];
1241                 if (info->translation_disabled) {
1242                         pci_dev_put(dev);
1243                         continue;
1244                 }
1245                 if (!info->tce_space && !translate_empty_slots)
1246                         continue;
1247
1248                 calgary_disable_translation(dev);
1249                 calgary_free_bus(dev);
1250                 pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
1251                 dev->dev.archdata.dma_ops = NULL;
1252         } while (1);
1253
1254         return ret;
1255 }
1256
1257 static inline int __init determine_tce_table_size(u64 ram)
1258 {
1259         int ret;
1260
1261         if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
1262                 return specified_table_size;
1263
1264         /*
1265          * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
1266          * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
1267          * larger table size has twice as many entries, so shift the
1268          * max ram address by 13 to divide by 8K and then look at the
1269          * order of the result to choose between 0-7.
1270          */
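        /*
         * Worked example (assuming 4KB pages): with 1GB of RAM,
         * ram >> 13 = 128KB and get_order(128KB) = 5 = TCE_TABLE_SIZE_2M,
         * i.e. 8K << 5 = 256K entries - enough to map the whole 1GB at
         * one page per TCE entry.
         */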
1271         ret = get_order(ram >> 13);
1272         if (ret > TCE_TABLE_SIZE_8M)
1273                 ret = TCE_TABLE_SIZE_8M;
1274
1275         return ret;
1276 }
1277
1278 static int __init build_detail_arrays(void)
1279 {
1280         unsigned long ptr;
1281         unsigned numnodes, i;
1282         int scal_detail_size, rio_detail_size;
1283
1284         numnodes = rio_table_hdr->num_scal_dev;
1285         if (numnodes > MAX_NUMNODES){
1286                 printk(KERN_WARNING
1287                         "Calgary: MAX_NUMNODES too low! Defined as %d, "
1288                         "but system has %d nodes.\n",
1289                         MAX_NUMNODES, numnodes);
1290                 return -ENODEV;
1291         }
1292
1293         switch (rio_table_hdr->version){
1294         case 2:
1295                 scal_detail_size = 11;
1296                 rio_detail_size = 13;
1297                 break;
1298         case 3:
1299                 scal_detail_size = 12;
1300                 rio_detail_size = 15;
1301                 break;
1302         default:
1303                 printk(KERN_WARNING
1304                        "Calgary: Invalid Rio Grande Table Version: %d\n",
1305                        rio_table_hdr->version);
1306                 return -EPROTO;
1307         }
1308
1309         ptr = ((unsigned long)rio_table_hdr) + 3;
1310         for (i = 0; i < numnodes; i++, ptr += scal_detail_size)
1311                 scal_devs[i] = (struct scal_detail *)ptr;
1312
1313         for (i = 0; i < rio_table_hdr->num_rio_dev;
1314                     i++, ptr += rio_detail_size)
1315                 rio_devs[i] = (struct rio_detail *)ptr;
1316
1317         return 0;
1318 }
1319
1320 static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
1321 {
1322         int dev;
1323         u32 val;
1324
1325         if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
1326                 /*
 1327                  * FIXME: properly scan for devices across the
1328                  * PCI-to-PCI bridge on every CalIOC2 port.
1329                  */
1330                 return 1;
1331         }
1332
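        /*
         * Probe device numbers 1-7 on this bus; a config read of dword 0
         * returns all-ones when no device responds.
         */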
1333         for (dev = 1; dev < 8; dev++) {
1334                 val = read_pci_config(bus, dev, 0, 0);
1335                 if (val != 0xffffffff)
1336                         break;
1337         }
1338         return (val != 0xffffffff);
1339 }
1340
1341 /*
1342  * calgary_init_bitmap_from_tce_table():
 1343  * Function for the kdump case. In the second (kdump) kernel, initialize
 1344  * the bitmap based on the TCE table entries obtained from the first kernel.
1345  */
1346 static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
1347 {
1348         u64 *tp;
1349         unsigned int index;
1350         tp = ((u64 *)tbl->it_base);
1351         for (index = 0 ; index < tbl->it_size; index++) {
1352                 if (*tp != 0x0)
1353                         set_bit(index, tbl->it_map);
1354                 tp++;
1355         }
1356 }
1357
1358 /*
1359  * get_tce_space_from_tar():
 1360  * Function for the kdump case. Get the TCE tables from the first kernel
 1361  * by reading the contents of the base address register of the Calgary IOMMU.
1362  */
1363 static void __init get_tce_space_from_tar(void)
1364 {
1365         int bus;
1366         void __iomem *target;
1367         unsigned long tce_space;
1368
1369         for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
1370                 struct calgary_bus_info *info = &bus_info[bus];
1371                 unsigned short pci_device;
1372                 u32 val;
1373
1374                 val = read_pci_config(bus, 0, 0, 0);
1375                 pci_device = (val & 0xFFFF0000) >> 16;
1376
1377                 if (!is_cal_pci_dev(pci_device))
1378                         continue;
1379                 if (info->translation_disabled)
1380                         continue;
1381
1382                 if (calgary_bus_has_devices(bus, pci_device) ||
1383                                                 translate_empty_slots) {
1384                         target = calgary_reg(bus_info[bus].bbar,
1385                                                 tar_offset(bus));
1386                         tce_space = be64_to_cpu(readq(target));
1387                         tce_space = tce_space & TAR_SW_BITS;
1388
1389                         tce_space = tce_space & (~specified_table_size);
1390                         info->tce_space = (u64 *)__va(tce_space);
1391                 }
1392         }
1393         return;
1394 }
1395
1396 void __init detect_calgary(void)
1397 {
1398         int bus;
1399         void *tbl;
1400         int calgary_found = 0;
1401         unsigned long ptr;
1402         unsigned int offset, prev_offset;
1403         int ret;
1404
1405         /*
1406          * if the user specified iommu=off or iommu=soft or we found
1407          * another HW IOMMU already, bail out.
1408          */
1409         if (swiotlb || no_iommu || iommu_detected)
1410                 return;
1411
1412         if (!use_calgary)
1413                 return;
1414
1415         if (!early_pci_allowed())
1416                 return;
1417
1418         printk(KERN_DEBUG "Calgary: detecting Calgary via BIOS EBDA area\n");
1419
1420         ptr = (unsigned long)phys_to_virt(get_bios_ebda());
1421
1422         rio_table_hdr = NULL;
1423         prev_offset = 0;
1424         offset = 0x180;
1425         /*
1426          * The next offset is stored in the 1st word.
 1427          * Keep parsing only while each new offset is larger than the previous one:
1428          */
1429         while (offset > prev_offset) {
1430                 /* The block id is stored in the 2nd word */
1431                 if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){
1432                         /* set the pointer past the offset & block id */
1433                         rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
1434                         break;
1435                 }
1436                 prev_offset = offset;
1437                 offset = *((unsigned short *)(ptr + offset));
1438         }
1439         if (!rio_table_hdr) {
1440                 printk(KERN_DEBUG "Calgary: Unable to locate Rio Grande table "
1441                        "in EBDA - bailing!\n");
1442                 return;
1443         }
1444
1445         ret = build_detail_arrays();
1446         if (ret) {
1447                 printk(KERN_DEBUG "Calgary: build_detail_arrays ret %d\n", ret);
1448                 return;
1449         }
1450
1451         specified_table_size = determine_tce_table_size((is_kdump_kernel() ?
1452                                         saved_max_pfn : max_pfn) * PAGE_SIZE);
1453
1454         for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
1455                 struct calgary_bus_info *info = &bus_info[bus];
1456                 unsigned short pci_device;
1457                 u32 val;
1458
1459                 val = read_pci_config(bus, 0, 0, 0);
1460                 pci_device = (val & 0xFFFF0000) >> 16;
1461
1462                 if (!is_cal_pci_dev(pci_device))
1463                         continue;
1464
1465                 if (info->translation_disabled)
1466                         continue;
1467
1468                 if (calgary_bus_has_devices(bus, pci_device) ||
1469                     translate_empty_slots) {
1470                         /*
 1471                          * In a kdump kernel, reuse the TCE tables from the
 1472                          * first kernel; otherwise allocate TCE tables here.
1473                          */
1474                         if (!is_kdump_kernel()) {
1475                                 tbl = alloc_tce_table();
1476                                 if (!tbl)
1477                                         goto cleanup;
1478                                 info->tce_space = tbl;
1479                         }
1480                         calgary_found = 1;
1481                 }
1482         }
1483
1484         printk(KERN_DEBUG "Calgary: finished detection, Calgary %s\n",
1485                calgary_found ? "found" : "not found");
1486
1487         if (calgary_found) {
1488                 iommu_detected = 1;
1489                 calgary_detected = 1;
1490                 printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected.\n");
1491                 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, "
1492                        "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size,
1493                        debugging ? "enabled" : "disabled");
1494
1495                 /* swiotlb for devices that aren't behind the Calgary. */
1496                 if (max_pfn > MAX_DMA32_PFN)
1497                         swiotlb = 1;
1498         }
1499         return;
1500
1501 cleanup:
1502         for (--bus; bus >= 0; --bus) {
1503                 struct calgary_bus_info *info = &bus_info[bus];
1504
1505                 if (info->tce_space)
1506                         free_tce_table(info->tce_space);
1507         }
1508 }
1509
1510 int __init calgary_iommu_init(void)
1511 {
1512         int ret;
1513
1514         if (no_iommu || (swiotlb && !calgary_detected))
1515                 return -ENODEV;
1516
1517         if (!calgary_detected)
1518                 return -ENODEV;
1519
1520         /* ok, we're trying to use Calgary - let's roll */
1521         printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
1522
1523         ret = calgary_init();
1524         if (ret) {
1525                 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
1526                        "falling back to no_iommu\n", ret);
1527                 return ret;
1528         }
1529
1530         force_iommu = 1;
1531         bad_dma_address = 0x0;
1532         /* dma_ops is set to swiotlb or nommu */
1533         if (!dma_ops)
1534                 dma_ops = &nommu_dma_ops;
1535
1536         return 0;
1537 }
1538
1539 static int __init calgary_parse_options(char *p)
1540 {
1541         unsigned int bridge;
1542         size_t len;
1543         char* endp;
1544
1545         while (*p) {
1546                 if (!strncmp(p, "64k", 3))
1547                         specified_table_size = TCE_TABLE_SIZE_64K;
1548                 else if (!strncmp(p, "128k", 4))
1549                         specified_table_size = TCE_TABLE_SIZE_128K;
1550                 else if (!strncmp(p, "256k", 4))
1551                         specified_table_size = TCE_TABLE_SIZE_256K;
1552                 else if (!strncmp(p, "512k", 4))
1553                         specified_table_size = TCE_TABLE_SIZE_512K;
1554                 else if (!strncmp(p, "1M", 2))
1555                         specified_table_size = TCE_TABLE_SIZE_1M;
1556                 else if (!strncmp(p, "2M", 2))
1557                         specified_table_size = TCE_TABLE_SIZE_2M;
1558                 else if (!strncmp(p, "4M", 2))
1559                         specified_table_size = TCE_TABLE_SIZE_4M;
1560                 else if (!strncmp(p, "8M", 2))
1561                         specified_table_size = TCE_TABLE_SIZE_8M;
1562
1563                 len = strlen("translate_empty_slots");
1564                 if (!strncmp(p, "translate_empty_slots", len))
1565                         translate_empty_slots = 1;
1566
1567                 len = strlen("disable");
1568                 if (!strncmp(p, "disable", len)) {
1569                         p += len;
1570                         if (*p == '=')
1571                                 ++p;
1572                         if (*p == '\0')
1573                                 break;
1574                         bridge = simple_strtoul(p, &endp, 0);
1575                         if (p == endp)
1576                                 break;
1577
1578                         if (bridge < MAX_PHB_BUS_NUM) {
1579                                 printk(KERN_INFO "Calgary: disabling "
1580                                        "translation for PHB %#x\n", bridge);
1581                                 bus_info[bridge].translation_disabled = 1;
1582                         }
1583                 }
1584
1585                 p = strpbrk(p, ",");
1586                 if (!p)
1587                         break;
1588
1589                 p++; /* skip ',' */
1590         }
1591         return 1;
1592 }
1593 __setup("calgary=", calgary_parse_options);
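/*
 * Illustrative (hypothetical) boot-parameter examples accepted by the
 * parser above; options may be combined, separated by commas:
 *   calgary=64k                    - use 64KB TCE tables
 *   calgary=translate_empty_slots  - also translate PHBs with no devices
 *   calgary=disable=0x3            - disable translation on PHB 0x3
 */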
1594
1595 static void __init calgary_fixup_one_tce_space(struct pci_dev *dev)
1596 {
1597         struct iommu_table *tbl;
1598         unsigned int npages;
1599         int i;
1600
1601         tbl = pci_iommu(dev->bus);
1602
1603         for (i = 0; i < 4; i++) {
1604                 struct resource *r = &dev->resource[PCI_BRIDGE_RESOURCES + i];
1605
1606                 /* Don't give out TCEs that map MEM resources */
1607                 if (!(r->flags & IORESOURCE_MEM))
1608                         continue;
1609
1610                 /* 0-based? we reserve the whole 1st MB anyway */
1611                 if (!r->start)
1612                         continue;
1613
1614                 /* cover the whole region */
1615                 npages = (r->end - r->start) >> PAGE_SHIFT;
1616                 npages++;
1617
1618                 iommu_range_reserve(tbl, r->start, npages);
1619         }
1620 }
1621
1622 static int __init calgary_fixup_tce_spaces(void)
1623 {
1624         struct pci_dev *dev = NULL;
1625         struct calgary_bus_info *info;
1626
1627         if (no_iommu || swiotlb || !calgary_detected)
1628                 return -ENODEV;
1629
1630         printk(KERN_DEBUG "Calgary: fixing up tce spaces\n");
1631
1632         do {
1633                 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
1634                 if (!dev)
1635                         break;
1636                 if (!is_cal_pci_dev(dev->device))
1637                         continue;
1638
1639                 info = &bus_info[dev->bus->number];
1640                 if (info->translation_disabled)
1641                         continue;
1642
1643                 if (!info->tce_space)
1644                         continue;
1645
1646                 calgary_fixup_one_tce_space(dev);
1647
1648         } while (1);
1649
1650         return 0;
1651 }
1652
1653 /*
 1654  * We need to be called after pcibios_assign_resources (fs_initcall level)
1655  * and before device_initcall.
1656  */
1657 rootfs_initcall(calgary_fixup_tce_spaces);