/*
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
static inline void __flush_itlb_all (void)
{
	int way, index;

	for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
		for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
			int entry = way + (index << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync (entry);
		}
	}
	asm volatile ("isync\n");
}
static inline void __flush_dtlb_all (void)
{
	int way, index;

	for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
		for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
			int entry = way + (index << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync (entry);
		}
	}
	asm volatile ("isync\n");
}
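/*
 * Note on the entry encoding above (descriptive, not from the original
 * comments): the argument handed to invalidate_{i,d}tlb_entry_no_isync()
 * carries the way number in its low bits and the set index in the
 * virtual-page-number field, hence "way + (index << PAGE_SHIFT)".
 * Assuming 4 KiB pages (PAGE_SHIFT == 12), way 2 / index 3 would be
 * encoded as 0x3002.
 */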
void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}
/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries.  If mm is someone else's user
 * mapping, we invalidate its context, so that a new context is assigned
 * to it when that mapping is next switched in.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);

	if (mm == current->active_mm) {
		unsigned long flags;
		local_save_flags(flags);
		get_new_mmu_context(mm, asid_cache);
		set_rasid_register(ASID_INSERT(mm->context));
		local_irq_restore(flags);
	} else
		mm->context = NO_CONTEXT;
}
void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
	       (unsigned long)mm->context, start, end);

	local_save_flags(flags);
	if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();
		set_rasid_register (ASID_INSERT(mm->context));
		start &= PAGE_MASK;

		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		get_new_mmu_context(mm, asid_cache);
		if (mm == current->active_mm)
			set_rasid_register(ASID_INSERT(mm->context));
	}
	local_irq_restore(flags);
}
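/*
 * Worked example for the threshold above (illustrative figures, assuming
 * 4 KiB pages and SMALLEST_NTLB_ENTRIES == 4): ranges of up to about
 * 4 << 12 == 16 KiB are flushed page by page under the target ASID, while
 * anything larger simply gets a fresh ASID, which makes every stale entry
 * for that mm unreachable without walking the TLB ways.
 */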
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct* mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	printk("[tlbpage<%02lx,%08lx>]\n",
	       (unsigned long)mm->context, page);

	if (mm->context == NO_CONTEXT)
		return;

	local_save_flags(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}
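/*
 * Background note (not in the original comments): the per-mapping
 * invalidate_{i,d}tlb_mapping() probes match against the current RASID
 * register, so the target mm's ASID is installed for the duration of the
 * probe and the caller's previous RASID value (oldpid) is restored
 * afterwards.
 */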
struct way_config_t {
	int indicies;
	int indicies_log2;
	int pgsz_log2;
	int arf;
};
static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
{
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
	}
};
static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
{
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
	}
};
/* Total number of entries: */
#define ITLB_TOTAL_ENTRIES \
	(XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES))

#define DTLB_TOTAL_ENTRIES \
	(XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES))
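/* These totals size the static itlb_dump_info[] / dtlb_dump_info[] dump
 * arrays declared below; the enclosing parentheses keep the summed macro
 * safe to use inside larger expressions. */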
typedef struct {
	unsigned	va;
	unsigned	pa;
	unsigned char	asid;
	unsigned char	ca;
	unsigned char	way;
	unsigned char	index;
	unsigned char	pgsz_log2;	/* 0 .. 32 */
	unsigned char	type;		/* 0=ITLB 1=DTLB */
} tlb_dump_entry_t;
/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
{
	if (a->asid < b->asid) return -1;
	if (a->asid > b->asid) return 1;
	if (a->va < b->va) return -1;
	if (a->va > b->va) return 1;
	if (a->pa < b->pa) return -1;
	if (a->pa > b->pa) return 1;
	if (a->ca < b->ca) return -1;
	if (a->ca > b->ca) return 1;
	if (a->way < b->way) return -1;
	if (a->way > b->way) return 1;
	if (a->index < b->index) return -1;
	if (a->index > b->index) return 1;
	return 0;
}
void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
{
	int i, j;

	/* Simple O(n*n) sort: */
	for (i = 0; i < n-1; i++)
		for (j = i+1; j < n; j++)
			if (cmp_tlb_dump_info(t+i, t+j) > 0) {
				tlb_dump_entry_t tmp = t[i];
				t[i] = t[j];
				t[j] = tmp;
			}
}
static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];

static inline char *way_type (int type)
{
	return type ? "autorefill" : "non-autorefill";
}
void print_entry (struct way_config_t *way_info,
		  unsigned int way,
		  unsigned int index,
		  unsigned int virtual,
		  unsigned int translation)
{
	char valid_chr;
	unsigned int va, pa, asid, ca;

	va = virtual &
	     ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
	asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
	pa = translation & ~((1 << way_info->pgsz_log2) - 1);
	ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
	valid_chr = asid ? 'V' : 'I';

	/* Compute and incorporate the effect of the index bits on the
	 * va.  It's more useful for kernel debugging, since we always
	 * want to know the effective va anyway. */

	va += index << way_info->pgsz_log2;

	printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
		way, index, valid_chr, va, pa, asid, ca);
}
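/*
 * Layout reminder (descriptive, with illustrative numbers): the "virtual"
 * word read from a TLB entry holds VPN | ASID and the "translation" word
 * holds PPN | CA, which is exactly how the masks above split them.
 * Assuming 4 KiB pages and an 8-bit ASID, a virtual word of 0xd0000001
 * decodes to va 0xd0000000 with asid 0x01 and is shown as a (V)alid entry.
 */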
void print_itlb_entry (struct way_config_t *way_info, int way, int index)
{
	print_entry (way_info, way, index,
		     read_itlb_virtual (way + (index << way_info->pgsz_log2)),
		     read_itlb_translation (way + (index << way_info->pgsz_log2)));
}
void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
{
	print_entry (way_info, way, index,
		     read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
		     read_dtlb_translation (way + (index << way_info->pgsz_log2)));
}
void dump_itlb (void)
{
	int way, index;

	printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);

	for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
		printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
			way, itlb[way].indicies,
			itlb[way].pgsz_log2, way_type(itlb[way].arf));
		for (index = 0; index < itlb[way].indicies; index++) {
			print_itlb_entry(&itlb[way], way, index);
		}
	}
}
void dump_dtlb (void)
{
	int way, index;

	printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);

	for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
		printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
			way, dtlb[way].indicies,
			dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
		for (index = 0; index < dtlb[way].indicies; index++) {
			print_dtlb_entry(&dtlb[way], way, index);
		}
	}
}
void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
	       int entries, int ways, int type, int show_invalid)
{
	tlb_dump_entry_t *e = tinfo;
	int way, i;

	/* Gather all info: */
	for (way = 0; way < ways; way++) {
		struct way_config_t *cfg = config + way;
		for (i = 0; i < cfg->indicies; i++) {
			unsigned wayindex = way + (i << cfg->pgsz_log2);
			unsigned vv = (type ? read_dtlb_virtual (wayindex)
					    : read_itlb_virtual (wayindex));
			unsigned pp = (type ? read_dtlb_translation (wayindex)
					    : read_itlb_translation (wayindex));

			/* Compute and incorporate the effect of the index bits on the
			 * va.  It's more useful for kernel debugging, since we always
			 * want to know the effective va anyway. */

			e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
			e->va += (i << cfg->pgsz_log2);
			e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
			e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
			e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
			e->way = way;
			e->index = i;
			e->pgsz_log2 = cfg->pgsz_log2;
			e->type = type;
			e++;
		}
	}

	/* Sort by ASID and VADDR: */
	sort_tlb_dump_info (tinfo, entries);

	/* Display all sorted info: */
	printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
	for (e = tinfo, i = 0; i < entries; i++, e++) {
		if (e->asid == 0 && !show_invalid)
			continue;
		printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
			(e->type ? 'D' : 'I'), e->way, e->index,
			e->asid, e->va, e->pa, e->ca,
			(1 << (e->pgsz_log2 % 10)),
			" kMG"[e->pgsz_log2 / 10]);
	}
}
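/*
 * The "(%d %cB)" pair above turns pgsz_log2 into a human-readable size
 * (illustrative arithmetic): pgsz_log2 == 12 gives 1 << (12 % 10) == 4
 * and " kMG"[12 / 10] == 'k', i.e. "4 kB"; pgsz_log2 == 22 likewise
 * prints "4 MB".
 */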
void dump_tlbs2 (int showinv)
{
	dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
	dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
}

void dump_all_tlbs (void)
{
	dump_tlbs2 (1);
}

void dump_valid_tlbs (void)
{
	dump_tlbs2 (0);
}

void dump_tlbs (void)
{
	dump_valid_tlbs ();
}
void dump_cache_tag(int dcache, int idx)
{
	int w, i, s, e;
	unsigned long tag, index;
	unsigned long num_lines, num_ways, cache_size, line_size;

	num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
	cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
	line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;

	num_lines = cache_size / num_ways;

	s = 0; e = num_lines;

	if (idx >= 0)
		e = (s = idx * line_size) + 1;

	for (i = s; i < e; i+= line_size) {
		printk("\nline %#08x:", i);
		for (w = 0; w < num_ways; w++) {
			index = w * num_lines + i;
			if (dcache)
				__asm__ __volatile__("ldct %0, %1\n\t"
						     : "=a"(tag) : "a"(index));
			else
				__asm__ __volatile__("lict %0, %1\n\t"
						     : "=a"(tag) : "a"(index));

			printk(" %#010lx", tag);
		}
	}
	printk("\n");
}
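/*
 * Background note (not from the original comments): ldct and lict are the
 * Xtensa cache-test instructions that read a data- or instruction-cache
 * tag word for the way/line encoded in the address operand; the value
 * printed above therefore contains the physical tag plus the line state
 * bits (valid, and dirty on write-back data caches).
 */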
void dump_icache(int index)
{
	unsigned long data, addr;
	int w, i;

	const unsigned long num_ways = XCHAL_ICACHE_WAYS;
	const unsigned long cache_size = XCHAL_ICACHE_SIZE;
	const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
	const unsigned long num_lines = cache_size / num_ways / line_size;

	for (w = 0; w < num_ways; w++) {
		printk ("\nWay %d", w);

		for (i = 0; i < line_size; i+= 4) {
			addr = w * num_lines + index * line_size + i;
			__asm__ __volatile__("licw %0, %1\n\t"
					     : "=a"(data) : "a"(addr));
			printk(" %#010lx", data);
		}
	}
	printk("\n");
}
void dump_cache_tags(void)
{
	printk("Instruction cache\n");
	dump_cache_tag(0, -1);
	printk("Data cache\n");
	dump_cache_tag(1, -1);
}