2 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
3 * Adapted for the alpha wildfire architecture Jan 2001.
8 #include <linux/config.h>
11 struct bootmem_data_t; /* stupid forward decl. */
14 * Following are macros that are specific to this numa platform.
17 extern pg_data_t node_data[];
/*
 * Machine-vector dispatch: each macro calls the platform hook in
 * alpha_mv when the platform provides one, otherwise falls back to a
 * single-node default (node 0 owns everything; other nodes are empty).
 * NOTE(review): the fallback values for alpha_pa_to_nid/node_mem_start
 * were reconstructed from the surviving node_mem_size pattern -- verify
 * against the platform machine-vector definitions.
 */
#define alpha_pa_to_nid(pa)		\
	(alpha_mv.pa_to_nid		\
	 ? alpha_mv.pa_to_nid(pa)	\
	 : (0))
#define node_mem_start(nid)		\
	(alpha_mv.node_mem_start	\
	 ? alpha_mv.node_mem_start(nid)	\
	 : (~0UL))
#define node_mem_size(nid)		\
	(alpha_mv.node_mem_size		\
	 ? alpha_mv.node_mem_size(nid)	\
	 : ((nid) ? (0UL) : (~0UL)))
/* Generic NUMA interface, expressed via the platform hooks above. */
#define pa_to_nid(pa)		alpha_pa_to_nid(pa)
/* pg_data_t of node @nid. */
#define NODE_DATA(nid)		(&node_data[(nid)])

/* Page frame number @pfn relative to the first pfn of node @nid. */
#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
/*
 * PLAT_NODE_DATA_LOCALNR(p, n): index of physical address @p within
 * node @n's local mem_map.  Macro and inline forms are equivalent;
 * the macro form is the one normally compiled.  The #if/#else guards
 * were lost in extraction and are restored here -- without them the
 * two definitions conflict.
 */
#if 1
#define PLAT_NODE_DATA_LOCALNR(p, n)	\
	(((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
#else
static inline unsigned long
PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
{
	unsigned long temp;
	temp = p >> PAGE_SHIFT;
	return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
}
#endif
#ifdef CONFIG_DISCONTIGMEM

/*
 * Following are macros that each numa implementation must define.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
/* First pfn of node @nid's memory. */
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

/* Index of @kvaddr's page within its owning node's mem_map. */
#define local_mapnr(kvaddr) \
	((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)))
/*
 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
 * and returns the kaddr corresponding to first physical page in the
 * node's mem_map.
 */
#define LOCAL_BASE_ADDR(kaddr)						\
	((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
			     << PAGE_SHIFT))
/* XXX: FIXME -- wli */
/* Stub: reports every kernel address as invalid. */
#define kern_addr_valid(kaddr)	(0)

/* struct page backing kernel virtual address @kaddr. */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

/* NOTE(review): bounds-checks (page) against the global mem_map and
 * max_mapnr; on a multi-node layout confirm callers only pass node-0
 * pages through this. */
#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)
/* The pfn is kept in the upper 32 bits of a pte/pmd on this platform
 * (mk_pte below installs it with a << 32 shift). */
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> 32))
#define pte_pfn(pte)		(pte_val(pte) >> 32)
/*
 * Build a pte for @page with protection @pgprot: the global pfn (zone
 * offset of the page plus the zone's base pfn) goes in the upper 32
 * bits, the protection bits in the lower.  The GNU statement-expression
 * wrapper and local declarations were lost in extraction and are
 * restored here -- @pfn and @pte were used undeclared.
 */
#define mk_pte(page, pgprot)						     \
({									     \
	pte_t pte;							     \
	unsigned long pfn;						     \
									     \
	pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \
	pfn += page_zone(page)->zone_start_pfn << 32;			     \
	pte_val(pte) = pfn | pgprot_val(pgprot);			     \
									     \
	pte;								     \
})
/*
 * struct page a pte points at: recover the kernel virtual address of
 * the frame from the pfn field (upper 32 bits) and map it back to its
 * struct page via virt_to_page().  The "#define pte_page(x)" head and
 * statement-expression wrapper were lost in extraction and are
 * restored here.
 */
#define pte_page(x)							\
({									\
	unsigned long kvirt;						\
	struct page * __xx;						\
									\
	kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
	__xx = virt_to_page(kvirt);					\
									\
	__xx;								\
})
/*
 * struct page for @pfn: find the owning node from the pfn's kernel
 * virtual address, then index that node's mem_map.  The ({ }) wrapper
 * was lost in extraction and is restored -- the local @kaddr is only
 * legal inside a statement expression.
 */
#define pfn_to_page(pfn)						\
({									\
	unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT);	\
	(NODE_DATA(kvaddr_to_nid(kaddr))->node_mem_map + local_mapnr(kaddr)); \
})
/* Global pfn of @page: its index within the owning zone's mem_map
 * plus that zone's starting pfn. */
#define page_to_pfn(page)	\
	((page) - page_zone(page)->zone_mem_map + \
	 (page_zone(page)->zone_start_pfn))
/*
 * Physical address of @page.  Defined in terms of page_to_pfn() so the
 * two cannot drift apart; the expansion is identical to the previous
 * open-coded (zone offset + zone_start_pfn) << PAGE_SHIFT.
 */
#define page_to_pa(page)	(page_to_pfn(page) << PAGE_SHIFT)
/* Node owning @pfn; the u64 cast keeps the shift in 64-bit arithmetic. */
#define pfn_to_nid(pfn)		pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
/*
 * pfn_valid(pfn): true iff @pfn falls within the pages spanned by the
 * node that owns it.  A pfn below the node's start wraps to a huge
 * unsigned value and fails the comparison.
 * NOTE: @pfn is evaluated three times -- no side-effecting arguments.
 * Fix: removed the stray trailing '\' line-continuation, which spliced
 * whatever line followed the macro into its body.
 */
#define pfn_valid(pfn)						\
	(((pfn) - node_start_pfn(pfn_to_nid(pfn))) <		\
	 node_spanned_pages(pfn_to_nid(pfn)))
/* True iff kernel virtual address @kaddr is backed by a valid pfn. */
#define virt_addr_valid(kaddr)	pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
128 #endif /* CONFIG_DISCONTIGMEM */
130 #endif /* _ASM_MMZONE_H_ */