#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H

#ifdef CONFIG_DISCONTIGMEM

#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
extern int npmem_ranges;

struct node_map_data {
	pg_data_t pg_data;
};

extern struct node_map_data node_data[];

#define NODE_DATA(nid)		(&node_data[nid].pg_data)

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
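
/*
 * Usage sketch (illustrative only): __pa() turns a directly-mapped
 * kernel virtual address into a physical address, the PAGE_SHIFT shift
 * turns that into a page frame number, and pfn_to_nid() resolves the
 * pfn to its home node, e.g.
 *
 *	int nid = kvaddr_to_nid(kaddr);
 *
 * where kaddr stands for any directly-mapped kernel address.
 */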

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)						\
({									\
	pg_data_t *__pgdat = NODE_DATA(nid);				\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
})
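
/*
 * node_end_pfn() is a GCC statement expression that evaluates to the
 * first pfn past the node, i.e. node_start_pfn + node_spanned_pages.
 * A sketch of the intended use (the loop itself is illustrative only):
 *
 *	for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++)
 *		...;
 */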

/* We have these possible memory map layouts:
 * Astro: 0-3.75, 67.75-68, 4-64
 * zx1: 0-1, 257-260, 4-256
 * Stretch (N-class): 0-2, 4-32, 34-xxx
 */

/* Since each 1GB can only belong to one region (node), we can create
 * an index table for pfn to nid lookup; each entry in pfnnid_map
 * represents 1GB, and contains the node that the memory belongs to. */

#define PFNNID_SHIFT (30 - PAGE_SHIFT)
#define PFNNID_MAP_MAX	512	/* support 512GB */
extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
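
/*
 * Worked example, assuming 4kB pages (PAGE_SHIFT == 12): PFNNID_SHIFT
 * is 30 - 12 = 18, so pfn >> PFNNID_SHIFT selects one 1GB granule; a
 * pfn for physical address 0x80000000 (2GB) lands in pfnnid_map[2].
 * With 512 entries the table covers 512GB of physical address space.
 */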

#ifndef CONFIG_64BIT
#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
#else
/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
#endif
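
/*
 * In other words, a pfn is treated as I/O space when the top nibble of
 * its physical address is all ones.  Assuming 4kB pages, the 32-bit
 * mask is 0xf0000000 >> 12 == 0xf0000, so any pfn at or above physical
 * address 0xf0000000 matches; the 64-bit variant applies the same test
 * to the top nibble of a 64-bit physical address.
 */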

static inline int pfn_to_nid(unsigned long pfn)
{
	unsigned int i;

	/* I/O space always maps to node 0. */
	if (unlikely(pfn_is_io(pfn)))
		return 0;

	i = pfn >> PFNNID_SHIFT;
	BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0]));

	return pfnnid_map[i];
}

static inline int pfn_valid(int pfn)
{
	int nid = pfn_to_nid(pfn);

	return (pfn < node_end_pfn(nid));
}
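
/*
 * Summary of the check above: a pfn is considered valid when it lies
 * below the end pfn of the node that pfnnid_map assigns it to (I/O
 * pfns resolve to node 0).
 */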

#else /* !CONFIG_DISCONTIGMEM */
#define MAX_PHYSMEM_RANGES 1
#endif
#endif /* _PARISC_MMZONE_H */