- /*
- * NUMA support for s390
- *
- * Implement NUMA core code.
- *
- * Copyright IBM Corp. 2015
- */
- #define KMSG_COMPONENT "numa"
- #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
- #include <linux/kernel.h>
- #include <linux/mmzone.h>
- #include <linux/cpumask.h>
- #include <linux/bootmem.h>
- #include <linux/memblock.h>
- #include <linux/slab.h>
- #include <linux/node.h>
- #include <asm/numa.h>
- #include "numa_mode.h"
- pg_data_t *node_data[MAX_NUMNODES];
- EXPORT_SYMBOL(node_data);
- cpumask_t node_to_cpumask_map[MAX_NUMNODES];
- EXPORT_SYMBOL(node_to_cpumask_map);
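- /* The default "plain" mode: a single node 0 covering all memory and CPUs */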
- static void plain_setup(void)
- {
- node_set(0, node_possible_map);
- }
- const struct numa_mode numa_mode_plain = {
- .name = "plain",
- .setup = plain_setup,
- };
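- /* Active NUMA mode; "plain" unless overridden via the "numa=" early parameter */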
- static const struct numa_mode *mode = &numa_mode_plain;
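- /* Map a page frame number to its node; without a mode callback everything maps to node 0 */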
- int numa_pfn_to_nid(unsigned long pfn)
- {
- return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
- }
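- /* Let the active mode rebuild the CPU-to-node mapping, if it implements this */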
- void numa_update_cpu_topology(void)
- {
- if (mode->update_cpu_topology)
- mode->update_cpu_topology();
- }
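- /* Distance between two nodes; falls back to 0 when the mode provides no distance callback */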
- int __node_distance(int a, int b)
- {
- return mode->distance ? mode->distance(a, b) : 0;
- }
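- /* Set by the "numa_debug" early parameter */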
- int numa_debug_enabled;
- /*
- * alloc_node_data() - Allocate node data
- */
- static __init pg_data_t *alloc_node_data(void)
- {
- pg_data_t *res;
- res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
- memset(res, 0, sizeof(pg_data_t));
- return res;
- }
- /*
- * numa_setup_memory() - Assign bootmem to nodes
- *
- * Memory is first added to memblock without regard to nodes. This is
- * fixed up before the remaining memblock memory is handed over to the
- * buddy allocator.
- * An important side effect is that large bootmem allocations may cross
- * node boundaries, which can be necessary for large allocations when
- * each node only contains small memory stripes (i.e. when using NUMA
- * emulation).
- *
- * Since memory defines the nodes, this routine also sets online every
- * node that contains memory.
- */
- static void __init numa_setup_memory(void)
- {
- unsigned long cur_base, align, end_of_dram;
- int nid = 0;
- end_of_dram = memblock_end_of_DRAM();
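- /* Without a mode-specific alignment, treat all of memory as a single stripe */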
- align = mode->align ? mode->align() : ULONG_MAX;
- /*
- * Step through all available memory and assign it to the nodes
- * indicated by the mode implementation.
- * All nodes which are seen here will be set online.
- */
- cur_base = 0;
- do {
- nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
- node_set_online(nid);
- memblock_set_node(cur_base, align, &memblock.memory, nid);
- cur_base += align;
- } while (cur_base < end_of_dram);
- /* Allocate node_data for all possible nodes */
- for (nid = 0; nid < MAX_NUMNODES; nid++)
- NODE_DATA(nid) = alloc_node_data();
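- /* Fill out node_data for each online node */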
- for_each_online_node(nid) {
- unsigned long start_pfn, end_pfn;
- unsigned long t_start, t_end;
- int i;
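- /* Scan all memory ranges of this node to find its lowest and highest PFN */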
- start_pfn = ULONG_MAX;
- end_pfn = 0;
- for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
- if (t_start < start_pfn)
- start_pfn = t_start;
- if (t_end > end_pfn)
- end_pfn = t_end;
- }
- NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
- NODE_DATA(nid)->node_id = nid;
- }
- }
- /*
- * numa_setup() - Earliest initialization
- *
- * Call the selected mode's setup routine and assign memory to nodes.
- */
- void __init numa_setup(void)
- {
- pr_info("NUMA mode: %s\n", mode->name);
- nodes_clear(node_possible_map);
- if (mode->setup)
- mode->setup();
- numa_setup_memory();
- memblock_dump_all();
- }
- /*
- * numa_init_early() - Initialization initcall
- *
- * This runs while only one CPU is online, before the scheduler
- * requests the first topology update.
- */
- static int __init numa_init_early(void)
- {
- /* Attach all possible CPUs to node 0 for now. */
- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
- return 0;
- }
- early_initcall(numa_init_early);
- /*
- * numa_init_late() - Initialization initcall
- *
- * Register NUMA nodes.
- */
- static int __init numa_init_late(void)
- {
- int nid;
- for_each_online_node(nid)
- register_one_node(nid);
- return 0;
- }
- arch_initcall(numa_init_late);
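- /* "numa_debug" early parameter: enable NUMA debug messages */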
- static int __init parse_debug(char *parm)
- {
- numa_debug_enabled = 1;
- return 0;
- }
- early_param("numa_debug", parse_debug);
- static int __init parse_numa(char *parm)
- {
- if (strcmp(parm, numa_mode_plain.name) == 0)
- mode = &numa_mode_plain;
- #ifdef CONFIG_NUMA_EMU
- if (strcmp(parm, numa_mode_emu.name) == 0)
- mode = &numa_mode_emu;
- #endif
- return 0;
- }
- early_param("numa", parse_numa);