hotplug-memory.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901
  1. /*
  2. * pseries Memory Hotplug infrastructure.
  3. *
  4. * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
  12. #include <linux/of.h>
  13. #include <linux/of_address.h>
  14. #include <linux/memblock.h>
  15. #include <linux/memory.h>
  16. #include <linux/memory_hotplug.h>
  17. #include <linux/slab.h>
  18. #include <asm/firmware.h>
  19. #include <asm/machdep.h>
  20. #include <asm/prom.h>
  21. #include <asm/sparsemem.h>
  22. #include "pseries.h"
  23. static bool rtas_hp_event;
  24. unsigned long pseries_memory_block_size(void)
  25. {
  26. struct device_node *np;
  27. unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
  28. struct resource r;
  29. np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
  30. if (np) {
  31. const __be64 *size;
  32. size = of_get_property(np, "ibm,lmb-size", NULL);
  33. if (size)
  34. memblock_size = be64_to_cpup(size);
  35. of_node_put(np);
  36. } else if (machine_is(pseries)) {
  37. /* This fallback really only applies to pseries */
  38. unsigned int memzero_size = 0;
  39. np = of_find_node_by_path("/memory@0");
  40. if (np) {
  41. if (!of_address_to_resource(np, 0, &r))
  42. memzero_size = resource_size(&r);
  43. of_node_put(np);
  44. }
  45. if (memzero_size) {
  46. /* We now know the size of memory@0, use this to find
  47. * the first memoryblock and get its size.
  48. */
  49. char buf[64];
  50. sprintf(buf, "/memory@%x", memzero_size);
  51. np = of_find_node_by_path(buf);
  52. if (np) {
  53. if (!of_address_to_resource(np, 0, &r))
  54. memblock_size = resource_size(&r);
  55. of_node_put(np);
  56. }
  57. }
  58. }
  59. return memblock_size;
  60. }
  61. static void dlpar_free_property(struct property *prop)
  62. {
  63. kfree(prop->name);
  64. kfree(prop->value);
  65. kfree(prop);
  66. }
  67. static struct property *dlpar_clone_property(struct property *prop,
  68. u32 prop_size)
  69. {
  70. struct property *new_prop;
  71. new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
  72. if (!new_prop)
  73. return NULL;
  74. new_prop->name = kstrdup(prop->name, GFP_KERNEL);
  75. new_prop->value = kzalloc(prop_size, GFP_KERNEL);
  76. if (!new_prop->name || !new_prop->value) {
  77. dlpar_free_property(new_prop);
  78. return NULL;
  79. }
  80. memcpy(new_prop->value, prop->value, prop->length);
  81. new_prop->length = prop_size;
  82. of_property_set_flag(new_prop, OF_DYNAMIC);
  83. return new_prop;
  84. }
/*
 * Clone the "ibm,dynamic-memory" property of @dn and convert the copy
 * from device-tree (big-endian) byte order to CPU byte order.
 *
 * Layout: one u32 LMB count followed by an array of of_drconf_cell
 * entries.  Returns the converted clone (caller owns it: free with
 * dlpar_free_property() or pass to of_update_property()), or NULL when
 * the property is missing or allocation fails.
 */
static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
	struct property *prop, *new_prop;
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
	if (!prop)
		return NULL;

	new_prop = dlpar_clone_property(prop, prop->length);
	if (!new_prop)
		return NULL;

	/* Convert the property to cpu endian-ness */
	p = new_prop->value;
	*p = be32_to_cpu(*p);	/* first cell: number of LMB entries */

	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}

	return new_prop;
}
/*
 * Convert @prop (CPU byte order, as produced by
 * dlpar_clone_drconf_property()) back to big-endian and install it as
 * @dn's "ibm,dynamic-memory" property.
 *
 * rtas_hp_event is raised around of_update_property() so that
 * pseries_update_drconf_memory() ignores the OF notifier this update
 * fires — the DLPAR paths adjust memblock themselves.  Ownership of
 * @prop passes to the device tree.
 */
static void dlpar_update_drconf_property(struct device_node *dn,
					 struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	/* Convert the property back to BE */
	p = prop->value;
	num_lmbs = *p;		/* read count before converting it */
	*p = cpu_to_be32(*p);
	p++;

	lmbs = (struct of_drconf_cell *)p;
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}

	rtas_hp_event = true;
	of_update_property(dn, prop);
	rtas_hp_event = false;
}
  132. static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
  133. {
  134. struct device_node *dn;
  135. struct property *prop;
  136. struct of_drconf_cell *lmbs;
  137. u32 *p, num_lmbs;
  138. int i;
  139. dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
  140. if (!dn)
  141. return -ENODEV;
  142. prop = dlpar_clone_drconf_property(dn);
  143. if (!prop) {
  144. of_node_put(dn);
  145. return -ENODEV;
  146. }
  147. p = prop->value;
  148. num_lmbs = *p++;
  149. lmbs = (struct of_drconf_cell *)p;
  150. for (i = 0; i < num_lmbs; i++) {
  151. if (lmbs[i].drc_index == lmb->drc_index) {
  152. lmbs[i].flags = lmb->flags;
  153. lmbs[i].aa_index = lmb->aa_index;
  154. dlpar_update_drconf_property(dn, prop);
  155. break;
  156. }
  157. }
  158. of_node_put(dn);
  159. return 0;
  160. }
/*
 * Find the index of @lmb_assoc's associativity array within the
 * "ibm,associativity-lookup-arrays" property @ala_prop of @dr_node,
 * appending a new array entry when no existing one matches.
 *
 * Returns the (zero-based) array index, or (u32)-1 when a new entry was
 * needed but could not be allocated.  Callers treat the u32 result as
 * signed to detect failure.
 */
static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bitvalue specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		/* compare from lmb_assoc[1]: cell 0 is presumably the
		 * associativity list length — confirm against PAPR */
		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		/* No match: grow the lookup array by one entry. */
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		/* NOTE(review): ownership of new_prop passes to the device
		 * tree here; if of_update_property() fails the clone leaks —
		 * worth confirming whether that path can occur. */
		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}
  209. static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
  210. {
  211. struct device_node *parent, *lmb_node, *dr_node;
  212. struct property *ala_prop;
  213. const u32 *lmb_assoc;
  214. u32 aa_index;
  215. parent = of_find_node_by_path("/");
  216. if (!parent)
  217. return -ENODEV;
  218. lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
  219. parent);
  220. of_node_put(parent);
  221. if (!lmb_node)
  222. return -EINVAL;
  223. lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
  224. if (!lmb_assoc) {
  225. dlpar_free_cc_nodes(lmb_node);
  226. return -ENODEV;
  227. }
  228. dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
  229. if (!dr_node) {
  230. dlpar_free_cc_nodes(lmb_node);
  231. return -ENODEV;
  232. }
  233. ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
  234. NULL);
  235. if (!ala_prop) {
  236. of_node_put(dr_node);
  237. dlpar_free_cc_nodes(lmb_node);
  238. return -ENODEV;
  239. }
  240. aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
  241. dlpar_free_cc_nodes(lmb_node);
  242. return aa_index;
  243. }
  244. static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
  245. {
  246. int aa_index;
  247. lmb->flags |= DRCONF_MEM_ASSIGNED;
  248. aa_index = lookup_lmb_associativity_index(lmb);
  249. if (aa_index < 0) {
  250. pr_err("Couldn't find associativity index for drc index %x\n",
  251. lmb->drc_index);
  252. return aa_index;
  253. }
  254. lmb->aa_index = aa_index;
  255. return dlpar_update_device_tree_lmb(lmb);
  256. }
  257. static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
  258. {
  259. lmb->flags &= ~DRCONF_MEM_ASSIGNED;
  260. lmb->aa_index = 0xffffffff;
  261. return dlpar_update_device_tree_lmb(lmb);
  262. }
  263. #ifdef CONFIG_MEMORY_HOTREMOVE
  264. static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
  265. {
  266. unsigned long block_sz, start_pfn;
  267. int sections_per_block;
  268. int i, nid;
  269. start_pfn = base >> PAGE_SHIFT;
  270. lock_device_hotplug();
  271. if (!pfn_valid(start_pfn))
  272. goto out;
  273. block_sz = pseries_memory_block_size();
  274. sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
  275. nid = memory_add_physaddr_to_nid(base);
  276. for (i = 0; i < sections_per_block; i++) {
  277. remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
  278. base += MIN_MEMORY_BLOCK_SIZE;
  279. }
  280. out:
  281. /* Update memory regions for memory remove */
  282. memblock_remove(base, memblock_size);
  283. unlock_device_hotplug();
  284. return 0;
  285. }
  286. static int pseries_remove_mem_node(struct device_node *np)
  287. {
  288. const char *type;
  289. const __be32 *regs;
  290. unsigned long base;
  291. unsigned int lmb_size;
  292. int ret = -EINVAL;
  293. /*
  294. * Check to see if we are actually removing memory
  295. */
  296. type = of_get_property(np, "device_type", NULL);
  297. if (type == NULL || strcmp(type, "memory") != 0)
  298. return 0;
  299. /*
  300. * Find the base address and size of the memblock
  301. */
  302. regs = of_get_property(np, "reg", NULL);
  303. if (!regs)
  304. return ret;
  305. base = be64_to_cpu(*(unsigned long *)regs);
  306. lmb_size = be32_to_cpu(regs[3]);
  307. pseries_remove_memblock(base, lmb_size);
  308. return 0;
  309. }
  310. static bool lmb_is_removable(struct of_drconf_cell *lmb)
  311. {
  312. int i, scns_per_block;
  313. int rc = 1;
  314. unsigned long pfn, block_sz;
  315. u64 phys_addr;
  316. if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
  317. return false;
  318. block_sz = memory_block_size_bytes();
  319. scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
  320. phys_addr = lmb->base_addr;
  321. for (i = 0; i < scns_per_block; i++) {
  322. pfn = PFN_DOWN(phys_addr);
  323. if (!pfn_present(pfn))
  324. continue;
  325. rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
  326. phys_addr += MIN_MEMORY_BLOCK_SIZE;
  327. }
  328. return rc ? true : false;
  329. }
  330. static int dlpar_add_lmb(struct of_drconf_cell *);
  331. static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
  332. {
  333. unsigned long section_nr;
  334. struct mem_section *mem_sect;
  335. struct memory_block *mem_block;
  336. section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
  337. mem_sect = __nr_to_section(section_nr);
  338. mem_block = find_memory_block(mem_sect);
  339. return mem_block;
  340. }
/*
 * Tear down a single LMB: offline its memory block device, remove the
 * memory from the kernel and from memblock, release its DRC and mark
 * it unassigned in the device tree.
 *
 * Returns 0 on success, -EINVAL when the LMB is not removable or has
 * no backing memory block, or the error from device_offline().
 */
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	rc = device_offline(&mem_block->dev);
	put_device(&mem_block->dev);	/* balances lmb_to_memblock() lookup */
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_release_drc(lmb->drc_index);

	dlpar_remove_device_tree_lmb(lmb);
	return 0;
}
/*
 * Hot-remove @lmbs_to_remove LMBs, chosen from the assigned entries of
 * the cloned "ibm,dynamic-memory" property @prop (CPU byte order).
 *
 * The request is all-or-nothing: if fewer than requested could be
 * removed, everything removed so far is added back and -EINVAL is
 * returned.  The transient 'reserved' flag marks LMBs removed by this
 * invocation so they can be found again for rollback or logging.
 */
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	struct of_drconf_cell *lmbs;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	u32 num_lmbs, *p;
	int i, rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;	/* first cell: number of LMB entries */
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			continue;	/* not removable; try the next one */

		lmbs_removed++;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_removed != lmbs_to_remove) {
		/* Partial success: roll back every LMB we removed. */
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}

		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx was hot-removed\n",
				lmbs[i].base_addr);

			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}
  419. static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
  420. {
  421. struct of_drconf_cell *lmbs;
  422. u32 num_lmbs, *p;
  423. int lmb_found;
  424. int i, rc;
  425. pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
  426. p = prop->value;
  427. num_lmbs = *p++;
  428. lmbs = (struct of_drconf_cell *)p;
  429. lmb_found = 0;
  430. for (i = 0; i < num_lmbs; i++) {
  431. if (lmbs[i].drc_index == drc_index) {
  432. lmb_found = 1;
  433. rc = dlpar_remove_lmb(&lmbs[i]);
  434. break;
  435. }
  436. }
  437. if (!lmb_found)
  438. rc = -EINVAL;
  439. if (rc)
  440. pr_info("Failed to hot-remove memory at %llx\n",
  441. lmbs[i].base_addr);
  442. else
  443. pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);
  444. return rc;
  445. }
  446. #else
/*
 * CONFIG_MEMORY_HOTREMOVE=n stubs: hot-remove requests fail with
 * -EOPNOTSUPP, and detaching a memory node is a silent no-op.
 */
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
/* Still referenced by dlpar_memory_add_by_count() for rollback. */
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}
  473. #endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * Bring a single LMB online: acquire its DRC, publish it in the device
 * tree, then add the memory to the kernel.  Earlier steps are rolled
 * back when a later one fails.
 *
 * Returns 0 on success, -EINVAL if the LMB is already assigned, or a
 * negative errno from the failing step.
 */
static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_acquire_drc(lmb->drc_index);
	if (rc)
		return rc;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		/* roll back the device-tree update and the DRC */
		dlpar_remove_device_tree_lmb(lmb);
		dlpar_release_drc(lmb->drc_index);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}
  503. static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
  504. {
  505. struct of_drconf_cell *lmbs;
  506. u32 num_lmbs, *p;
  507. int lmbs_available = 0;
  508. int lmbs_added = 0;
  509. int i, rc;
  510. pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
  511. if (lmbs_to_add == 0)
  512. return -EINVAL;
  513. p = prop->value;
  514. num_lmbs = *p++;
  515. lmbs = (struct of_drconf_cell *)p;
  516. /* Validate that there are enough LMBs to satisfy the request */
  517. for (i = 0; i < num_lmbs; i++) {
  518. if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
  519. lmbs_available++;
  520. }
  521. if (lmbs_available < lmbs_to_add)
  522. return -EINVAL;
  523. for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
  524. rc = dlpar_add_lmb(&lmbs[i]);
  525. if (rc)
  526. continue;
  527. lmbs_added++;
  528. /* Mark this lmb so we can remove it later if all of the
  529. * requested LMBs cannot be added.
  530. */
  531. lmbs[i].reserved = 1;
  532. }
  533. if (lmbs_added != lmbs_to_add) {
  534. pr_err("Memory hot-add failed, removing any added LMBs\n");
  535. for (i = 0; i < num_lmbs; i++) {
  536. if (!lmbs[i].reserved)
  537. continue;
  538. rc = dlpar_remove_lmb(&lmbs[i]);
  539. if (rc)
  540. pr_err("Failed to remove LMB, drc index %x\n",
  541. be32_to_cpu(lmbs[i].drc_index));
  542. }
  543. rc = -EINVAL;
  544. } else {
  545. for (i = 0; i < num_lmbs; i++) {
  546. if (!lmbs[i].reserved)
  547. continue;
  548. pr_info("Memory at %llx (drc index %x) was hot-added\n",
  549. lmbs[i].base_addr, lmbs[i].drc_index);
  550. lmbs[i].reserved = 0;
  551. }
  552. }
  553. return rc;
  554. }
  555. static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
  556. {
  557. struct of_drconf_cell *lmbs;
  558. u32 num_lmbs, *p;
  559. int i, lmb_found;
  560. int rc;
  561. pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
  562. p = prop->value;
  563. num_lmbs = *p++;
  564. lmbs = (struct of_drconf_cell *)p;
  565. lmb_found = 0;
  566. for (i = 0; i < num_lmbs; i++) {
  567. if (lmbs[i].drc_index == drc_index) {
  568. lmb_found = 1;
  569. rc = dlpar_add_lmb(&lmbs[i]);
  570. break;
  571. }
  572. }
  573. if (!lmb_found)
  574. rc = -EINVAL;
  575. if (rc)
  576. pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
  577. else
  578. pr_info("Memory at %llx (drc index %x) was hot-added\n",
  579. lmbs[i].base_addr, drc_index);
  580. return rc;
  581. }
/*
 * Top-level handler for a memory DLPAR request from the hotplug error
 * log.  Clones "ibm,dynamic-memory" (converted to CPU byte order) and
 * dispatches to the add/remove by-count or by-index helpers, which all
 * operate on that clone.
 *
 * Returns 0 on success or a negative errno.
 */
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	struct device_node *dn;
	struct property *prop;
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		rc = -EINVAL;
		goto dlpar_memory_out;	/* of_node_put(NULL) is a no-op */
	}

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_add_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_add_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_remove_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_remove_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	dlpar_free_property(prop);

dlpar_memory_out:
	of_node_put(dn);
	unlock_device_hotplug();
	return rc;
}
  629. static int pseries_add_mem_node(struct device_node *np)
  630. {
  631. const char *type;
  632. const __be32 *regs;
  633. unsigned long base;
  634. unsigned int lmb_size;
  635. int ret = -EINVAL;
  636. /*
  637. * Check to see if we are actually adding memory
  638. */
  639. type = of_get_property(np, "device_type", NULL);
  640. if (type == NULL || strcmp(type, "memory") != 0)
  641. return 0;
  642. /*
  643. * Find the base and size of the memblock
  644. */
  645. regs = of_get_property(np, "reg", NULL);
  646. if (!regs)
  647. return ret;
  648. base = be64_to_cpu(*(unsigned long *)regs);
  649. lmb_size = be32_to_cpu(regs[3]);
  650. /*
  651. * Update memory region to represent the memory add
  652. */
  653. ret = memblock_add(base, lmb_size);
  654. return (ret < 0) ? -EINVAL : 0;
  655. }
/*
 * OF reconfig handler for updates to "ibm,dynamic-memory": diff the
 * old and new property values (both big-endian here) and mirror the
 * first changed LMB's assignment state into memblock.
 *
 * Updates triggered by this file's own DLPAR paths are skipped via
 * rtas_hp_event, since those paths adjust memblock directly.
 *
 * Returns 0 on success or when skipping, -EINVAL when a precondition
 * fails or no actionable change is found.
 */
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	/* Property rewrite initiated by our own RTAS hotplug code. */
	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell *)p;

	p = (__be32 *)pr->prop->value;
	p++;	/* skip the entry count of the new property */
	new_drmem = (struct of_drconf_cell *)p;

	for (i = 0; i < entries; i++) {
		/* assigned -> unassigned: drop the range from memblock */
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			/* unassigned -> assigned: add the range */
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}

	return rc;
}
  700. static int pseries_memory_notifier(struct notifier_block *nb,
  701. unsigned long action, void *data)
  702. {
  703. struct of_reconfig_data *rd = data;
  704. int err = 0;
  705. switch (action) {
  706. case OF_RECONFIG_ATTACH_NODE:
  707. err = pseries_add_mem_node(rd->dn);
  708. break;
  709. case OF_RECONFIG_DETACH_NODE:
  710. err = pseries_remove_mem_node(rd->dn);
  711. break;
  712. case OF_RECONFIG_UPDATE_PROPERTY:
  713. if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
  714. err = pseries_update_drconf_memory(rd);
  715. break;
  716. }
  717. return notifier_from_errno(err);
  718. }
/* Receives device-tree reconfiguration events affecting memory nodes. */
static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};
  722. static int __init pseries_memory_hotplug_init(void)
  723. {
  724. if (firmware_has_feature(FW_FEATURE_LPAR))
  725. of_reconfig_notifier_register(&pseries_mem_nb);
  726. return 0;
  727. }
  728. machine_device_initcall(pseries, pseries_memory_hotplug_init);