/* cmm.c - IBM pSeries Collaborative Memory Manager */
  1. /*
  2. * Collaborative memory management interface.
  3. *
  4. * Copyright (C) 2008 IBM Corporation
  5. * Author(s): Brian King (brking@linux.vnet.ibm.com),
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. */
  22. #include <linux/ctype.h>
  23. #include <linux/delay.h>
  24. #include <linux/errno.h>
  25. #include <linux/fs.h>
  26. #include <linux/gfp.h>
  27. #include <linux/init.h>
  28. #include <linux/kthread.h>
  29. #include <linux/module.h>
  30. #include <linux/oom.h>
  31. #include <linux/reboot.h>
  32. #include <linux/sched.h>
  33. #include <linux/stringify.h>
  34. #include <linux/swap.h>
  35. #include <linux/device.h>
  36. #include <asm/firmware.h>
  37. #include <asm/hvcall.h>
  38. #include <asm/mmu.h>
  39. #include <asm/pgalloc.h>
  40. #include <asm/uaccess.h>
  41. #include <linux/memory.h>
  42. #include "plpar_wrappers.h"
#define CMM_DRIVER_VERSION	"1.0.0"

/* Polling interval (seconds) between hypervisor loan-request queries. */
#define CMM_DEFAULT_DELAY	1
/* Seconds to suspend loaning after a memory hotplug remove completes. */
#define CMM_HOTPLUG_DELAY	5
#define CMM_DEBUG		0
#define CMM_DISABLE		0
/* Amount of memory (in KB) to hand back when the OOM notifier fires. */
#define CMM_OOM_KB		1024
/* Never balloon the partition below this much memory (in MB). */
#define CMM_MIN_MEM_MB		256
/* Kilobyte <-> page conversions (assumes PAGE_SHIFT >= 10). */
#define KB2PAGES(_p)		((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p)		((_p)<<(PAGE_SHIFT-10))
/*
 * The priority level tries to ensure that this notifier is called as
 * late as possible to reduce thrashing in the shared memory pool.
 */
#define CMM_MEM_HOTPLUG_PRI	1
#define CMM_MEM_ISOLATE_PRI	15
/* Runtime-tunable copies of the defaults above (exposed via module_param below). */
static unsigned int delay = CMM_DEFAULT_DELAY;
static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
/* sysfs device; registered in cmm_sysfs_register(). */
static struct device cmm_dev;

MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(CMM_DRIVER_VERSION);
  69. module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR);
  70. MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
  71. "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
  72. module_param_named(hotplug_delay, hotplug_delay, uint, S_IRUGO | S_IWUSR);
  73. MODULE_PARM_DESC(delay, "Delay (in seconds) after memory hotplug remove "
  74. "before loaning resumes. "
  75. "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
  76. module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR);
  77. MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
  78. "[Default=" __stringify(CMM_OOM_KB) "]");
  79. module_param_named(min_mem_mb, min_mem_mb, ulong, S_IRUGO | S_IWUSR);
  80. MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
  81. "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
  82. module_param_named(debug, cmm_debug, uint, S_IRUGO | S_IWUSR);
  83. MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
  84. "[Default=" __stringify(CMM_DEBUG) "]");
  85. #define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))
  86. #define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
  87. struct cmm_page_array {
  88. struct cmm_page_array *next;
  89. unsigned long index;
  90. unsigned long page[CMM_NR_PAGES];
  91. };
  92. static unsigned long loaned_pages;
  93. static unsigned long loaned_pages_target;
  94. static unsigned long oom_freed_pages;
  95. static struct cmm_page_array *cmm_page_list;
  96. static DEFINE_SPINLOCK(cmm_lock);
  97. static DEFINE_MUTEX(hotplug_mutex);
  98. static int hotplug_occurred; /* protected by the hotplug mutex */
  99. static struct task_struct *cmm_thread_ptr;
/**
 * cmm_alloc_pages - Allocate pages and mark them as loaned
 * @nr: number of pages to allocate
 *
 * Pulls pages from the kernel one at a time, tells the hypervisor each
 * one is loaned via plpar_page_set_loaned(), and records its address in
 * the cmm_page_list so it can be given back later.  Stops early if a
 * memory hotplug operation is in progress (or has just occurred), or if
 * an allocation or hcall fails.
 *
 * Return value:
 * 	number of pages requested to be allocated which were not
 **/
static long cmm_alloc_pages(long nr)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;
	long rc;

	cmm_dbg("Begin request for %ld pages\n", nr);

	while (nr) {
		/* Exit if a hotplug operation is in progress or occurred */
		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				mutex_unlock(&hotplug_mutex);
				break;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			break;
		}

		/* Best-effort flags: never trigger reclaim/OOM on our own behalf. */
		addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
				       __GFP_NORETRY | __GFP_NOMEMALLOC);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = cmm_page_list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)__get_free_page(
					GFP_NOIO | __GFP_NOWARN |
					__GFP_NORETRY | __GFP_NOMEMALLOC);
			if (!npa) {
				pr_info("%s: Can not allocate new page list\n", __func__);
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			pa = cmm_page_list;

			/* Re-check: another path may have added a list page
			 * while the lock was dropped for the allocation. */
			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				cmm_page_list = pa;
			} else
				free_page((unsigned long) npa);
		}

		if ((rc = plpar_page_set_loaned(__pa(addr)))) {
			pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
			spin_unlock(&cmm_lock);
			free_page(addr);
			break;
		}

		pa->page[pa->index++] = addr;
		loaned_pages++;
		/* Loaned pages no longer count toward the partition's RAM. */
		totalram_pages--;
		spin_unlock(&cmm_lock);
		nr--;
	}

	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
/**
 * cmm_free_pages - Free pages and mark them as active
 * @nr: number of pages to free
 *
 * Pops loaned-page addresses off the head of cmm_page_list, tells the
 * hypervisor each page is active again, and returns it to the kernel.
 * The head array page itself is freed once it is drained.
 *
 * Return value:
 * 	number of pages requested to be freed which were not
 **/
static long cmm_free_pages(long nr)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	cmm_dbg("Begin free of %ld pages.\n", nr);
	spin_lock(&cmm_lock);
	pa = cmm_page_list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		addr = pa->page[--pa->index];

		/* Head array drained: free the array page and advance the head.
		 * (pa always equals cmm_page_list here, so this frees pa.) */
		if (pa->index == 0) {
			pa = pa->next;
			free_page((unsigned long) cmm_page_list);
			cmm_page_list = pa;
		}

		plpar_page_set_active(__pa(addr));
		free_page(addr);
		loaned_pages--;
		nr--;
		/* Page is the partition's again; count it back into RAM. */
		totalram_pages++;
	}
	spin_unlock(&cmm_lock);
	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
  199. /**
  200. * cmm_oom_notify - OOM notifier
  201. * @self: notifier block struct
  202. * @dummy: not used
  203. * @parm: returned - number of pages freed
  204. *
  205. * Return value:
  206. * NOTIFY_OK
  207. **/
  208. static int cmm_oom_notify(struct notifier_block *self,
  209. unsigned long dummy, void *parm)
  210. {
  211. unsigned long *freed = parm;
  212. long nr = KB2PAGES(oom_kb);
  213. cmm_dbg("OOM processing started\n");
  214. nr = cmm_free_pages(nr);
  215. loaned_pages_target = loaned_pages;
  216. *freed += KB2PAGES(oom_kb) - nr;
  217. oom_freed_pages += KB2PAGES(oom_kb) - nr;
  218. cmm_dbg("OOM processing complete\n");
  219. return NOTIFY_OK;
  220. }
/**
 * cmm_get_mpp - Read memory performance parameters
 *
 * Makes hcall to query the current page loan request from the hypervisor.
 * Computes a new loaned_pages_target, clamped so the partition never
 * shrinks below min_mem_mb and so pages recently freed by the OOM
 * notifier are not immediately re-loaned.
 *
 * Return value:
 * 	nothing
 **/
static void cmm_get_mpp(void)
{
	int rc;
	struct hvcall_mpp_data mpp_data;
	signed long active_pages_target, page_loan_request, target;
	signed long total_pages = totalram_pages + loaned_pages;
	signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;

	rc = h_get_mpp(&mpp_data);

	if (rc != H_SUCCESS)
		return;

	/* loan_request is in bytes; convert to pages (may be negative). */
	page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
	target = page_loan_request + (signed long)loaned_pages;

	/* Never loan anything if that would dip below the memory floor. */
	if (target < 0 || total_pages < min_mem_pages)
		target = 0;

	/* Back off the target by pages the OOM notifier already reclaimed. */
	if (target > oom_freed_pages)
		target -= oom_freed_pages;
	else
		target = 0;

	active_pages_target = total_pages - target;

	/* Final clamp: leave at least min_mem_pages active. */
	if (min_mem_pages > active_pages_target)
		target = total_pages - min_mem_pages;

	if (target < 0)
		target = 0;

	loaned_pages_target = target;

	cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
		page_loan_request, loaned_pages, loaned_pages_target,
		oom_freed_pages, totalram_pages);
}

static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};
/**
 * cmm_thread - CMM task thread
 * @dummy: not used
 *
 * Main polling loop: sleep "delay" seconds, query the hypervisor's loan
 * target, then allocate or free loaned pages to converge on it.  After
 * a memory hotplug operation, loaning pauses for "hotplug_delay"
 * seconds; while one is in progress (hotplug_mutex held elsewhere) the
 * poll is skipped entirely.
 *
 * Return value:
 * 	0
 **/
static int cmm_thread(void *dummy)
{
	unsigned long timeleft;

	while (1) {
		timeleft = msleep_interruptible(delay * 1000);

		/* Nonzero timeleft means the sleep was interrupted; exit. */
		if (kthread_should_stop() || timeleft)
			break;

		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				hotplug_occurred = 0;
				mutex_unlock(&hotplug_mutex);
				cmm_dbg("Hotplug operation has occurred, "
						"loaning activity suspended "
						"for %d seconds.\n",
						hotplug_delay);
				timeleft = msleep_interruptible(hotplug_delay *
						1000);
				if (kthread_should_stop() || timeleft)
					break;
				continue;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			/* Mutex held by the hotplug path; try next poll. */
			cmm_dbg("Hotplug operation in progress, activity "
					"suspended\n");
			continue;
		}

		cmm_get_mpp();

		if (loaned_pages_target > loaned_pages) {
			/* Could not fully satisfy the loan; lower the target. */
			if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
				loaned_pages_target = loaned_pages;
		} else if (loaned_pages_target < loaned_pages)
			cmm_free_pages(loaned_pages - loaned_pages_target);
	}
	return 0;
}
/*
 * Generates a read-only sysfs show routine plus its DEVICE_ATTR for a
 * single "%lu\n"-style counter.
 */
#define CMM_SHOW(name, format, args...)			\
	static ssize_t show_##name(struct device *dev,	\
				   struct device_attribute *attr,	\
				   char *buf)			\
	{							\
		return sprintf(buf, format, ##args);		\
	}							\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
  313. static ssize_t show_oom_pages(struct device *dev,
  314. struct device_attribute *attr, char *buf)
  315. {
  316. return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
  317. }
  318. static ssize_t store_oom_pages(struct device *dev,
  319. struct device_attribute *attr,
  320. const char *buf, size_t count)
  321. {
  322. unsigned long val = simple_strtoul (buf, NULL, 10);
  323. if (!capable(CAP_SYS_ADMIN))
  324. return -EPERM;
  325. if (val != 0)
  326. return -EBADMSG;
  327. oom_freed_pages = 0;
  328. return count;
  329. }
static DEVICE_ATTR(oom_freed_kb, S_IWUSR | S_IRUGO,
		   show_oom_pages, store_oom_pages);

/* All attribute files created under /sys/devices/system/cmm/cmm0. */
static struct device_attribute *cmm_attrs[] = {
	&dev_attr_loaned_kb,
	&dev_attr_loaned_target_kb,
	&dev_attr_oom_freed_kb,
};

/* Pseudo-bus backing the "cmm" sysfs subsystem. */
static struct bus_type cmm_subsys = {
	.name = "cmm",
	.dev_name = "cmm",
};
  341. /**
  342. * cmm_sysfs_register - Register with sysfs
  343. *
  344. * Return value:
  345. * 0 on success / other on failure
  346. **/
  347. static int cmm_sysfs_register(struct device *dev)
  348. {
  349. int i, rc;
  350. if ((rc = subsys_system_register(&cmm_subsys, NULL)))
  351. return rc;
  352. dev->id = 0;
  353. dev->bus = &cmm_subsys;
  354. if ((rc = device_register(dev)))
  355. goto subsys_unregister;
  356. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
  357. if ((rc = device_create_file(dev, cmm_attrs[i])))
  358. goto fail;
  359. }
  360. return 0;
  361. fail:
  362. while (--i >= 0)
  363. device_remove_file(dev, cmm_attrs[i]);
  364. device_unregister(dev);
  365. subsys_unregister:
  366. bus_unregister(&cmm_subsys);
  367. return rc;
  368. }
  369. /**
  370. * cmm_unregister_sysfs - Unregister from sysfs
  371. *
  372. **/
  373. static void cmm_unregister_sysfs(struct device *dev)
  374. {
  375. int i;
  376. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
  377. device_remove_file(dev, cmm_attrs[i]);
  378. device_unregister(dev);
  379. bus_unregister(&cmm_subsys);
  380. }
  381. /**
  382. * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
  383. *
  384. **/
  385. static int cmm_reboot_notifier(struct notifier_block *nb,
  386. unsigned long action, void *unused)
  387. {
  388. if (action == SYS_RESTART) {
  389. if (cmm_thread_ptr)
  390. kthread_stop(cmm_thread_ptr);
  391. cmm_thread_ptr = NULL;
  392. cmm_free_pages(loaned_pages);
  393. }
  394. return NOTIFY_DONE;
  395. }
  396. static struct notifier_block cmm_reboot_nb = {
  397. .notifier_call = cmm_reboot_notifier,
  398. };
  399. /**
  400. * cmm_count_pages - Count the number of pages loaned in a particular range.
  401. *
  402. * @arg: memory_isolate_notify structure with address range and count
  403. *
  404. * Return value:
  405. * 0 on success
  406. **/
  407. static unsigned long cmm_count_pages(void *arg)
  408. {
  409. struct memory_isolate_notify *marg = arg;
  410. struct cmm_page_array *pa;
  411. unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
  412. unsigned long end = start + (marg->nr_pages << PAGE_SHIFT);
  413. unsigned long idx;
  414. spin_lock(&cmm_lock);
  415. pa = cmm_page_list;
  416. while (pa) {
  417. if ((unsigned long)pa >= start && (unsigned long)pa < end)
  418. marg->pages_found++;
  419. for (idx = 0; idx < pa->index; idx++)
  420. if (pa->page[idx] >= start && pa->page[idx] < end)
  421. marg->pages_found++;
  422. pa = pa->next;
  423. }
  424. spin_unlock(&cmm_lock);
  425. return 0;
  426. }
  427. /**
  428. * cmm_memory_isolate_cb - Handle memory isolation notifier calls
  429. * @self: notifier block struct
  430. * @action: action to take
  431. * @arg: struct memory_isolate_notify data for handler
  432. *
  433. * Return value:
  434. * NOTIFY_OK or notifier error based on subfunction return value
  435. **/
  436. static int cmm_memory_isolate_cb(struct notifier_block *self,
  437. unsigned long action, void *arg)
  438. {
  439. int ret = 0;
  440. if (action == MEM_ISOLATE_COUNT)
  441. ret = cmm_count_pages(arg);
  442. return notifier_from_errno(ret);
  443. }
  444. static struct notifier_block cmm_mem_isolate_nb = {
  445. .notifier_call = cmm_memory_isolate_cb,
  446. .priority = CMM_MEM_ISOLATE_PRI
  447. };
  448. /**
  449. * cmm_mem_going_offline - Unloan pages where memory is to be removed
  450. * @arg: memory_notify structure with page range to be offlined
  451. *
  452. * Return value:
  453. * 0 on success
  454. **/
  455. static int cmm_mem_going_offline(void *arg)
  456. {
  457. struct memory_notify *marg = arg;
  458. unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
  459. unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
  460. struct cmm_page_array *pa_curr, *pa_last, *npa;
  461. unsigned long idx;
  462. unsigned long freed = 0;
  463. cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
  464. start_page, marg->nr_pages);
  465. spin_lock(&cmm_lock);
  466. /* Search the page list for pages in the range to be offlined */
  467. pa_last = pa_curr = cmm_page_list;
  468. while (pa_curr) {
  469. for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
  470. if ((pa_curr->page[idx] < start_page) ||
  471. (pa_curr->page[idx] >= end_page))
  472. continue;
  473. plpar_page_set_active(__pa(pa_curr->page[idx]));
  474. free_page(pa_curr->page[idx]);
  475. freed++;
  476. loaned_pages--;
  477. totalram_pages++;
  478. pa_curr->page[idx] = pa_last->page[--pa_last->index];
  479. if (pa_last->index == 0) {
  480. if (pa_curr == pa_last)
  481. pa_curr = pa_last->next;
  482. pa_last = pa_last->next;
  483. free_page((unsigned long)cmm_page_list);
  484. cmm_page_list = pa_last;
  485. continue;
  486. }
  487. }
  488. pa_curr = pa_curr->next;
  489. }
  490. /* Search for page list structures in the range to be offlined */
  491. pa_last = NULL;
  492. pa_curr = cmm_page_list;
  493. while (pa_curr) {
  494. if (((unsigned long)pa_curr >= start_page) &&
  495. ((unsigned long)pa_curr < end_page)) {
  496. npa = (struct cmm_page_array *)__get_free_page(
  497. GFP_NOIO | __GFP_NOWARN |
  498. __GFP_NORETRY | __GFP_NOMEMALLOC);
  499. if (!npa) {
  500. spin_unlock(&cmm_lock);
  501. cmm_dbg("Failed to allocate memory for list "
  502. "management. Memory hotplug "
  503. "failed.\n");
  504. return ENOMEM;
  505. }
  506. memcpy(npa, pa_curr, PAGE_SIZE);
  507. if (pa_curr == cmm_page_list)
  508. cmm_page_list = npa;
  509. if (pa_last)
  510. pa_last->next = npa;
  511. free_page((unsigned long) pa_curr);
  512. freed++;
  513. pa_curr = npa;
  514. }
  515. pa_last = pa_curr;
  516. pa_curr = pa_curr->next;
  517. }
  518. spin_unlock(&cmm_lock);
  519. cmm_dbg("Released %ld pages in the search range.\n", freed);
  520. return 0;
  521. }
/**
 * cmm_memory_cb - Handle memory hotplug notifier calls
 * @self: notifier block struct
 * @action: action to take
 * @arg: struct memory_notify data for handler
 *
 * NOTE: the locking is deliberately asymmetric.  hotplug_mutex is taken
 * on MEM_GOING_OFFLINE and stays held across the offline operation
 * until the matching MEM_OFFLINE or MEM_CANCEL_OFFLINE call releases
 * it.  While it is held, cmm_thread() and cmm_alloc_pages() suspend
 * loaning activity (they use mutex_trylock).
 *
 * Return value:
 * 	NOTIFY_OK or notifier error based on subfunction return value
 *
 **/
static int cmm_memory_cb(struct notifier_block *self,
			unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&hotplug_mutex);
		hotplug_occurred = 1;
		ret = cmm_mem_going_offline(arg);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		/* Releases the mutex taken in MEM_GOING_OFFLINE above. */
		mutex_unlock(&hotplug_mutex);
		cmm_dbg("Memory offline operation complete.\n");
		break;
	case MEM_GOING_ONLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block cmm_mem_nb = {
	.notifier_call = cmm_memory_cb,
	.priority = CMM_MEM_HOTPLUG_PRI
};
/**
 * cmm_init - Module initialization
 *
 * Registers the OOM, reboot, memory hotplug and memory isolation
 * notifiers plus the sysfs interface, then starts the polling thread
 * (unless loaded with disable=1, in which case the thread is started
 * later by cmm_set_disable()).
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int cmm_init(void)
{
	int rc = -ENOMEM;

	/* CMM only makes sense on Cooperative Memory Overcommit firmware. */
	if (!firmware_has_feature(FW_FEATURE_CMO))
		return -EOPNOTSUPP;

	if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
		return rc;

	if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
		goto out_oom_notifier;

	if ((rc = cmm_sysfs_register(&cmm_dev)))
		goto out_reboot_notifier;

	if (register_memory_notifier(&cmm_mem_nb) ||
	    register_memory_isolate_notifier(&cmm_mem_isolate_nb))
		goto out_unregister_notifier;

	/* rc is 0 here; with disable=1 we are fully registered but idle. */
	if (cmm_disabled)
		return rc;

	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	if (IS_ERR(cmm_thread_ptr)) {
		rc = PTR_ERR(cmm_thread_ptr);
		goto out_unregister_notifier;
	}

	return rc;

out_unregister_notifier:
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	cmm_unregister_sysfs(&cmm_dev);
out_reboot_notifier:
	unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
	unregister_oom_notifier(&cmm_oom_nb);
	return rc;
}
/**
 * cmm_exit - Module exit
 *
 * Stops the polling thread, unregisters every notifier, returns all
 * loaned pages to the kernel, and tears down the sysfs interface.
 *
 * Return value:
 * 	nothing
 **/
static void cmm_exit(void)
{
	if (cmm_thread_ptr)
		kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	unregister_reboot_notifier(&cmm_reboot_nb);
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	/* Give every loaned page back before the driver goes away. */
	cmm_free_pages(loaned_pages);
	cmm_unregister_sysfs(&cmm_dev);
}
  613. /**
  614. * cmm_set_disable - Disable/Enable CMM
  615. *
  616. * Return value:
  617. * 0 on success / other on failure
  618. **/
  619. static int cmm_set_disable(const char *val, struct kernel_param *kp)
  620. {
  621. int disable = simple_strtoul(val, NULL, 10);
  622. if (disable != 0 && disable != 1)
  623. return -EINVAL;
  624. if (disable && !cmm_disabled) {
  625. if (cmm_thread_ptr)
  626. kthread_stop(cmm_thread_ptr);
  627. cmm_thread_ptr = NULL;
  628. cmm_free_pages(loaned_pages);
  629. } else if (!disable && cmm_disabled) {
  630. cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
  631. if (IS_ERR(cmm_thread_ptr))
  632. return PTR_ERR(cmm_thread_ptr);
  633. }
  634. cmm_disabled = disable;
  635. return 0;
  636. }
/* "disable" needs a custom setter so runtime toggles start/stop the thread. */
module_param_call(disable, cmm_set_disable, param_get_uint,
		  &cmm_disabled, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
		 "[Default=" __stringify(CMM_DISABLE) "]");

module_init(cmm_init);
module_exit(cmm_exit);