/* arch/powerpc/platforms/pseries/cmm.c */
  1. /*
  2. * Collaborative memory management interface.
  3. *
  4. * Copyright (C) 2008 IBM Corporation
  5. * Author(s): Brian King (brking@linux.vnet.ibm.com),
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. */
  22. #include <linux/ctype.h>
  23. #include <linux/delay.h>
  24. #include <linux/errno.h>
  25. #include <linux/fs.h>
  26. #include <linux/gfp.h>
  27. #include <linux/kthread.h>
  28. #include <linux/module.h>
  29. #include <linux/oom.h>
  30. #include <linux/reboot.h>
  31. #include <linux/sched.h>
  32. #include <linux/stringify.h>
  33. #include <linux/swap.h>
  34. #include <linux/device.h>
  35. #include <asm/firmware.h>
  36. #include <asm/hvcall.h>
  37. #include <asm/mmu.h>
  38. #include <asm/pgalloc.h>
  39. #include <asm/uaccess.h>
  40. #include <linux/memory.h>
  41. #include <asm/plpar_wrappers.h>
#define CMM_DRIVER_VERSION	"1.0.0"
#define CMM_DEFAULT_DELAY	1	/* seconds between hypervisor polls */
#define CMM_HOTPLUG_DELAY	5	/* seconds to pause loaning after hotplug */
#define CMM_DEBUG		0	/* debug logging off by default */
#define CMM_DISABLE		0	/* driver enabled by default */
#define CMM_OOM_KB		1024	/* KB returned to the kernel on OOM */
#define CMM_MIN_MEM_MB		256	/* never balloon below this much memory */
/* Convert between KB counts and page counts (PAGE_SHIFT >= 10 on this arch). */
#define KB2PAGES(_p)		((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p)		((_p)<<(PAGE_SHIFT-10))
/*
 * The priority level tries to ensure that this notifier is called as
 * late as possible to reduce thrashing in the shared memory pool.
 */
#define CMM_MEM_HOTPLUG_PRI	1
#define CMM_MEM_ISOLATE_PRI	15
/* Tunables, exposed as module parameters below. */
static unsigned int delay = CMM_DEFAULT_DELAY;
static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
/* sysfs device registered under the "cmm" subsystem by cmm_sysfs_register(). */
static struct device cmm_dev;
  64. MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
  65. MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
  66. MODULE_LICENSE("GPL");
  67. MODULE_VERSION(CMM_DRIVER_VERSION);
  68. module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR);
  69. MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
  70. "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
  71. module_param_named(hotplug_delay, hotplug_delay, uint, S_IRUGO | S_IWUSR);
  72. MODULE_PARM_DESC(delay, "Delay (in seconds) after memory hotplug remove "
  73. "before loaning resumes. "
  74. "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
  75. module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR);
  76. MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
  77. "[Default=" __stringify(CMM_OOM_KB) "]");
  78. module_param_named(min_mem_mb, min_mem_mb, ulong, S_IRUGO | S_IWUSR);
  79. MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
  80. "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
  81. module_param_named(debug, cmm_debug, uint, S_IRUGO | S_IWUSR);
  82. MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
  83. "[Default=" __stringify(CMM_DEBUG) "]");
  84. #define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))
  85. #define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
  86. struct cmm_page_array {
  87. struct cmm_page_array *next;
  88. unsigned long index;
  89. unsigned long page[CMM_NR_PAGES];
  90. };
  91. static unsigned long loaned_pages;
  92. static unsigned long loaned_pages_target;
  93. static unsigned long oom_freed_pages;
  94. static struct cmm_page_array *cmm_page_list;
  95. static DEFINE_SPINLOCK(cmm_lock);
  96. static DEFINE_MUTEX(hotplug_mutex);
  97. static int hotplug_occurred; /* protected by the hotplug mutex */
  98. static struct task_struct *cmm_thread_ptr;
/**
 * cmm_alloc_pages - Allocate pages and mark them as loaned
 * @nr: number of pages to allocate
 *
 * Return value:
 * 	number of pages requested to be allocated which were not
 **/
static long cmm_alloc_pages(long nr)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;
	long rc;

	cmm_dbg("Begin request for %ld pages\n", nr);

	while (nr) {
		/* Exit if a hotplug operation is in progress or occurred */
		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				mutex_unlock(&hotplug_mutex);
				break;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			break;
		}

		/* Lightweight allocation: never trigger I/O or deplete
		 * emergency reserves while ballooning. */
		addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
				       __GFP_NORETRY | __GFP_NOMEMALLOC);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = cmm_page_list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			/* Allocate outside the lock (may sleep/fail). */
			npa = (struct cmm_page_array *)__get_free_page(
					GFP_NOIO | __GFP_NOWARN |
					__GFP_NORETRY | __GFP_NOMEMALLOC);
			if (!npa) {
				pr_info("%s: Can not allocate new page list\n", __func__);
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			/* Re-check: another path may have added a node while
			 * the lock was dropped; if so, free our spare. */
			pa = cmm_page_list;

			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				cmm_page_list = pa;
			} else
				free_page((unsigned long) npa);
		}

		/* Tell the hypervisor this page is loaned before recording it. */
		if ((rc = plpar_page_set_loaned(__pa(addr)))) {
			pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
			spin_unlock(&cmm_lock);
			free_page(addr);
			break;
		}

		pa->page[pa->index++] = addr;
		loaned_pages++;
		/* Loaned pages no longer count toward kernel-usable RAM. */
		totalram_pages--;
		spin_unlock(&cmm_lock);
		nr--;
	}

	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
/**
 * cmm_free_pages - Free pages and mark them as active
 * @nr: number of pages to free
 *
 * Return value:
 * 	number of pages requested to be freed which were not
 **/
static long cmm_free_pages(long nr)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	cmm_dbg("Begin free of %ld pages.\n", nr);
	spin_lock(&cmm_lock);
	pa = cmm_page_list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		/* Pop the most recently loaned page from the head node. */
		addr = pa->page[--pa->index];

		if (pa->index == 0) {
			/* Head node drained: unlink and free it.  pa is the
			 * list head here, so freeing cmm_page_list frees the
			 * node we just emptied. */
			pa = pa->next;
			free_page((unsigned long) cmm_page_list);
			cmm_page_list = pa;
		}

		/* Return the page to active use in the shared pool. */
		plpar_page_set_active(__pa(addr));
		free_page(addr);
		loaned_pages--;
		nr--;
		totalram_pages++;
	}
	spin_unlock(&cmm_lock);
	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
  198. /**
  199. * cmm_oom_notify - OOM notifier
  200. * @self: notifier block struct
  201. * @dummy: not used
  202. * @parm: returned - number of pages freed
  203. *
  204. * Return value:
  205. * NOTIFY_OK
  206. **/
  207. static int cmm_oom_notify(struct notifier_block *self,
  208. unsigned long dummy, void *parm)
  209. {
  210. unsigned long *freed = parm;
  211. long nr = KB2PAGES(oom_kb);
  212. cmm_dbg("OOM processing started\n");
  213. nr = cmm_free_pages(nr);
  214. loaned_pages_target = loaned_pages;
  215. *freed += KB2PAGES(oom_kb) - nr;
  216. oom_freed_pages += KB2PAGES(oom_kb) - nr;
  217. cmm_dbg("OOM processing complete\n");
  218. return NOTIFY_OK;
  219. }
  220. /**
  221. * cmm_get_mpp - Read memory performance parameters
  222. *
  223. * Makes hcall to query the current page loan request from the hypervisor.
  224. *
  225. * Return value:
  226. * nothing
  227. **/
  228. static void cmm_get_mpp(void)
  229. {
  230. int rc;
  231. struct hvcall_mpp_data mpp_data;
  232. signed long active_pages_target, page_loan_request, target;
  233. signed long total_pages = totalram_pages + loaned_pages;
  234. signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
  235. rc = h_get_mpp(&mpp_data);
  236. if (rc != H_SUCCESS)
  237. return;
  238. page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
  239. target = page_loan_request + (signed long)loaned_pages;
  240. if (target < 0 || total_pages < min_mem_pages)
  241. target = 0;
  242. if (target > oom_freed_pages)
  243. target -= oom_freed_pages;
  244. else
  245. target = 0;
  246. active_pages_target = total_pages - target;
  247. if (min_mem_pages > active_pages_target)
  248. target = total_pages - min_mem_pages;
  249. if (target < 0)
  250. target = 0;
  251. loaned_pages_target = target;
  252. cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
  253. page_loan_request, loaned_pages, loaned_pages_target,
  254. oom_freed_pages, totalram_pages);
  255. }
/* Registered with the OOM notifier chain in cmm_init(). */
static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};
/**
 * cmm_thread - CMM task thread
 * @dummy: not used
 *
 * Polls the hypervisor every @delay seconds and balloons pages in or
 * out to track the requested loan level.  Loaning is suspended for
 * @hotplug_delay seconds after a memory hotplug operation.
 *
 * Return value:
 * 	0
 **/
static int cmm_thread(void *dummy)
{
	unsigned long timeleft;

	while (1) {
		timeleft = msleep_interruptible(delay * 1000);

		/* A non-zero timeleft means the sleep was interrupted
		 * (e.g. by kthread_stop) — exit in either case. */
		if (kthread_should_stop() || timeleft)
			break;

		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				/* Hotplug just finished: clear the flag and
				 * back off before resuming loan activity. */
				hotplug_occurred = 0;
				mutex_unlock(&hotplug_mutex);
				cmm_dbg("Hotplug operation has occurred, "
						"loaning activity suspended "
						"for %d seconds.\n",
						hotplug_delay);
				timeleft = msleep_interruptible(hotplug_delay *
						1000);
				if (kthread_should_stop() || timeleft)
					break;
				continue;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			/* Hotplug in progress (mutex held elsewhere):
			 * skip this cycle entirely. */
			cmm_dbg("Hotplug operation in progress, activity "
					"suspended\n");
			continue;
		}

		cmm_get_mpp();

		if (loaned_pages_target > loaned_pages) {
			/* Could not fully satisfy the request — accept the
			 * current level as the target. */
			if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
				loaned_pages_target = loaned_pages;
		} else if (loaned_pages_target < loaned_pages)
			cmm_free_pages(loaned_pages - loaned_pages_target);
	}
	return 0;
}
/*
 * CMM_SHOW - generate a read-only sysfs "show" handler plus its
 * DEVICE_ATTR for a formatted value.
 */
#define CMM_SHOW(name, format, args...)			\
	static ssize_t show_##name(struct device *dev,	\
				   struct device_attribute *attr,	\
				   char *buf)			\
	{							\
		return sprintf(buf, format, ##args);		\
	}							\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
  312. static ssize_t show_oom_pages(struct device *dev,
  313. struct device_attribute *attr, char *buf)
  314. {
  315. return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
  316. }
  317. static ssize_t store_oom_pages(struct device *dev,
  318. struct device_attribute *attr,
  319. const char *buf, size_t count)
  320. {
  321. unsigned long val = simple_strtoul (buf, NULL, 10);
  322. if (!capable(CAP_SYS_ADMIN))
  323. return -EPERM;
  324. if (val != 0)
  325. return -EBADMSG;
  326. oom_freed_pages = 0;
  327. return count;
  328. }
/* oom_freed_kb is read/write: writing 0 resets the counter. */
static DEVICE_ATTR(oom_freed_kb, S_IWUSR | S_IRUGO,
		   show_oom_pages, store_oom_pages);

/* All attributes created on the cmm device by cmm_sysfs_register(). */
static struct device_attribute *cmm_attrs[] = {
	&dev_attr_loaned_kb,
	&dev_attr_loaned_target_kb,
	&dev_attr_oom_freed_kb,
};

/* /sys/devices/system/cmm */
static struct bus_type cmm_subsys = {
	.name = "cmm",
	.dev_name = "cmm",
};
  340. /**
  341. * cmm_sysfs_register - Register with sysfs
  342. *
  343. * Return value:
  344. * 0 on success / other on failure
  345. **/
  346. static int cmm_sysfs_register(struct device *dev)
  347. {
  348. int i, rc;
  349. if ((rc = subsys_system_register(&cmm_subsys, NULL)))
  350. return rc;
  351. dev->id = 0;
  352. dev->bus = &cmm_subsys;
  353. if ((rc = device_register(dev)))
  354. goto subsys_unregister;
  355. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
  356. if ((rc = device_create_file(dev, cmm_attrs[i])))
  357. goto fail;
  358. }
  359. return 0;
  360. fail:
  361. while (--i >= 0)
  362. device_remove_file(dev, cmm_attrs[i]);
  363. device_unregister(dev);
  364. subsys_unregister:
  365. bus_unregister(&cmm_subsys);
  366. return rc;
  367. }
  368. /**
  369. * cmm_unregister_sysfs - Unregister from sysfs
  370. *
  371. **/
  372. static void cmm_unregister_sysfs(struct device *dev)
  373. {
  374. int i;
  375. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
  376. device_remove_file(dev, cmm_attrs[i]);
  377. device_unregister(dev);
  378. bus_unregister(&cmm_subsys);
  379. }
  380. /**
  381. * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
  382. *
  383. **/
  384. static int cmm_reboot_notifier(struct notifier_block *nb,
  385. unsigned long action, void *unused)
  386. {
  387. if (action == SYS_RESTART) {
  388. if (cmm_thread_ptr)
  389. kthread_stop(cmm_thread_ptr);
  390. cmm_thread_ptr = NULL;
  391. cmm_free_pages(loaned_pages);
  392. }
  393. return NOTIFY_DONE;
  394. }
  395. static struct notifier_block cmm_reboot_nb = {
  396. .notifier_call = cmm_reboot_notifier,
  397. };
  398. /**
  399. * cmm_count_pages - Count the number of pages loaned in a particular range.
  400. *
  401. * @arg: memory_isolate_notify structure with address range and count
  402. *
  403. * Return value:
  404. * 0 on success
  405. **/
  406. static unsigned long cmm_count_pages(void *arg)
  407. {
  408. struct memory_isolate_notify *marg = arg;
  409. struct cmm_page_array *pa;
  410. unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
  411. unsigned long end = start + (marg->nr_pages << PAGE_SHIFT);
  412. unsigned long idx;
  413. spin_lock(&cmm_lock);
  414. pa = cmm_page_list;
  415. while (pa) {
  416. if ((unsigned long)pa >= start && (unsigned long)pa < end)
  417. marg->pages_found++;
  418. for (idx = 0; idx < pa->index; idx++)
  419. if (pa->page[idx] >= start && pa->page[idx] < end)
  420. marg->pages_found++;
  421. pa = pa->next;
  422. }
  423. spin_unlock(&cmm_lock);
  424. return 0;
  425. }
  426. /**
  427. * cmm_memory_isolate_cb - Handle memory isolation notifier calls
  428. * @self: notifier block struct
  429. * @action: action to take
  430. * @arg: struct memory_isolate_notify data for handler
  431. *
  432. * Return value:
  433. * NOTIFY_OK or notifier error based on subfunction return value
  434. **/
  435. static int cmm_memory_isolate_cb(struct notifier_block *self,
  436. unsigned long action, void *arg)
  437. {
  438. int ret = 0;
  439. if (action == MEM_ISOLATE_COUNT)
  440. ret = cmm_count_pages(arg);
  441. return notifier_from_errno(ret);
  442. }
  443. static struct notifier_block cmm_mem_isolate_nb = {
  444. .notifier_call = cmm_memory_isolate_cb,
  445. .priority = CMM_MEM_ISOLATE_PRI
  446. };
  447. /**
  448. * cmm_mem_going_offline - Unloan pages where memory is to be removed
  449. * @arg: memory_notify structure with page range to be offlined
  450. *
  451. * Return value:
  452. * 0 on success
  453. **/
  454. static int cmm_mem_going_offline(void *arg)
  455. {
  456. struct memory_notify *marg = arg;
  457. unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
  458. unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
  459. struct cmm_page_array *pa_curr, *pa_last, *npa;
  460. unsigned long idx;
  461. unsigned long freed = 0;
  462. cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
  463. start_page, marg->nr_pages);
  464. spin_lock(&cmm_lock);
  465. /* Search the page list for pages in the range to be offlined */
  466. pa_last = pa_curr = cmm_page_list;
  467. while (pa_curr) {
  468. for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
  469. if ((pa_curr->page[idx] < start_page) ||
  470. (pa_curr->page[idx] >= end_page))
  471. continue;
  472. plpar_page_set_active(__pa(pa_curr->page[idx]));
  473. free_page(pa_curr->page[idx]);
  474. freed++;
  475. loaned_pages--;
  476. totalram_pages++;
  477. pa_curr->page[idx] = pa_last->page[--pa_last->index];
  478. if (pa_last->index == 0) {
  479. if (pa_curr == pa_last)
  480. pa_curr = pa_last->next;
  481. pa_last = pa_last->next;
  482. free_page((unsigned long)cmm_page_list);
  483. cmm_page_list = pa_last;
  484. }
  485. }
  486. pa_curr = pa_curr->next;
  487. }
  488. /* Search for page list structures in the range to be offlined */
  489. pa_last = NULL;
  490. pa_curr = cmm_page_list;
  491. while (pa_curr) {
  492. if (((unsigned long)pa_curr >= start_page) &&
  493. ((unsigned long)pa_curr < end_page)) {
  494. npa = (struct cmm_page_array *)__get_free_page(
  495. GFP_NOIO | __GFP_NOWARN |
  496. __GFP_NORETRY | __GFP_NOMEMALLOC);
  497. if (!npa) {
  498. spin_unlock(&cmm_lock);
  499. cmm_dbg("Failed to allocate memory for list "
  500. "management. Memory hotplug "
  501. "failed.\n");
  502. return -ENOMEM;
  503. }
  504. memcpy(npa, pa_curr, PAGE_SIZE);
  505. if (pa_curr == cmm_page_list)
  506. cmm_page_list = npa;
  507. if (pa_last)
  508. pa_last->next = npa;
  509. free_page((unsigned long) pa_curr);
  510. freed++;
  511. pa_curr = npa;
  512. }
  513. pa_last = pa_curr;
  514. pa_curr = pa_curr->next;
  515. }
  516. spin_unlock(&cmm_lock);
  517. cmm_dbg("Released %ld pages in the search range.\n", freed);
  518. return 0;
  519. }
  520. /**
  521. * cmm_memory_cb - Handle memory hotplug notifier calls
  522. * @self: notifier block struct
  523. * @action: action to take
  524. * @arg: struct memory_notify data for handler
  525. *
  526. * Return value:
  527. * NOTIFY_OK or notifier error based on subfunction return value
  528. *
  529. **/
  530. static int cmm_memory_cb(struct notifier_block *self,
  531. unsigned long action, void *arg)
  532. {
  533. int ret = 0;
  534. switch (action) {
  535. case MEM_GOING_OFFLINE:
  536. mutex_lock(&hotplug_mutex);
  537. hotplug_occurred = 1;
  538. ret = cmm_mem_going_offline(arg);
  539. break;
  540. case MEM_OFFLINE:
  541. case MEM_CANCEL_OFFLINE:
  542. mutex_unlock(&hotplug_mutex);
  543. cmm_dbg("Memory offline operation complete.\n");
  544. break;
  545. case MEM_GOING_ONLINE:
  546. case MEM_ONLINE:
  547. case MEM_CANCEL_ONLINE:
  548. break;
  549. }
  550. return notifier_from_errno(ret);
  551. }
  552. static struct notifier_block cmm_mem_nb = {
  553. .notifier_call = cmm_memory_cb,
  554. .priority = CMM_MEM_HOTPLUG_PRI
  555. };
  556. /**
  557. * cmm_init - Module initialization
  558. *
  559. * Return value:
  560. * 0 on success / other on failure
  561. **/
  562. static int cmm_init(void)
  563. {
  564. int rc = -ENOMEM;
  565. if (!firmware_has_feature(FW_FEATURE_CMO))
  566. return -EOPNOTSUPP;
  567. if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
  568. return rc;
  569. if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
  570. goto out_oom_notifier;
  571. if ((rc = cmm_sysfs_register(&cmm_dev)))
  572. goto out_reboot_notifier;
  573. if (register_memory_notifier(&cmm_mem_nb) ||
  574. register_memory_isolate_notifier(&cmm_mem_isolate_nb))
  575. goto out_unregister_notifier;
  576. if (cmm_disabled)
  577. return rc;
  578. cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
  579. if (IS_ERR(cmm_thread_ptr)) {
  580. rc = PTR_ERR(cmm_thread_ptr);
  581. goto out_unregister_notifier;
  582. }
  583. return rc;
  584. out_unregister_notifier:
  585. unregister_memory_notifier(&cmm_mem_nb);
  586. unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
  587. cmm_unregister_sysfs(&cmm_dev);
  588. out_reboot_notifier:
  589. unregister_reboot_notifier(&cmm_reboot_nb);
  590. out_oom_notifier:
  591. unregister_oom_notifier(&cmm_oom_nb);
  592. return rc;
  593. }
  594. /**
  595. * cmm_exit - Module exit
  596. *
  597. * Return value:
  598. * nothing
  599. **/
  600. static void cmm_exit(void)
  601. {
  602. if (cmm_thread_ptr)
  603. kthread_stop(cmm_thread_ptr);
  604. unregister_oom_notifier(&cmm_oom_nb);
  605. unregister_reboot_notifier(&cmm_reboot_nb);
  606. unregister_memory_notifier(&cmm_mem_nb);
  607. unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
  608. cmm_free_pages(loaned_pages);
  609. cmm_unregister_sysfs(&cmm_dev);
  610. }
  611. /**
  612. * cmm_set_disable - Disable/Enable CMM
  613. *
  614. * Return value:
  615. * 0 on success / other on failure
  616. **/
  617. static int cmm_set_disable(const char *val, struct kernel_param *kp)
  618. {
  619. int disable = simple_strtoul(val, NULL, 10);
  620. if (disable != 0 && disable != 1)
  621. return -EINVAL;
  622. if (disable && !cmm_disabled) {
  623. if (cmm_thread_ptr)
  624. kthread_stop(cmm_thread_ptr);
  625. cmm_thread_ptr = NULL;
  626. cmm_free_pages(loaned_pages);
  627. } else if (!disable && cmm_disabled) {
  628. cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
  629. if (IS_ERR(cmm_thread_ptr))
  630. return PTR_ERR(cmm_thread_ptr);
  631. }
  632. cmm_disabled = disable;
  633. return 0;
  634. }
/* "disable" uses a custom setter so toggling at runtime starts/stops
 * the polling thread and releases loaned pages. */
module_param_call(disable, cmm_set_disable, param_get_uint,
		  &cmm_disabled, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
		 "[Default=" __stringify(CMM_DISABLE) "]");

module_init(cmm_init);
module_exit(cmm_exit);