cmm.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751
  1. /*
  2. * Collaborative memory management interface.
  3. *
  4. * Copyright (C) 2008 IBM Corporation
  5. * Author(s): Brian King (brking@linux.vnet.ibm.com),
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. */
  22. #include <linux/ctype.h>
  23. #include <linux/delay.h>
  24. #include <linux/errno.h>
  25. #include <linux/fs.h>
  26. #include <linux/gfp.h>
  27. #include <linux/init.h>
  28. #include <linux/kthread.h>
  29. #include <linux/module.h>
  30. #include <linux/oom.h>
  31. #include <linux/reboot.h>
  32. #include <linux/sched.h>
  33. #include <linux/stringify.h>
  34. #include <linux/swap.h>
  35. #include <linux/sysdev.h>
  36. #include <asm/firmware.h>
  37. #include <asm/hvcall.h>
  38. #include <asm/mmu.h>
  39. #include <asm/pgalloc.h>
  40. #include <asm/uaccess.h>
  41. #include <linux/memory.h>
  42. #include "plpar_wrappers.h"
#define CMM_DRIVER_VERSION "1.0.0"
/* Default seconds between polls of the hypervisor for loan requests. */
#define CMM_DEFAULT_DELAY 1
/* Default seconds loaning stays suspended after a memory hotplug remove. */
#define CMM_HOTPLUG_DELAY 5
#define CMM_DEBUG 0
#define CMM_DISABLE 0
/* Default amount of memory (in KB) handed back on an OOM event. */
#define CMM_OOM_KB 1024
/* Default floor (in MB) below which the balloon will not shrink memory. */
#define CMM_MIN_MEM_MB 256
/* Convert between KB counts and page counts (PAGE_SHIFT >= 10 assumed). */
#define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
/*
 * The priority level tries to ensure that this notifier is called as
 * late as possible to reduce thrashing in the shared memory pool.
 */
#define CMM_MEM_HOTPLUG_PRI 1
#define CMM_MEM_ISOLATE_PRI 15
/* Runtime-tunable module state, seeded from the CMM_* defaults above
 * and exposed as writable module parameters below. */
static unsigned int delay = CMM_DEFAULT_DELAY;
static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
/* sysdev registered under the "cmm" sysdev class in cmm_sysfs_register(). */
static struct sys_device cmm_sysdev;
  65. MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
  66. MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
  67. MODULE_LICENSE("GPL");
  68. MODULE_VERSION(CMM_DRIVER_VERSION);
  69. module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR);
  70. MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
  71. "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
  72. module_param_named(hotplug_delay, hotplug_delay, uint, S_IRUGO | S_IWUSR);
  73. MODULE_PARM_DESC(delay, "Delay (in seconds) after memory hotplug remove "
  74. "before loaning resumes. "
  75. "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
  76. module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR);
  77. MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
  78. "[Default=" __stringify(CMM_OOM_KB) "]");
  79. module_param_named(min_mem_mb, min_mem_mb, ulong, S_IRUGO | S_IWUSR);
  80. MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
  81. "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
  82. module_param_named(debug, cmm_debug, uint, S_IRUGO | S_IWUSR);
  83. MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
  84. "[Default=" __stringify(CMM_DEBUG) "]");
/*
 * Number of loaned-page addresses that fit in one list page after the
 * next pointer and index bookkeeping fields are accounted for.
 */
#define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))

#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }

/* One page worth of loaned-page addresses, chained into a singly
 * linked list headed by cmm_page_list. */
struct cmm_page_array {
	struct cmm_page_array *next;		/* next list page, or NULL */
	unsigned long index;			/* count of valid entries in page[] */
	unsigned long page[CMM_NR_PAGES];	/* kernel addresses of loaned pages */
};

static unsigned long loaned_pages;		/* pages currently loaned out */
static unsigned long loaned_pages_target;	/* desired loan level (see cmm_get_mpp) */
static unsigned long oom_freed_pages;		/* pages released by the OOM notifier */

static struct cmm_page_array *cmm_page_list;
/* Protects cmm_page_list and the loaned page counters above. */
static DEFINE_SPINLOCK(cmm_lock);

static DEFINE_MUTEX(hotplug_mutex);
static int hotplug_occurred; /* protected by the hotplug mutex */

static struct task_struct *cmm_thread_ptr;
/**
 * cmm_alloc_pages - Allocate pages and mark them as loaned
 * @nr: number of pages to allocate
 *
 * Return value:
 * 	number of pages requested to be allocated which were not
 **/
static long cmm_alloc_pages(long nr)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;
	long rc;

	cmm_dbg("Begin request for %ld pages\n", nr);

	while (nr) {
		/* Exit if a hotplug operation is in progress or occurred */
		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				mutex_unlock(&hotplug_mutex);
				break;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			break;
		}

		/* Light-weight allocation: never reclaim or retry; a failure
		 * simply ends this round of loaning. */
		addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
				       __GFP_NORETRY | __GFP_NOMEMALLOC);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = cmm_page_list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)__get_free_page(
					GFP_NOIO | __GFP_NOWARN |
					__GFP_NORETRY | __GFP_NOMEMALLOC);
			if (!npa) {
				pr_info("%s: Can not allocate new page list\n", __func__);
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			pa = cmm_page_list;

			/* Recheck under the lock: another path may have added
			 * a list page while the lock was dropped above. */
			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				cmm_page_list = pa;
			} else
				free_page((unsigned long) npa);
		}

		/* Tell the hypervisor this page is loaned before tracking it. */
		if ((rc = plpar_page_set_loaned(__pa(addr)))) {
			pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
			spin_unlock(&cmm_lock);
			free_page(addr);
			break;
		}

		pa->page[pa->index++] = addr;
		loaned_pages++;
		/* Loaned pages no longer count toward usable RAM. */
		totalram_pages--;
		spin_unlock(&cmm_lock);
		nr--;
	}

	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
/**
 * cmm_free_pages - Free pages and mark them as active
 * @nr: number of pages to free
 *
 * Return value:
 * 	number of pages requested to be freed which were not
 **/
static long cmm_free_pages(long nr)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	cmm_dbg("Begin free of %ld pages.\n", nr);
	spin_lock(&cmm_lock);
	pa = cmm_page_list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		/* Pop the most recently loaned page from the head array. */
		addr = pa->page[--pa->index];

		if (pa->index == 0) {
			/* Head list page is now empty; unlink and free it. */
			pa = pa->next;
			free_page((unsigned long) cmm_page_list);
			cmm_page_list = pa;
		}

		/* Reclaim the page from the hypervisor before reusing it. */
		plpar_page_set_active(__pa(addr));
		free_page(addr);
		loaned_pages--;
		nr--;
		totalram_pages++;
	}
	spin_unlock(&cmm_lock);
	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
  199. /**
  200. * cmm_oom_notify - OOM notifier
  201. * @self: notifier block struct
  202. * @dummy: not used
  203. * @parm: returned - number of pages freed
  204. *
  205. * Return value:
  206. * NOTIFY_OK
  207. **/
  208. static int cmm_oom_notify(struct notifier_block *self,
  209. unsigned long dummy, void *parm)
  210. {
  211. unsigned long *freed = parm;
  212. long nr = KB2PAGES(oom_kb);
  213. cmm_dbg("OOM processing started\n");
  214. nr = cmm_free_pages(nr);
  215. loaned_pages_target = loaned_pages;
  216. *freed += KB2PAGES(oom_kb) - nr;
  217. oom_freed_pages += KB2PAGES(oom_kb) - nr;
  218. cmm_dbg("OOM processing complete\n");
  219. return NOTIFY_OK;
  220. }
  221. /**
  222. * cmm_get_mpp - Read memory performance parameters
  223. *
  224. * Makes hcall to query the current page loan request from the hypervisor.
  225. *
  226. * Return value:
  227. * nothing
  228. **/
  229. static void cmm_get_mpp(void)
  230. {
  231. int rc;
  232. struct hvcall_mpp_data mpp_data;
  233. signed long active_pages_target, page_loan_request, target;
  234. signed long total_pages = totalram_pages + loaned_pages;
  235. signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
  236. rc = h_get_mpp(&mpp_data);
  237. if (rc != H_SUCCESS)
  238. return;
  239. page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
  240. target = page_loan_request + (signed long)loaned_pages;
  241. if (target < 0 || total_pages < min_mem_pages)
  242. target = 0;
  243. if (target > oom_freed_pages)
  244. target -= oom_freed_pages;
  245. else
  246. target = 0;
  247. active_pages_target = total_pages - target;
  248. if (min_mem_pages > active_pages_target)
  249. target = total_pages - min_mem_pages;
  250. if (target < 0)
  251. target = 0;
  252. loaned_pages_target = target;
  253. cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
  254. page_loan_request, loaned_pages, loaned_pages_target,
  255. oom_freed_pages, totalram_pages);
  256. }
/* Registered with the OOM notifier chain so loaned pages are given back
 * to the kernel before the OOM killer picks a victim. */
static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};
/**
 * cmm_thread - CMM task thread
 * @dummy: not used
 *
 * Polls the hypervisor every @delay seconds for the current loan request
 * and grows or shrinks the balloon toward loaned_pages_target. Loaning
 * pauses for @hotplug_delay seconds after a memory hotplug operation.
 *
 * Return value:
 * 	0
 **/
static int cmm_thread(void *dummy)
{
	unsigned long timeleft;

	while (1) {
		timeleft = msleep_interruptible(delay * 1000);

		/* A nonzero timeleft means the sleep was interrupted
		 * (e.g. by kthread_stop's wakeup) -- exit in that case too. */
		if (kthread_should_stop() || timeleft)
			break;

		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				/* Hotplug finished recently; back off before
				 * resuming loan activity. */
				hotplug_occurred = 0;
				mutex_unlock(&hotplug_mutex);
				cmm_dbg("Hotplug operation has occurred, "
						"loaning activity suspended "
						"for %d seconds.\n",
						hotplug_delay);
				timeleft = msleep_interruptible(hotplug_delay *
						1000);
				if (kthread_should_stop() || timeleft)
					break;
				continue;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			/* Hotplug in flight (mutex held by the notifier);
			 * skip this polling cycle entirely. */
			cmm_dbg("Hotplug operation in progress, activity "
					"suspended\n");
			continue;
		}

		cmm_get_mpp();

		if (loaned_pages_target > loaned_pages) {
			/* Could not loan everything asked for; lower the
			 * target so we do not spin retrying. */
			if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
				loaned_pages_target = loaned_pages;
		} else if (loaned_pages_target < loaned_pages)
			cmm_free_pages(loaned_pages - loaned_pages_target);
	}
	return 0;
}
/*
 * CMM_SHOW - define a read-only sysfs show routine plus its attribute.
 * Expands to show_<name>() returning the formatted value and a matching
 * SYSDEV_ATTR named <name>.
 */
#define CMM_SHOW(name, format, args...)			\
	static ssize_t show_##name(struct sys_device *dev,	\
				   struct sysdev_attribute *attr,	\
				   char *buf)			\
	{							\
		return sprintf(buf, format, ##args);		\
	}							\
	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)

CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
  313. static ssize_t show_oom_pages(struct sys_device *dev,
  314. struct sysdev_attribute *attr, char *buf)
  315. {
  316. return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
  317. }
  318. static ssize_t store_oom_pages(struct sys_device *dev,
  319. struct sysdev_attribute *attr,
  320. const char *buf, size_t count)
  321. {
  322. unsigned long val = simple_strtoul (buf, NULL, 10);
  323. if (!capable(CAP_SYS_ADMIN))
  324. return -EPERM;
  325. if (val != 0)
  326. return -EBADMSG;
  327. oom_freed_pages = 0;
  328. return count;
  329. }
  330. static SYSDEV_ATTR(oom_freed_kb, S_IWUSR| S_IRUGO,
  331. show_oom_pages, store_oom_pages);
/* Attributes created under the cmm sysdev in cmm_sysfs_register(). */
static struct sysdev_attribute *cmm_attrs[] = {
	&attr_loaned_kb,
	&attr_loaned_target_kb,
	&attr_oom_freed_kb,
};

static struct sysdev_class cmm_sysdev_class = {
	.name = "cmm",
};
  340. /**
  341. * cmm_sysfs_register - Register with sysfs
  342. *
  343. * Return value:
  344. * 0 on success / other on failure
  345. **/
  346. static int cmm_sysfs_register(struct sys_device *sysdev)
  347. {
  348. int i, rc;
  349. if ((rc = sysdev_class_register(&cmm_sysdev_class)))
  350. return rc;
  351. sysdev->id = 0;
  352. sysdev->cls = &cmm_sysdev_class;
  353. if ((rc = sysdev_register(sysdev)))
  354. goto class_unregister;
  355. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
  356. if ((rc = sysdev_create_file(sysdev, cmm_attrs[i])))
  357. goto fail;
  358. }
  359. return 0;
  360. fail:
  361. while (--i >= 0)
  362. sysdev_remove_file(sysdev, cmm_attrs[i]);
  363. sysdev_unregister(sysdev);
  364. class_unregister:
  365. sysdev_class_unregister(&cmm_sysdev_class);
  366. return rc;
  367. }
  368. /**
  369. * cmm_unregister_sysfs - Unregister from sysfs
  370. *
  371. **/
  372. static void cmm_unregister_sysfs(struct sys_device *sysdev)
  373. {
  374. int i;
  375. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
  376. sysdev_remove_file(sysdev, cmm_attrs[i]);
  377. sysdev_unregister(sysdev);
  378. sysdev_class_unregister(&cmm_sysdev_class);
  379. }
  380. /**
  381. * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
  382. *
  383. **/
  384. static int cmm_reboot_notifier(struct notifier_block *nb,
  385. unsigned long action, void *unused)
  386. {
  387. if (action == SYS_RESTART) {
  388. if (cmm_thread_ptr)
  389. kthread_stop(cmm_thread_ptr);
  390. cmm_thread_ptr = NULL;
  391. cmm_free_pages(loaned_pages);
  392. }
  393. return NOTIFY_DONE;
  394. }
  395. static struct notifier_block cmm_reboot_nb = {
  396. .notifier_call = cmm_reboot_notifier,
  397. };
  398. /**
  399. * cmm_count_pages - Count the number of pages loaned in a particular range.
  400. *
  401. * @arg: memory_isolate_notify structure with address range and count
  402. *
  403. * Return value:
  404. * 0 on success
  405. **/
  406. static unsigned long cmm_count_pages(void *arg)
  407. {
  408. struct memory_isolate_notify *marg = arg;
  409. struct cmm_page_array *pa;
  410. unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
  411. unsigned long end = start + (marg->nr_pages << PAGE_SHIFT);
  412. unsigned long idx;
  413. spin_lock(&cmm_lock);
  414. pa = cmm_page_list;
  415. while (pa) {
  416. if ((unsigned long)pa >= start && (unsigned long)pa < end)
  417. marg->pages_found++;
  418. for (idx = 0; idx < pa->index; idx++)
  419. if (pa->page[idx] >= start && pa->page[idx] < end)
  420. marg->pages_found++;
  421. pa = pa->next;
  422. }
  423. spin_unlock(&cmm_lock);
  424. return 0;
  425. }
  426. /**
  427. * cmm_memory_isolate_cb - Handle memory isolation notifier calls
  428. * @self: notifier block struct
  429. * @action: action to take
  430. * @arg: struct memory_isolate_notify data for handler
  431. *
  432. * Return value:
  433. * NOTIFY_OK or notifier error based on subfunction return value
  434. **/
  435. static int cmm_memory_isolate_cb(struct notifier_block *self,
  436. unsigned long action, void *arg)
  437. {
  438. int ret = 0;
  439. if (action == MEM_ISOLATE_COUNT)
  440. ret = cmm_count_pages(arg);
  441. if (ret)
  442. ret = notifier_from_errno(ret);
  443. else
  444. ret = NOTIFY_OK;
  445. return ret;
  446. }
  447. static struct notifier_block cmm_mem_isolate_nb = {
  448. .notifier_call = cmm_memory_isolate_cb,
  449. .priority = CMM_MEM_ISOLATE_PRI
  450. };
  451. /**
  452. * cmm_mem_going_offline - Unloan pages where memory is to be removed
  453. * @arg: memory_notify structure with page range to be offlined
  454. *
  455. * Return value:
  456. * 0 on success
  457. **/
  458. static int cmm_mem_going_offline(void *arg)
  459. {
  460. struct memory_notify *marg = arg;
  461. unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
  462. unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
  463. struct cmm_page_array *pa_curr, *pa_last, *npa;
  464. unsigned long idx;
  465. unsigned long freed = 0;
  466. cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
  467. start_page, marg->nr_pages);
  468. spin_lock(&cmm_lock);
  469. /* Search the page list for pages in the range to be offlined */
  470. pa_last = pa_curr = cmm_page_list;
  471. while (pa_curr) {
  472. for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
  473. if ((pa_curr->page[idx] < start_page) ||
  474. (pa_curr->page[idx] >= end_page))
  475. continue;
  476. plpar_page_set_active(__pa(pa_curr->page[idx]));
  477. free_page(pa_curr->page[idx]);
  478. freed++;
  479. loaned_pages--;
  480. totalram_pages++;
  481. pa_curr->page[idx] = pa_last->page[--pa_last->index];
  482. if (pa_last->index == 0) {
  483. if (pa_curr == pa_last)
  484. pa_curr = pa_last->next;
  485. pa_last = pa_last->next;
  486. free_page((unsigned long)cmm_page_list);
  487. cmm_page_list = pa_last;
  488. continue;
  489. }
  490. }
  491. pa_curr = pa_curr->next;
  492. }
  493. /* Search for page list structures in the range to be offlined */
  494. pa_last = NULL;
  495. pa_curr = cmm_page_list;
  496. while (pa_curr) {
  497. if (((unsigned long)pa_curr >= start_page) &&
  498. ((unsigned long)pa_curr < end_page)) {
  499. npa = (struct cmm_page_array *)__get_free_page(
  500. GFP_NOIO | __GFP_NOWARN |
  501. __GFP_NORETRY | __GFP_NOMEMALLOC);
  502. if (!npa) {
  503. spin_unlock(&cmm_lock);
  504. cmm_dbg("Failed to allocate memory for list "
  505. "management. Memory hotplug "
  506. "failed.\n");
  507. return ENOMEM;
  508. }
  509. memcpy(npa, pa_curr, PAGE_SIZE);
  510. if (pa_curr == cmm_page_list)
  511. cmm_page_list = npa;
  512. if (pa_last)
  513. pa_last->next = npa;
  514. free_page((unsigned long) pa_curr);
  515. freed++;
  516. pa_curr = npa;
  517. }
  518. pa_last = pa_curr;
  519. pa_curr = pa_curr->next;
  520. }
  521. spin_unlock(&cmm_lock);
  522. cmm_dbg("Released %ld pages in the search range.\n", freed);
  523. return 0;
  524. }
  525. /**
  526. * cmm_memory_cb - Handle memory hotplug notifier calls
  527. * @self: notifier block struct
  528. * @action: action to take
  529. * @arg: struct memory_notify data for handler
  530. *
  531. * Return value:
  532. * NOTIFY_OK or notifier error based on subfunction return value
  533. *
  534. **/
  535. static int cmm_memory_cb(struct notifier_block *self,
  536. unsigned long action, void *arg)
  537. {
  538. int ret = 0;
  539. switch (action) {
  540. case MEM_GOING_OFFLINE:
  541. mutex_lock(&hotplug_mutex);
  542. hotplug_occurred = 1;
  543. ret = cmm_mem_going_offline(arg);
  544. break;
  545. case MEM_OFFLINE:
  546. case MEM_CANCEL_OFFLINE:
  547. mutex_unlock(&hotplug_mutex);
  548. cmm_dbg("Memory offline operation complete.\n");
  549. break;
  550. case MEM_GOING_ONLINE:
  551. case MEM_ONLINE:
  552. case MEM_CANCEL_ONLINE:
  553. break;
  554. }
  555. if (ret)
  556. ret = notifier_from_errno(ret);
  557. else
  558. ret = NOTIFY_OK;
  559. return ret;
  560. }
  561. static struct notifier_block cmm_mem_nb = {
  562. .notifier_call = cmm_memory_cb,
  563. .priority = CMM_MEM_HOTPLUG_PRI
  564. };
  565. /**
  566. * cmm_init - Module initialization
  567. *
  568. * Return value:
  569. * 0 on success / other on failure
  570. **/
  571. static int cmm_init(void)
  572. {
  573. int rc = -ENOMEM;
  574. if (!firmware_has_feature(FW_FEATURE_CMO))
  575. return -EOPNOTSUPP;
  576. if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
  577. return rc;
  578. if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
  579. goto out_oom_notifier;
  580. if ((rc = cmm_sysfs_register(&cmm_sysdev)))
  581. goto out_reboot_notifier;
  582. if (register_memory_notifier(&cmm_mem_nb) ||
  583. register_memory_isolate_notifier(&cmm_mem_isolate_nb))
  584. goto out_unregister_notifier;
  585. if (cmm_disabled)
  586. return rc;
  587. cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
  588. if (IS_ERR(cmm_thread_ptr)) {
  589. rc = PTR_ERR(cmm_thread_ptr);
  590. goto out_unregister_notifier;
  591. }
  592. return rc;
  593. out_unregister_notifier:
  594. unregister_memory_notifier(&cmm_mem_nb);
  595. unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
  596. cmm_unregister_sysfs(&cmm_sysdev);
  597. out_reboot_notifier:
  598. unregister_reboot_notifier(&cmm_reboot_nb);
  599. out_oom_notifier:
  600. unregister_oom_notifier(&cmm_oom_nb);
  601. return rc;
  602. }
/**
 * cmm_exit - Module exit
 *
 * Return value:
 * 	nothing
 **/
static void cmm_exit(void)
{
	/* Stop the polling thread first so no new loaning starts while
	 * the notifiers are being torn down. */
	if (cmm_thread_ptr)
		kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	unregister_reboot_notifier(&cmm_reboot_nb);
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	/* Give every loaned page back to the kernel before unloading. */
	cmm_free_pages(loaned_pages);
	cmm_unregister_sysfs(&cmm_sysdev);
}
  620. /**
  621. * cmm_set_disable - Disable/Enable CMM
  622. *
  623. * Return value:
  624. * 0 on success / other on failure
  625. **/
  626. static int cmm_set_disable(const char *val, struct kernel_param *kp)
  627. {
  628. int disable = simple_strtoul(val, NULL, 10);
  629. if (disable != 0 && disable != 1)
  630. return -EINVAL;
  631. if (disable && !cmm_disabled) {
  632. if (cmm_thread_ptr)
  633. kthread_stop(cmm_thread_ptr);
  634. cmm_thread_ptr = NULL;
  635. cmm_free_pages(loaned_pages);
  636. } else if (!disable && cmm_disabled) {
  637. cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
  638. if (IS_ERR(cmm_thread_ptr))
  639. return PTR_ERR(cmm_thread_ptr);
  640. }
  641. cmm_disabled = disable;
  642. return 0;
  643. }
/* "disable" needs a custom setter (cmm_set_disable) because toggling it
 * also starts/stops the polling thread and releases loaned pages. */
module_param_call(disable, cmm_set_disable, param_get_uint,
		  &cmm_disabled, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
		 "[Default=" __stringify(CMM_DISABLE) "]");

module_init(cmm_init);
module_exit(cmm_exit);