/* cmm.c */
/*
 * Collaborative memory management interface.
 *
 * Copyright (C) 2008 IBM Corporation
 * Author(s): Brian King (brking@linux.vnet.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
  22. #include <linux/ctype.h>
  23. #include <linux/delay.h>
  24. #include <linux/errno.h>
  25. #include <linux/fs.h>
  26. #include <linux/init.h>
  27. #include <linux/kthread.h>
  28. #include <linux/module.h>
  29. #include <linux/oom.h>
  30. #include <linux/reboot.h>
  31. #include <linux/sched.h>
  32. #include <linux/stringify.h>
  33. #include <linux/swap.h>
  34. #include <linux/sysdev.h>
  35. #include <asm/firmware.h>
  36. #include <asm/hvcall.h>
  37. #include <asm/mmu.h>
  38. #include <asm/pgalloc.h>
  39. #include <asm/uaccess.h>
  40. #include <linux/memory.h>
  41. #include "plpar_wrappers.h"
/* Driver version string reported via MODULE_VERSION. */
#define CMM_DRIVER_VERSION "1.0.0"
/* Default poll interval (seconds) between hypervisor paging-request queries. */
#define CMM_DEFAULT_DELAY 1
/* Seconds to suspend loaning activity after a memory hotplug operation. */
#define CMM_HOTPLUG_DELAY 5
#define CMM_DEBUG 0		/* debug logging off by default */
#define CMM_DISABLE 0		/* ballooning enabled by default */
#define CMM_OOM_KB 1024		/* KB to give back on an OOM event */
#define CMM_MIN_MEM_MB 256	/* never balloon below this many MB */
/* Convert between kilobytes and pages (assumes PAGE_SHIFT >= 10). */
#define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
/*
 * The priority level tries to ensure that this notifier is called as
 * late as possible to reduce thrashing in the shared memory pool.
 */
#define CMM_MEM_HOTPLUG_PRI 1
#define CMM_MEM_ISOLATE_PRI 15
  57. static unsigned int delay = CMM_DEFAULT_DELAY;
  58. static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
  59. static unsigned int oom_kb = CMM_OOM_KB;
  60. static unsigned int cmm_debug = CMM_DEBUG;
  61. static unsigned int cmm_disabled = CMM_DISABLE;
  62. static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
  63. static struct sys_device cmm_sysdev;
  64. MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
  65. MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
  66. MODULE_LICENSE("GPL");
  67. MODULE_VERSION(CMM_DRIVER_VERSION);
  68. module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR);
  69. MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
  70. "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
  71. module_param_named(hotplug_delay, hotplug_delay, uint, S_IRUGO | S_IWUSR);
  72. MODULE_PARM_DESC(delay, "Delay (in seconds) after memory hotplug remove "
  73. "before loaning resumes. "
  74. "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
  75. module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR);
  76. MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
  77. "[Default=" __stringify(CMM_OOM_KB) "]");
  78. module_param_named(min_mem_mb, min_mem_mb, ulong, S_IRUGO | S_IWUSR);
  79. MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
  80. "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
  81. module_param_named(debug, cmm_debug, uint, S_IRUGO | S_IWUSR);
  82. MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
  83. "[Default=" __stringify(CMM_DEBUG) "]");
  84. #define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))
  85. #define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
  86. struct cmm_page_array {
  87. struct cmm_page_array *next;
  88. unsigned long index;
  89. unsigned long page[CMM_NR_PAGES];
  90. };
  91. static unsigned long loaned_pages;
  92. static unsigned long loaned_pages_target;
  93. static unsigned long oom_freed_pages;
  94. static struct cmm_page_array *cmm_page_list;
  95. static DEFINE_SPINLOCK(cmm_lock);
  96. static DEFINE_MUTEX(hotplug_mutex);
  97. static int hotplug_occurred; /* protected by the hotplug mutex */
  98. static struct task_struct *cmm_thread_ptr;
/**
 * cmm_alloc_pages - Allocate pages and mark them as loaned
 * @nr: number of pages to allocate
 *
 * Return value:
 * 	number of pages requested to be allocated which were not
 **/
static long cmm_alloc_pages(long nr)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;
	long rc;

	cmm_dbg("Begin request for %ld pages\n", nr);

	while (nr) {
		/* Exit if a hotplug operation is in progress or occurred */
		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				mutex_unlock(&hotplug_mutex);
				break;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			break;
		}

		/* Must not trigger reclaim I/O while ballooning. */
		addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
				       __GFP_NORETRY | __GFP_NOMEMALLOC);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = cmm_page_list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)__get_free_page(
					GFP_NOIO | __GFP_NOWARN |
					__GFP_NORETRY | __GFP_NOMEMALLOC);
			if (!npa) {
				pr_info("%s: Can not allocate new page list\n", __func__);
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			/*
			 * The lock was dropped for the allocation: re-check
			 * the list head in case someone else extended it.
			 */
			pa = cmm_page_list;

			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				cmm_page_list = pa;
			} else
				free_page((unsigned long) npa);
		}

		if ((rc = plpar_page_set_loaned(__pa(addr)))) {
			pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
			spin_unlock(&cmm_lock);
			free_page(addr);
			break;
		}

		/* Record the loaned page and hide it from the page counters. */
		pa->page[pa->index++] = addr;
		loaned_pages++;
		totalram_pages--;
		spin_unlock(&cmm_lock);
		nr--;
	}

	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
/**
 * cmm_free_pages - Free pages and mark them as active
 * @nr: number of pages to free
 *
 * Return value:
 * 	number of pages requested to be freed which were not
 **/
static long cmm_free_pages(long nr)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	cmm_dbg("Begin free of %ld pages.\n", nr);
	spin_lock(&cmm_lock);
	pa = cmm_page_list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		/* Pop the most recently loaned page off the head array. */
		addr = pa->page[--pa->index];

		if (pa->index == 0) {
			/* Head bookkeeping page is now empty: unlink and free it. */
			pa = pa->next;
			free_page((unsigned long) cmm_page_list);
			cmm_page_list = pa;
		}

		/* Tell the hypervisor the page is active again, then release it. */
		plpar_page_set_active(__pa(addr));
		free_page(addr);
		loaned_pages--;
		nr--;
		totalram_pages++;
	}
	spin_unlock(&cmm_lock);
	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
  198. /**
  199. * cmm_oom_notify - OOM notifier
  200. * @self: notifier block struct
  201. * @dummy: not used
  202. * @parm: returned - number of pages freed
  203. *
  204. * Return value:
  205. * NOTIFY_OK
  206. **/
  207. static int cmm_oom_notify(struct notifier_block *self,
  208. unsigned long dummy, void *parm)
  209. {
  210. unsigned long *freed = parm;
  211. long nr = KB2PAGES(oom_kb);
  212. cmm_dbg("OOM processing started\n");
  213. nr = cmm_free_pages(nr);
  214. loaned_pages_target = loaned_pages;
  215. *freed += KB2PAGES(oom_kb) - nr;
  216. oom_freed_pages += KB2PAGES(oom_kb) - nr;
  217. cmm_dbg("OOM processing complete\n");
  218. return NOTIFY_OK;
  219. }
/**
 * cmm_get_mpp - Read memory performance parameters
 *
 * Makes hcall to query the current page loan request from the hypervisor.
 *
 * Return value:
 * 	nothing
 **/
static void cmm_get_mpp(void)
{
	int rc;
	struct hvcall_mpp_data mpp_data;
	signed long active_pages_target, page_loan_request, target;
	signed long total_pages = totalram_pages + loaned_pages;
	signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;

	rc = h_get_mpp(&mpp_data);

	if (rc != H_SUCCESS)
		return;

	/* Loan request is in bytes; convert to pages. May be negative. */
	page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);

	target = page_loan_request + (signed long)loaned_pages;

	/* Never target below zero or below the configured memory floor. */
	if (target < 0 || total_pages < min_mem_pages)
		target = 0;

	/* Credit pages already handed back by the OOM notifier. */
	if (target > oom_freed_pages)
		target -= oom_freed_pages;
	else
		target = 0;

	active_pages_target = total_pages - target;

	/* Clamp so at least min_mem_pages remain active. */
	if (min_mem_pages > active_pages_target)
		target = total_pages - min_mem_pages;

	if (target < 0)
		target = 0;

	loaned_pages_target = target;

	cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
		page_loan_request, loaned_pages, loaned_pages_target,
		oom_freed_pages, totalram_pages);
}
/* Registered with register_oom_notifier() in cmm_init(). */
static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};
/**
 * cmm_thread - CMM task thread
 * @dummy: not used
 *
 * Polls the hypervisor for the current loan request and grows or shrinks
 * the balloon to match, pausing around memory hotplug operations.
 *
 * Return value:
 * 	0
 **/
static int cmm_thread(void *dummy)
{
	unsigned long timeleft;

	while (1) {
		timeleft = msleep_interruptible(delay * 1000);

		/* Non-zero timeleft means the sleep was interrupted by a signal. */
		if (kthread_should_stop() || timeleft)
			break;

		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				/* Back off after a hotplug event before loaning again. */
				hotplug_occurred = 0;
				mutex_unlock(&hotplug_mutex);
				cmm_dbg("Hotplug operation has occurred, "
						"loaning activity suspended "
						"for %d seconds.\n",
						hotplug_delay);
				timeleft = msleep_interruptible(hotplug_delay *
						1000);
				if (kthread_should_stop() || timeleft)
					break;
				continue;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			/* A hotplug operation holds the mutex right now. */
			cmm_dbg("Hotplug operation in progress, activity "
					"suspended\n");
			continue;
		}

		cmm_get_mpp();

		if (loaned_pages_target > loaned_pages) {
			/* Could not reach target: settle for what we got. */
			if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
				loaned_pages_target = loaned_pages;
		} else if (loaned_pages_target < loaned_pages)
			cmm_free_pages(loaned_pages - loaned_pages_target);
	}
	return 0;
}
/*
 * CMM_SHOW - generate a read-only sysfs show routine plus its attribute,
 * printing @format with @args.
 */
#define CMM_SHOW(name, format, args...)			\
	static ssize_t show_##name(struct sys_device *dev,	\
				   struct sysdev_attribute *attr,	\
				   char *buf)			\
	{							\
		return sprintf(buf, format, ##args);		\
	}							\
	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)

CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
/* Show the cumulative amount of memory (in KB) freed by the OOM notifier. */
static ssize_t show_oom_pages(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
}
  317. static ssize_t store_oom_pages(struct sys_device *dev,
  318. struct sysdev_attribute *attr,
  319. const char *buf, size_t count)
  320. {
  321. unsigned long val = simple_strtoul (buf, NULL, 10);
  322. if (!capable(CAP_SYS_ADMIN))
  323. return -EPERM;
  324. if (val != 0)
  325. return -EBADMSG;
  326. oom_freed_pages = 0;
  327. return count;
  328. }
static SYSDEV_ATTR(oom_freed_kb, S_IWUSR| S_IRUGO,
		   show_oom_pages, store_oom_pages);

/* Attributes published under the cmm sysdev class directory. */
static struct sysdev_attribute *cmm_attrs[] = {
	&attr_loaned_kb,
	&attr_loaned_target_kb,
	&attr_oom_freed_kb,
};

static struct sysdev_class cmm_sysdev_class = {
	.name = "cmm",
};
  339. /**
  340. * cmm_sysfs_register - Register with sysfs
  341. *
  342. * Return value:
  343. * 0 on success / other on failure
  344. **/
  345. static int cmm_sysfs_register(struct sys_device *sysdev)
  346. {
  347. int i, rc;
  348. if ((rc = sysdev_class_register(&cmm_sysdev_class)))
  349. return rc;
  350. sysdev->id = 0;
  351. sysdev->cls = &cmm_sysdev_class;
  352. if ((rc = sysdev_register(sysdev)))
  353. goto class_unregister;
  354. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
  355. if ((rc = sysdev_create_file(sysdev, cmm_attrs[i])))
  356. goto fail;
  357. }
  358. return 0;
  359. fail:
  360. while (--i >= 0)
  361. sysdev_remove_file(sysdev, cmm_attrs[i]);
  362. sysdev_unregister(sysdev);
  363. class_unregister:
  364. sysdev_class_unregister(&cmm_sysdev_class);
  365. return rc;
  366. }
  367. /**
  368. * cmm_unregister_sysfs - Unregister from sysfs
  369. *
  370. **/
  371. static void cmm_unregister_sysfs(struct sys_device *sysdev)
  372. {
  373. int i;
  374. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
  375. sysdev_remove_file(sysdev, cmm_attrs[i]);
  376. sysdev_unregister(sysdev);
  377. sysdev_class_unregister(&cmm_sysdev_class);
  378. }
  379. /**
  380. * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
  381. *
  382. **/
  383. static int cmm_reboot_notifier(struct notifier_block *nb,
  384. unsigned long action, void *unused)
  385. {
  386. if (action == SYS_RESTART) {
  387. if (cmm_thread_ptr)
  388. kthread_stop(cmm_thread_ptr);
  389. cmm_thread_ptr = NULL;
  390. cmm_free_pages(loaned_pages);
  391. }
  392. return NOTIFY_DONE;
  393. }
/* Registered with register_reboot_notifier() in cmm_init(). */
static struct notifier_block cmm_reboot_nb = {
	.notifier_call = cmm_reboot_notifier,
};
  397. /**
  398. * cmm_count_pages - Count the number of pages loaned in a particular range.
  399. *
  400. * @arg: memory_isolate_notify structure with address range and count
  401. *
  402. * Return value:
  403. * 0 on success
  404. **/
  405. static unsigned long cmm_count_pages(void *arg)
  406. {
  407. struct memory_isolate_notify *marg = arg;
  408. struct cmm_page_array *pa;
  409. unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
  410. unsigned long end = start + (marg->nr_pages << PAGE_SHIFT);
  411. unsigned long idx;
  412. spin_lock(&cmm_lock);
  413. pa = cmm_page_list;
  414. while (pa) {
  415. if ((unsigned long)pa >= start && (unsigned long)pa < end)
  416. marg->pages_found++;
  417. for (idx = 0; idx < pa->index; idx++)
  418. if (pa->page[idx] >= start && pa->page[idx] < end)
  419. marg->pages_found++;
  420. pa = pa->next;
  421. }
  422. spin_unlock(&cmm_lock);
  423. return 0;
  424. }
  425. /**
  426. * cmm_memory_isolate_cb - Handle memory isolation notifier calls
  427. * @self: notifier block struct
  428. * @action: action to take
  429. * @arg: struct memory_isolate_notify data for handler
  430. *
  431. * Return value:
  432. * NOTIFY_OK or notifier error based on subfunction return value
  433. **/
  434. static int cmm_memory_isolate_cb(struct notifier_block *self,
  435. unsigned long action, void *arg)
  436. {
  437. int ret = 0;
  438. if (action == MEM_ISOLATE_COUNT)
  439. ret = cmm_count_pages(arg);
  440. if (ret)
  441. ret = notifier_from_errno(ret);
  442. else
  443. ret = NOTIFY_OK;
  444. return ret;
  445. }
/* Memory isolation notifier; high priority so CMM pages are counted early. */
static struct notifier_block cmm_mem_isolate_nb = {
	.notifier_call = cmm_memory_isolate_cb,
	.priority = CMM_MEM_ISOLATE_PRI
};
  450. /**
  451. * cmm_mem_going_offline - Unloan pages where memory is to be removed
  452. * @arg: memory_notify structure with page range to be offlined
  453. *
  454. * Return value:
  455. * 0 on success
  456. **/
  457. static int cmm_mem_going_offline(void *arg)
  458. {
  459. struct memory_notify *marg = arg;
  460. unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
  461. unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
  462. struct cmm_page_array *pa_curr, *pa_last, *npa;
  463. unsigned long idx;
  464. unsigned long freed = 0;
  465. cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
  466. start_page, marg->nr_pages);
  467. spin_lock(&cmm_lock);
  468. /* Search the page list for pages in the range to be offlined */
  469. pa_last = pa_curr = cmm_page_list;
  470. while (pa_curr) {
  471. for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
  472. if ((pa_curr->page[idx] < start_page) ||
  473. (pa_curr->page[idx] >= end_page))
  474. continue;
  475. plpar_page_set_active(__pa(pa_curr->page[idx]));
  476. free_page(pa_curr->page[idx]);
  477. freed++;
  478. loaned_pages--;
  479. totalram_pages++;
  480. pa_curr->page[idx] = pa_last->page[--pa_last->index];
  481. if (pa_last->index == 0) {
  482. if (pa_curr == pa_last)
  483. pa_curr = pa_last->next;
  484. pa_last = pa_last->next;
  485. free_page((unsigned long)cmm_page_list);
  486. cmm_page_list = pa_last;
  487. continue;
  488. }
  489. }
  490. pa_curr = pa_curr->next;
  491. }
  492. /* Search for page list structures in the range to be offlined */
  493. pa_last = NULL;
  494. pa_curr = cmm_page_list;
  495. while (pa_curr) {
  496. if (((unsigned long)pa_curr >= start_page) &&
  497. ((unsigned long)pa_curr < end_page)) {
  498. npa = (struct cmm_page_array *)__get_free_page(
  499. GFP_NOIO | __GFP_NOWARN |
  500. __GFP_NORETRY | __GFP_NOMEMALLOC);
  501. if (!npa) {
  502. spin_unlock(&cmm_lock);
  503. cmm_dbg("Failed to allocate memory for list "
  504. "management. Memory hotplug "
  505. "failed.\n");
  506. return ENOMEM;
  507. }
  508. memcpy(npa, pa_curr, PAGE_SIZE);
  509. if (pa_curr == cmm_page_list)
  510. cmm_page_list = npa;
  511. if (pa_last)
  512. pa_last->next = npa;
  513. free_page((unsigned long) pa_curr);
  514. freed++;
  515. pa_curr = npa;
  516. }
  517. pa_last = pa_curr;
  518. pa_curr = pa_curr->next;
  519. }
  520. spin_unlock(&cmm_lock);
  521. cmm_dbg("Released %ld pages in the search range.\n", freed);
  522. return 0;
  523. }
/**
 * cmm_memory_cb - Handle memory hotplug notifier calls
 * @self: notifier block struct
 * @action: action to take
 * @arg: struct memory_notify data for handler
 *
 * Return value:
 * 	NOTIFY_OK or notifier error based on subfunction return value
 *
 **/
static int cmm_memory_cb(struct notifier_block *self,
			unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Hold hotplug_mutex across the whole offline transaction;
		 * it is released by MEM_OFFLINE or MEM_CANCEL_OFFLINE below.
		 * This also blocks the alloc path and the poll thread.
		 */
		mutex_lock(&hotplug_mutex);
		hotplug_occurred = 1;
		ret = cmm_mem_going_offline(arg);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&hotplug_mutex);
		cmm_dbg("Memory offline operation complete.\n");
		break;
	case MEM_GOING_ONLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		break;
	}

	if (ret)
		ret = notifier_from_errno(ret);
	else
		ret = NOTIFY_OK;
	return ret;
}
/* Memory hotplug notifier; low priority so it runs late (see comment above). */
static struct notifier_block cmm_mem_nb = {
	.notifier_call = cmm_memory_cb,
	.priority = CMM_MEM_HOTPLUG_PRI
};
  564. /**
  565. * cmm_init - Module initialization
  566. *
  567. * Return value:
  568. * 0 on success / other on failure
  569. **/
  570. static int cmm_init(void)
  571. {
  572. int rc = -ENOMEM;
  573. if (!firmware_has_feature(FW_FEATURE_CMO))
  574. return -EOPNOTSUPP;
  575. if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
  576. return rc;
  577. if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
  578. goto out_oom_notifier;
  579. if ((rc = cmm_sysfs_register(&cmm_sysdev)))
  580. goto out_reboot_notifier;
  581. if (register_memory_notifier(&cmm_mem_nb) ||
  582. register_memory_isolate_notifier(&cmm_mem_isolate_nb))
  583. goto out_unregister_notifier;
  584. if (cmm_disabled)
  585. return rc;
  586. cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
  587. if (IS_ERR(cmm_thread_ptr)) {
  588. rc = PTR_ERR(cmm_thread_ptr);
  589. goto out_unregister_notifier;
  590. }
  591. return rc;
  592. out_unregister_notifier:
  593. unregister_memory_notifier(&cmm_mem_nb);
  594. unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
  595. cmm_unregister_sysfs(&cmm_sysdev);
  596. out_reboot_notifier:
  597. unregister_reboot_notifier(&cmm_reboot_nb);
  598. out_oom_notifier:
  599. unregister_oom_notifier(&cmm_oom_nb);
  600. return rc;
  601. }
/**
 * cmm_exit - Module exit
 *
 * Return value:
 * 	nothing
 **/
static void cmm_exit(void)
{
	if (cmm_thread_ptr)
		kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	unregister_reboot_notifier(&cmm_reboot_nb);
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	/* Give every loaned page back to the kernel before unloading. */
	cmm_free_pages(loaned_pages);
	cmm_unregister_sysfs(&cmm_sysdev);
}
/**
 * cmm_set_disable - Disable/Enable CMM
 *
 * Custom setter for the "disable" module parameter; accepts only 0 or 1.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int cmm_set_disable(const char *val, struct kernel_param *kp)
{
	int disable = simple_strtoul(val, NULL, 10);

	if (disable != 0 && disable != 1)
		return -EINVAL;

	if (disable && !cmm_disabled) {
		/* Disabling: stop the poll thread and return all loaned pages. */
		if (cmm_thread_ptr)
			kthread_stop(cmm_thread_ptr);
		cmm_thread_ptr = NULL;
		cmm_free_pages(loaned_pages);
	} else if (!disable && cmm_disabled) {
		/* Re-enabling: restart the poll thread. */
		cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
		if (IS_ERR(cmm_thread_ptr))
			return PTR_ERR(cmm_thread_ptr);
	}

	cmm_disabled = disable;
	return 0;
}
/* "disable" uses a custom setter so toggling it starts/stops the balloon. */
module_param_call(disable, cmm_set_disable, param_get_uint,
		  &cmm_disabled, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
		 "[Default=" __stringify(CMM_DISABLE) "]");

module_init(cmm_init);
module_exit(cmm_exit);