memory.c

/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

static int sections_per_block;

static inline int base_memory_block_id(int section_nr)
{
	return section_nr / sections_per_block;
}
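
/*
 * Worked example (illustrative values only; the actual sizes are
 * arch-dependent): with 128MB memory blocks built from 128MB
 * sections, sections_per_block is 1 and section 42 lives in block 42;
 * with 2GB blocks built from 128MB sections, sections_per_block is 16
 * and sections 0..15 all map to block 0.
 */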
static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
};

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);
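
/*
 * A minimal sketch of a hotplug-aware client of the notifier API
 * above (the callback and notifier_block names here are hypothetical,
 * shown purely as an illustration):
 *
 *	static int foo_mem_callback(struct notifier_block *self,
 *				    unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		if (action == MEM_ONLINE)
 *			pr_info("onlined %lu pages at pfn %lu\n",
 *				mn->nr_pages, mn->start_pfn);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_mem_nb = {
 *		.notifier_call = foo_mem_callback,
 *	};
 *
 *	register_memory_notifier(&foo_mem_nb);
 */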
static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = container_of(dev, struct memory_block, dev);

	kfree(mem);
}

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	int error;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;

	error = device_register(&memory->dev);
	return error;
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
static unsigned long get_memory_block_size(void)
{
	unsigned long block_sz;

	block_sz = memory_block_size_bytes();

	/* Validate block_sz is a power of 2 and not less than section size */
	if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
		WARN_ON(1);
		block_sz = MIN_MEMORY_BLOCK_SIZE;
	}

	return block_sz;
}
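
/*
 * The power-of-two test above relies on the usual bit trick: for
 * nonzero x, (x & (x - 1)) clears the lowest set bit, so the result
 * is zero if and only if x has exactly one bit set. For example,
 * 0x8000000 & 0x7ffffff == 0, while 0x9000000 & 0x8ffffff ==
 * 0x8000000 != 0.
 */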
/*
 * Show the first physical section index (at sections_per_block
 * granularity) covered by this memory block.
 */
static ssize_t show_mem_start_phys_index(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem =
		container_of(dev, struct memory_block, dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}
static ssize_t show_mem_end_phys_index(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem =
		container_of(dev, struct memory_block, dev);
	unsigned long phys_index;

	phys_index = mem->end_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}
/*
 * Show whether the memory block is likely to be hot-removable
 */
static ssize_t show_mem_removable(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	unsigned long i, pfn;
	int ret = 1;
	struct memory_block *mem =
		container_of(dev, struct memory_block, dev);

	for (i = 0; i < sections_per_block; i++) {
		pfn = section_nr_to_pfn(mem->start_section_nr + i);
		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

	return sprintf(buf, "%d\n", ret);
}
/*
 * online, offline, going offline, etc.
 */
static ssize_t show_mem_state(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem =
		container_of(dev, struct memory_block, dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
				mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}
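
/*
 * From userspace, this appears as the "state" file of each memory
 * block, e.g. (assuming the standard sysfs mount point):
 *
 *	# cat /sys/devices/system/memory/memory0/state
 *	online
 */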
int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
 * The probe routines leave the pages reserved, just as the bootmem code does.
 * Make sure they're still that way.
 */
static bool pages_correctly_reserved(unsigned long start_pfn)
{
	int i, j;
	struct page *page;
	unsigned long pfn = start_pfn;

	/*
	 * memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
	 * and assume memmap is contiguous within each section
	 */
	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;
		page = pfn_to_page(pfn);

		for (j = 0; j < PAGES_PER_SECTION; j++) {
			if (PageReserved(page + j))
				continue;

			printk(KERN_WARNING "section number %ld page number %d "
				"not reserved, was it already online?\n",
				pfn_to_section_nr(pfn), j);
			return false;
		}
	}

	return true;
}
/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct page *first_page;
	int ret;

	first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
	start_pfn = page_to_pfn(first_page);

	switch (action) {
	case MEM_ONLINE:
		if (!pages_correctly_reserved(start_pfn))
			return -EBUSY;

		ret = online_pages(start_pfn, nr_pages, online_type);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
			"%ld\n", __func__, phys_index, action, action);
		ret = -EINVAL;
	}

	return ret;
}
static int __memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req,
		int online_type)
{
	int ret = 0;

	if (mem->state != from_state_req) {
		ret = -EINVAL;
		goto out;
	}

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state, online_type);

	if (ret) {
		mem->state = from_state_req;
		goto out;
	}

	mem->state = to_state;
	switch (mem->state) {
	case MEM_OFFLINE:
		kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
		break;
	case MEM_ONLINE:
		kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
		break;
	default:
		break;
	}
out:
	return ret;
}
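
/*
 * The transitions above form a small state machine: an offline
 * request first parks the block in MEM_GOING_OFFLINE, so readers of
 * the "state" file can observe the transient state while
 * offline_pages() runs, then either completes to MEM_OFFLINE or rolls
 * back to the requested source state on failure. Online requests go
 * straight from MEM_OFFLINE to MEM_ONLINE.
 */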
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req,
		int online_type)
{
	int ret;

	mutex_lock(&mem->state_mutex);
	ret = __memory_block_change_state(mem, to_state, from_state_req,
					  online_type);
	mutex_unlock(&mem->state_mutex);

	return ret;
}
static ssize_t
store_mem_state(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct memory_block *mem;
	int ret = -EINVAL;

	mem = container_of(dev, struct memory_block, dev);

	if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
		ret = memory_block_change_state(mem, MEM_ONLINE,
						MEM_OFFLINE, ONLINE_KERNEL);
	else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
		ret = memory_block_change_state(mem, MEM_ONLINE,
						MEM_OFFLINE, ONLINE_MOVABLE);
	else if (!strncmp(buf, "online", min_t(int, count, 6)))
		ret = memory_block_change_state(mem, MEM_ONLINE,
						MEM_OFFLINE, ONLINE_KEEP);
	else if (!strncmp(buf, "offline", min_t(int, count, 7)))
		ret = memory_block_change_state(mem, MEM_OFFLINE,
						MEM_ONLINE, -1);

	if (ret)
		return ret;
	return count;
}
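
/*
 * Note that the longer prefixes ("online_kernel", "online_movable")
 * must be matched before plain "online", since strncmp() with a
 * length of 6 would otherwise accept them too. Typical usage from a
 * shell:
 *
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *	# echo offline > /sys/devices/system/memory/memory32/state
 */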
/*
 * phys_device is a bad name for this. What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or FRU (field-replaceable unit),
 * i.e. do these ranges belong to the same physical device,
 * such that if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t show_phys_device(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem =
		container_of(dev, struct memory_block, dev);

	return sprintf(buf, "%d\n", mem->phys_device);
}
static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);

#define mem_create_simple_file(mem, attr_name) \
	device_create_file(&mem->dev, &dev_attr_##attr_name)
#define mem_remove_simple_file(mem, attr_name) \
	device_remove_file(&mem->dev, &dev_attr_##attr_name)

/*
 * Block size attribute stuff
 */
static ssize_t
print_block_size(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	return sprintf(buf, "%lx\n", get_memory_block_size());
}

static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);

static int block_size_init(void)
{
	return device_create_file(memory_subsys.dev_root,
				  &dev_attr_block_size_bytes);
}
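
/*
 * Since print_block_size() uses a bare "%lx", the value reads back in
 * hex without a "0x" prefix; e.g. with a 128MB block size:
 *
 *	# cat /sys/devices/system/memory/block_size_bytes
 *	8000000
 */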
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
memory_probe_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid;
	int i, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	phys_addr = simple_strtoull(buf, NULL, 0);

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	for (i = 0; i < sections_per_block; i++) {
		nid = memory_add_physaddr_to_nid(phys_addr);
		ret = add_memory(nid, phys_addr,
				 PAGES_PER_SECTION << PAGE_SHIFT);
		if (ret)
			goto out;

		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	ret = count;
out:
	return ret;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);

static int memory_probe_init(void)
{
	return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
}
#else
static inline int memory_probe_init(void)
{
	return 0;
}
#endif
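
/*
 * The probe file takes a physical address, which must be aligned to
 * the memory block size (hence the mask check above). E.g., to
 * hot-add the block starting at 1GB:
 *
 *	# echo 0x40000000 > /sys/devices/system/memory/probe
 */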
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (strict_strtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t
store_hard_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (strict_strtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);

static __init int memory_fail_init(void)
{
	int err;

	err = device_create_file(memory_subsys.dev_root,
				 &dev_attr_soft_offline_page);
	if (!err)
		err = device_create_file(memory_subsys.dev_root,
					 &dev_attr_hard_offline_page);
	return err;
}
#else
static inline int memory_fail_init(void)
{
	return 0;
}
#endif
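
/*
 * Despite the "pfn" variable name, both stores above parse a physical
 * address and only then shift it down by PAGE_SHIFT to get the page
 * frame number. So to poison the page containing physical address
 * 0x12345000:
 *
 *	# echo 0x12345000 > /sys/devices/system/memory/hard_offline_page
 */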
/*
 * Note that phys_device is optional. It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned object is held and the reference for the
 * hinted object is released.
 */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
					      struct memory_block *hint)
{
	int block_id = base_memory_block_id(__section_nr(section));
	struct device *hintdev = hint ? &hint->dev : NULL;
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
	if (hint)
		put_device(&hint->dev);
	if (!dev)
		return NULL;
	return container_of(dev, struct memory_block, dev);
}
/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	return find_memory_block_hinted(section, NULL);
}
static int init_memory_block(struct memory_block **memory,
			     struct mem_section *section, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int scn_nr;
	int ret = 0;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	scn_nr = __section_nr(section);
	mem->start_section_nr =
			base_memory_block_id(scn_nr) * sections_per_block;
	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
	mem->state = state;
	mem->section_count++;
	mutex_init(&mem->state_mutex);
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);

	ret = register_memory(mem);
	if (!ret)
		ret = mem_create_simple_file(mem, phys_index);
	if (!ret)
		ret = mem_create_simple_file(mem, end_phys_index);
	if (!ret)
		ret = mem_create_simple_file(mem, state);
	if (!ret)
		ret = mem_create_simple_file(mem, phys_device);
	if (!ret)
		ret = mem_create_simple_file(mem, removable);

	*memory = mem;
	return ret;
}
static int add_memory_section(int nid, struct mem_section *section,
			      struct memory_block **mem_p,
			      unsigned long state, enum mem_add_context context)
{
	struct memory_block *mem = NULL;
	int scn_nr = __section_nr(section);
	int ret = 0;

	mutex_lock(&mem_sysfs_mutex);

	if (context == BOOT) {
		/* same memory block ? */
		if (mem_p && *mem_p)
			if (scn_nr >= (*mem_p)->start_section_nr &&
			    scn_nr <= (*mem_p)->end_section_nr) {
				mem = *mem_p;
				kobject_get(&mem->dev.kobj);
			}
	} else
		mem = find_memory_block(section);

	if (mem) {
		mem->section_count++;
		kobject_put(&mem->dev.kobj);
	} else {
		ret = init_memory_block(&mem, section, state);
		/* store memory_block pointer for next loop */
		if (!ret && context == BOOT)
			if (mem_p)
				*mem_p = mem;
	}

	if (!ret) {
		if (context == HOTPLUG &&
		    mem->section_count == sections_per_block)
			ret = register_mem_sect_under_node(mem, nid);
	}

	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}
/*
 * We need an interface for the VM to add new memory regions,
 * but without onlining it.
 */
int register_new_memory(int nid, struct mem_section *section)
{
	return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void
unregister_memory(struct memory_block *memory)
{
	BUG_ON(memory->dev.bus != &memory_subsys);

	/* drop the ref. we got in remove_memory_block() */
	kobject_put(&memory->dev.kobj);
	device_unregister(&memory->dev);
}

static int remove_memory_block(unsigned long node_id,
			       struct mem_section *section, int phys_device)
{
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);
	mem = find_memory_block(section);
	unregister_mem_sect_under_nodes(mem, __section_nr(section));

	mem->section_count--;
	if (mem->section_count == 0) {
		mem_remove_simple_file(mem, phys_index);
		mem_remove_simple_file(mem, end_phys_index);
		mem_remove_simple_file(mem, state);
		mem_remove_simple_file(mem, phys_device);
		mem_remove_simple_file(mem, removable);
		unregister_memory(mem);
	} else
		kobject_put(&mem->dev.kobj);

	mutex_unlock(&mem_sysfs_mutex);
	return 0;
}

int unregister_memory_section(struct mem_section *section)
{
	if (!present_section(section))
		return -EINVAL;

	return remove_memory_block(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * Offline one memory block. If the memory block is already offline, do nothing.
 */
int offline_memory_block(struct memory_block *mem)
{
	int ret = 0;

	mutex_lock(&mem->state_mutex);
	if (mem->state != MEM_OFFLINE)
		ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
	mutex_unlock(&mem->state_mutex);

	return ret;
}

/* Return true if the memory block is offlined; otherwise return false. */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}
/*
 * Initialize the sysfs support for memory devices...
 */
int __init memory_dev_init(void)
{
	unsigned int i;
	int ret;
	int err;
	unsigned long block_sz;
	struct memory_block *mem = NULL;

	ret = subsys_system_register(&memory_subsys, NULL);
	if (ret)
		goto out;

	block_sz = get_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	for (i = 0; i < NR_MEM_SECTIONS; i++) {
		if (!present_section_nr(i))
			continue;
		/* don't need to reuse memory_block if only one per block */
		err = add_memory_section(0, __nr_to_section(i),
					 (sections_per_block == 1) ? NULL : &mem,
					 MEM_ONLINE,
					 BOOT);
		if (!ret)
			ret = err;
	}

	err = memory_probe_init();
	if (!ret)
		ret = err;
	err = memory_fail_init();
	if (!ret)
		ret = err;
	err = block_size_init();
	if (!ret)
		ret = err;
out:
	if (ret)
		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
	return ret;
}
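
/*
 * After memory_dev_init() runs, the resulting sysfs layout looks
 * roughly like this (block count and ids depend on the machine, and
 * the root files depend on the config options noted):
 *
 *	/sys/devices/system/memory/
 *	|-- block_size_bytes
 *	|-- probe			(CONFIG_ARCH_MEMORY_PROBE)
 *	|-- soft_offline_page		(CONFIG_MEMORY_FAILURE)
 *	|-- hard_offline_page		(CONFIG_MEMORY_FAILURE)
 *	|-- memory0/
 *	|   |-- phys_index
 *	|   |-- end_phys_index
 *	|   |-- state
 *	|   |-- phys_device
 *	|   `-- removable
 *	`-- memoryN/...
 */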