mm.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831
  1. /*
  2. * PS3 address space management.
  3. *
  4. * Copyright (C) 2006 Sony Computer Entertainment Inc.
  5. * Copyright 2006 Sony Corp.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; version 2 of the License.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. */
  20. #include <linux/kernel.h>
  21. #include <linux/module.h>
  22. #include <linux/memory_hotplug.h>
  23. #include <asm/firmware.h>
  24. #include <asm/lmb.h>
  25. #include <asm/udbg.h>
  26. #include <asm/ps3.h>
  27. #include <asm/lv1call.h>
  28. #include "platform.h"
/* DBG() prints via udbg_printf() when DEBUG is defined; otherwise it
 * expands to a dead printk() so the format arguments still get
 * compile-time type checking without emitting any output. */
#if defined(DEBUG)
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
#endif
/* Kconfig options folded into plain integer constants so ordinary
 * C 'if' statements can be used in place of #ifdef blocks. */
enum {
#if defined(CONFIG_PS3_USE_LPAR_ADDR)
	USE_LPAR_ADDR = 1,
#else
	USE_LPAR_ADDR = 0,
#endif
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};
/* Page-size shifts used with the hv page-size interfaces:
 * 4K, 64K and 16M pages. */
enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};
  51. static unsigned long make_page_sizes(unsigned long a, unsigned long b)
  52. {
  53. return (a << 56) | (b << 48);
  54. }
  55. enum {
  56. ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
  57. ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
  58. };
/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */
enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};
  64. /*============================================================================*/
  65. /* virtual address space routines */
  66. /*============================================================================*/
/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between @base and map.rm.size; subtracted when
 *          translating an lpar address back to a linux physical address
 */
struct mem_region {
	unsigned long base;
	unsigned long size;
	unsigned long offset;
};
/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */
struct map {
	unsigned long total;
	unsigned long vas_id;
	unsigned long htab_size;
	struct mem_region rm;
	struct mem_region r1;
};
/* Dump the address space state; the wrapper macro captures the caller's
 * function name and line number for the debug output. */
#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void _debug_dump_map(const struct map* m, const char* func, int line)
{
	DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);
}

/* Single file-global instance holding the whole address space state. */
static struct map map;
  114. /**
  115. * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
  116. * @phys_addr: linux physical address
  117. */
  118. unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
  119. {
  120. BUG_ON(is_kernel_addr(phys_addr));
  121. if (USE_LPAR_ADDR)
  122. return phys_addr;
  123. else
  124. return (phys_addr < map.rm.size || phys_addr >= map.total)
  125. ? phys_addr : phys_addr + map.r1.offset;
  126. }
  127. EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
/**
 * ps3_mm_vas_create - create the virtual address space
 * @htab_size: out parameter returning the hv-allocated htab size in bytes
 *
 * Queries the partition address region, constructs an hv virtual
 * address space using 16M and 64K page sizes, and selects it as the
 * current vas.  Panics on any failure: the kernel cannot continue
 * without a vas.
 */
void __init ps3_mm_vas_create(unsigned long* htab_size)
{
	int result;
	unsigned long start_address;
	unsigned long size;
	unsigned long access_right;
	unsigned long max_page_size;
	unsigned long flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	/* The 16M large page size must be available for this setup. */
	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	/* The configured htab size must respect the hv and cpu limits. */
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
		2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
		&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}
  175. /**
  176. * ps3_mm_vas_destroy -
  177. */
  178. void ps3_mm_vas_destroy(void)
  179. {
  180. if (map.vas_id) {
  181. lv1_select_virtual_address_space(0);
  182. lv1_destruct_virtual_address_space(map.vas_id);
  183. map.vas_id = 0;
  184. }
  185. }
  186. /*============================================================================*/
  187. /* memory hotplug routines */
  188. /*============================================================================*/
  189. /**
  190. * ps3_mm_region_create - create a memory region in the vas
  191. * @r: pointer to a struct mem_region to accept initialized values
  192. * @size: requested region size
  193. *
  194. * This implementation creates the region with the vas large page size.
  195. * @size is rounded down to a multiple of the vas large page size.
  196. */
  197. int ps3_mm_region_create(struct mem_region *r, unsigned long size)
  198. {
  199. int result;
  200. unsigned long muid;
  201. r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
  202. DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
  203. DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
  204. DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
  205. (unsigned long)(size - r->size),
  206. (size - r->size) / 1024 / 1024);
  207. if (r->size == 0) {
  208. DBG("%s:%d: size == 0\n", __func__, __LINE__);
  209. result = -1;
  210. goto zero_region;
  211. }
  212. result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
  213. ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
  214. if (result || r->base < map.rm.size) {
  215. DBG("%s:%d: lv1_allocate_memory failed: %s\n",
  216. __func__, __LINE__, ps3_result(result));
  217. goto zero_region;
  218. }
  219. r->offset = r->base - map.rm.size;
  220. return result;
  221. zero_region:
  222. r->size = r->base = r->offset = 0;
  223. return result;
  224. }
  225. /**
  226. * ps3_mm_region_destroy - destroy a memory region
  227. * @r: pointer to struct mem_region
  228. */
  229. void ps3_mm_region_destroy(struct mem_region *r)
  230. {
  231. if (r->base) {
  232. lv1_release_memory(r->base);
  233. r->size = r->base = r->offset = 0;
  234. map.total = map.rm.size;
  235. }
  236. }
/**
 * ps3_mm_add_memory - hot add memory
 *
 * Adds the hv-allocated r1 region to the linux memory map through the
 * memory hotplug interface and brings its pages online.  Runs as a
 * core_initcall, after mem_init (enforced by the BUG_ON below).
 */
static int __init ps3_mm_add_memory(void)
{
	int result;
	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return 0;

	BUG_ON(!mem_init_done);

	/* With lpar addressing r1 keeps its hv base address; otherwise it
	 * is presented directly after the real mode region. */
	start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

	if (result) {
		DBG("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	result = online_pages(start_pfn, nr_pages);

	if (result)
		DBG("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);

	return result;
}
core_initcall(ps3_mm_add_memory);
  267. /*============================================================================*/
  268. /* dma routines */
  269. /*============================================================================*/
/**
 * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 *
 * Addresses at or below map.rm.size map 1:1 onto the bus region;
 * addresses above it are in the r1 region and have the r1 offset
 * removed first.
 */
static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	/* The address must fall within the known lpar regions. */
	BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
	return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
		: lpar_addr - map.r1.offset);
}
/* Dump a dma region's identity and geometry; the wrapper macro captures
 * the caller's function name and line number. */
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
	int line)
{
	DBG("%s:%d: dev %u:%u\n", func, line, r->did.bus_id,
		r->did.dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
}
/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 * @usage_count: Reference count; the chunk is unmapped when it drops to zero.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure. This scheme assumes
 * that all drivers use very well behaved dma ops.
 */
struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};
/* Dump a chunk and its owning region; the wrapper macro captures the
 * caller's function name and line number. */
#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
	int line)
{
	DBG("%s:%d: r.dev %u:%u\n", func, line,
		c->region->did.bus_id, c->region->did.dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}
/* Find the chunk that fully contains the page-aligned area
 * [bus_addr, bus_addr + len).  Returns NULL when no chunk overlaps the
 * area; a partial overlap (area spanning a chunk boundary) is treated
 * as a bug.  Callers hold r->chunk_list.lock. */
static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	/* Widen the query to whole dma pages before comparing. */
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection: area lies entirely inside this chunk */
		if (aligned_bus >= c->bus_addr
			&& aligned_bus < c->bus_addr + c->len
			&& aligned_bus + aligned_len <= c->bus_addr + c->len) {
			return c;
		}
		/* below: area ends before this chunk starts */
		if (aligned_bus + aligned_len <= c->bus_addr) {
			continue;
		}
		/* above: area starts after this chunk ends */
		if (aligned_bus >= c->bus_addr + c->len) {
			continue;
		}

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}
/* Unmap a chunk from the io controller (if it was mapped) and free it.
 * An unmap failure is fatal; returns the lv1 result (zero on success). */
static int dma_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->did.bus_id,
			c->region->did.dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}

	kfree(c);
	return result;
}
/**
 * dma_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 * On success the chunk is linked into the region's chunk list; on
 * failure *c_out is NULL and an error code is returned.
 */
static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out)
{
	int result;
	struct dma_chunk *c;

	/* GFP_ATOMIC: callers hold the chunk_list spinlock. */
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	/* NOTE(review): the final argument is an hv mapping-attributes
	 * value; verify its meaning against the lv1 call documentation. */
	result = lv1_map_device_dma_region(c->region->did.bus_id,
		c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
		0xf800000000000000UL);

	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
/**
 * dma_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.  The region is sized to
 * cover all of memory (map.total), rounded up to the region page size.
 * On failure r->len and r->bus_addr are zeroed.
 */
static int dma_region_create(struct ps3_dma_region* r)
{
	int result;

	r->len = _ALIGN_UP(map.total, 1 << r->page_size);
	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->len, r->page_size, r->region_type, &r->bus_addr);

	dma_dump_region(r);

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}
/**
 * dma_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.  All remaining chunks are
 * unmapped and freed first.
 */
static int dma_region_free(struct ps3_dma_region* r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	/* _safe variant: dma_free_chunk() frees each entry as we walk. */
	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->len = r->bus_addr = 0;

	return result;
}
/**
 * dma_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.  If the area is already
 * covered by a mapped chunk its reference count is bumped; otherwise
 * whole aligned pages covering the area are mapped via dma_map_pages().
 */
static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	/* Kernel virtual addresses are converted to physical first. */
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;

	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	/* Extra tracing for the static (linear) mapping configuration. */
	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		/* Already mapped; just take another reference. */
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
		_ALIGN_UP(len, 1 << r->page_size), &c);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}
/**
 * dma_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.  Drops one reference on the
 * covering chunk and unmaps it when the count reaches zero; unmapping
 * an area that was never mapped is a bug.
 */
int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}
/**
 * dma_region_create_linear - Setup a linear dma maping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.  Any failure here is
 * fatal (BUG_ON) since later mappings rely on the linear map existing.
 */
static int dma_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long tmp;

	/* force 16M dma pages for linear mapping */
	if (r->page_size != PS3_DMA_16M) {
		pr_info("%s:%d: forcing 16M pages for linear map\n",
			__func__, __LINE__);
		r->page_size = PS3_DMA_16M;
	}

	result = dma_region_create(r);
	BUG_ON(result);

	/* Map the real mode region, then the r1 (hotplug) region. */
	result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
	BUG_ON(result);

	if (USE_LPAR_ADDR)
		result = dma_map_area(r, map.r1.base, map.r1.size,
			&tmp);
	else
		result = dma_map_area(r, map.rm.size, map.r1.size,
			&tmp);
	BUG_ON(result);

	return result;
}
/**
 * dma_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 * The two unmaps mirror the two dma_map_area() calls made by
 * dma_region_create_linear().
 */
static int dma_region_free_linear(struct ps3_dma_region *r)
{
	int result;

	result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
	BUG_ON(result);

	result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
		map.r1.size);
	BUG_ON(result);

	result = dma_region_free(r);
	BUG_ON(result);

	return result;
}
  590. /**
  591. * dma_map_area_linear - Map an area of memory into a device dma region.
  592. * @r: Pointer to a struct ps3_dma_region.
  593. * @virt_addr: Starting virtual address of the area to map.
  594. * @len: Length in bytes of the area to map.
  595. * @bus_addr: A pointer to return the starting ioc bus address of the area to
  596. * map.
  597. *
  598. * This routine just returns the coresponding bus address. Actual mapping
  599. * occurs in dma_region_create_linear().
  600. */
  601. static int dma_map_area_linear(struct ps3_dma_region *r,
  602. unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
  603. {
  604. unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
  605. : virt_addr;
  606. *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
  607. return 0;
  608. }
/**
 * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing. Unmapping occurs in dma_region_free_linear().
 */
static int dma_unmap_area_linear(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	return 0;
}
  622. int ps3_dma_region_create(struct ps3_dma_region *r)
  623. {
  624. return (USE_DYNAMIC_DMA)
  625. ? dma_region_create(r)
  626. : dma_region_create_linear(r);
  627. }
  628. int ps3_dma_region_free(struct ps3_dma_region *r)
  629. {
  630. return (USE_DYNAMIC_DMA)
  631. ? dma_region_free(r)
  632. : dma_region_free_linear(r);
  633. }
  634. int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
  635. unsigned long len, unsigned long *bus_addr)
  636. {
  637. return (USE_DYNAMIC_DMA)
  638. ? dma_map_area(r, virt_addr, len, bus_addr)
  639. : dma_map_area_linear(r, virt_addr, len, bus_addr);
  640. }
  641. int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
  642. unsigned long len)
  643. {
  644. return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
  645. : dma_unmap_area_linear(r, bus_addr, len);
  646. }
  647. /*============================================================================*/
  648. /* system startup routines */
  649. /*============================================================================*/
/**
 * ps3_mm_init - initialize the address space state variables
 *
 * Reads the memory layout from the hv repository, registers the real
 * mode region with lmb, and creates the r1 hotplug region (which is
 * added to the linux memory map later, in ps3_mm_add_memory()).
 * Panics if the repository cannot be read.
 */
void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	lmb_add(map.rm.base, map.rm.size);
	lmb_analyze();

	/* arrange to do this in ps3_mm_add_memory */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	DBG(" <- %s:%d\n", __func__, __LINE__);
}
/**
 * ps3_mm_shutdown - final cleanup of address space
 *
 * Releases the r1 hotplug region back to the hv.
 */
void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
	/* NOTE(review): ps3_mm_region_destroy() already sets
	 * map.total = map.rm.size when r1 has a base; this assignment
	 * also covers the case where r1 was never created. */
	map.total = map.rm.size;
}