mm.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829
  1. /*
  2. * PS3 address space management.
  3. *
  4. * Copyright (C) 2006 Sony Computer Entertainment Inc.
  5. * Copyright 2006 Sony Corp.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; version 2 of the License.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. */
  20. #include <linux/kernel.h>
  21. #include <linux/module.h>
  22. #include <linux/memory_hotplug.h>
  23. #include <asm/firmware.h>
  24. #include <asm/lmb.h>
  25. #include <asm/udbg.h>
  26. #include <asm/lv1call.h>
  27. #include "platform.h"
#if defined(DEBUG)
#define DBG(fmt...) udbg_printf(fmt)
#else
/* Compiled-out variant: the if(0) keeps printk's format-string type
 * checking active while the optimizer removes the call entirely. */
#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
#endif
/* Compile-time options resolved to plain integer constants so they can be
 * tested with ordinary C conditionals instead of #ifdef blocks. */
enum {
#if defined(CONFIG_PS3_USE_LPAR_ADDR)
	USE_LPAR_ADDR = 1,	/* linux physical addrs are HV lpar addrs */
#else
	USE_LPAR_ADDR = 0,
#endif
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,	/* map dma pages on demand */
#else
	USE_DYNAMIC_DMA = 0,	/* map all ram linearly at region create */
#endif
};
/* Page size exponents (log2 of the page size in bytes). */
enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};
  50. static unsigned long make_page_sizes(unsigned long a, unsigned long b)
  51. {
  52. return (a << 56) | (b << 48);
  53. }
/* Flag bits for lv1_allocate_memory(). */
enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
};
/* valid htab sizes are {18,19,20} = 256K, 512K, 1M
 * (values are log2 of the hash table size in bytes) */
enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};
  63. /*============================================================================*/
  64. /* virtual address space routines */
  65. /*============================================================================*/
/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 */
struct mem_region {
	unsigned long base;	/* base address */
	unsigned long size;	/* size in bytes */
	unsigned long offset;	/* base - rm.size: phys-to-lpar shift */
};
/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */
struct map {
	unsigned long total;		/* total memory reported by the HV */
	unsigned long vas_id;		/* HV virtual address space id */
	unsigned long htab_size;	/* hash table size in bytes */
	struct mem_region rm;		/* real mode (bootmem) region */
	struct mem_region r1;		/* hotplug memory region */
};
#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)

/* Dump all address-space state variables to the debug channel; only active
 * when DEBUG is defined (see the DBG macro above). */
static void _debug_dump_map(const struct map* m, const char* func, int line)
{
	DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);
}
  112. static struct map map;
  113. /**
  114. * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
  115. * @phys_addr: linux physical address
  116. */
  117. unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
  118. {
  119. BUG_ON(is_kernel_addr(phys_addr));
  120. if (USE_LPAR_ADDR)
  121. return phys_addr;
  122. else
  123. return (phys_addr < map.rm.size || phys_addr >= map.total)
  124. ? phys_addr : phys_addr + map.r1.offset;
  125. }
  126. EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
/**
 * ps3_mm_vas_create - create the virtual address space
 * @htab_size: Out.  Size in bytes of the hash table, as returned by
 *  lv1_construct_virtual_address_space().
 *
 * Queries the HV for the partition's address region info, constructs a
 * virtual address space with 16M and 64K page sizes and a hash table of
 * CONFIG_PS3_HTAB_SIZE, then selects that vas as current.  Panics on any
 * failure since the system cannot continue without a vas.
 */
void __init ps3_mm_vas_create(unsigned long* htab_size)
{
	int result;
	unsigned long start_address;
	unsigned long size;
	unsigned long access_right;
	unsigned long max_page_size;
	unsigned long flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	/* the HV must support the 16M large page size we map with */
	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	/* compile-time check of the configured htab size against limits */
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	/* two page sizes: 16M primary, 64K secondary */
	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
		2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
		&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}
  174. /**
  175. * ps3_mm_vas_destroy -
  176. */
  177. void ps3_mm_vas_destroy(void)
  178. {
  179. if (map.vas_id) {
  180. lv1_select_virtual_address_space(0);
  181. lv1_destruct_virtual_address_space(map.vas_id);
  182. map.vas_id = 0;
  183. }
  184. }
  185. /*============================================================================*/
  186. /* memory hotplug routines */
  187. /*============================================================================*/
  188. /**
  189. * ps3_mm_region_create - create a memory region in the vas
  190. * @r: pointer to a struct mem_region to accept initialized values
  191. * @size: requested region size
  192. *
  193. * This implementation creates the region with the vas large page size.
  194. * @size is rounded down to a multiple of the vas large page size.
  195. */
  196. int ps3_mm_region_create(struct mem_region *r, unsigned long size)
  197. {
  198. int result;
  199. unsigned long muid;
  200. r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
  201. DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
  202. DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
  203. DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
  204. (unsigned long)(size - r->size),
  205. (size - r->size) / 1024 / 1024);
  206. if (r->size == 0) {
  207. DBG("%s:%d: size == 0\n", __func__, __LINE__);
  208. result = -1;
  209. goto zero_region;
  210. }
  211. result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
  212. ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
  213. if (result || r->base < map.rm.size) {
  214. DBG("%s:%d: lv1_allocate_memory failed: %s\n",
  215. __func__, __LINE__, ps3_result(result));
  216. goto zero_region;
  217. }
  218. r->offset = r->base - map.rm.size;
  219. return result;
  220. zero_region:
  221. r->size = r->base = r->offset = 0;
  222. return result;
  223. }
  224. /**
  225. * ps3_mm_region_destroy - destroy a memory region
  226. * @r: pointer to struct mem_region
  227. */
  228. void ps3_mm_region_destroy(struct mem_region *r)
  229. {
  230. if (r->base) {
  231. lv1_release_memory(r->base);
  232. r->size = r->base = r->offset = 0;
  233. map.total = map.rm.size;
  234. }
  235. }
/**
 * ps3_mm_add_memory - hot add memory
 *
 * Registers the hotplug (r1) region created in ps3_mm_init() with the
 * memory hotplug subsystem and brings its pages online.  Runs as a
 * core_initcall; bails out when not running under the PS3 HV.
 */
static int __init ps3_mm_add_memory(void)
{
	int result;
	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return -ENODEV;

	BUG_ON(!mem_init_done);

	/* with lpar addressing r1 sits at its lpar base; otherwise it is
	 * presented to linux directly above the real-mode region */
	start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	/* round the region up to whole pages */
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

	if (result) {
		DBG("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	result = online_pages(start_pfn, nr_pages);

	if (result)
		DBG("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);

	return result;
}

core_initcall(ps3_mm_add_memory);
  266. /*============================================================================*/
  267. /* dma routines */
  268. /*============================================================================*/
/**
 * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 *
 * Addresses up to the real-mode boundary map linearly onto the region's
 * bus address; addresses in the hotplug (r1) window are shifted down by
 * r1.offset so bus space stays contiguous.
 */
static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	/* must fall inside the highest mapped region (r1) */
	BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
	/* NOTE(review): the '<=' lets lpar_addr == map.rm.size take the
	 * identity branch although ps3_mm_phys_to_lpar partitions with a
	 * strict '<' at that boundary -- confirm this is intended. */
	return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
		: lpar_addr - map.r1.offset);
}
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)

/* Dump a dma region's identity and mapping parameters to the debug
 * channel; only active when DEBUG is defined. */
static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
	int line)
{
	DBG("%s:%d: dev %u:%u\n", func, line, r->did.bus_id,
		r->did.dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
}
/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 * @usage_count: Reference count; the chunk is unmapped and freed when it
 * drops to zero (see dma_unmap_area()).
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure. This scheme assumes
 * that all drivers use very well behaved dma ops.
 */
struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};
#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)

/* Dump a chunk and its owning region to the debug channel; only active
 * when DEBUG is defined. */
static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
	int line)
{
	DBG("%s:%d: r.dev %u:%u\n", func, line,
		c->region->did.bus_id, c->region->did.dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}
  325. static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
  326. unsigned long bus_addr, unsigned long len)
  327. {
  328. struct dma_chunk *c;
  329. unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
  330. unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
  331. list_for_each_entry(c, &r->chunk_list.head, link) {
  332. /* intersection */
  333. if (aligned_bus >= c->bus_addr
  334. && aligned_bus < c->bus_addr + c->len
  335. && aligned_bus + aligned_len <= c->bus_addr + c->len) {
  336. return c;
  337. }
  338. /* below */
  339. if (aligned_bus + aligned_len <= c->bus_addr) {
  340. continue;
  341. }
  342. /* above */
  343. if (aligned_bus >= c->bus_addr + c->len) {
  344. continue;
  345. }
  346. /* we don't handle the multi-chunk case for now */
  347. dma_dump_chunk(c);
  348. BUG();
  349. }
  350. return NULL;
  351. }
  352. static int dma_free_chunk(struct dma_chunk *c)
  353. {
  354. int result = 0;
  355. if (c->bus_addr) {
  356. result = lv1_unmap_device_dma_region(c->region->did.bus_id,
  357. c->region->did.dev_id, c->bus_addr, c->len);
  358. BUG_ON(result);
  359. }
  360. kfree(c);
  361. return result;
  362. }
/**
 * dma_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 *
 * On success returns 0 with the new chunk linked into @r's chunk_list;
 * on failure returns -ENOMEM or the lv1 error and sets *c_out to NULL.
 */
static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out)
{
	int result;
	struct dma_chunk *c;

	/* GFP_ATOMIC: called from dma_map_area() under chunk_list.lock */
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	/* NOTE(review): 0xf800000000000000UL is presumably the IOPTE
	 * access/coherency flag bits -- confirm against the lv1 hypercall
	 * documentation. */
	result = lv1_map_device_dma_region(c->region->did.bus_id,
		c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
		0xf800000000000000UL);

	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
/**
 * dma_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.  The region is sized to
 * cover all of system memory (map.total), rounded up to the region's dma
 * page size.  On failure @r's len and bus_addr are zeroed.
 */
static int dma_region_create(struct ps3_dma_region* r)
{
	int result;

	r->len = _ALIGN_UP(map.total, 1 << r->page_size);
	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->len, r->page_size, r->region_type, &r->bus_addr);

	dma_dump_region(r);

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}
/**
 * dma_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.  All remaining chunks are
 * unlinked and freed first, then the HV region itself is released and
 * @r's len and bus_addr are zeroed.
 */
static int dma_region_free(struct ps3_dma_region* r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	/* _safe variant: dma_free_chunk() frees the entry we just removed */
	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->len = r->bus_addr = 0;

	return result;
}
/**
 * dma_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.  If a chunk already covers the
 * page-aligned area its reference count is bumped; otherwise a new chunk
 * is mapped via dma_map_pages().  The chunk list is protected by
 * r->chunk_list.lock.
 */
static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	/* kernel effective addresses are converted to physical; anything
	 * else is taken to already be a physical address */
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;

	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	/* debug trace for the static (linear) mapping configuration */
	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		/* area already mapped: just take another reference */
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	/* map whole dma pages covering the requested area */
	result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
		_ALIGN_UP(len, 1 << r->page_size), &c);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}
/**
 * dma_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.  Drops one reference on the
 * chunk covering the area and frees (HV-unmaps) it when the count hits
 * zero.  BUGs if no chunk covers the area.
 */
int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		/* dump the lookup parameters before dying */
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}
  541. /**
  542. * dma_region_create_linear - Setup a linear dma maping for a device.
  543. * @r: Pointer to a struct ps3_dma_region.
  544. *
  545. * This routine creates an HV dma region for the device and maps all available
  546. * ram into the io controller bus address space.
  547. */
  548. static int dma_region_create_linear(struct ps3_dma_region *r)
  549. {
  550. int result;
  551. unsigned long tmp;
  552. /* force 16M dma pages for linear mapping */
  553. if (r->page_size != PS3_DMA_16M) {
  554. pr_info("%s:%d: forcing 16M pages for linear map\n",
  555. __func__, __LINE__);
  556. r->page_size = PS3_DMA_16M;
  557. }
  558. result = dma_region_create(r);
  559. BUG_ON(result);
  560. result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
  561. BUG_ON(result);
  562. if (USE_LPAR_ADDR)
  563. result = dma_map_area(r, map.r1.base, map.r1.size,
  564. &tmp);
  565. else
  566. result = dma_map_area(r, map.rm.size, map.r1.size,
  567. &tmp);
  568. BUG_ON(result);
  569. return result;
  570. }
/**
 * dma_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 * The two unmaps mirror the two dma_map_area() calls made by
 * dma_region_create_linear().
 */
static int dma_region_free_linear(struct ps3_dma_region *r)
{
	int result;

	/* unmap the real-mode region (bus address of lpar 0) */
	result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
	BUG_ON(result);

	/* unmap the hotplug (r1) region */
	result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
		map.r1.size);
	BUG_ON(result);

	result = dma_region_free(r);
	BUG_ON(result);

	return result;
}
  589. /**
  590. * dma_map_area_linear - Map an area of memory into a device dma region.
  591. * @r: Pointer to a struct ps3_dma_region.
  592. * @virt_addr: Starting virtual address of the area to map.
  593. * @len: Length in bytes of the area to map.
  594. * @bus_addr: A pointer to return the starting ioc bus address of the area to
  595. * map.
  596. *
  597. * This routine just returns the coresponding bus address. Actual mapping
  598. * occurs in dma_region_create_linear().
  599. */
  600. static int dma_map_area_linear(struct ps3_dma_region *r,
  601. unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
  602. {
  603. unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
  604. : virt_addr;
  605. *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
  606. return 0;
  607. }
/**
 * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing. Unmapping occurs in dma_region_free_linear().
 */
static int dma_unmap_area_linear(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	return 0;
}
  621. int ps3_dma_region_create(struct ps3_dma_region *r)
  622. {
  623. return (USE_DYNAMIC_DMA)
  624. ? dma_region_create(r)
  625. : dma_region_create_linear(r);
  626. }
  627. int ps3_dma_region_free(struct ps3_dma_region *r)
  628. {
  629. return (USE_DYNAMIC_DMA)
  630. ? dma_region_free(r)
  631. : dma_region_free_linear(r);
  632. }
  633. int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
  634. unsigned long len, unsigned long *bus_addr)
  635. {
  636. return (USE_DYNAMIC_DMA)
  637. ? dma_map_area(r, virt_addr, len, bus_addr)
  638. : dma_map_area_linear(r, virt_addr, len, bus_addr);
  639. }
  640. int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
  641. unsigned long len)
  642. {
  643. return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
  644. : dma_unmap_area_linear(r, bus_addr, len);
  645. }
/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 *
 * Reads the memory layout from the HV repository, registers the real-mode
 * region with lmb, and creates the hotplug (r1) region covering the
 * remaining memory.  Panics if the repository cannot be read.
 */
void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	lmb_add(map.rm.base, map.rm.size);
	lmb_analyze();

	/* arrange to do this in ps3_mm_add_memory */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	DBG(" <- %s:%d\n", __func__, __LINE__);
}
/**
 * ps3_mm_shutdown - final cleanup of address space
 *
 * Releases the hotplug (r1) region back to the HV; the real-mode region
 * stays in place.
 */
void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}