/* mm.c */
/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/lmb.h>
#include <asm/udbg.h>
#include <asm/ps3.h>
#include <asm/lv1call.h>

#include "platform.h"
#if defined(DEBUG)
#define DBG(fmt...) udbg_printf(fmt)
#else
/* if(0) keeps the format string and args compile-checked when disabled */
#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
#endif
/*
 * Build-time options exposed as enum constants so plain C 'if' tests
 * can be used in place of #ifdef blocks in the code below.
 */
enum {
#if defined(CONFIG_PS3_USE_LPAR_ADDR)
	USE_LPAR_ADDR = 1, /* treat linux phys addrs as HV lpar addrs */
#else
	USE_LPAR_ADDR = 0,
#endif
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1, /* map/unmap ioc dma pages on demand */
#else
	USE_DYNAMIC_DMA = 0,
#endif
};
/* Page sizes expressed as log2 of the size in bytes. */
enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};
  50. static unsigned long make_page_sizes(unsigned long a, unsigned long b)
  51. {
  52. return (a << 56) | (b << 48);
  53. }
/* Flag bits for the lv1_allocate_memory() flags argument. */
enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
};
/*
 * Hash table sizes expressed as log2 of the size in bytes.
 * valid htab sizes are {18,19,20} = 256K, 512K, 1M
 */
enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};
/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size, used when translating
 *          between linux physical and HV lpar addresses
 */
struct mem_region {
	unsigned long base;
	unsigned long size;
	unsigned long offset;
};
/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */
struct map {
	unsigned long total;
	unsigned long vas_id;
	unsigned long htab_size;
	struct mem_region rm;
	struct mem_region r1;
};
/* debug_dump_map - log the current address space state through DBG() */
#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void _debug_dump_map(const struct map* m, const char* func, int line)
{
	DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);
}

/* The single file-wide address space state instance. */
static struct map map;
  113. /**
  114. * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
  115. * @phys_addr: linux physical address
  116. */
  117. unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
  118. {
  119. BUG_ON(is_kernel_addr(phys_addr));
  120. if (USE_LPAR_ADDR)
  121. return phys_addr;
  122. else
  123. return (phys_addr < map.rm.size || phys_addr >= map.total)
  124. ? phys_addr : phys_addr + map.r1.offset;
  125. }
  126. EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
/**
 * ps3_mm_vas_create - create the virtual address space
 * @htab_size: Returns the HV-assigned htab size in bytes.
 *
 * Queries the HV for the partition address region info, constructs a
 * virtual address space using 16M and 64K page sizes, and selects it as
 * the current vas.  The vas id and htab size are recorded in the global
 * map.  Any failure ends in panic() since the system cannot continue
 * without a vas.
 */
void __init ps3_mm_vas_create(unsigned long* htab_size)
{
	int result;
	unsigned long start_address;
	unsigned long size;
	unsigned long access_right;
	unsigned long max_page_size;
	unsigned long flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	/* the HV must support at least 16M pages for this implementation */
	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
			&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}
/**
 * ps3_mm_vas_destroy - tear down the vas created by ps3_mm_vas_create()
 *
 * Selects vas 0 before destructing, then clears map.vas_id so repeated
 * calls are harmless.
 */
void ps3_mm_vas_destroy(void)
{
	if (map.vas_id) {
		lv1_select_virtual_address_space(0);
		lv1_destruct_virtual_address_space(map.vas_id);
		map.vas_id = 0;
	}
}
/*============================================================================*/
/* memory hotplug routines */
/*============================================================================*/

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 * On failure @r is zeroed and a non-zero result is returned.
 */
int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	unsigned long muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
		(unsigned long)(size - r->size),
		(size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	/* reject failure, and regions the HV placed below the end of the
	 * real mode region (the offset translation would be invalid)
	 */
	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}
/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 *
 * Releases the HV memory for @r, zeroes @r, and shrinks map.total back
 * to just the real mode region.  A zeroed region is skipped.
 */
void ps3_mm_region_destroy(struct mem_region *r)
{
	if (r->base) {
		lv1_release_memory(r->base);
		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}
}
/**
 * ps3_mm_add_memory - hot add memory
 *
 * Runs as a core_initcall.  Hot-adds the r1 region (created earlier in
 * ps3_mm_init()) to the linux memory map and onlines its pages.  The
 * start address depends on whether linux phys addrs are lpar addrs.
 * NOTE(review): if ps3_mm_region_create() failed, map.r1.size is 0 here;
 * verify add_memory() copes with a zero-length request.
 */
static int __init ps3_mm_add_memory(void)
{
	int result;
	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	BUG_ON(!mem_init_done);

	start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

	if (result) {
		DBG("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	result = online_pages(start_pfn, nr_pages);

	if (result)
		DBG("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);

	return result;
}
core_initcall(ps3_mm_add_memory);
/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 *
 * Lpar addresses within the real mode region map directly; addresses in
 * the r1 region are shifted back down by map.r1.offset.  @lpar_addr must
 * lie below the end of the r1 region.
 */
static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
	return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
		: lpar_addr - map.r1.offset);
}
/* dma_dump_region - log a ps3_dma_region's state through DBG() */
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
	int line)
{
	DBG("%s:%d: dev %u:%u\n", func, line, r->did.bus_id,
		r->did.dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
}
/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 * @usage_count: Reference count; the chunk is freed when this drops to zero.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure. This scheme assumes
 * that all drivers use very well behaved dma ops.
 */
struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};
/* dma_dump_chunk - log a dma_chunk and its owning region through DBG() */
#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
	int line)
{
	DBG("%s:%d: r.dev %u:%u\n", func, line,
		c->region->did.bus_id, c->region->did.dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}
/**
 * dma_find_chunk - Find the chunk containing a bus address range.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: Starting ioc bus address of the area.
 * @len: Length in bytes of the area.
 *
 * The range is first aligned to the region's dma page size.  Returns the
 * chunk that fully contains the aligned range, NULL when no chunk
 * overlaps it, and BUG()s on a partial overlap (the multi-chunk case is
 * not handled).
 */
static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection: chunk fully contains the aligned range */
		if (aligned_bus >= c->bus_addr
			&& aligned_bus < c->bus_addr + c->len
			&& aligned_bus + aligned_len <= c->bus_addr + c->len) {
			return c;
		}
		/* below: range ends before this chunk starts */
		if (aligned_bus + aligned_len <= c->bus_addr) {
			continue;
		}
		/* above: range starts after this chunk ends */
		if (aligned_bus >= c->bus_addr + c->len) {
			continue;
		}

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}
/**
 * dma_free_chunk - Unmap a chunk's ioc pages and free the chunk.
 * @c: Pointer to the chunk; must already be unlinked from its region.
 *
 * A chunk with a zero bus_addr has nothing mapped and is just kfree'd.
 */
static int dma_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->did.bus_id,
			c->region->did.dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}

	kfree(c);
	return result;
}
/**
 * dma_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 * On success the new chunk is linked into the region's chunk list; on
 * failure *c_out is NULL and a negative result is returned.
 */
static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out)
{
	int result;
	struct dma_chunk *c;

	/* GFP_ATOMIC: callers hold the region's chunk_list spinlock */
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	result = lv1_map_device_dma_region(c->region->did.bus_id,
		c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
		0xf800000000000000UL);

	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
/**
 * dma_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.  The region is sized to
 * cover all of memory (map.total), rounded up to the region's dma page
 * size.  On failure r->len and r->bus_addr are zeroed.
 */
static int dma_region_create(struct ps3_dma_region* r)
{
	int result;

	r->len = _ALIGN_UP(map.total, 1 << r->page_size);
	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->len, r->page_size, r->region_type, &r->bus_addr);

	dma_dump_region(r);

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}
/**
 * dma_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.  All remaining chunks are
 * unmapped and freed first; r->len and r->bus_addr are zeroed on return.
 */
static int dma_region_free(struct ps3_dma_region* r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->len = r->bus_addr = 0;

	return result;
}
/**
 * dma_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.  An existing chunk covering the
 * page-aligned area is reused with its usage count bumped; otherwise a
 * new chunk is mapped via dma_map_pages().  The region's chunk_list lock
 * is held across the lookup/insert.
 */
static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	/* kernel virtual addresses are converted to phys; others are
	 * assumed to already be physical addresses
	 */
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;

	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	/* extra debug output for the linear (non-dynamic) configuration */
	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
		_ALIGN_UP(len, 1 << r->page_size), &c);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}
/**
 * dma_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.  Drops the owning chunk's usage
 * count and frees the chunk when it reaches zero.  BUG()s if no chunk
 * covers the given area.
 */
int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}
/**
 * dma_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.  Both the real mode region
 * and the r1 region are mapped up front, so later per-buffer map calls can
 * be simple address translations.  Any failure is fatal (BUG_ON).
 */
static int dma_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long tmp;

	/* force 16M dma pages for linear mapping */

	if (r->page_size != PS3_DMA_16M) {
		pr_info("%s:%d: forcing 16M pages for linear map\n",
			__func__, __LINE__);
		r->page_size = PS3_DMA_16M;
	}

	result = dma_region_create(r);
	BUG_ON(result);

	result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
	BUG_ON(result);

	/* the r1 region starts at a different phys addr depending on
	 * whether linux phys addrs are lpar addrs
	 */
	if (USE_LPAR_ADDR)
		result = dma_map_area(r, map.r1.base, map.r1.size,
			&tmp);
	else
		result = dma_map_area(r, map.rm.size, map.r1.size,
			&tmp);
	BUG_ON(result);

	return result;
}
/**
 * dma_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 * The unmap calls mirror the two maps done in dma_region_create_linear().
 */
static int dma_region_free_linear(struct ps3_dma_region *r)
{
	int result;

	result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
	BUG_ON(result);

	result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
		map.r1.size);
	BUG_ON(result);

	result = dma_region_free(r);
	BUG_ON(result);

	return result;
}
  587. /**
  588. * dma_map_area_linear - Map an area of memory into a device dma region.
  589. * @r: Pointer to a struct ps3_dma_region.
  590. * @virt_addr: Starting virtual address of the area to map.
  591. * @len: Length in bytes of the area to map.
  592. * @bus_addr: A pointer to return the starting ioc bus address of the area to
  593. * map.
  594. *
  595. * This routine just returns the coresponding bus address. Actual mapping
  596. * occurs in dma_region_create_linear().
  597. */
  598. static int dma_map_area_linear(struct ps3_dma_region *r,
  599. unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
  600. {
  601. unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
  602. : virt_addr;
  603. *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
  604. return 0;
  605. }
/**
 * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing. Unmapping occurs in dma_region_free_linear().
 */
static int dma_unmap_area_linear(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	return 0;
}
  619. int ps3_dma_region_create(struct ps3_dma_region *r)
  620. {
  621. return (USE_DYNAMIC_DMA)
  622. ? dma_region_create(r)
  623. : dma_region_create_linear(r);
  624. }
  625. int ps3_dma_region_free(struct ps3_dma_region *r)
  626. {
  627. return (USE_DYNAMIC_DMA)
  628. ? dma_region_free(r)
  629. : dma_region_free_linear(r);
  630. }
  631. int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
  632. unsigned long len, unsigned long *bus_addr)
  633. {
  634. return (USE_DYNAMIC_DMA)
  635. ? dma_map_area(r, virt_addr, len, bus_addr)
  636. : dma_map_area_linear(r, virt_addr, len, bus_addr);
  637. }
  638. int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
  639. unsigned long len)
  640. {
  641. return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
  642. : dma_unmap_area_linear(r, bus_addr, len);
  643. }
/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 *
 * Reads the memory layout from the HV repository into the global map,
 * registers the real mode region with lmb, and creates the r1 region
 * covering the remainder of memory (hot-added later in
 * ps3_mm_add_memory()).  Panics if the repository read fails.
 */
void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	lmb_add(map.rm.base, map.rm.size);
	lmb_analyze();

	/* arrange to do this in ps3_mm_add_memory */
	/* NOTE(review): the result of ps3_mm_region_create() is ignored; on
	 * failure map.r1 is zeroed and ps3_mm_add_memory() will add nothing.
	 */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	DBG(" <- %s:%d\n", __func__, __LINE__);
}
/**
 * ps3_mm_shutdown - final cleanup of address space
 *
 * Destroys the r1 hotplug region and shrinks map.total back to the real
 * mode region size.
 */
void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
	map.total = map.rm.size;
}