spu_base.c

/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>
#include <asm/xmon.h>

#include "interrupt.h"

const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

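/*
 * Class 0 error traps: report the error condition to the owner of the
 * SPU through the dma_callback registered by whoever owns the context
 * (normally spufs, when it binds a context to the physical SPU).
 */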
static int __spu_trap_invalid_dma(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
        return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
        return 0;
}

static int __spu_trap_error(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
        return 0;
}

static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

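/*
 * SLB miss handling: build an SLB entry for the faulting effective
 * address and write it into the SPE's SLB, replacing entries in simple
 * round-robin fashion over the eight available slots, then restart the
 * stalled MFC DMA.
 */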
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
        u64 esid, vsid, llp;

        pr_debug("%s\n", __FUNCTION__);

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                /* SLBs are pre-loaded for context switch, so
                 * we should never get here!
                 */
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }
        esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch(REGION_ID(ea)) {
        case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
                if (in_hugepage_area(mm->context, ea))
                        llp = mmu_psize_defs[mmu_huge_psize].sllp;
                else
#endif
                        llp = mmu_psize_defs[mmu_virtual_psize].sllp;
                vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
                                SLB_VSID_USER | llp;
                break;
        case VMALLOC_REGION_ID:
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
                                SLB_VSID_KERNEL | llp;
                break;
        case KERNEL_REGION_ID:
                llp = mmu_psize_defs[mmu_linear_psize].sllp;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
                                SLB_VSID_KERNEL | llp;
                break;
        default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }

        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid);
        out_be64(&priv2->slb_esid_RW, esid);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);

        return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

        /* Handle kernel space hash faults immediately.
           User hash faults need to be deferred to process context. */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
            && REGION_ID(ea) != USER_REGION_ID
            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
                spu_restart_dma(spu);
                return 0;
        }

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }

        spu->dar = ea;
        spu->dsisr = dsisr;
        mb();
        spu->stop_callback(spu);
        return 0;
}

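/*
 * Each SPE raises three classes of interrupts:
 *   class 0 - error conditions (DMA alignment, invalid DMA, SPU error)
 *   class 1 - MFC translation faults (segment and mapping faults)
 *   class 2 - mailbox, stop-and-signal, halt and DMA tag-group events
 * The hard irq handlers below do the minimum work and defer the rest to
 * the *_bottom functions or to the registered callbacks.
 */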
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;

        spu = data;
        spu->class_0_pending = 1;
        spu->stop_callback(spu);

        return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
        unsigned long stat, mask;

        spu->class_0_pending = 0;

        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0);

        stat &= mask;

        if (stat & 1) /* invalid DMA alignment */
                __spu_trap_dma_align(spu);

        if (stat & 2) /* invalid MFC DMA */
                __spu_trap_invalid_dma(spu);

        if (stat & 4) /* error on SPU */
                __spu_trap_error(spu);

        spu_int_stat_clear(spu, 0, stat);

        return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 1);
        stat = spu_int_stat_get(spu, 1) & mask;
        dar = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & 2) /* mapping fault */
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);
        spin_unlock(&spu->register_lock);
        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
                        dar, dsisr);

        if (stat & 1) /* segment fault */
                __spu_trap_data_seg(spu, dar);

        if (stat & 2) { /* mapping fault */
                __spu_trap_data_map(spu, dar, dsisr);
        }

        if (stat & 4) /* ls compare & suspend on get */
                ;

        if (stat & 8) /* ls compare & suspend on put */
                ;

        return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
        /*
         * mailbox interrupts (0x1 and 0x10) are level triggered.
         * mask them now before acknowledging.
         */
        if (stat & 0x11)
                spu_int_mask_and(spu, 2, ~(stat & 0x11));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);
        spin_unlock(&spu->register_lock);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & 1) /* PPC core mailbox */
                spu->ibox_callback(spu);

        if (stat & 2) /* SPU stop-and-signal */
                spu->stop_callback(spu);

        if (stat & 4) /* SPU halted */
                spu->stop_callback(spu);

        if (stat & 8) /* DMA tag group complete */
                spu->mfc_callback(spu);

        if (stat & 0x10) /* SPU mailbox threshold */
                spu->wbox_callback(spu);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0] != NO_IRQ) {
                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  IRQF_DISABLED,
                                  spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1] != NO_IRQ) {
                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  IRQF_DISABLED,
                                  spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2] != NO_IRQ) {
                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  IRQF_DISABLED,
                                  spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2] != NO_IRQ)
                free_irq(spu->irqs[2], spu);
}

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);

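/*
 * Reset the SPU channels to a known state before handing the SPU to a
 * new user: zero_list names the channels whose data is cleared and
 * count_list the channels whose counts are (re)initialized.
 */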
static void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}

struct spu *spu_alloc_node(int node)
{
        struct spu *spu = NULL;

        mutex_lock(&spu_mutex);
        if (!list_empty(&spu_list[node])) {
                spu = list_entry(spu_list[node].next, struct spu, list);
                list_del_init(&spu->list);
                pr_debug("Got SPU %d %d\n", spu->number, spu->node);
                spu_init_channels(spu);
        }
        mutex_unlock(&spu_mutex);

        return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
        struct spu *spu = NULL;
        int node;

        for (node = 0; node < MAX_NUMNODES; node++) {
                spu = spu_alloc_node(node);
                if (spu)
                        break;
        }

        return spu;
}

void spu_free(struct spu *spu)
{
        mutex_lock(&spu_mutex);
        list_add_tail(&spu->list, &spu_list[spu->node]);
        mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

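/*
 * Resolve an MFC translation fault against the owning process's mm,
 * much like the generic powerpc page fault path. This is called from
 * spu_irq_class_1_bottom in process context, not from the hard irq
 * handler, since it may sleep on mmap_sem and in handle_mm_fault().
 */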
static int spu_handle_mm_fault(struct spu *spu)
{
        struct mm_struct *mm = spu->mm;
        struct vm_area_struct *vma;
        u64 ea, dsisr, is_write;
        int ret;

        ea = spu->dar;
        dsisr = spu->dsisr;
#if 0
        if (!IS_VALID_EA(ea)) {
                return -EFAULT;
        }
#endif /* XXX */
        if (mm == NULL) {
                return -EFAULT;
        }
        if (mm->pgd == NULL) {
                return -EFAULT;
        }

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, ea);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= ea)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
#if 0
        if (expand_stack(vma, ea))
                goto bad_area;
#endif /* XXX */
good_area:
        is_write = dsisr & MFC_DSISR_ACCESS_PUT;
        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (dsisr & MFC_DSISR_ACCESS_DENIED)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        ret = 0;
        switch (handle_mm_fault(mm, vma, ea, is_write)) {
        case VM_FAULT_MINOR:
                current->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                current->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                ret = -EFAULT;
                goto bad_area;
        case VM_FAULT_OOM:
                ret = -ENOMEM;
                goto bad_area;
        default:
                BUG();
        }
        up_read(&mm->mmap_sem);
        return ret;

bad_area:
        up_read(&mm->mmap_sem);
        return -EFAULT;
}

int spu_irq_class_1_bottom(struct spu *spu)
{
        u64 ea, dsisr, access, error = 0UL;
        int ret = 0;

        ea = spu->dar;
        dsisr = spu->dsisr;
        if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
                u64 flags;

                access = (_PAGE_PRESENT | _PAGE_USER);
                access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
                local_irq_save(flags);
                if (hash_page(ea, access, 0x300) != 0)
                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
                local_irq_restore(flags);
        }
        if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
                if ((ret = spu_handle_mm_fault(spu)) != 0)
                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
                else
                        error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
        }
        spu->dar = 0UL;
        spu->dsisr = 0UL;
        if (!error) {
                spu_restart_dma(spu);
        } else {
                __spu_trap_invalid_dma(spu);
        }
        return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
        const unsigned int *id;
        struct device_node *cpu;
        cpu = spe->parent->parent;
        id = get_property(cpu, "node-id", NULL);
        return id ? *id : 0;
}

static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
                const char *prop)
{
        static DEFINE_MUTEX(add_spumem_mutex);

        const struct address_prop {
                unsigned long address;
                unsigned int len;
        } __attribute__((packed)) *p;
        int proplen;

        unsigned long start_pfn, nr_pages;
        struct pglist_data *pgdata;
        struct zone *zone;
        int ret;

        p = get_property(spe, prop, &proplen);
        WARN_ON(proplen != sizeof (*p));

        start_pfn = p->address >> PAGE_SHIFT;
        nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

        pgdata = NODE_DATA(spu->nid);
        zone = pgdata->node_zones;

        /* XXX rethink locking here */
        mutex_lock(&add_spumem_mutex);
        ret = __add_pages(zone, start_pfn, nr_pages);
        mutex_unlock(&add_spumem_mutex);

        return ret;
}

static void __iomem * __init map_spe_prop(struct spu *spu,
                struct device_node *n, const char *name)
{
        const struct address_prop {
                unsigned long address;
                unsigned int len;
        } __attribute__((packed)) *prop;

        const void *p;
        int proplen;
        void __iomem *ret = NULL;
        int err = 0;

        p = get_property(n, name, &proplen);
        if (proplen != sizeof (struct address_prop))
                return NULL;

        prop = p;

        err = cell_spuprop_present(spu, n, name);
        if (err && (err != -EEXIST))
                goto out;

        ret = ioremap(prop->address, prop->len);

out:
        return ret;
}

static void spu_unmap(struct spu *spu)
{
        iounmap(spu->priv2);
        iounmap(spu->priv1);
        iounmap(spu->problem);
        iounmap((__force u8 __iomem *)spu->local_store);
}

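/*
 * Two flavours of device tree are supported: older firmware describes
 * each SPE with "isrc", "local-store", "problem", "priv1" and "priv2"
 * properties (the *_old variants below), while newer trees use standard
 * address and interrupt properties handled by spu_map_resource(),
 * spu_map_device() and spu_map_interrupts(). create_spu() tries the new
 * layout first and falls back to the old one.
 */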
/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
        unsigned int isrc;
        const u32 *tmp;

        /* Get the interrupt source unit from the device-tree */
        tmp = get_property(np, "isrc", NULL);
        if (!tmp)
                return -ENODEV;
        isrc = tmp[0];

        /* Add the node number */
        isrc |= spu->node << IIC_IRQ_NODE_SHIFT;

        /* Now map interrupts of all 3 classes */
        spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
        spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
        spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

        /* Right now, we only fail if class 2 failed */
        return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}

static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
        const char *prop;
        int ret;

        ret = -ENODEV;
        spu->name = get_property(node, "name", NULL);
        if (!spu->name)
                goto out;

        prop = get_property(node, "local-store", NULL);
        if (!prop)
                goto out;
        spu->local_store_phys = *(unsigned long *)prop;

        /* we use local store as ram, not io memory */
        spu->local_store = (void __force *)
                map_spe_prop(spu, node, "local-store");
        if (!spu->local_store)
                goto out;

        prop = get_property(node, "problem", NULL);
        if (!prop)
                goto out_unmap;
        spu->problem_phys = *(unsigned long *)prop;

        spu->problem = map_spe_prop(spu, node, "problem");
        if (!spu->problem)
                goto out_unmap;

        spu->priv1 = map_spe_prop(spu, node, "priv1");
        /* priv1 is not available on a hypervisor */

        spu->priv2 = map_spe_prop(spu, node, "priv2");
        if (!spu->priv2)
                goto out_unmap;
        ret = 0;
        goto out;

out_unmap:
        spu_unmap(spu);
out:
        return ret;
}

static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
        struct of_irq oirq;
        int ret;
        int i;

        for (i = 0; i < 3; i++) {
                ret = of_irq_map_one(np, i, &oirq);
                if (ret) {
                        pr_debug("spu_new: failed to get irq %d\n", i);
                        goto err;
                }
                ret = -EINVAL;
                pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
                         oirq.controller->full_name);
                spu->irqs[i] = irq_create_of_mapping(oirq.controller,
                                        oirq.specifier, oirq.size);
                if (spu->irqs[i] == NO_IRQ) {
                        pr_debug("spu_new: failed to map it !\n");
                        goto err;
                }
        }
        return 0;

err:
        pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
        for (; i >= 0; i--) {
                if (spu->irqs[i] != NO_IRQ)
                        irq_dispose_mapping(spu->irqs[i]);
        }
        return ret;
}

static int spu_map_resource(struct device_node *node, int nr,
                void __iomem** virt, unsigned long *phys)
{
        struct resource resource = { };
        int ret;

        ret = of_address_to_resource(node, nr, &resource);
        if (ret)
                goto out;

        if (phys)
                *phys = resource.start;
        *virt = ioremap(resource.start, resource.end - resource.start);
        if (!*virt)
                ret = -EINVAL;

out:
        return ret;
}

static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
        int ret = -ENODEV;
        spu->name = get_property(node, "name", NULL);
        if (!spu->name)
                goto out;

        ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
                               &spu->local_store_phys);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 0\n",
                         node->full_name);
                goto out;
        }
        ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
                               &spu->problem_phys);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 1\n",
                         node->full_name);
                goto out_unmap;
        }
        ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
                               NULL);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 2\n",
                         node->full_name);
                goto out_unmap;
        }

        if (!firmware_has_feature(FW_FEATURE_LPAR))
                ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
                                       NULL);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 3\n",
                         node->full_name);
                goto out_unmap;
        }
        pr_debug("spu_new: %s maps:\n", node->full_name);
        pr_debug(" local store : 0x%016lx -> 0x%p\n",
                 spu->local_store_phys, spu->local_store);
        pr_debug(" problem state : 0x%016lx -> 0x%p\n",
                 spu->problem_phys, spu->problem);
        pr_debug(" priv2 : 0x%p\n", spu->priv2);
        pr_debug(" priv1 : 0x%p\n", spu->priv1);

        return 0;

out_unmap:
        spu_unmap(spu);
out:
        pr_debug("failed to map spe %s: %d\n", spu->name, ret);
        return ret;
}

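/*
 * Every physical SPU is registered as a system device in the "spu"
 * sysdev class; the helpers below let other code (e.g. spufs) add or
 * remove sysfs attributes on all SPUs at once.
 */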
struct sysdev_class spu_sysdev_class = {
        set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;
        mutex_lock(&spu_mutex);

        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_create_file(&spu->sysdev, attr);

        mutex_unlock(&spu_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;
        mutex_lock(&spu_mutex);

        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_create_group(&spu->sysdev.kobj, attrs);

        mutex_unlock(&spu_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;
        mutex_lock(&spu_mutex);

        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_remove_file(&spu->sysdev, attr);

        mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;
        mutex_lock(&spu_mutex);

        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->sysdev.kobj, attrs);

        mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
        int ret;

        spu->sysdev.id = spu->number;
        spu->sysdev.cls = &spu_sysdev_class;
        ret = sysdev_register(&spu->sysdev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->sysdev, spu->node);

        return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
        sysfs_remove_device_from_node(&spu->sysdev, spu->node);
        sysdev_unregister(&spu->sysdev);
}

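/*
 * Bring up one SPE found in the device tree: map its register areas,
 * set up its interrupts, register the sysdev and put it on the per-node
 * list that spu_alloc_node() hands SPUs out from.
 */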
static int __init create_spu(struct device_node *spe)
{
        struct spu *spu;
        int ret;
        static int number;

        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spu->node = find_spu_node_id(spe);
        if (spu->node >= MAX_NUMNODES) {
                printk(KERN_WARNING "SPE %s on node %d ignored,"
                       " node number too big\n", spe->full_name, spu->node);
                printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
                return -ENODEV;
        }
        spu->nid = of_node_to_nid(spe);
        if (spu->nid == -1)
                spu->nid = 0;

        ret = spu_map_device(spu, spe);
        /* try old method */
        if (ret)
                ret = spu_map_device_old(spu, spe);
        if (ret)
                goto out_free;

        ret = spu_map_interrupts(spu, spe);
        if (ret)
                ret = spu_map_interrupts_old(spu, spe);
        if (ret)
                goto out_unmap;
        spin_lock_init(&spu->register_lock);
        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        mutex_lock(&spu_mutex);

        spu->number = number++;
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_unlock;

        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;

        list_add(&spu->list, &spu_list[spu->node]);
        list_add(&spu->full_list, &spu_full_list);
        spu->devnode = of_node_get(spe);

        mutex_unlock(&spu_mutex);

        pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n",
                spu->name, spu->local_store,
                spu->problem, spu->priv1, spu->priv2, spu->number);
        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_unlock:
        mutex_unlock(&spu_mutex);
out_unmap:
        spu_unmap(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static void destroy_spu(struct spu *spu)
{
        list_del_init(&spu->list);
        list_del_init(&spu->full_list);

        of_node_put(spu->devnode);

        spu_destroy_sysdev(spu);
        spu_free_irqs(spu);
        spu_unmap(spu);
        kfree(spu);
}

static void cleanup_spu_base(void)
{
        struct spu *spu, *tmp;
        int node;

        mutex_lock(&spu_mutex);
        for (node = 0; node < MAX_NUMNODES; node++) {
                list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
                        destroy_spu(spu);
        }
        mutex_unlock(&spu_mutex);
        sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
        struct device_node *node;
        int i, ret;

        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
                return ret;

        for (i = 0; i < MAX_NUMNODES; i++)
                INIT_LIST_HEAD(&spu_list[i]);

        ret = -ENODEV;
        for (node = of_find_node_by_type(NULL, "spe");
                        node; node = of_find_node_by_type(node, "spe")) {
                ret = create_spu(node);
                if (ret) {
                        printk(KERN_WARNING "%s: Error initializing %s\n",
                                __FUNCTION__, node->name);
                        cleanup_spu_base();
                        break;
                }
        }

        xmon_register_spus(&spu_full_list);

        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");