spu_base.c

/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

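/*
 * Class 0 trap helpers: forward DMA alignment errors, invalid DMA
 * commands and SPU errors to the dma_callback that the owner of the
 * SPU context (normally spufs) has registered.
 */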
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

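/*
 * Restart the MFC DMA queue after a fault has been resolved, unless a
 * context switch is pending and the switch code owns the MFC state.
 */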
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

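/*
 * SLB miss handler: load one of the SPU's eight SLB entries for a
 * user-space effective address, replacing entries round-robin.
 */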
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
#ifdef CONFIG_HUGETLB_PAGE
	if (in_hugepage_area(mm->context, ea))
		llp = mmu_psize_defs[mmu_huge_psize].sllp;
	else
#endif
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER | llp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

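/*
 * Hash fault handler: faults on kernel addresses are resolved through
 * hash_page() directly from the interrupt handler; user-space faults
 * are saved in dar/dsisr and deferred via the stop_callback to be
 * handled in process context.
 */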
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

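/*
 * Top-half handlers for the three SPU interrupt classes: class 0
 * (errors), class 1 (translation faults) and class 2 (mailboxes,
 * stop-and-signal, halt and DMA tag-group completion).
 */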
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

static struct list_head spu_list[MAX_NUMNODES];
static DEFINE_MUTEX(spu_mutex);

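/*
 * Bring all SPU channels into a clean state before handing the SPU to
 * a new user: drain the channel data and set the channel counts to
 * sane initial values.
 */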
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

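/*
 * SPU allocation: each NUMA node keeps its own list of idle SPUs.
 * spu_alloc_node() takes one from a given node, spu_alloc() falls
 * back across all nodes.
 */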
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d %d\n",
			 spu->isrc, spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

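/*
 * Resolve a deferred user-space fault on behalf of the SPU: look up
 * the VMA in the owning mm and call handle_mm_fault(), mirroring the
 * checks done by the PowerPC page fault handler.
 */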
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

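/*
 * Bottom half for class 1 interrupts: try to resolve the fault through
 * the hash table first, then fall back to a full mm fault; if neither
 * succeeds, report an invalid DMA to the owner.
 */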
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static int __init find_spu_node_id(struct device_node *spe)
{
	const unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

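/*
 * Register an SPE memory area described by an address/len property
 * with the memory hotplug code, so the range gets struct page backing
 * (which is what allows it to be mapped into user space later).
 */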
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	const void *p;
	int proplen;
	void __iomem *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}

/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;

	/* Get the interrupt source unit from the device-tree */
	tmp = get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	/* Add the node number */
	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
	spu->isrc = isrc;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}

static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

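/*
 * Newer firmware describes the SPE interrupts through standard OF
 * interrupt properties; map all three classes via the generic OF
 * interrupt mapping code.
 */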
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	struct of_irq oirq;
	int ret;
	int i;

	for (i = 0; i < 3; i++) {
		ret = of_irq_map_one(np, i, &oirq);
		if (ret)
			goto err;
		ret = -EINVAL;
		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
					oirq.specifier, oirq.size);
		if (spu->irqs[i] == NO_IRQ)
			goto err;
	}
	return 0;

err:
	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i] != NO_IRQ)
			irq_dispose_mapping(spu->irqs[i]);
	}
	return ret;
}

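/*
 * Map the nr'th address resource of an SPE node and optionally return
 * its physical address.
 */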
static int spu_map_resource(struct device_node *node, int nr,
		void __iomem** virt, unsigned long *phys)
{
	struct resource resource = { };
	int ret;

	ret = of_address_to_resource(node, nr, &resource);
	if (ret)
		goto out;

	if (phys)
		*phys = resource.start;
	/* resource.end is inclusive, so the size needs the +1 */
	*virt = ioremap(resource.start, resource.end - resource.start + 1);
	if (!*virt)
		ret = -EINVAL;

out:
	return ret;
}

static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	int ret = -ENODEV;

	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
			       &spu->local_store_phys);
	if (ret)
		goto out;
	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
			       &spu->problem_phys);
	if (ret)
		goto out_unmap;
	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
			       NULL);
	if (ret)
		goto out_unmap;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
				       NULL);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);
	return sprintf(buf, "%d\n", spu->isrc);
}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);

extern int attach_sysdev_to_node(struct sys_device *dev, int nid);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	if (spu->isrc != 0)
		sysdev_create_file(&spu->sysdev, &attr_isrc);
	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysdev_remove_file(&spu->sysdev, &attr_isrc);
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}

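/*
 * Probe one SPE device-tree node: allocate the spu structure, map its
 * registers and interrupts (trying the current firmware layout first,
 * then falling back to the old one) and register the SPU with sysfs
 * and the per-node free list.
 */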
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->node = find_spu_node_id(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING "SPE %s on node %d ignored,"
		       " node number too big\n", spe->full_name, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		ret = -ENODEV;
		goto out_free;
	}
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	ret = spu_map_device(spu, spe);
	/* try old method */
	if (ret)
		ret = spu_map_device_old(spu, spe);
	if (ret)
		goto out_free;

	ret = spu_map_interrupts(spu, spe);
	if (ret)
		ret = spu_map_interrupts_old(spu, spe);
	if (ret)
		goto out_unmap;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unlock:
	mutex_unlock(&spu_mutex);
out_unmap:
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

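/*
 * Module init: register the sysdev class, then create one spu for
 * every device-tree node of type "spe". Note that ret stays -ENODEV
 * when no SPE nodes are found at all.
 */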
static int __init init_spu_base(void)
{
	struct device_node *node;
	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");