spu_base.c

/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);
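
/*
 * Error trap helpers: forward the corresponding SPE_EVENT_* code to
 * the context that owns this SPU through its registered dma_callback.
 */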
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
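
/*
 * Restart the MFC DMA queue after a fault has been resolved.  Skipped
 * while a context switch is pending, when the save/restore sequence
 * owns the MFC state.
 */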
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
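
/*
 * SLB miss on an SPE-side access: build and install a new SLB entry
 * for the faulting effective address, cycling round-robin through the
 * eight SPU SLB slots, then restart the suspended DMA.
 */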
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
#ifdef CONFIG_HUGETLB_PAGE
	if (in_hugepage_area(mm->context, ea))
		llp = mmu_psize_defs[mmu_huge_psize].sllp;
	else
#endif
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER | llp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
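
/*
 * Hash-table miss on an SPE-side access.  Kernel-region faults are
 * resolved inline via hash_page(); user faults are recorded in
 * spu->dar/dsisr and handed to the stop callback so they can be
 * resolved from process context (see spu_irq_class_1_bottom below).
 */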
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
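
/*
 * Class 0 interrupt: SPU error conditions (DMA alignment, invalid MFC
 * DMA, SPU error).  The hard handler only latches the event; the real
 * work happens in spu_irq_class_0_bottom.
 */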
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
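
/*
 * Class 1 interrupt: MFC translation faults, i.e. SLB segment misses
 * and hash-table mapping faults.  Status is read and cleared
 * atomically under register_lock before dispatching.
 */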
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
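
/*
 * Class 2 interrupt: application events (mailboxes, stop-and-signal,
 * halt, DMA tag-group completion).  The level-triggered mailbox
 * sources are masked before the status is acknowledged.
 */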
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
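
/*
 * Request one Linux interrupt per interrupt class, named "speNN.C"
 * after the SPU number and the class.  On failure, any interrupts
 * that were already requested are released again.
 */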
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

static struct list_head spu_list[MAX_NUMNODES];
static DEFINE_MUTEX(spu_mutex);
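
/*
 * Bring the SPU channels into a known state before the SPU is handed
 * to a new context: zero out the data of the listed channels and
 * reset their channel counts to sane initial values.
 */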
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
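
/*
 * SPU allocation: spu_alloc_node() takes a free SPU off the per-node
 * free list (reinitializing its channels on the way out), spu_alloc()
 * falls back to scanning all nodes, and spu_free() returns an SPU to
 * its node's list.  All three serialize on spu_mutex.
 */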
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d %d\n",
			 spu->isrc, spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
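
/*
 * Resolve a user-space fault recorded in spu->dar/dsisr against the
 * owning mm: look up the VMA, check access permissions, and call
 * handle_mm_fault() to page the mapping in.  Runs from process
 * context, so it may sleep.
 */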
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}
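
/*
 * Deferred half of the class 1 handler, run from the controlling
 * thread: first retry the hashed page table, then fall back to a full
 * mm fault, and either restart the DMA or report an invalid-DMA event.
 */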
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		unsigned long flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
	const unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = get_property(cpu, "node-id", NULL);

	return id ? *id : 0;
}
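
/*
 * Add the memory range described by an SPE device-tree property to
 * the kernel memory model via __add_pages(), so that the range is
 * backed by struct pages (the local store is used as RAM, not I/O
 * memory) before it is mapped.
 */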
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}
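
/*
 * Map one of the SPE register areas (or the local store) described by
 * an "address, length" device-tree property into the kernel address
 * space.
 */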
static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	const void *p;
	int proplen;
	void __iomem *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}

/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;

	/* Get the interrupt source unit from the device-tree */
	tmp = get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	/* Add the node number */
	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
	spu->isrc = isrc;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}
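
/*
 * Fill in the spu structure from the device tree: name, local store,
 * problem state, and the priv1/priv2 register areas.  priv1 may be
 * absent when running under a hypervisor, so its mapping is allowed
 * to fail.
 */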
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);
	return sprintf(buf, "%d\n", spu->isrc);
}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);

extern int attach_sysdev_to_node(struct sys_device *dev, int nid);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	if (spu->isrc != 0)
		sysdev_create_file(&spu->sysdev, &attr_isrc);
	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysdev_remove_file(&spu->sysdev, &attr_isrc);
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}
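
/*
 * Create and register one SPU from its device-tree node: map its
 * registers and interrupts, assign it a number, request its IRQs,
 * expose it through sysfs, and put it on its node's free list.
 */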
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	ret = spu_map_interrupts(spu, spe);
	if (ret)
		goto out_unmap;

	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);

	mutex_lock(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		 spu->name, spu->isrc, spu->local_store,
		 spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unlock:
	mutex_unlock(&spu_mutex);
out_unmap:
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);
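
/*
 * Module init: register the sysdev class, then walk all "spe" nodes
 * in the device tree and create an SPU for each.  Any failure tears
 * down everything created so far.
 */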
static int __init init_spu_base(void)
{
	struct device_node *node;
	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");