spu_base.c

/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>

const struct spu_management_ops *spu_management_ops;
const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
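
/*
 * An SPE generated an SLB miss: fill in an SLB entry for the segment
 * containing the faulting effective address, using a simple round-robin
 * slot replacement, then restart the suspended MFC DMA.
 */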
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			llp = mmu_psize_defs[mmu_huge_psize].sllp;
		else
#endif
			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER | llp;
		break;
	case VMALLOC_REGION_ID:
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	case KERNEL_REGION_ID:
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
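
/*
 * Class 0 interrupts report SPU errors: invalid DMA alignment, invalid
 * MFC DMA commands and errors on the SPU itself.  The hard handler only
 * flags the event and wakes the owning context; the decoding happens in
 * spu_irq_class_0_bottom() below.
 */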
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
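
/*
 * Class 1 interrupts are MFC translation faults: segment faults are
 * fixed up here via __spu_trap_data_seg(), mapping faults go through
 * __spu_trap_data_map(), which hashes kernel addresses in directly and
 * defers user faults to process context through the stop callback.
 */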
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
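
/*
 * Class 2 interrupts signal mailbox traffic, SPU stop-and-signal, SPU
 * halt and DMA tag group completion.  The level-triggered mailbox
 * interrupts are masked before the status is acknowledged, then the
 * registered callbacks are invoked for each pending event.
 */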
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}
static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
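
/*
 * Put the SPU channels into a known state before the SPU is handed out:
 * zero the data of the listed channels and set the channel counts to
 * meaningful initial values.
 */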
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
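
/*
 * SPU allocation interface: spu_alloc_node() hands out an idle SPU from
 * one NUMA node's free list, spu_alloc() tries all nodes in order, and
 * spu_free() returns the SPU to its node's list.
 *
 * A minimal, hypothetical caller might look like this (sketch only,
 * error handling and context binding omitted):
 *
 *	struct spu *spu = spu_alloc();
 *	if (spu) {
 *		... load and run an SPU context ...
 *		spu_free(spu);
 *	}
 */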
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
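
/*
 * Resolve an MFC access fault against the owning process' address
 * space, mirroring the main PowerPC page fault path: look up the VMA,
 * check the access permissions and call handle_mm_fault().
 */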
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}
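
/*
 * Bottom half for class 1 faults, run in process context: try to hash
 * the faulting address in directly; if that fails, fall back to the
 * full mm fault path above.  On success the suspended DMA is restarted,
 * otherwise a data storage event is reported to the context.
 */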
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
	}
	return ret;
}
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
	sysdev_unregister(&spu->sysdev);
}
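
/*
 * Called once per SPU found by spu_enumerate_spus(): allocate the
 * struct spu, set up the MFC registers, request the three interrupt
 * classes, register the sysdev and put the SPU on the free lists.
 */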
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spin_lock_init(&spu->register_lock);
	mutex_lock(&spu_mutex);
	spu->number = number++;
	mutex_unlock(&spu_mutex);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&spu_mutex);
	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	mutex_unlock(&spu_mutex);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);
	list_del_init(&spu->full_list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	kfree(spu);
}
static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	int i, ret;

	if (!spu_management_ops)
		return 0;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = spu_enumerate_spus(create_spu);
	if (ret) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		cleanup_spu_base();
		return ret;
	}

	xmon_register_spus(&spu_full_list);

	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");