/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static DEFINE_SPINLOCK(spu_list_lock);
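
/*
 * Invalidate all SLB entries of one SPE. The write is skipped while
 * the MFC is running with translation disabled (relocate bit clear in
 * state register 1), where the SLB is not in use.
 */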
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
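/*
 * Setting every bit in cpu_vm_mask (one bit more than NR_CPUS on UP
 * kernels) makes the mm look like it is active on more than one CPU,
 * so the MM code uses a broadcast tlbie instead of the CPU-local
 * tlbiel; only the broadcast form can be snooped by the SPE MMUs.
 */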
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
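
/*
 * Restart a stalled MFC DMA queue once the fault that suspended it
 * has been resolved. Skipped while a context switch is pending; the
 * context-switch code handles the MFC state itself in that case.
 */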
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
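
/*
 * SLB miss on an SPE-side access: construct an SLB entry for the
 * faulting effective address (user, vmalloc/IO or linear kernel
 * mapping), write it into the SPE's SLB with a round-robin choice
 * of the eight slots, and restart the suspended DMA.
 */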
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n",
		       __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
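
/*
 * Hash-table miss on an SPE-side access. Kernel-region misses are
 * resolved immediately via hash_page(); user-space misses are
 * deferred to the controlling thread, so the fault state is saved
 * in spu->dar/dsisr and the thread is woken through the stop
 * callback. The mb() orders the saved state before the wakeup.
 */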
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n",
		       __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
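
/*
 * Class 0 interrupts report SPE errors: DMA alignment, invalid DMA
 * and SPU errors. The hard handler only records that an event is
 * pending; status is read and cleared in spu_irq_class_0_bottom().
 */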
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
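
/*
 * Class 1 interrupts are the translation faults raised by the MFC:
 * segment (SLB) misses and mapping (hash-table) misses. Status and
 * fault registers are sampled and cleared atomically under
 * register_lock before the individual faults are serviced.
 */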
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
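
/*
 * Class 2 interrupts come from the application side: mailboxes,
 * stop-and-signal, halt and MFC tag-group completion. They are
 * dispatched to the callbacks installed by the owner of the SPU.
 */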
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof(spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof(spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof(spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
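
/*
 * SPE channels are reached indirectly through priv2: the channel
 * index is written to spu_chnlcntptr_RW, after which that channel's
 * data and count are accessible through spu_chnldata_RW and
 * spu_chnlcnt_RW. Drain all channel data to zero and set the counts
 * to sane initial values before handing the SPU out.
 */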
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&cbe_spu_info[node].free_spus)) {
		spu = list_entry(cbe_spu_info[node].free_spus.next, struct spu,
				 list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);
	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu"),
	.shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
		       spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}
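
/*
 * Locking: spu_mutex serializes list updates from process context
 * (creation, allocation, sysdev attributes), while spu_list_lock
 * additionally protects spu_full_list against readers that may run
 * with interrupts disabled, such as spu_flush_all_slbs().
 */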
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof(*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spin_lock_init(&spu->register_lock);
	mutex_lock(&spu_mutex);
	spu->number = number++;
	mutex_unlock(&spu_mutex);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&spu_mutex);
	spin_lock_irqsave(&spu_list_lock, flags);
	list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_list_lock, flags);
	mutex_unlock(&spu_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
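
/*
 * Layout of the per-SPU "stat" file: the current utilization state
 * followed by the accumulated user, system, iowait and idle-loaded
 * times in milliseconds, then the voluntary/involuntary context
 * switch, SLB/hash/minor/major fault, class 2 interrupt and library
 * assist counters.
 */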
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

/* read-only attribute: no store method, so drop the write bits */
static SYSDEV_ATTR(stat, 0444, spu_stat_show, NULL);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
		INIT_LIST_HEAD(&cbe_spu_info[i].free_spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because of section type conflicts
		 * between const and __initdata with different compiler
		 * versions
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	spu_add_sysdev_attr(&attr_stat);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);
  582. MODULE_LICENSE("GPL");
  583. MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");