spu_base.c

/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

struct spu_slb {
	u64 esid, vsid;
};

void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);
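
/*
 * Kick a suspended MFC DMA queue back into action.  If a context switch is
 * in progress we must not touch the queue; instead note that a fault is
 * pending so the restart can be issued once the context has been restored.
 */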
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
	else {
		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
		mb();
	}
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}
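
/*
 * Handle an SLB miss on behalf of the SPU: build a new SLB entry for the
 * faulting effective address and install it, using a simple round-robin
 * replacement policy across the eight SPU SLB slots.
 */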
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct mm_struct *mm = spu->mm;
	struct spu_slb slb;
	int psize;

	pr_debug("%s\n", __func__);

	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	slb.vsid |= mmu_psize_defs[psize].sllp;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	int ret;

	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately. User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    (REGION_ID(ea) != USER_REGION_ID)) {

		spin_unlock(&spu->register_lock);
		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
		spin_lock(&spu->register_lock);

		if (!ret) {
			spu_restart_dma(spu);
			return 0;
		}
	}

	spu->class_1_dar = ea;
	spu->class_1_dsisr = dsisr;

	spu->stop_callback(spu, 1);

	spu->class_1_dar = 0;
	spu->class_1_dsisr = 0;

	return 0;
}

static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct spu_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already a SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
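
/*
 * Class 0 interrupts report SPU and MFC error conditions.  Latch the raw
 * status and the faulting address for the owner of the context and let the
 * stop_callback decide how to react.
 */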
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->class_0_dar = spu_mfc_dar_get(spu);
	spu->stop_callback(spu, 0);
	spu->class_0_pending = 0;
	spu->class_0_dar = 0;

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}
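
/*
 * Class 1 interrupts are the translation faults raised by the MFC: SLB
 * segment misses are fixed up directly in __spu_trap_data_seg(), while
 * storage (hash) faults are passed on to __spu_trap_data_map().
 */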
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
		;

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
		;

	spu->class_1_dsisr = 0;
	spu->class_1_dar = 0;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
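
/*
 * Class 2 interrupts cover the SPU-side events: mailbox traffic, SPU stop
 * and halt, and DMA tag group completion, each forwarded to the callback
 * the owning context has registered.
 */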
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
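
/*
 * Bring the SPU channels into a known state: the channel data words are
 * zeroed and the channel counts are primed so that a freshly loaded context
 * starts with empty mailboxes and the expected number of free slots.
 */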
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

static struct sysdev_class spu_sysdev_class = {
	.name = "spu",
	.shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
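
/*
 * A minimal usage sketch (names are illustrative, not part of this file):
 * a client such as spufs can define a per-SPU attribute and attach it to
 * every known SPU in one call:
 *
 *	static ssize_t foo_show(struct sys_device *sysdev,
 *			struct sysdev_attribute *attr, char *buf)
 *	{
 *		struct spu *spu = container_of(sysdev, struct spu, sysdev);
 *		return sprintf(buf, "%d\n", spu->number);
 *	}
 *	static SYSDEV_ATTR(foo, 0444, foo_show, NULL);
 *
 *	spu_add_sysdev_attr(&attr_foo);
 */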

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->sysdev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}
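
/*
 * Instantiate one SPU: allocate the structure, assign a global number,
 * let the platform code create the device, set up SDR/SR1 and interrupts,
 * register it with sysfs and add it to the per-node and global lists.
 */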
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static ssize_t spu_stat_show(struct sys_device *sysdev,
				struct sysdev_attribute *attr, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because it causes section type
		 * conflicts between const and __initdata with different
		 * compiler versions.
		 */
		extern const struct linux_logo logo_spe_clut224;
		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);
	spu_init_affinity();

	return 0;

out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");