perf_event_mipsxx.c

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
    defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1UL << 0)
#define M_PERFCTL_KERNEL		(1UL << 1)
#define M_PERFCTL_SUPERVISOR		(1UL << 2)
#define M_PERFCTL_USER			(1UL << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1UL << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1UL << 30)
#define M_PERFCTL_MORE			(1UL << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					M_PERFCTL_KERNEL |		\
					M_PERFCTL_USER |		\
					M_PERFCTL_SUPERVISOR |		\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0

#define M_COUNTER_OVERFLOW		(1UL << 31)
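
/*
 * For illustration: a control-register value that counts event 5 in both
 * user and kernel mode, with an overflow interrupt, is assembled from the
 * bits above as
 *
 *	M_PERFCTL_EVENT(5) | M_PERFCTL_USER | M_PERFCTL_KERNEL |
 *		M_PERFCTL_INTERRUPT_ENABLE
 *
 * which is the same composition __hw_perf_event_init() and
 * mipsxx_pmu_enable_event() below perform for hwc->config_base.
 */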

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : smp_processor_id())
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

static unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}

#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id()	0

#endif /* CONFIG_MIPS_MT_SMP */
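
/*
 * For example, when the two VPEs of an MT core share one set of four
 * counters (no per-TC counters), counters_total_to_per_cpu(4) == 2: each
 * VPE sees two counters, and the accessors below pick the right physical
 * registers for the VPE that is currently running.
 */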

#define __define_perf_accessors(r, n, np)				\
									\
static unsigned int r_c0_ ## r ## n(void)				\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static void w_c0_ ## r ## n(unsigned int value)				\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
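
/*
 * Each expansion above generates a read/write pair. For instance,
 * __define_perf_accessors(perfcntr, 0, 2) produces r_c0_perfcntr0() and
 * w_c0_perfcntr0(), which touch PerfCnt register 0 when running on VPE 0
 * but register 2 when running on VPE 1, so logical counter 0 always means
 * "this VPE's first counter".
 */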

static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}
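
/*
 * __n_counters() probes the hardware: Config1.PC says whether performance
 * counters are present at all, and the M (MORE) bit in each control
 * register says whether another counter follows. n_counters() below skips
 * the probe for R10000 (2 counters) and R12000/R14000 (4 counters) and
 * hard-codes the counts instead.
 */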

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	switch (idx) {
	case 0:
		return r_c0_perfcntr0();
	case 1:
		return r_c0_perfcntr1();
	case 2:
		return r_c0_perfcntr2();
	case 3:
		return r_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	switch (idx) {
	case 0:
		w_c0_perfcntr0(val);
		return;
	case 1:
		w_c0_perfcntr1(val);
		return;
	case 2:
		w_c0_perfcntr2(val);
		return;
	case 3:
		w_c0_perfcntr3(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	switch (idx) {
	case 0:
		return r_c0_perfctrl0();
	case 1:
		return r_c0_perfctrl1();
	case 2:
		return r_c0_perfctrl2();
	case 3:
		return r_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	switch (idx) {
	case 0:
		w_c0_perfctrl0(val);
		return;
	case 1:
		w_c0_perfctrl1(val);
		return;
	case 2:
		w_c0_perfctrl2(val);
		return;
	case 3:
		w_c0_perfctrl3(val);
		return;
	}
}

#ifdef CONFIG_MIPS_MT_SMP
static DEFINE_RWLOCK(pmuint_rwlock);
#endif

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};
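
/*
 * Reading the tables above: a generic PERF_COUNT_HW_BRANCH_MISSES event
 * becomes hardware event 0x02 restricted to an odd counter on
 * 24K/34K/1004K, but event 0x27 on the 74K, which is why the 74K gets its
 * own map.
 */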

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};
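
/*
 * Cache events are looked up with the usual perf encoding,
 * attr.config = (cache id) | (op << 8) | (result << 16). For example, L1D
 * read misses (config 0x10000) resolve to { 0x0b, CNTR_EVEN | CNTR_ODD, T }
 * in the table above; the decode itself is done by
 * mipspmu_map_cache_event(), called from __hw_perf_event_init() below.
 */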

/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	/* 74K core does not have specific DTLB events. */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu->map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	event->destroy = hw_perf_event_destroy;

	return err;
}
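
/*
 * As an example of the exclude-bit handling above: an attr with
 * exclude_user = 0 and exclude_kernel = 1 ends up with M_PERFCTL_USER set
 * in config_base but neither M_PERFCTL_KERNEL nor M_PERFCTL_EXL, so the
 * counter only ticks while the CPU is in user mode.
 */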

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int counters = mipspmu->num_counters;
	unsigned long flags;

	local_irq_save(flags);
	switch (counters) {
	case 4:
		cpuc->saved_ctrl[3] = r_c0_perfctrl3();
		w_c0_perfctrl3(cpuc->saved_ctrl[3] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
		/* fall through */
	case 3:
		cpuc->saved_ctrl[2] = r_c0_perfctrl2();
		w_c0_perfctrl2(cpuc->saved_ctrl[2] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
		/* fall through */
	case 2:
		cpuc->saved_ctrl[1] = r_c0_perfctrl1();
		w_c0_perfctrl1(cpuc->saved_ctrl[1] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
		/* fall through */
	case 1:
		cpuc->saved_ctrl[0] = r_c0_perfctrl0();
		w_c0_perfctrl0(cpuc->saved_ctrl[0] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	}
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int counters = mipspmu->num_counters;
	unsigned long flags;

	local_irq_save(flags);
	switch (counters) {
	case 4:
		w_c0_perfctrl3(cpuc->saved_ctrl[3]);
		/* fall through */
	case 3:
		w_c0_perfctrl2(cpuc->saved_ctrl[2]);
		/* fall through */
	case 2:
		w_c0_perfctrl1(cpuc->saved_ctrl[1]);
		/* fall through */
	case 1:
		w_c0_perfctrl0(cpuc->saved_ctrl[0]);
	}
	local_irq_restore(flags);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu->num_counters;
	unsigned int counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
		return handled;

	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = r_c0_perfcntr ## n();			\
			if (counter & M_COUNTER_OVERFLOW) {		\
				w_c0_perfcntr ## n(counter &		\
						VALID_COUNT);		\
				if (test_and_change_bit(n, cpuc->msbs))	\
					handle_associated_event(cpuc,	\
						n, &data, regs);	\
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_MT_SMP
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();

	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

static void mipsxx_pmu_start(void)
{
#ifdef CONFIG_MIPS_MT_SMP
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If a counter interrupt is raised while
 * we own the write lock, simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipsxx_pmu_stop(void)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	write_lock(&pmuint_rwlock);
#endif
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu->num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue that when the former kind of event takes the
		 * counter the latter kind of event wants to use, then the
		 * "counter allocation" for the latter event will fail. In
		 * fact, if they could be dynamically swapped, both would
		 * be happy. But here we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}
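
/*
 * A concrete instance of the limitation described above, assuming
 * CNTR_EVEN/CNTR_ODD mark the even/odd counter indices an event may use:
 * on a core with two counters, an event usable on either counter is
 * handed counter 1 (the loop scans from the top), so a later odd-only
 * event finds its only candidate busy and gets -EAGAIN even though
 * swapping the two events would have let both run.
 */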

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupt enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
	local_irq_restore(flags);
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

/* 24K */
#define IS_UNSUPPORTED_24K_EVENT(r, b)					\
	((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 ||		\
	 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 ||		\
	 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 ||		\
	 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) ||	\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_UNSUPPORTED_34K_EVENT(r, b)					\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 ||		\
	 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) ||	\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_UNSUPPORTED_74K_EVENT(r, b)					\
	((r) == 5 || ((r) >= 135 && (r) <= 137) ||			\
	 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 ||		\
	 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) ||		\
	 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 ||		\
	 (b) == 61 || (r) == 62 || (r) == 191 ||			\
	 ((b) >= 64 && (b) <= 127))
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_UNSUPPORTED_1004K_EVENT(r, b)				\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 ||		\
	 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/*
 * Users can pass raw event numbers 0-255, where 0-127 select events for
 * the even counters and 128-255 select events for the odd counters; bit 7
 * encodes the parity. So, for example, to count Event Num 15 (from the
 * user manual) on an odd counter, 128 must be added to 15, i.e. the event
 * config to use is 143 (0x8f).
 */
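/*
 * With the perf tool, raw events are given in hex with an "r" prefix, so
 * the 0x8f example above would be requested with something like
 * "perf stat -e r8f <command>".
 */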

static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	}

	return &raw_event;
}

static struct mips_pmu mipsxxcore_pmu = {
	.handle_irq = mipsxx_pmu_handle_irq,
	.handle_shared_irq = mipsxx_pmu_handle_shared_irq,
	.start = mipsxx_pmu_start,
	.stop = mipsxx_pmu_stop,
	.alloc_counter = mipsxx_pmu_alloc_counter,
	.read_counter = mipsxx_pmu_read_counter,
	.write_counter = mipsxx_pmu_write_counter,
	.enable_event = mipsxx_pmu_enable_event,
	.disable_event = mipsxx_pmu_disable_event,
	.map_raw_event = mipsxx_pmu_map_raw_event,
	.general_event_map = &mipsxxcore_event_map,
	.cache_event_map = &mipsxxcore_cache_map,
};

static struct mips_pmu mipsxx74Kcore_pmu = {
	.handle_irq = mipsxx_pmu_handle_irq,
	.handle_shared_irq = mipsxx_pmu_handle_shared_irq,
	.start = mipsxx_pmu_start,
	.stop = mipsxx_pmu_stop,
	.alloc_counter = mipsxx_pmu_alloc_counter,
	.read_counter = mipsxx_pmu_read_counter,
	.write_counter = mipsxx_pmu_write_counter,
	.enable_event = mipsxx_pmu_enable_event,
	.disable_event = mipsxx_pmu_disable_event,
	.map_raw_event = mipsxx_pmu_map_raw_event,
	.general_event_map = &mipsxx74Kcore_event_map,
	.cache_event_map = &mipsxx74Kcore_cache_map,
};

static int __init
init_hw_perf_events(void)
{
	int counters, irq;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
		if (cp0_perfcount_irq >= 0)
			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
		else
			irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	switch (current_cpu_type()) {
	case CPU_24K:
		mipsxxcore_pmu.name = "mips/24K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	case CPU_34K:
		mipsxxcore_pmu.name = "mips/34K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	case CPU_74K:
		mipsxx74Kcore_pmu.name = "mips/74K";
		mipsxx74Kcore_pmu.num_counters = counters;
		mipsxx74Kcore_pmu.irq = irq;
		mipspmu = &mipsxx74Kcore_pmu;
		break;
	case CPU_1004K:
		mipsxxcore_pmu.name = "mips/1004K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	if (mipspmu)
		pr_cont("%s PMU enabled, %d counters available to each "
			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
			irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);

#endif /* defined(CONFIG_CPU_MIPS32)... */