perf_event_mipsxx.c

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
    defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)

#define M_CONFIG1_PC (1 << 4)

#define M_PERFCTL_EXL (1UL << 0)
#define M_PERFCTL_KERNEL (1UL << 1)
#define M_PERFCTL_SUPERVISOR (1UL << 2)
#define M_PERFCTL_USER (1UL << 3)
#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
#define M_PERFCTL_WIDE (1UL << 30)
#define M_PERFCTL_MORE (1UL << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
                M_PERFCTL_KERNEL | \
                M_PERFCTL_USER | \
                M_PERFCTL_SUPERVISOR | \
                M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK 0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK 0x1f
#endif
#define M_PERFCTL_EVENT_MASK 0xfe0

#define M_COUNTER_OVERFLOW (1UL << 31)
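
/*
 * Rough PerfCtl bit layout implied by the definitions above (the MIPS32
 * PRA is the authoritative reference): bit 31 is M (M_PERFCTL_MORE),
 * bit 30 is W (M_PERFCTL_WIDE), bits 22 and up carry the TC id, bits
 * 21:20 the MT event filter, bits 16 and up the VPE id, bits 14:5 the
 * event number, bit 4 the interrupt enable and bits 3:0 the U/S/K/EXL
 * mode enables.
 */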

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
                0 : smp_processor_id())
#else
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
                0 : cpu_data[smp_processor_id()].vpe_id)
#endif

/* Copied from op_model_mipsxx.c */
static inline unsigned int vpe_shift(void)
{
        if (num_possible_cpus() > 1)
                return 1;

        return 0;
}

#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id() 0

static inline unsigned int vpe_shift(void)
{
        return 0;
}

#endif /* CONFIG_MIPS_MT_SMP */

static inline unsigned int
counters_total_to_per_cpu(unsigned int counters)
{
        return counters >> vpe_shift();
}

static inline unsigned int
counters_per_cpu_to_total(unsigned int counters)
{
        return counters << vpe_shift();
}

#define __define_perf_accessors(r, n, np) \
 \
static inline unsigned int r_c0_ ## r ## n(void) \
{ \
        unsigned int cpu = vpe_id(); \
 \
        switch (cpu) { \
        case 0: \
                return read_c0_ ## r ## n(); \
        case 1: \
                return read_c0_ ## r ## np(); \
        default: \
                BUG(); \
        } \
        return 0; \
} \
 \
static inline void w_c0_ ## r ## n(unsigned int value) \
{ \
        unsigned int cpu = vpe_id(); \
 \
        switch (cpu) { \
        case 0: \
                write_c0_ ## r ## n(value); \
                return; \
        case 1: \
                write_c0_ ## r ## np(value); \
                return; \
        default: \
                BUG(); \
        } \
        return; \
} \

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
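
/*
 * Worked example for the VSMP case (no per-TC counters): with two VPEs,
 * vpe_shift() is 1, so counters_total_to_per_cpu(4) reports 2 counters
 * per CPU, and the accessors above redirect "counter 0" on VPE 1 to the
 * physical perfcntr2/perfctrl2 registers (n/np pairs 0/2, 1/3, 2/0, 3/1).
 */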

static inline int __n_counters(void)
{
        if (!(read_c0_config1() & M_CONFIG1_PC))
                return 0;
        if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
                return 1;
        if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
                return 2;
        if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
                return 3;

        return 4;
}

static inline int n_counters(void)
{
        int counters;

        switch (current_cpu_type()) {
        case CPU_R10000:
                counters = 2;
                break;
        case CPU_R12000:
        case CPU_R14000:
                counters = 4;
                break;
        default:
                counters = __n_counters();
        }

        return counters;
}

static void reset_counters(void *arg)
{
        int counters = (int)(long)arg;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(0);
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(0);
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(0);
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(0);
        }
}

static inline u64
mipsxx_pmu_read_counter(unsigned int idx)
{
        switch (idx) {
        case 0:
                return r_c0_perfcntr0();
        case 1:
                return r_c0_perfcntr1();
        case 2:
                return r_c0_perfcntr2();
        case 3:
                return r_c0_perfcntr3();
        default:
                WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
                return 0;
        }
}

static inline void
mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
        switch (idx) {
        case 0:
                w_c0_perfcntr0(val);
                return;
        case 1:
                w_c0_perfcntr1(val);
                return;
        case 2:
                w_c0_perfcntr2(val);
                return;
        case 3:
                w_c0_perfcntr3(val);
                return;
        }
}

static inline unsigned int
mipsxx_pmu_read_control(unsigned int idx)
{
        switch (idx) {
        case 0:
                return r_c0_perfctrl0();
        case 1:
                return r_c0_perfctrl1();
        case 2:
                return r_c0_perfctrl2();
        case 3:
                return r_c0_perfctrl3();
        default:
                WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
                return 0;
        }
}

static inline void
mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
        switch (idx) {
        case 0:
                w_c0_perfctrl0(val);
                return;
        case 1:
                w_c0_perfctrl1(val);
                return;
        case 2:
                w_c0_perfctrl2(val);
                return;
        case 3:
                w_c0_perfctrl3(val);
                return;
        }
}

#ifdef CONFIG_MIPS_MT_SMP
static DEFINE_RWLOCK(pmuint_rwlock);
#endif

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
                                [PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
        [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
        [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
                                [PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
        [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
        [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        /*
         * Like some other architectures (e.g. ARM), the performance
         * counters don't differentiate between read and write
         * accesses/misses, so this isn't strictly correct, but it's the
         * best we can do. Writes and reads get combined.
         */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
                /*
                 * Note that MIPS has only "hit" events countable for
                 * the prefetch operation.
                 */
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
                [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
                [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(BPU)] = {
        /* Using the same code for *HW_BRANCH* */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(NODE)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
};

/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        /*
         * Like some other architectures (e.g. ARM), the performance
         * counters don't differentiate between read and write
         * accesses/misses, so this isn't strictly correct, but it's the
         * best we can do. Writes and reads get combined.
         */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
                [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
                [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
                /*
                 * Note that MIPS has only "hit" events countable for
                 * the prefetch operation.
                 */
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
                [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
                [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(DTLB)] = {
        /* 74K core does not have specific DTLB events. */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(BPU)] = {
        /* Using the same code for *HW_BRANCH* */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
                [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
[C(NODE)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
                [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
        },
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void
check_and_calc_range(struct perf_event *event,
                     const struct mips_perf_event *pev)
{
        struct hw_perf_event *hwc = &event->hw;

        if (event->cpu >= 0) {
                if (pev->range > V) {
                        /*
                         * The user selected an event that is processor
                         * wide, while expecting it to be VPE wide.
                         */
                        hwc->config_base |= M_TC_EN_ALL;
                } else {
                        /*
                         * FIXME: cpu_data[event->cpu].vpe_id reports 0
                         * for both CPUs.
                         */
                        hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
                        hwc->config_base |= M_TC_EN_VPE;
                }
        } else
                hwc->config_base |= M_TC_EN_ALL;
}
#else
static void
check_and_calc_range(struct perf_event *event,
                     const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        const struct mips_perf_event *pev;
        int err;

        /* Returning MIPS event descriptor for generic perf event. */
        if (PERF_TYPE_HARDWARE == event->attr.type) {
                if (event->attr.config >= PERF_COUNT_HW_MAX)
                        return -EINVAL;
                pev = mipspmu_map_general_event(event->attr.config);
        } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
                pev = mipspmu_map_cache_event(event->attr.config);
        } else if (PERF_TYPE_RAW == event->attr.type) {
                /* We are working on the global raw event. */
                mutex_lock(&raw_event_mutex);
                pev = mipspmu->map_raw_event(event->attr.config);
        } else {
                /* The event type is not (yet) supported. */
                return -EOPNOTSUPP;
        }

        if (IS_ERR(pev)) {
                if (PERF_TYPE_RAW == event->attr.type)
                        mutex_unlock(&raw_event_mutex);
                return PTR_ERR(pev);
        }

        /*
         * We allow max flexibility on how each individual counter shared
         * by the single CPU operates (the mode exclusion and the range).
         */
        hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

        /* Calculate range bits and validate it. */
        if (num_possible_cpus() > 1)
                check_and_calc_range(event, pev);

        hwc->event_base = mipspmu_perf_event_encode(pev);
        if (PERF_TYPE_RAW == event->attr.type)
                mutex_unlock(&raw_event_mutex);

        if (!attr->exclude_user)
                hwc->config_base |= M_PERFCTL_USER;
        if (!attr->exclude_kernel) {
                hwc->config_base |= M_PERFCTL_KERNEL;
                /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
                hwc->config_base |= M_PERFCTL_EXL;
        }
        if (!attr->exclude_hv)
                hwc->config_base |= M_PERFCTL_SUPERVISOR;

        hwc->config_base &= M_PERFCTL_CONFIG_MASK;
        /*
         * The event can belong to another cpu. We do not assign a local
         * counter for it for now.
         */
        hwc->idx = -1;
        hwc->config = 0;

        if (!hwc->sample_period) {
                hwc->sample_period = MAX_PERIOD;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        err = 0;
        if (event->group_leader != event) {
                err = validate_group(event);
                if (err)
                        return -EINVAL;
        }

        event->destroy = hw_perf_event_destroy;

        return err;
}
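
/*
 * A worked example of the mode-exclusion mapping above (ignoring the MT
 * range bits): attr.exclude_kernel = 1 with exclude_user = exclude_hv = 0
 * leaves config_base as M_PERFCTL_INTERRUPT_ENABLE | M_PERFCTL_USER |
 * M_PERFCTL_SUPERVISOR, so the counter does not advance in kernel mode.
 */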

static void pause_local_counters(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int counters = mipspmu->num_counters;
        unsigned long flags;

        local_irq_save(flags);
        switch (counters) {
        case 4:
                cpuc->saved_ctrl[3] = r_c0_perfctrl3();
                w_c0_perfctrl3(cpuc->saved_ctrl[3] &
                        ~M_PERFCTL_COUNT_EVENT_WHENEVER);
        case 3:
                cpuc->saved_ctrl[2] = r_c0_perfctrl2();
                w_c0_perfctrl2(cpuc->saved_ctrl[2] &
                        ~M_PERFCTL_COUNT_EVENT_WHENEVER);
        case 2:
                cpuc->saved_ctrl[1] = r_c0_perfctrl1();
                w_c0_perfctrl1(cpuc->saved_ctrl[1] &
                        ~M_PERFCTL_COUNT_EVENT_WHENEVER);
        case 1:
                cpuc->saved_ctrl[0] = r_c0_perfctrl0();
                w_c0_perfctrl0(cpuc->saved_ctrl[0] &
                        ~M_PERFCTL_COUNT_EVENT_WHENEVER);
        }
        local_irq_restore(flags);
}

static void resume_local_counters(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int counters = mipspmu->num_counters;
        unsigned long flags;

        local_irq_save(flags);
        switch (counters) {
        case 4:
                w_c0_perfctrl3(cpuc->saved_ctrl[3]);
        case 3:
                w_c0_perfctrl2(cpuc->saved_ctrl[2]);
        case 2:
                w_c0_perfctrl1(cpuc->saved_ctrl[1]);
        case 1:
                w_c0_perfctrl0(cpuc->saved_ctrl[0]);
        }
        local_irq_restore(flags);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_sample_data data;
        unsigned int counters = mipspmu->num_counters;
        unsigned int counter;
        int handled = IRQ_NONE;
        struct pt_regs *regs;

        if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
                return handled;

        /*
         * First we pause the local counters, so that when we are locked
         * here, the counters are all paused. When it gets locked due to
         * perf_disable(), the timer interrupt handler will be delayed.
         *
         * See also mipsxx_pmu_start().
         */
        pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
        read_lock(&pmuint_rwlock);
#endif

        regs = get_irq_regs();

        perf_sample_data_init(&data, 0);

        switch (counters) {
#define HANDLE_COUNTER(n) \
        case n + 1: \
                if (test_bit(n, cpuc->used_mask)) { \
                        counter = r_c0_perfcntr ## n(); \
                        if (counter & M_COUNTER_OVERFLOW) { \
                                w_c0_perfcntr ## n(counter & \
                                                   VALID_COUNT); \
                                if (test_and_change_bit(n, cpuc->msbs)) \
                                        handle_associated_event(cpuc, \
                                                n, &data, regs); \
                                handled = IRQ_HANDLED; \
                        } \
                }
        HANDLE_COUNTER(3)
        HANDLE_COUNTER(2)
        HANDLE_COUNTER(1)
        HANDLE_COUNTER(0)
        }

        /*
         * Do all the work for the pending perf events. We can do this
         * in here because the performance counter interrupt is a regular
         * interrupt, not NMI.
         */
        if (handled == IRQ_HANDLED)
                irq_work_run();

#ifdef CONFIG_MIPS_MT_SMP
        read_unlock(&pmuint_rwlock);
#endif
        resume_local_counters();
        return handled;
}

static irqreturn_t
mipsxx_pmu_handle_irq(int irq, void *dev)
{
        return mipsxx_pmu_handle_shared_irq();
}

static void mipsxx_pmu_start(void)
{
#ifdef CONFIG_MIPS_MT_SMP
        write_unlock(&pmuint_rwlock);
#endif
        resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers cannot
 * be directly accessed across CPUs, so global control needs cross-CPU
 * calls. on_each_cpu() could help, but we cannot guarantee that this
 * function is called with interrupts enabled. So here we pause the local
 * counters, then grab a rwlock and leave the counters on other CPUs
 * alone. If a counter interrupt is raised while we own the write lock,
 * the handler on that CPU simply pauses its local counters and spins.
 * Also, we know we won't be switched to another CPU after pausing the
 * local counters and before grabbing the lock.
 */
static void mipsxx_pmu_stop(void)
{
        pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
        write_lock(&pmuint_rwlock);
#endif
}

static int
mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
                         struct hw_perf_event *hwc)
{
        int i;

        /*
         * We only need to care about the counter mask. The range has
         * already been checked.
         */
        unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

        for (i = mipspmu->num_counters - 1; i >= 0; i--) {
                /*
                 * Note that some MIPS perf events can be counted by both
                 * even and odd counters, whereas many others can only use
                 * even _or_ odd counters. This introduces an issue: when
                 * the former kind of event takes the counter that the
                 * latter kind of event wants to use, the "counter
                 * allocation" for the latter event fails. If the counters
                 * could be swapped dynamically, both events could be
                 * satisfied, but we leave this issue alone for now.
                 */
                if (test_bit(i, &cntr_mask) &&
                    !test_and_set_bit(i, cpuc->used_mask))
                        return i;
        }

        return -EAGAIN;
}

static void
mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long flags;

        WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

        local_irq_save(flags);
        cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
                (evt->config_base & M_PERFCTL_CONFIG_MASK) |
                /* Make sure the interrupt is enabled. */
                M_PERFCTL_INTERRUPT_ENABLE;
        /*
         * We do not actually let the counter run. Leave it until start().
         */
        local_irq_restore(flags);
}

static void
mipsxx_pmu_disable_event(int idx)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long flags;

        WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

        local_irq_save(flags);
        cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
                ~M_PERFCTL_COUNT_EVENT_WHENEVER;
        mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
        local_irq_restore(flags);
}

/* 24K */
#define IS_UNSUPPORTED_24K_EVENT(r, b) \
        ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
         (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
         (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
         (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
         ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_24K_EVENT(b) \
        ((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_UNSUPPORTED_34K_EVENT(r, b) \
        ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
         (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
         ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_34K_EVENT(b) \
        ((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b) \
        ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
         (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
         (r) == 176 || ((b) >= 50 && (b) <= 55) || \
         ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
#endif

/* 74K */
#define IS_UNSUPPORTED_74K_EVENT(r, b) \
        ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
         ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
         (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
         (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
         (b) == 61 || (r) == 62 || (r) == 191 || \
         ((b) >= 64 && (b) <= 127))
#define IS_BOTH_COUNTERS_74K_EVENT(b) \
        ((b) == 0 || (b) == 1)

/* 1004K */
#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
        ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
         (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
        ((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b) \
        ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
         (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
         (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
         (r) == 188 || (b) == 61 || (b) == 62 || \
         ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
#endif

/*
 * User can use 0-255 raw events, where 0-127 select events for the even
 * counters and 128-255 select events for the odd counters; in other
 * words, bit 7 indicates the parity. So, for example, when the user
 * wants Event Num 15 (from the user manual) on an odd counter, 128 must
 * be added to 15 as the input for the event config, i.e. 143 (0x8F).
 */
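
/*
 * For example, a raw config of 0x8f decodes below as raw_id = 0x8f and
 * base_id = 0x0f, i.e. event 15 restricted to the odd counters (unless
 * it is one of the "both counters" events). With the perf tool this is
 * typically requested as a raw event, e.g. "perf stat -e r8f ..."
 * (assuming the usual rNN raw-event syntax).
 */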
static const struct mips_perf_event *
mipsxx_pmu_map_raw_event(u64 config)
{
        unsigned int raw_id = config & 0xff;
        unsigned int base_id = raw_id & 0x7f;

        switch (current_cpu_type()) {
        case CPU_24K:
                if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
                        return ERR_PTR(-EOPNOTSUPP);
                raw_event.event_id = base_id;
                if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
                        raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
                else
                        raw_event.cntr_mask =
                                raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
                /*
                 * This is actually doing nothing. Non-multithreading
                 * CPUs will not check and calculate the range.
                 */
                raw_event.range = P;
#endif
                break;
        case CPU_34K:
                if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
                        return ERR_PTR(-EOPNOTSUPP);
                raw_event.event_id = base_id;
                if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
                        raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
                else
                        raw_event.cntr_mask =
                                raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
                if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
                        raw_event.range = P;
                else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
                        raw_event.range = V;
                else
                        raw_event.range = T;
#endif
                break;
        case CPU_74K:
                if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
                        return ERR_PTR(-EOPNOTSUPP);
                raw_event.event_id = base_id;
                if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
                        raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
                else
                        raw_event.cntr_mask =
                                raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
                raw_event.range = P;
#endif
                break;
        case CPU_1004K:
                if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
                        return ERR_PTR(-EOPNOTSUPP);
                raw_event.event_id = base_id;
                if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
                        raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
                else
                        raw_event.cntr_mask =
                                raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
                if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
                        raw_event.range = P;
                else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
                        raw_event.range = V;
                else
                        raw_event.range = T;
#endif
                break;
        }

        return &raw_event;
}

static struct mips_pmu mipsxxcore_pmu = {
        .handle_irq = mipsxx_pmu_handle_irq,
        .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
        .start = mipsxx_pmu_start,
        .stop = mipsxx_pmu_stop,
        .alloc_counter = mipsxx_pmu_alloc_counter,
        .read_counter = mipsxx_pmu_read_counter,
        .write_counter = mipsxx_pmu_write_counter,
        .enable_event = mipsxx_pmu_enable_event,
        .disable_event = mipsxx_pmu_disable_event,
        .map_raw_event = mipsxx_pmu_map_raw_event,
        .general_event_map = &mipsxxcore_event_map,
        .cache_event_map = &mipsxxcore_cache_map,
};

static struct mips_pmu mipsxx74Kcore_pmu = {
        .handle_irq = mipsxx_pmu_handle_irq,
        .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
        .start = mipsxx_pmu_start,
        .stop = mipsxx_pmu_stop,
        .alloc_counter = mipsxx_pmu_alloc_counter,
        .read_counter = mipsxx_pmu_read_counter,
        .write_counter = mipsxx_pmu_write_counter,
        .enable_event = mipsxx_pmu_enable_event,
        .disable_event = mipsxx_pmu_disable_event,
        .map_raw_event = mipsxx_pmu_map_raw_event,
        .general_event_map = &mipsxx74Kcore_event_map,
        .cache_event_map = &mipsxx74Kcore_cache_map,
};

static int __init
init_hw_perf_events(void)
{
        int counters, irq;

        pr_info("Performance counters: ");

        counters = n_counters();
        if (counters == 0) {
                pr_cont("No available PMU.\n");
                return -ENODEV;
        }

#ifdef CONFIG_MIPS_MT_SMP
        cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
        if (!cpu_has_mipsmt_pertccounters)
                counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
        if (cpu_has_veic) {
                /*
                 * Using platform specific interrupt controller defines.
                 */
                irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
        } else {
#endif
        if (cp0_perfcount_irq >= 0)
                irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        else
                irq = -1;
#ifdef MSC01E_INT_BASE
        }
#endif

        on_each_cpu(reset_counters, (void *)(long)counters, 1);

        switch (current_cpu_type()) {
        case CPU_24K:
                mipsxxcore_pmu.name = "mips/24K";
                mipsxxcore_pmu.num_counters = counters;
                mipsxxcore_pmu.irq = irq;
                mipspmu = &mipsxxcore_pmu;
                break;
        case CPU_34K:
                mipsxxcore_pmu.name = "mips/34K";
                mipsxxcore_pmu.num_counters = counters;
                mipsxxcore_pmu.irq = irq;
                mipspmu = &mipsxxcore_pmu;
                break;
        case CPU_74K:
                mipsxx74Kcore_pmu.name = "mips/74K";
                mipsxx74Kcore_pmu.num_counters = counters;
                mipsxx74Kcore_pmu.irq = irq;
                mipspmu = &mipsxx74Kcore_pmu;
                break;
        case CPU_1004K:
                mipsxxcore_pmu.name = "mips/1004K";
                mipsxxcore_pmu.num_counters = counters;
                mipsxxcore_pmu.irq = irq;
                mipspmu = &mipsxxcore_pmu;
                break;
        default:
                pr_cont("Either hardware does not support performance "
                        "counters, or not yet implemented.\n");
                return -ENODEV;
        }

        if (mipspmu)
                pr_cont("%s PMU enabled, %d counters available to each "
                        "CPU, irq %d%s\n", mipspmu->name, counters, irq,
                        irq < 0 ? " (share with timer interrupt)" : "");

        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

        return 0;
}
early_initcall(init_hw_perf_events);

#endif /* defined(CONFIG_CPU_MIPS32)... */