perf_event_mipsxx.c

/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event *events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in performance counters. They use this differently,
	 * and even may not use it.
	 */
	unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};
/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
#define CNTR_EVEN	0x55555555
#define CNTR_ODD	0xaaaaaaaa
#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T = 0,
		V = 1,
		P = 2,
	} range;
#else
#define T
#define V
#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64 max_period;
	u64 valid_count;
	u64 overflow;
	const char *name;
	int irq;
	u64 (*read_counter)(unsigned int idx);
	void (*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int num_counters;
};

static struct mips_pmu mipspmu;
#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#endif /* CONFIG_CPU_BMIPS5000 */

#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)
#define M_PERFCTL_TC			(1 << 30)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |	\
					M_PERFCTL_KERNEL |	\
					M_PERFCTL_USER |	\
					M_PERFCTL_SUPERVISOR |	\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : smp_processor_id())
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);
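
/*
 * When the counters are shared between the VPEs of a core, the second
 * VPE's events live in hardware counters 2 and 3, so rotate perf's
 * logical counter index onto the right hardware counter pair.
 */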
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}
static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}
static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue that when the former kind of event takes the
		 * counter the latter kind of event wants to use, the
		 * "counter allocation" for the latter event will fail.
		 * If the two could be dynamically swapped, both would be
		 * satisfied, but we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
		    !test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}
static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupt enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | M_PERFCTL_TC;

	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}
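
/*
 * Program the counter so that it overflows (and raises an interrupt)
 * after "left" more events: the hardware counts up from
 * mipspmu.overflow - left.
 */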
static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
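
/*
 * Fold the events counted since the last update into event->count. The
 * cmpxchg retry loop handles a racing update of prev_count, e.g. from
 * the overflow interrupt handler.
 */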
static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* To look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}
static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers cannot
 * be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross-CPU calls. on_each_cpu() can help us, but we
 * cannot make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt is raised while
 * we own the write lock, simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING,
				  "mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS "
				   "performance counters!\n", mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its "
			   "interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}
/*
 * mipsxx/rm9000/loongson2 have different performance counters; they have
 * specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				      &pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			    (void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};
static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
		[cache_type]
		[cache_op]
		[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}
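
/*
 * Dry-run the counter allocation for the whole group against a scratch
 * cpu_hw_events to make sure the group can be scheduled together.
 */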
static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}
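
/*
 * Probe how many counters are implemented: Config1.PC says whether a
 * PMU is present at all, and each control register's "M" (MORE) bit
 * says whether another counter/control pair follows.
 */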
static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}
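
/*
 * Clear the control and count registers of every implemented counter,
 * highest first. The case labels fall through deliberately.
 */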
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}
/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};
/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
	},
},
};
/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
	},
},
};
/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)] = { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)] = { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
	},
},
};
static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted, so use the same event
	 * for read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x37, CNTR_ALL },
	},
},
};
static const struct mips_perf_event xlp_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
		[C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
		[C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
		[C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
		[C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
		[C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted, so use the same event
	 * for read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x25, CNTR_ALL },
	},
},
};
#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = mipspmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}
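
/*
 * Stop every counter on this CPU by clearing its counting-mode and
 * interrupt-enable bits, saving the control values so that
 * resume_local_counters() can restore them.
 */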
static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}
static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}
/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)

/*
 * The user can use raw events 0-255: 0-127 select events for even
 * counters and 128-255 for odd counters, i.e. bit 7 indicates the
 * parity. So, for example, to request Event Num 15 on an odd counter
 * (by referring to the user manual), 128 must be added to 15 as the
 * event config, i.e. 143 (0x8f) is used.
 */
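/* e.g. "perf stat -e r8f" requests event 15 on an odd counter. */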
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.event_id = base_id;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
	}

	return &raw_event;
}
static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;

	/* Only 1-63 are defined */
	if ((raw_id < 0x01) || (raw_id > 0x3f))
		return ERR_PTR(-EOPNOTSUPP);

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = raw_id;

	return &raw_event;
}
static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
	if ((cp0_perfcount_irq >= 0) &&
	    (cp0_compare_irq != cp0_perfcount_irq))
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	case CPU_XLP:
		mipspmu.name = "xlp";
		mipspmu.general_event_map = &xlp_event_map;
		mipspmu.cache_event_map = &xlp_cache_map;
		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);