/**
 * arch/s390/oprofile/hwsampler.c
 *
 * Copyright IBM Corp. 2010
 * Author: Heinz Graalfs <graalfs@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/semaphore.h>
#include <linux/oom.h>
#include <linux/oprofile.h>

#include <asm/lowcore.h>
#include <asm/irq.h>

#include "hwsampler.h"

#define MAX_NUM_SDB 511
#define MIN_NUM_SDB 1

#define ALERT_REQ_MASK   0x4000000000000000ul
#define BUFFER_FULL_MASK 0x8000000000000000ul

#define EI_IEA  (1 << 31)  /* invalid entry address              */
#define EI_ISE  (1 << 30)  /* incorrect SDBT entry               */
#define EI_PRA  (1 << 29)  /* program request alert              */
#define EI_SACA (1 << 23)  /* sampler authorization change alert */
#define EI_LSDA (1 << 22)  /* loss of sample data alert          */
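
/*
 * The EI_* bits decode the 32-bit interrupt parameter (param32) that
 * arrives with the CPUMF external interrupt (code 0x1407, registered in
 * hwsampler_setup()).  hws_ext_handler() accumulates them in the
 * per-CPU ext_params word, and worker_check_error() later folds them
 * into the per-CPU diagnostic counters reported by stop_sampling().
 */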

DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);

struct hws_execute_parms {
    void *buffer;
    signed int rc;
};

DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);

static DEFINE_MUTEX(hws_sem);
static DEFINE_MUTEX(hws_sem_oom);

static unsigned char hws_flush_all;
static unsigned int hws_oom;
static struct workqueue_struct *hws_wq;

static unsigned int hws_state;
enum {
    HWS_INIT = 1,
    HWS_DEALLOCATED,
    HWS_STOPPED,
    HWS_STARTED,
    HWS_STOPPING
};
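
/*
 * hws_state transitions, as driven by the exported entry points below:
 *
 *    0 --hwsampler_setup()------> HWS_INIT, then HWS_DEALLOCATED
 *      --hwsampler_allocate()---> HWS_STOPPED
 *      --hwsampler_start_all()--> HWS_STARTED
 *      --hwsampler_stop_all()---> HWS_STOPPING, then HWS_STOPPED
 *      --hwsampler_deallocate()-> HWS_DEALLOCATED
 *      --hwsampler_shutdown()---> HWS_INIT
 */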

/* set to 1 if called by kernel during memory allocation */
static unsigned char oom_killer_was_active;
/* size of SDBT and SDB as of allocate API */
static unsigned long num_sdbt = 100;
static unsigned long num_sdb = 511;
/* sampling interval (machine cycles) */
static unsigned long interval;

static unsigned long min_sampler_rate;
static unsigned long max_sampler_rate;
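
/*
 * Issue the SET SAMPLING CONTROLS (SSCTL) instruction, opcode 0xB287,
 * on the request block at *buffer.  The EX_TABLE entries make a program
 * check on the instruction resume at label 2 with cc still set to 1,
 * so a faulting SSCTL is reported as -EINVAL instead of an oops.
 */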
static int ssctl(void *buffer)
{
    int cc;

    /* set in order to detect a program check */
    cc = 1;

    asm volatile(
        "0: .insn s,0xB2870000,0(%1)\n"
        "1: ipm %0\n"
        "   srl %0,28\n"
        "2:\n"
        EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
        : "+d" (cc), "+a" (buffer)
        : "m" (*((struct hws_ssctl_request_block *)buffer))
        : "cc", "memory");

    return cc ? -EINVAL : 0;
}
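
/*
 * Issue the QUERY SAMPLING INFORMATION (QSI) instruction, opcode
 * 0xB286, which fills the hws_qsi_info_block at *buffer with the
 * per-CPU authorization (as), enable (es) and activation (cs) state
 * plus the supported min/max sampling intervals evaluated in
 * hwsampler_setup().
 */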
static int qsi(void *buffer)
{
    int cc;
    cc = 1;

    asm volatile(
        "0: .insn s,0xB2860000,0(%1)\n"
        "1: lhi %0,0\n"
        "2:\n"
        EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
        : "=d" (cc), "+a" (buffer)
        : "m" (*((struct hws_qsi_info_block *)buffer))
        : "cc", "memory");

    return cc ? -EINVAL : 0;
}

static void execute_qsi(void *parms)
{
    struct hws_execute_parms *ep = parms;

    ep->rc = qsi(ep->buffer);
}

static void execute_ssctl(void *parms)
{
    struct hws_execute_parms *ep = parms;

    ep->rc = ssctl(ep->buffer);
}
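
/*
 * SSCTL and QSI act on the CPU that executes them, so the smp_ctl_*
 * helpers below run the execute_* wrappers on the target CPU via
 * smp_call_function_single() (wait == 1) and fetch the result from the
 * hws_execute_parms block afterwards.
 */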
static int smp_ctl_ssctl_stop(int cpu)
{
    int rc;
    struct hws_execute_parms ep;
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);

    cb->ssctl.es = 0;
    cb->ssctl.cs = 0;

    ep.buffer = &cb->ssctl;
    smp_call_function_single(cpu, execute_ssctl, &ep, 1);
    rc = ep.rc;
    if (rc) {
        printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
        dump_stack();
    }

    ep.buffer = &cb->qsi;
    smp_call_function_single(cpu, execute_qsi, &ep, 1);

    if (cb->qsi.es || cb->qsi.cs) {
        printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");
        dump_stack();
    }

    return rc;
}

static int smp_ctl_ssctl_deactivate(int cpu)
{
    int rc;
    struct hws_execute_parms ep;
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);

    cb->ssctl.es = 1;
    cb->ssctl.cs = 0;

    ep.buffer = &cb->ssctl;
    smp_call_function_single(cpu, execute_ssctl, &ep, 1);
    rc = ep.rc;
    if (rc)
        printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);

    ep.buffer = &cb->qsi;
    smp_call_function_single(cpu, execute_qsi, &ep, 1);

    if (cb->qsi.cs)
        printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");

    return rc;
}

static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
{
    int rc;
    struct hws_execute_parms ep;
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);

    cb->ssctl.h = 1;
    cb->ssctl.tear = cb->first_sdbt;
    cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
    cb->ssctl.interval = interval;
    cb->ssctl.es = 1;
    cb->ssctl.cs = 1;

    ep.buffer = &cb->ssctl;
    smp_call_function_single(cpu, execute_ssctl, &ep, 1);
    rc = ep.rc;
    if (rc)
        printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);

    ep.buffer = &cb->qsi;
    smp_call_function_single(cpu, execute_qsi, &ep, 1);
    if (ep.rc)
        printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);

    return rc;
}

static int smp_ctl_qsi(int cpu)
{
    struct hws_execute_parms ep;
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);

    ep.buffer = &cb->qsi;
    smp_call_function_single(cpu, execute_qsi, &ep, 1);

    return ep.rc;
}
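
/*
 * Each SDB is one 4K page whose last bytes hold a struct
 * hws_trailer_entry; trailer_entry_ptr() maps an SDB origin to that
 * trailer.  allocate_sdbt() seeds every trailer with ALERT_REQ_MASK so
 * that an alert interrupt is raised when the block fills, and a full
 * block is flagged with BUFFER_FULL_MASK in the trailer (checked in
 * worker_on_interrupt()).
 */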
static inline unsigned long *trailer_entry_ptr(unsigned long v)
{
    void *ret;

    ret = (void *)v;
    ret += PAGE_SIZE;
    ret -= sizeof(struct hws_trailer_entry);

    return (unsigned long *) ret;
}

/* prototypes for external interrupt handler and worker */
static void hws_ext_handler(unsigned int ext_int_code,
    unsigned int param32, unsigned long param64);

static void worker(struct work_struct *work);

static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
    unsigned long *dear);

static void init_all_cpu_buffers(void)
{
    int cpu;
    struct hws_cpu_buffer *cb;

    for_each_online_cpu(cpu) {
        cb = &per_cpu(sampler_cpu_buffer, cpu);
        memset(cb, 0, sizeof(struct hws_cpu_buffer));
    }
}
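
/*
 * An SDBT entry with the low bit set is a link entry pointing to the
 * next SDBT page rather than to an SDB; allocate_sdbt() uses such
 * entries to chain all SDBTs of a CPU into a ring that closes at
 * first_sdbt.
 */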
static int is_link_entry(unsigned long *s)
{
    return *s & 0x1ul ? 1 : 0;
}

static unsigned long *get_next_sdbt(unsigned long *s)
{
    return (unsigned long *) (*s & ~0x1ul);
}

static int prepare_cpu_buffers(void)
{
    int cpu;
    int rc;
    struct hws_cpu_buffer *cb;

    rc = 0;
    for_each_online_cpu(cpu) {
        cb = &per_cpu(sampler_cpu_buffer, cpu);
        atomic_set(&cb->ext_params, 0);
        cb->worker_entry = 0;
        cb->sample_overflow = 0;
        cb->req_alert = 0;
        cb->incorrect_sdbt_entry = 0;
        cb->invalid_entry_address = 0;
        cb->loss_of_sample_data = 0;
        cb->sample_auth_change_alert = 0;
        cb->finish = 0;
        cb->oom = 0;
        cb->stop_mode = 0;
    }

    return rc;
}

/*
 * allocate_sdbt() - allocate sampler memory
 * @cpu: the cpu for which sampler memory is allocated
 *
 * A 4K page is allocated for each requested SDBT.
 * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
 * Set the ALERT_REQ mask in each SDB's trailer.
 * Returns zero if successful, <0 otherwise.
 */
static int allocate_sdbt(int cpu)
{
    int j, k, rc;
    unsigned long *sdbt;
    unsigned long sdb;
    unsigned long *tail;
    unsigned long *trailer;
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);

    if (cb->first_sdbt)
        return -EINVAL;

    sdbt = NULL;
    tail = sdbt;

    for (j = 0; j < num_sdbt; j++) {
        sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);

        mutex_lock(&hws_sem_oom);
        /* OOM killer might have been activated */
        barrier();
        if (oom_killer_was_active || !sdbt) {
            if (sdbt)
                free_page((unsigned long)sdbt);

            goto allocate_sdbt_error;
        }
        if (cb->first_sdbt == 0)
            cb->first_sdbt = (unsigned long)sdbt;

        /* link current page to tail of chain */
        if (tail)
            *tail = (unsigned long)(void *)sdbt + 1;

        mutex_unlock(&hws_sem_oom);

        for (k = 0; k < num_sdb; k++) {
            /* get and set SDB page */
            sdb = get_zeroed_page(GFP_KERNEL);

            mutex_lock(&hws_sem_oom);
            /* OOM killer might have been activated */
            barrier();
            if (oom_killer_was_active || !sdb) {
                if (sdb)
                    free_page(sdb);

                goto allocate_sdbt_error;
            }
            *sdbt = sdb;
            trailer = trailer_entry_ptr(*sdbt);
            *trailer = ALERT_REQ_MASK;
            sdbt++;
            mutex_unlock(&hws_sem_oom);
        }
        tail = sdbt;
    }

    mutex_lock(&hws_sem_oom);
    if (oom_killer_was_active)
        goto allocate_sdbt_error;

    rc = 0;
    if (tail)
        *tail = (unsigned long)
            ((void *)cb->first_sdbt) + 1;

allocate_sdbt_exit:
    mutex_unlock(&hws_sem_oom);
    return rc;

allocate_sdbt_error:
    rc = -ENOMEM;
    goto allocate_sdbt_exit;
}
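
/*
 * Resulting per-CPU layout, for example with num_sdbt == 2:
 *
 *    SDBT#0: SDB, SDB, ..., SDB, (SDBT#1 | 1)
 *    SDBT#1: SDB, SDB, ..., SDB, (SDBT#0 | 1)
 *
 * where the low bit marks the link entry closing the ring.
 */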

/*
 * deallocate_sdbt() - deallocate all sampler memory
 *
 * For each online CPU all SDBT trees are deallocated.
 * Returns the number of freed pages.
 */
static int deallocate_sdbt(void)
{
    int cpu;
    int counter;

    counter = 0;

    for_each_online_cpu(cpu) {
        unsigned long start;
        unsigned long sdbt;
        unsigned long *curr;
        struct hws_cpu_buffer *cb;

        cb = &per_cpu(sampler_cpu_buffer, cpu);

        if (!cb->first_sdbt)
            continue;

        sdbt = cb->first_sdbt;
        curr = (unsigned long *) sdbt;
        start = sdbt;

        /* we'll free the SDBT after all SDBs are processed... */
        while (1) {
            if (!*curr || !sdbt)
                break;

            /* watch for link entry reset if found */
            if (is_link_entry(curr)) {
                curr = get_next_sdbt(curr);
                if (sdbt)
                    free_page(sdbt);

                /* we are done if we reach the start */
                if ((unsigned long) curr == start)
                    break;
                else
                    sdbt = (unsigned long) curr;
            } else {
                /* process SDB pointer */
                if (*curr) {
                    free_page(*curr);
                    curr++;
                }
            }
            counter++;
        }
        cb->first_sdbt = 0;
    }
    return counter;
}

static int start_sampling(int cpu)
{
    int rc;
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);
    rc = smp_ctl_ssctl_enable_activate(cpu, interval);
    if (rc) {
        printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
        goto start_exit;
    }

    rc = -EINVAL;
    if (!cb->qsi.es) {
        printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
        goto start_exit;
    }

    if (!cb->qsi.cs) {
        printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
        goto start_exit;
    }

    printk(KERN_INFO
        "hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
        cpu, interval);

    rc = 0;

start_exit:
    return rc;
}

static int stop_sampling(int cpu)
{
    unsigned long v;
    int rc;
    struct hws_cpu_buffer *cb;

    rc = smp_ctl_qsi(cpu);
    WARN_ON(rc);

    cb = &per_cpu(sampler_cpu_buffer, cpu);
    if (!rc && !cb->qsi.es)
        printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);

    rc = smp_ctl_ssctl_stop(cpu);
    if (rc) {
        printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
            cpu, rc);
        goto stop_exit;
    }

    printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);

stop_exit:
    v = cb->req_alert;
    if (v)
        printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
            " count=%lu.\n", cpu, v);

    v = cb->loss_of_sample_data;
    if (v)
        printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
            " count=%lu.\n", cpu, v);

    v = cb->invalid_entry_address;
    if (v)
        printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
            " count=%lu.\n", cpu, v);

    v = cb->incorrect_sdbt_entry;
    if (v)
        printk(KERN_ERR
            "hwsampler: CPU %d CPUMF Incorrect SDBT address,"
            " count=%lu.\n", cpu, v);

    v = cb->sample_auth_change_alert;
    if (v)
        printk(KERN_ERR
            "hwsampler: CPU %d CPUMF Sample authorization change,"
            " count=%lu.\n", cpu, v);

    return rc;
}
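
/*
 * test_facility(68) checks the facility-list bit for the
 * CPU-measurement sampling facility, which provides the QSI/SSCTL
 * operations used above (bit assignment per the z/Architecture
 * facility list, not stated elsewhere in this file).
 */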
static int check_hardware_prerequisites(void)
{
    if (!test_facility(68))
        return -EOPNOTSUPP;
    return 0;
}

/*
 * hws_oom_callback() - the OOM callback function
 *
 * In case the callback is invoked during memory allocation for the
 * hw sampler, all obtained memory is deallocated and a flag is set
 * so main sampler memory allocation can exit with a failure code.
 * In case the callback is invoked during sampling the hw sampler
 * is deactivated for all CPUs.
 */
static int hws_oom_callback(struct notifier_block *nfb,
    unsigned long dummy, void *parm)
{
    unsigned long *freed;
    int cpu;
    struct hws_cpu_buffer *cb;

    freed = parm;

    mutex_lock(&hws_sem_oom);

    if (hws_state == HWS_DEALLOCATED) {
        /* during memory allocation */
        if (oom_killer_was_active == 0) {
            oom_killer_was_active = 1;
            *freed += deallocate_sdbt();
        }
    } else {
        int i;
        cpu = get_cpu();
        cb = &per_cpu(sampler_cpu_buffer, cpu);

        if (!cb->oom) {
            for_each_online_cpu(i) {
                smp_ctl_ssctl_deactivate(i);
                cb->oom = 1;
            }
            cb->finish = 1;

            printk(KERN_INFO
                "hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
                cpu);
        }
    }

    mutex_unlock(&hws_sem_oom);

    return NOTIFY_OK;
}

static struct notifier_block hws_oom_notifier = {
    .notifier_call = hws_oom_callback
};

static int hws_cpu_callback(struct notifier_block *nfb,
    unsigned long action, void *hcpu)
{
    /* We do not have sampler space available for all possible CPUs.
       All CPUs should be online when hw sampling is activated. */
    return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
}

static struct notifier_block hws_cpu_notifier = {
    .notifier_call = hws_cpu_callback
};

/**
 * hwsampler_deactivate() - set hardware sampling temporarily inactive
 * @cpu: specifies the CPU to be set inactive.
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_deactivate(unsigned int cpu)
{
    /*
     * Deactivate hw sampling temporarily and flush the buffer
     * by pushing all the pending samples to oprofile buffer.
     *
     * This function can be called under one of the following conditions:
     *     Memory unmap, task is exiting.
     */
    int rc;
    struct hws_cpu_buffer *cb;

    rc = 0;
    mutex_lock(&hws_sem);

    cb = &per_cpu(sampler_cpu_buffer, cpu);
    if (hws_state == HWS_STARTED) {
        rc = smp_ctl_qsi(cpu);
        WARN_ON(rc);
        if (cb->qsi.cs) {
            rc = smp_ctl_ssctl_deactivate(cpu);
            if (rc) {
                printk(KERN_INFO
                    "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
                cb->finish = 1;
                hws_state = HWS_STOPPING;
            } else {
                hws_flush_all = 1;
                /* Add work to queue to read pending samples. */
                queue_work_on(cpu, hws_wq, &cb->worker);
            }
        }
    }
    mutex_unlock(&hws_sem);

    if (hws_wq)
        flush_workqueue(hws_wq);

    return rc;
}

/**
 * hwsampler_activate() - activate/resume hardware sampling which was deactivated
 * @cpu: specifies the CPU to be set active.
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_activate(unsigned int cpu)
{
    /*
     * Re-activate hw sampling. This should be called in pair with
     * hwsampler_deactivate().
     */
    int rc;
    struct hws_cpu_buffer *cb;

    rc = 0;
    mutex_lock(&hws_sem);

    cb = &per_cpu(sampler_cpu_buffer, cpu);
    if (hws_state == HWS_STARTED) {
        rc = smp_ctl_qsi(cpu);
        WARN_ON(rc);
        if (!cb->qsi.cs) {
            hws_flush_all = 0;
            rc = smp_ctl_ssctl_enable_activate(cpu, interval);
            if (rc) {
                printk(KERN_ERR
                    "CPU %d, CPUMF activate sampling failed.\n",
                    cpu);
            }
        }
    }

    mutex_unlock(&hws_sem);

    return rc;
}
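
/*
 * Measurement-alert external interrupt handler: it runs in interrupt
 * context, so it merely accumulates the alert bits from param32 in the
 * per-CPU ext_params word and defers all buffer processing to the
 * worker on the hwsampler workqueue.
 */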
static void hws_ext_handler(unsigned int ext_int_code,
    unsigned int param32, unsigned long param64)
{
    struct hws_cpu_buffer *cb;

    kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
    cb = &__get_cpu_var(sampler_cpu_buffer);
    atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
    if (hws_wq)
        queue_work(hws_wq, &cb->worker);
}

static int check_qsi_on_setup(void)
{
    int rc;
    unsigned int cpu;
    struct hws_cpu_buffer *cb;

    for_each_online_cpu(cpu) {
        cb = &per_cpu(sampler_cpu_buffer, cpu);
        rc = smp_ctl_qsi(cpu);
        WARN_ON(rc);
        if (rc)
            return -EOPNOTSUPP;

        if (!cb->qsi.as) {
            printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
            return -EINVAL;
        }

        if (cb->qsi.es) {
            printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
            rc = smp_ctl_ssctl_stop(cpu);
            if (rc)
                return -EINVAL;

            printk(KERN_INFO
                "CPU %d, CPUMF Sampling stopped now.\n", cpu);
        }
    }
    return 0;
}

static int check_qsi_on_start(void)
{
    unsigned int cpu;
    int rc;
    struct hws_cpu_buffer *cb;

    for_each_online_cpu(cpu) {
        cb = &per_cpu(sampler_cpu_buffer, cpu);
        rc = smp_ctl_qsi(cpu);
        WARN_ON(rc);

        if (!cb->qsi.as)
            return -EINVAL;

        if (cb->qsi.es)
            return -EINVAL;

        if (cb->qsi.cs)
            return -EINVAL;
    }
    return 0;
}
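
/*
 * worker() at the end of this group drives the following helpers:
 * worker_on_start() latches the first SDBT on the first invocation,
 * worker_check_error() folds the EI_* interrupt bits into the error
 * counters, worker_on_interrupt() drains full SDBs into the OProfile
 * buffer, and worker_on_finish() stops sampling once cb->finish is set
 * (e.g. by the OOM callback).
 */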
static void worker_on_start(unsigned int cpu)
{
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);
    cb->worker_entry = cb->first_sdbt;
}

static int worker_check_error(unsigned int cpu, int ext_params)
{
    int rc;
    unsigned long *sdbt;
    struct hws_cpu_buffer *cb;

    rc = 0;
    cb = &per_cpu(sampler_cpu_buffer, cpu);
    sdbt = (unsigned long *) cb->worker_entry;

    if (!sdbt || !*sdbt)
        return -EINVAL;

    if (ext_params & EI_PRA)
        cb->req_alert++;

    if (ext_params & EI_LSDA)
        cb->loss_of_sample_data++;

    if (ext_params & EI_IEA) {
        cb->invalid_entry_address++;
        rc = -EINVAL;
    }

    if (ext_params & EI_ISE) {
        cb->incorrect_sdbt_entry++;
        rc = -EINVAL;
    }

    if (ext_params & EI_SACA) {
        cb->sample_auth_change_alert++;
        rc = -EINVAL;
    }

    return rc;
}

static void worker_on_finish(unsigned int cpu)
{
    int rc, i;
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);

    if (cb->finish) {
        rc = smp_ctl_qsi(cpu);
        WARN_ON(rc);
        if (cb->qsi.es) {
            printk(KERN_INFO
                "hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
                cpu);
            rc = smp_ctl_ssctl_stop(cpu);
            if (rc)
                printk(KERN_INFO
                    "hwsampler: CPU %d, CPUMF Deactivation failed.\n",
                    cpu);

            for_each_online_cpu(i) {
                if (i == cpu)
                    continue;
                if (!cb->finish) {
                    cb->finish = 1;
                    queue_work_on(i, hws_wq,
                        &cb->worker);
                }
            }
        }
    }
}

static void worker_on_interrupt(unsigned int cpu)
{
    unsigned long *sdbt;
    unsigned char done;
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);

    sdbt = (unsigned long *) cb->worker_entry;

    done = 0;
    /* do not proceed if stop was entered,
     * forget the buffers not yet processed */
    while (!done && !cb->stop_mode) {
        unsigned long *trailer;
        struct hws_trailer_entry *te;
        unsigned long *dear = 0;

        trailer = trailer_entry_ptr(*sdbt);
        /* leave loop if no more work to do */
        if (!(*trailer & BUFFER_FULL_MASK)) {
            done = 1;
            if (!hws_flush_all)
                continue;
        }

        te = (struct hws_trailer_entry *)trailer;
        cb->sample_overflow += te->overflow;

        add_samples_to_oprofile(cpu, sdbt, dear);

        /* reset trailer */
        xchg((unsigned char *) te, 0x40);

        /* advance to next sdb slot in current sdbt */
        sdbt++;
        /* in case link bit is set use address w/o link bit */
        if (is_link_entry(sdbt))
            sdbt = get_next_sdbt(sdbt);

        cb->worker_entry = (unsigned long)sdbt;
    }
}
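
/*
 * An SDB holds consecutive struct hws_data_entry records up to the
 * trailer.  A record with def == 1 is a written basic-mode sample: the
 * P bit separates user from kernel samples, prim_asn is used as the
 * pid of a user sample, and ia is the sampled instruction address
 * handed to oprofile_add_ext_hw_sample().
 */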
static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
    unsigned long *dear)
{
    struct hws_data_entry *sample_data_ptr;
    unsigned long *trailer;

    trailer = trailer_entry_ptr(*sdbt);
    if (dear) {
        if (dear > trailer)
            return;
        trailer = dear;
    }

    sample_data_ptr = (struct hws_data_entry *)(*sdbt);

    while ((unsigned long *)sample_data_ptr < trailer) {
        struct pt_regs *regs = NULL;
        struct task_struct *tsk = NULL;

        /*
         * Check sampling mode, 1 indicates basic (=customer) sampling
         * mode.
         */
        if (sample_data_ptr->def != 1) {
            /* sample slot is not yet written */
            break;
        } else {
            /* make sure we don't use it twice,
             * the next time the sampler will set it again */
            sample_data_ptr->def = 0;
        }

        /* Get pt_regs. */
        if (sample_data_ptr->P == 1) {
            /* userspace sample */
            unsigned int pid = sample_data_ptr->prim_asn;
            rcu_read_lock();
            tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
            if (tsk)
                regs = task_pt_regs(tsk);
            rcu_read_unlock();
        } else {
            /* kernelspace sample */
            regs = task_pt_regs(current);
        }

        mutex_lock(&hws_sem);
        oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
            !sample_data_ptr->P, tsk);
        mutex_unlock(&hws_sem);

        sample_data_ptr++;
    }
}

static void worker(struct work_struct *work)
{
    unsigned int cpu;
    int ext_params;
    struct hws_cpu_buffer *cb;

    cb = container_of(work, struct hws_cpu_buffer, worker);
    cpu = smp_processor_id();
    ext_params = atomic_xchg(&cb->ext_params, 0);

    if (!cb->worker_entry)
        worker_on_start(cpu);

    if (worker_check_error(cpu, ext_params))
        return;

    if (!cb->finish)
        worker_on_interrupt(cpu);

    if (cb->finish)
        worker_on_finish(cpu);
}
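
/*
 * Exported API.  Per the hws_state machine, the expected call order is
 * hwsampler_setup(), hwsampler_allocate(), then hwsampler_start_all()
 * and hwsampler_stop_all() (with the per-CPU hwsampler_deactivate()/
 * hwsampler_activate() pair above for temporary pauses), and finally
 * hwsampler_deallocate() and hwsampler_shutdown().
 */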

/**
 * hwsampler_allocate() - allocate memory for the hardware sampler
 * @sdbt: number of SDBTs per online CPU (must be > 0)
 * @sdb: number of SDBs per SDBT (minimum 1, maximum 511)
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_allocate(unsigned long sdbt, unsigned long sdb)
{
    int cpu, rc;

    mutex_lock(&hws_sem);

    rc = -EINVAL;
    if (hws_state != HWS_DEALLOCATED)
        goto allocate_exit;

    if (sdbt < 1)
        goto allocate_exit;

    if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB)
        goto allocate_exit;

    num_sdbt = sdbt;
    num_sdb = sdb;

    oom_killer_was_active = 0;
    register_oom_notifier(&hws_oom_notifier);

    for_each_online_cpu(cpu) {
        if (allocate_sdbt(cpu)) {
            unregister_oom_notifier(&hws_oom_notifier);
            goto allocate_error;
        }
    }
    unregister_oom_notifier(&hws_oom_notifier);
    if (oom_killer_was_active)
        goto allocate_error;

    hws_state = HWS_STOPPED;
    rc = 0;

allocate_exit:
    mutex_unlock(&hws_sem);
    return rc;

allocate_error:
    rc = -ENOMEM;
    printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n");
    goto allocate_exit;
}

/**
 * hwsampler_deallocate() - deallocate hardware sampler memory
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_deallocate(void)
{
    int rc;

    mutex_lock(&hws_sem);

    rc = -EINVAL;
    if (hws_state != HWS_STOPPED)
        goto deallocate_exit;

    ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
    deallocate_sdbt();

    hws_state = HWS_DEALLOCATED;
    rc = 0;

deallocate_exit:
    mutex_unlock(&hws_sem);

    return rc;
}

unsigned long hwsampler_query_min_interval(void)
{
    return min_sampler_rate;
}

unsigned long hwsampler_query_max_interval(void)
{
    return max_sampler_rate;
}

unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
{
    struct hws_cpu_buffer *cb;

    cb = &per_cpu(sampler_cpu_buffer, cpu);

    return cb->sample_overflow;
}

int hwsampler_setup(void)
{
    int rc;
    int cpu;
    struct hws_cpu_buffer *cb;

    mutex_lock(&hws_sem);

    rc = -EINVAL;
    if (hws_state)
        goto setup_exit;

    hws_state = HWS_INIT;

    init_all_cpu_buffers();

    rc = check_hardware_prerequisites();
    if (rc)
        goto setup_exit;

    rc = check_qsi_on_setup();
    if (rc)
        goto setup_exit;

    rc = -EINVAL;
    hws_wq = create_workqueue("hwsampler");
    if (!hws_wq)
        goto setup_exit;

    register_cpu_notifier(&hws_cpu_notifier);

    for_each_online_cpu(cpu) {
        cb = &per_cpu(sampler_cpu_buffer, cpu);
        INIT_WORK(&cb->worker, worker);
        rc = smp_ctl_qsi(cpu);
        WARN_ON(rc);
        if (min_sampler_rate != cb->qsi.min_sampl_rate) {
            if (min_sampler_rate) {
                printk(KERN_WARNING
                    "hwsampler: different min sampler rate values.\n");
                if (min_sampler_rate < cb->qsi.min_sampl_rate)
                    min_sampler_rate =
                        cb->qsi.min_sampl_rate;
            } else
                min_sampler_rate = cb->qsi.min_sampl_rate;
        }
        if (max_sampler_rate != cb->qsi.max_sampl_rate) {
            if (max_sampler_rate) {
                printk(KERN_WARNING
                    "hwsampler: different max sampler rate values.\n");
                if (max_sampler_rate > cb->qsi.max_sampl_rate)
                    max_sampler_rate =
                        cb->qsi.max_sampl_rate;
            } else
                max_sampler_rate = cb->qsi.max_sampl_rate;
        }
    }
    register_external_interrupt(0x1407, hws_ext_handler);

    hws_state = HWS_DEALLOCATED;
    rc = 0;

setup_exit:
    mutex_unlock(&hws_sem);
    return rc;
}

int hwsampler_shutdown(void)
{
    int rc;

    mutex_lock(&hws_sem);

    rc = -EINVAL;
    if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) {
        mutex_unlock(&hws_sem);

        if (hws_wq)
            flush_workqueue(hws_wq);

        mutex_lock(&hws_sem);

        if (hws_state == HWS_STOPPED) {
            ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
            deallocate_sdbt();
        }
        if (hws_wq) {
            destroy_workqueue(hws_wq);
            hws_wq = NULL;
        }

        unregister_external_interrupt(0x1407, hws_ext_handler);
        hws_state = HWS_INIT;
        rc = 0;
    }
    mutex_unlock(&hws_sem);

    unregister_cpu_notifier(&hws_cpu_notifier);

    return rc;
}

/**
 * hwsampler_start_all() - start hardware sampling on all online CPUs
 * @rate: the sampling interval to use, in machine cycles (must lie
 *        between the QSI-reported minimum and maximum rates)
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_start_all(unsigned long rate)
{
    int rc, cpu;

    mutex_lock(&hws_sem);

    hws_oom = 0;

    rc = -EINVAL;
    if (hws_state != HWS_STOPPED)
        goto start_all_exit;

    interval = rate;

    /* fail if rate is not valid */
    if (interval < min_sampler_rate || interval > max_sampler_rate)
        goto start_all_exit;

    rc = check_qsi_on_start();
    if (rc)
        goto start_all_exit;

    rc = prepare_cpu_buffers();
    if (rc)
        goto start_all_exit;

    for_each_online_cpu(cpu) {
        rc = start_sampling(cpu);
        if (rc)
            break;
    }
    if (rc) {
        for_each_online_cpu(cpu) {
            stop_sampling(cpu);
        }
        goto start_all_exit;
    }
    hws_state = HWS_STARTED;
    rc = 0;

start_all_exit:
    mutex_unlock(&hws_sem);

    if (rc)
        return rc;

    register_oom_notifier(&hws_oom_notifier);
    hws_oom = 1;
    hws_flush_all = 0;
    /* now let them in, 1407 CPUMF external interrupts */
    ctl_set_bit(0, 5); /* set CR0 bit 58 */

    return 0;
}

/**
 * hwsampler_stop_all() - stop hardware sampling on all online CPUs
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_stop_all(void)
{
    int tmp_rc, rc, cpu;
    struct hws_cpu_buffer *cb;

    mutex_lock(&hws_sem);

    rc = 0;
    if (hws_state == HWS_INIT) {
        mutex_unlock(&hws_sem);
        return rc;
    }
    hws_state = HWS_STOPPING;
    mutex_unlock(&hws_sem);

    for_each_online_cpu(cpu) {
        cb = &per_cpu(sampler_cpu_buffer, cpu);
        cb->stop_mode = 1;
        tmp_rc = stop_sampling(cpu);
        if (tmp_rc)
            rc = tmp_rc;
    }

    if (hws_wq)
        flush_workqueue(hws_wq);

    mutex_lock(&hws_sem);
    if (hws_oom) {
        unregister_oom_notifier(&hws_oom_notifier);
        hws_oom = 0;
    }
    hws_state = HWS_STOPPED;
    mutex_unlock(&hws_sem);

    return rc;
}