/* NOTE(review): removed web-viewer extraction artifacts (file-size banner and
 * a fused run of line numbers) that were not part of the original source. */
  1. /*
  2. * arch/powerpc/kernel/pmc.c
  3. *
  4. * Copyright (C) 2004 David Gibson, IBM Corporation.
  5. * Includes code formerly from arch/ppc/kernel/perfmon.c:
  6. * Author: Andy Fleming
  7. * Copyright (c) 2004 Freescale Semiconductor, Inc
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; either version
  12. * 2 of the License, or (at your option) any later version.
  13. */
  14. #include <linux/config.h>
  15. #include <linux/errno.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/module.h>
  18. #include <asm/processor.h>
  19. #include <asm/pmc.h>
  20. #if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
  21. static void dummy_perf(struct pt_regs *regs)
  22. {
  23. unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
  24. pmgc0 &= ~PMGC0_PMIE;
  25. mtpmr(PMRN_PMGC0, pmgc0);
  26. }
  27. #elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
  28. #ifndef MMCR0_PMAO
  29. #define MMCR0_PMAO 0
  30. #endif
  31. /* Ensure exceptions are disabled */
  32. static void dummy_perf(struct pt_regs *regs)
  33. {
  34. unsigned int mmcr0 = mfspr(SPRN_MMCR0);
  35. mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
  36. mtspr(SPRN_MMCR0, mmcr0);
  37. }
  38. #else
  39. /* Ensure exceptions are disabled */
  40. static void dummy_perf(struct pt_regs *regs)
  41. {
  42. unsigned int mmcr0 = mfspr(SPRN_MMCR0);
  43. mmcr0 &= ~(MMCR0_PMXE);
  44. mtspr(SPRN_MMCR0, mmcr0);
  45. }
  46. #endif
/* Protects pmc_owner_caller and perf_irq. */
static DEFINE_SPINLOCK(pmc_owner_lock);
/* Return address of the current owner of the PMC hardware; NULL when free. */
static void *pmc_owner_caller; /* mostly for debugging */
/* Current performance monitor interrupt handler; dummy_perf when unclaimed. */
perf_irq_t perf_irq = dummy_perf;
  50. int reserve_pmc_hardware(perf_irq_t new_perf_irq)
  51. {
  52. int err = 0;
  53. spin_lock(&pmc_owner_lock);
  54. if (pmc_owner_caller) {
  55. printk(KERN_WARNING "reserve_pmc_hardware: "
  56. "PMC hardware busy (reserved by caller %p)\n",
  57. pmc_owner_caller);
  58. err = -EBUSY;
  59. goto out;
  60. }
  61. pmc_owner_caller = __builtin_return_address(0);
  62. perf_irq = new_perf_irq ? : dummy_perf;
  63. out:
  64. spin_unlock(&pmc_owner_lock);
  65. return err;
  66. }
  67. EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
  68. void release_pmc_hardware(void)
  69. {
  70. spin_lock(&pmc_owner_lock);
  71. WARN_ON(! pmc_owner_caller);
  72. pmc_owner_caller = NULL;
  73. perf_irq = dummy_perf;
  74. spin_unlock(&pmc_owner_lock);
  75. }
  76. EXPORT_SYMBOL_GPL(release_pmc_hardware);
#ifdef CONFIG_PPC64
/*
 * Enable the performance monitor counter facility on POWER4 by setting
 * a bit in HID0, using the write sequence the processor requires.
 */
void power4_enable_pmcs(void)
{
	unsigned long hid0;

	hid0 = mfspr(SPRN_HID0);
	/* Set HID0 bit 20 (IBM bit numbering, hence 63 - 20).
	 * NOTE(review): presumably the POWER4 PMC-enable bit — confirm
	 * against the POWER4 processor user's manual. */
	hid0 |= 1UL << (63 - 20);

	/* POWER4 requires the following sequence:
	 * sync, mtspr HID0, six dependent mfspr HID0 reads, isync.
	 * The "0" constraint ties the input to %0 so the mtspr writes
	 * the updated hid0; the mfspr reads keep the dependency chain. */
	asm volatile(
		"sync\n"
		"mtspr %1, %0\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
		"memory");
}
#endif /* CONFIG_PPC64 */