pci_msi.c 3.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141
  1. /*
  2. * Copyright IBM Corp. 2012
  3. *
  4. * Author(s):
  5. * Jan Glauber <jang@linux.vnet.ibm.com>
  6. */
  7. #define COMPONENT "zPCI"
  8. #define pr_fmt(fmt) COMPONENT ": " fmt
  9. #include <linux/kernel.h>
  10. #include <linux/err.h>
  11. #include <linux/rculist.h>
  12. #include <linux/hash.h>
  13. #include <linux/pci.h>
  14. #include <linux/msi.h>
  15. #include <asm/hw_irq.h>
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;		/* bucket array, allocated in zpci_msihash_init() */
static unsigned int msihash_shift = 6;		/* 1 << 6 = 64 buckets used by msi_hashfn() */
#define msi_hashfn(nr)	hash_long(nr, msihash_shift)
/*
 * Protects updates to zdev->msi_map[] and (in the teardown path) the
 * msi_hash lists; lookups via __irq_get_msi_desc() rely on RCU instead.
 * NOTE(review): the setup path adds to msi_hash outside this lock —
 * see zpci_setup_msi_irq().
 */
static DEFINE_SPINLOCK(msi_map_lock);
  21. struct msi_desc *__irq_get_msi_desc(unsigned int irq)
  22. {
  23. struct hlist_node *entry;
  24. struct msi_map *map;
  25. hlist_for_each_entry_rcu(map, entry,
  26. &msi_hash[msi_hashfn(irq)], msi_chain)
  27. if (map->irq == irq)
  28. return map->msi;
  29. return NULL;
  30. }
  31. int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
  32. {
  33. if (msi->msi_attrib.is_msix) {
  34. int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
  35. PCI_MSIX_ENTRY_VECTOR_CTRL;
  36. msi->masked = readl(msi->mask_base + offset);
  37. writel(flag, msi->mask_base + offset);
  38. } else {
  39. if (msi->msi_attrib.maskbit) {
  40. int pos;
  41. u32 mask_bits;
  42. pos = (long) msi->mask_base;
  43. pci_read_config_dword(msi->dev, pos, &mask_bits);
  44. mask_bits &= ~(mask);
  45. mask_bits |= flag & mask;
  46. pci_write_config_dword(msi->dev, pos, mask_bits);
  47. } else {
  48. return 0;
  49. }
  50. }
  51. msi->msi_attrib.maskbit = !!flag;
  52. return 1;
  53. }
  54. int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
  55. unsigned int nr, int offset)
  56. {
  57. struct msi_map *map;
  58. struct msi_msg msg;
  59. int rc;
  60. map = kmalloc(sizeof(*map), GFP_KERNEL);
  61. if (map == NULL)
  62. return -ENOMEM;
  63. map->irq = nr;
  64. map->msi = msi;
  65. zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
  66. pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
  67. __func__, nr, msi_hashfn(nr));
  68. hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
  69. spin_lock(&msi_map_lock);
  70. rc = irq_set_msi_desc(nr, msi);
  71. if (rc) {
  72. spin_unlock(&msi_map_lock);
  73. hlist_del_rcu(&map->msi_chain);
  74. kfree(map);
  75. zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
  76. return rc;
  77. }
  78. spin_unlock(&msi_map_lock);
  79. msg.data = nr - offset;
  80. msg.address_lo = zdev->msi_addr & 0xffffffff;
  81. msg.address_hi = zdev->msi_addr >> 32;
  82. write_msi_msg(nr, &msg);
  83. return 0;
  84. }
  85. void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
  86. {
  87. int irq = msi->irq & ZPCI_MSI_MASK;
  88. struct msi_map *map;
  89. msi->msg.address_lo = 0;
  90. msi->msg.address_hi = 0;
  91. msi->msg.data = 0;
  92. msi->irq = 0;
  93. zpci_msi_set_mask_bits(msi, 1, 1);
  94. spin_lock(&msi_map_lock);
  95. map = zdev->msi_map[irq];
  96. hlist_del_rcu(&map->msi_chain);
  97. kfree(map);
  98. zdev->msi_map[irq] = NULL;
  99. spin_unlock(&msi_map_lock);
  100. }
  101. /*
  102. * The msi hash table has 256 entries which is good for 4..20
  103. * devices (a typical device allocates 10 + CPUs MSI's). Maybe make
  104. * the hash table size adjustable later.
  105. */
  106. int __init zpci_msihash_init(void)
  107. {
  108. unsigned int i;
  109. msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
  110. if (!msi_hash)
  111. return -ENOMEM;
  112. for (i = 0; i < (1U << msihash_shift); i++)
  113. INIT_HLIST_HEAD(&msi_hash[i]);
  114. return 0;
  115. }
  116. void __init zpci_msihash_exit(void)
  117. {
  118. kfree(msi_hash);
  119. }