msi_ia64.c

/*
 * MSI hooks using the standard x86-style APIC message format, for IA64.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/msidef.h>

static struct irq_chip ia64_msi_chip;
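
/*
 * On SMP, changing an MSI's affinity means rewriting the message
 * itself: the destination ID field in the low address word selects
 * the target CPU, and the vector field in the data word is refreshed
 * for the irq's current vector.
 */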
#ifdef CONFIG_SMP
static void ia64_set_msi_irq_affinity(unsigned int irq,
				       const cpumask_t *cpu_mask)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = first_cpu(*cpu_mask);

	if (!cpu_online(cpu))
		return;
	if (irq_prepare_move(irq, cpu))
		return;

	read_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
}
#endif /* CONFIG_SMP */
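
/*
 * Allocate an irq and vector for the msi_desc, compose a message that
 * targets one online CPU in the irq's vector domain, program the
 * device with write_msi_msg(), and install the edge-triggered flow
 * handler.
 */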
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg msg;
	unsigned long dest_phys_id;
	int irq, vector;
	cpumask_t mask;

	irq = create_irq();
	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);
	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest_phys_id = cpu_physical_id(first_cpu(mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}
void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
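
/*
 * The ack hook completes any vector migration prepared by the
 * set_affinity path, lets a pending irq move proceed, and then issues
 * the processor EOI.
 */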
static void ia64_ack_msi_irq(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ia64_eoi();
}

static int ia64_msi_retrigger_irq(unsigned int irq)
{
	unsigned int vector = irq_to_vector(irq);
	ia64_resend_irq(vector);

	return 1;
}
/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
	.name		= "PCI-MSI",
	.mask		= mask_msi_irq,
	.unmask		= unmask_msi_irq,
	.ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.set_affinity	= ia64_set_msi_irq_affinity,
#endif
	.retrigger	= ia64_msi_retrigger_irq,
};
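
/*
 * Arch entry points reached from the PCI MSI core (e.g. via
 * pci_enable_msi()).  A platform may register its own
 * platform_setup_msi_irq()/platform_teardown_msi_irq() hooks; when it
 * does not, the generic IA64 implementations above are used.
 */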
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	if (platform_setup_msi_irq)
		return platform_setup_msi_irq(pdev, desc);

	return ia64_setup_msi_irq(pdev, desc);
}

void arch_teardown_msi_irq(unsigned int irq)
{
	if (platform_teardown_msi_irq)
		return platform_teardown_msi_irq(irq);

	return ia64_teardown_msi_irq(irq);
}
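
/*
 * DMAR (Intel VT-d) support: the remapping hardware signals its
 * fault-reporting interrupt through an MSI-style register set, which
 * is programmed with dmar_msi_read()/dmar_msi_write() rather than the
 * PCI config-space helpers.
 */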
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return;
	if (irq_prepare_move(irq, cpu))
		return;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_desc[irq].affinity, mask);
}
#endif /* CONFIG_SMP */

struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ia64_msi_retrigger_irq,
};
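
/*
 * Compose the address/data pair for a DMAR interrupt: physical
 * destination mode, fixed delivery, edge trigger, with the vector
 * taken from the irq's irq_cfg entry.  The pdev argument is not used
 * here.
 */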
static int
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;
	cpumask_t mask;

	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest = cpu_physical_id(first_cpu(mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);

	return 0;
}
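
/*
 * Called when the DMAR fault interrupt is being set up: compose the
 * message for the already-allocated irq, program it into the
 * remapping unit, and attach dmar_msi_type with the edge flow
 * handler.
 */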
int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif /* CONFIG_DMAR */