/*
 * File: htirq.c
 * Purpose: Hypertransport Interrupt Capability
 *
 * Copyright (C) 2006 Linux Networx
 * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
 */
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/htirq.h>
/* Global ht irq lock.
 *
 * This is needed to serialize access to the data port in hypertransport
 * irq capability.
 *
 * With multiple simultaneous hypertransport irq devices it might pay
 * to make this more fine grained. But start with simple, stupid, and correct.
 */
static DEFINE_SPINLOCK(ht_irq_lock);

/*
 * Per-irq state attached to the irq with set_irq_data() by ht_create_irq()
 * and read back by the accessors below.
 */
struct ht_irq_cfg {
	struct pci_dev *dev;	/* device carrying the HT irq capability */
	unsigned pos;		/* config-space offset of that capability */
	unsigned idx;		/* register index of this irq's low message word */
};
  28. void write_ht_irq_low(unsigned int irq, u32 data)
  29. {
  30. struct ht_irq_cfg *cfg = get_irq_data(irq);
  31. unsigned long flags;
  32. spin_lock_irqsave(&ht_irq_lock, flags);
  33. pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
  34. pci_write_config_dword(cfg->dev, cfg->pos + 4, data);
  35. spin_unlock_irqrestore(&ht_irq_lock, flags);
  36. }
  37. void write_ht_irq_high(unsigned int irq, u32 data)
  38. {
  39. struct ht_irq_cfg *cfg = get_irq_data(irq);
  40. unsigned long flags;
  41. spin_lock_irqsave(&ht_irq_lock, flags);
  42. pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
  43. pci_write_config_dword(cfg->dev, cfg->pos + 4, data);
  44. spin_unlock_irqrestore(&ht_irq_lock, flags);
  45. }
  46. u32 read_ht_irq_low(unsigned int irq)
  47. {
  48. struct ht_irq_cfg *cfg = get_irq_data(irq);
  49. unsigned long flags;
  50. u32 data;
  51. spin_lock_irqsave(&ht_irq_lock, flags);
  52. pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
  53. pci_read_config_dword(cfg->dev, cfg->pos + 4, &data);
  54. spin_unlock_irqrestore(&ht_irq_lock, flags);
  55. return data;
  56. }
  57. u32 read_ht_irq_high(unsigned int irq)
  58. {
  59. struct ht_irq_cfg *cfg = get_irq_data(irq);
  60. unsigned long flags;
  61. u32 data;
  62. spin_lock_irqsave(&ht_irq_lock, flags);
  63. pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
  64. pci_read_config_dword(cfg->dev, cfg->pos + 4, &data);
  65. spin_unlock_irqrestore(&ht_irq_lock, flags);
  66. return data;
  67. }
  68. void mask_ht_irq(unsigned int irq)
  69. {
  70. struct ht_irq_cfg *cfg;
  71. unsigned long flags;
  72. u32 data;
  73. cfg = get_irq_data(irq);
  74. spin_lock_irqsave(&ht_irq_lock, flags);
  75. pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
  76. pci_read_config_dword(cfg->dev, cfg->pos + 4, &data);
  77. data |= 1;
  78. pci_write_config_dword(cfg->dev, cfg->pos + 4, data);
  79. spin_unlock_irqrestore(&ht_irq_lock, flags);
  80. }
  81. void unmask_ht_irq(unsigned int irq)
  82. {
  83. struct ht_irq_cfg *cfg;
  84. unsigned long flags;
  85. u32 data;
  86. cfg = get_irq_data(irq);
  87. spin_lock_irqsave(&ht_irq_lock, flags);
  88. pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
  89. pci_read_config_dword(cfg->dev, cfg->pos + 4, &data);
  90. data &= ~1;
  91. pci_write_config_dword(cfg->dev, cfg->pos + 4, data);
  92. spin_unlock_irqrestore(&ht_irq_lock, flags);
  93. }
  94. /**
  95. * ht_create_irq - create an irq and attach it to a device.
  96. * @dev: The hypertransport device to find the irq capability on.
  97. * @idx: Which of the possible irqs to attach to.
  98. *
  99. * ht_create_irq is needs to be called for all hypertransport devices
  100. * that generate irqs.
  101. *
  102. * The irq number of the new irq or a negative error value is returned.
  103. */
  104. int ht_create_irq(struct pci_dev *dev, int idx)
  105. {
  106. struct ht_irq_cfg *cfg;
  107. unsigned long flags;
  108. u32 data;
  109. int max_irq;
  110. int pos;
  111. int irq;
  112. pos = pci_find_capability(dev, PCI_CAP_ID_HT);
  113. while (pos) {
  114. u8 subtype;
  115. pci_read_config_byte(dev, pos + 3, &subtype);
  116. if (subtype == HT_CAPTYPE_IRQ)
  117. break;
  118. pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_HT);
  119. }
  120. if (!pos)
  121. return -EINVAL;
  122. /* Verify the idx I want to use is in range */
  123. spin_lock_irqsave(&ht_irq_lock, flags);
  124. pci_write_config_byte(dev, pos + 2, 1);
  125. pci_read_config_dword(dev, pos + 4, &data);
  126. spin_unlock_irqrestore(&ht_irq_lock, flags);
  127. max_irq = (data >> 16) & 0xff;
  128. if ( idx > max_irq)
  129. return -EINVAL;
  130. cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
  131. if (!cfg)
  132. return -ENOMEM;
  133. cfg->dev = dev;
  134. cfg->pos = pos;
  135. cfg->idx = 0x10 + (idx * 2);
  136. irq = create_irq();
  137. if (irq < 0) {
  138. kfree(cfg);
  139. return -EBUSY;
  140. }
  141. set_irq_data(irq, cfg);
  142. if (arch_setup_ht_irq(irq, dev) < 0) {
  143. ht_destroy_irq(irq);
  144. return -EBUSY;
  145. }
  146. return irq;
  147. }
  148. /**
  149. * ht_destroy_irq - destroy an irq created with ht_create_irq
  150. *
  151. * This reverses ht_create_irq removing the specified irq from
  152. * existence. The irq should be free before this happens.
  153. */
  154. void ht_destroy_irq(unsigned int irq)
  155. {
  156. struct ht_irq_cfg *cfg;
  157. cfg = get_irq_data(irq);
  158. set_irq_chip(irq, NULL);
  159. set_irq_data(irq, NULL);
  160. destroy_irq(irq);
  161. kfree(cfg);
  162. }
/* Exported for bus/arch code that sets up HyperTransport interrupts. */
EXPORT_SYMBOL(ht_create_irq);
EXPORT_SYMBOL(ht_destroy_irq);