/*
 * access.c - PCI configuration space accessors and the userspace
 * config-access blocking mechanism.
 */
  1. #include <linux/pci.h>
  2. #include <linux/module.h>
  3. #include <linux/ioport.h>
  4. #include <linux/wait.h>
  5. #include "pci.h"
/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
static DEFINE_SPINLOCK(pci_lock);
/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */
/*
 * Alignment checks, expanded textually inside the generated accessor
 * bodies below; `pos' is the register-offset parameter of the generated
 * function. A byte access can never be misaligned; word/dword accesses
 * must be 2-/4-byte aligned respectively.
 */
#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)
/*
 * PCI_OP_READ() - generate pci_bus_read_config_{byte,word,dword}().
 *
 * Each generated function rejects a misaligned register offset with
 * PCIBIOS_BAD_REGISTER_NUMBER, then performs the read under pci_lock
 * (IRQ-safe) via the bus's low-level ->read() method and narrows the
 * u32 result to the caller's width. Returns the ->read() result.
 * On failure *value is whatever ->read() left in data (initialized 0).
 */
#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
int res; \
unsigned long flags; \
u32 data = 0; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
*value = (type)data; \
spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
/*
 * PCI_OP_WRITE() - generate pci_bus_write_config_{byte,word,dword}().
 *
 * Mirror of PCI_OP_READ(): alignment check, then the write is performed
 * under pci_lock (IRQ-safe) through the bus's low-level ->write()
 * method. Returns PCIBIOS_BAD_REGISTER_NUMBER on a misaligned offset,
 * otherwise the ->write() result.
 */
#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
int res; \
unsigned long flags; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
/* Instantiate the locked bus accessors for all three access widths
 * and export them for use by drivers. */
PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)
EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so. Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
/* Single shared wait queue; pci_wait_ucfg() sleepers are woken en masse
 * by pci_unblock_user_cfg_access() and each rechecks its own device. */
static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
/*
 * Sleep until dev->block_ucfg_access clears.
 *
 * Called (and returns) with pci_lock held; the lock is dropped around
 * schedule() and retaken before the flag is rechecked. Callers take
 * pci_lock with spin_lock_irq(), hence the _irq variants here.
 * set_current_state() runs before the unlock so a wake-up between the
 * unlock and schedule() is not lost. The unlocked __add/__remove
 * waitqueue variants are used because pci_lock already serializes the
 * queue manipulation.
 */
static noinline void pci_wait_ucfg(struct pci_dev *dev)
{
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue(&pci_ucfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&pci_lock);
schedule();
spin_lock_irq(&pci_lock);
} while (dev->block_ucfg_access);
__remove_wait_queue(&pci_ucfg_wait, &wait);
}
/*
 * PCI_USER_READ_CONFIG() - generate pci_user_read_config_{byte,word,dword}().
 *
 * Like the bus accessors, but for userspace-originated reads: if the
 * device is blocked (BIST, D-state transition) the caller sleeps in
 * pci_wait_ucfg() until it is unblocked. May sleep; must not be called
 * from atomic context. data starts as -1 so a failed ->read() yields
 * an all-ones *val, the conventional "no device" pattern.
 */
#define PCI_USER_READ_CONFIG(size,type) \
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
int ret = 0; \
u32 data = -1; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
spin_unlock_irq(&pci_lock); \
*val = (type)data; \
return ret; \
}
/*
 * PCI_USER_WRITE_CONFIG() - generate pci_user_write_config_{byte,word,dword}().
 *
 * Userspace-originated writes; sleeps in pci_wait_ucfg() while the
 * device is blocked, then forwards to the bus ->write() under pci_lock.
 * May sleep; must not be called from atomic context.
 */
#define PCI_USER_WRITE_CONFIG(size,type) \
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
int ret = -EIO; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
spin_unlock_irq(&pci_lock); \
return ret; \
}
/* Instantiate the blocking userspace accessors for all three widths
 * (not exported; used by the PCI core's user-facing paths). */
PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
  112. /**
  113. * pci_block_user_cfg_access - Block userspace PCI config reads/writes
  114. * @dev: pci device struct
  115. *
  116. * When user access is blocked, any reads or writes to config space will
  117. * sleep until access is unblocked again. We don't allow nesting of
  118. * block/unblock calls.
  119. */
  120. void pci_block_user_cfg_access(struct pci_dev *dev)
  121. {
  122. unsigned long flags;
  123. int was_blocked;
  124. spin_lock_irqsave(&pci_lock, flags);
  125. was_blocked = dev->block_ucfg_access;
  126. dev->block_ucfg_access = 1;
  127. spin_unlock_irqrestore(&pci_lock, flags);
  128. /* If we BUG() inside the pci_lock, we're guaranteed to hose
  129. * the machine */
  130. BUG_ON(was_blocked);
  131. }
  132. EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
  133. /**
  134. * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
  135. * @dev: pci device struct
  136. *
  137. * This function allows userspace PCI config accesses to resume.
  138. */
  139. void pci_unblock_user_cfg_access(struct pci_dev *dev)
  140. {
  141. unsigned long flags;
  142. spin_lock_irqsave(&pci_lock, flags);
  143. /* This indicates a problem in the caller, but we don't need
  144. * to kill them, unlike a double-block above. */
  145. WARN_ON(!dev->block_ucfg_access);
  146. dev->block_ucfg_access = 0;
  147. wake_up_all(&pci_ucfg_wait);
  148. spin_unlock_irqrestore(&pci_lock, flags);
  149. }
  150. EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);