access.c

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
static DEFINE_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_bus->ops.
 */
#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
        int res; \
        unsigned long flags; \
        u32 data = 0; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        spin_lock_irqsave(&pci_lock, flags); \
        res = bus->ops->read(bus, devfn, pos, len, &data); \
        *value = (type)data; \
        spin_unlock_irqrestore(&pci_lock, flags); \
        return res; \
}

#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
        int res; \
        unsigned long flags; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        spin_lock_irqsave(&pci_lock, flags); \
        res = bus->ops->write(bus, devfn, pos, len, value); \
        spin_unlock_irqrestore(&pci_lock, flags); \
        return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
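
/*
 * Example (editorial sketch, not part of the original file): how code that
 * only has a bus/devfn pair, such as enumeration or quirk code, might read
 * a device's vendor ID through the accessors generated above. The function
 * name is hypothetical.
 */
static inline int example_read_vendor(struct pci_bus *bus, unsigned int devfn,
                                      u16 *vendor)
{
        /* PCI_VENDOR_ID (offset 0x00) is word-aligned, so this passes the
         * PCI_word_BAD check and simply returns the bridge's result code. */
        return pci_bus_read_config_word(bus, devfn, PCI_VENDOR_ID, vendor);
}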

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so. Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);

/*
 * Wait until dev->block_ucfg_access is cleared. Called with pci_lock held
 * (taken with spin_lock_irq); drops and re-takes the lock around each sleep
 * and returns with it held again.
 */
static noinline void pci_wait_ucfg(struct pci_dev *dev)
{
        DECLARE_WAITQUEUE(wait, current);

        __add_wait_queue(&pci_ucfg_wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&pci_lock);
                schedule();
                spin_lock_irq(&pci_lock);
        } while (dev->block_ucfg_access);
        __remove_wait_queue(&pci_ucfg_wait, &wait);
}

#define PCI_USER_READ_CONFIG(size,type) \
int pci_user_read_config_##size \
        (struct pci_dev *dev, int pos, type *val) \
{ \
        int ret = 0; \
        u32 data = -1; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
        ret = dev->bus->ops->read(dev->bus, dev->devfn, \
                                        pos, sizeof(type), &data); \
        spin_unlock_irq(&pci_lock); \
        *val = (type)data; \
        return ret; \
}

#define PCI_USER_WRITE_CONFIG(size,type) \
int pci_user_write_config_##size \
        (struct pci_dev *dev, int pos, type val) \
{ \
        int ret = -EIO; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
        ret = dev->bus->ops->write(dev->bus, dev->devfn, \
                                        pos, sizeof(type), val); \
        spin_unlock_irq(&pci_lock); \
        return ret; \
}

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
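
/*
 * Example (editorial sketch, not part of the original file): the pci_user_*
 * helpers are meant for user-triggered paths such as the sysfs and procfs
 * config files, because they honour block_ucfg_access and may sleep. A
 * caller might look roughly like this; the function name is hypothetical.
 */
static inline int example_user_read_bar0(struct pci_dev *dev, u32 *bar)
{
        /* Sleeps in pci_wait_ucfg() while the device is blocked, so this
         * must only be called from process context. */
        return pci_user_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar);
}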

/**
 * pci_block_user_cfg_access - Block userspace PCI config reads/writes
 * @dev: pci device struct
 *
 * When user access is blocked, any reads or writes to config space will
 * sleep until access is unblocked again. We don't allow nesting of
 * block/unblock calls.
 */
void pci_block_user_cfg_access(struct pci_dev *dev)
{
        unsigned long flags;
        int was_blocked;

        spin_lock_irqsave(&pci_lock, flags);
        was_blocked = dev->block_ucfg_access;
        dev->block_ucfg_access = 1;
        spin_unlock_irqrestore(&pci_lock, flags);

        /* If we BUG() inside the pci_lock, we're guaranteed to hose
         * the machine */
        BUG_ON(was_blocked);
}
EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);

/**
 * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows userspace PCI config accesses to resume.
 */
void pci_unblock_user_cfg_access(struct pci_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_lock, flags);

        /* This indicates a problem in the caller, but we don't need
         * to kill them, unlike a double-block above. */
        WARN_ON(!dev->block_ucfg_access);

        dev->block_ucfg_access = 0;
        wake_up_all(&pci_ucfg_wait);
        spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
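
/*
 * Example (editorial sketch, not part of the original file): how a driver
 * might bracket a Built-In Self Test with the block/unblock calls above so
 * that user config accesses sleep until the test has finished. The function
 * name is hypothetical and the polling loop is condensed to a comment.
 */
static void example_run_bist(struct pci_dev *dev)
{
        pci_block_user_cfg_access(dev);

        /* Kick off BIST; real code would poll PCI_BIST until the
         * PCI_BIST_START bit clears or a timeout expires. */
        pci_write_config_byte(dev, PCI_BIST, PCI_BIST_START);

        pci_unblock_user_cfg_access(dev);
}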