access.c

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
static DEFINE_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions.  They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
	int res; \
	unsigned long flags; \
	u32 data = 0; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	spin_lock_irqsave(&pci_lock, flags); \
	res = bus->ops->read(bus, devfn, pos, len, &data); \
	*value = (type)data; \
	spin_unlock_irqrestore(&pci_lock, flags); \
	return res; \
}

#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
	int res; \
	unsigned long flags; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	spin_lock_irqsave(&pci_lock, flags); \
	res = bus->ops->write(bus, devfn, pos, len, value); \
	spin_unlock_irqrestore(&pci_lock, flags); \
	return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
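
/*
 * Illustrative sketch (not part of the original file): given a struct
 * pci_bus and a devfn, a caller could read the vendor ID through the
 * wrapper above.  The function name example_read_vendor is hypothetical.
 */
#if 0
static int example_read_vendor(struct pci_bus *bus, unsigned int devfn)
{
	u16 vendor;
	int res;

	res = pci_bus_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor);
	if (res)
		return res;	/* nonzero PCIBIOS_* error code */
	return vendor;
}
#endif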

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
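
/*
 * Illustrative sketch (hypothetical, not in the original file): because
 * pci_bus_set_ops returns the previous pci_ops, a debug facility could
 * interpose its own accessors and restore the originals afterwards.
 */
#if 0
static struct pci_ops *saved_ops;	/* hypothetical storage */

static void example_interpose(struct pci_bus *bus, struct pci_ops *my_ops)
{
	saved_ops = pci_bus_set_ops(bus, my_ops);
}

static void example_restore(struct pci_bus *bus)
{
	pci_bus_set_ops(bus, saved_ops);
}
#endif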

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to read
 * @buf: pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to write
 * @buf: buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
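
/*
 * Illustrative sketch (hypothetical): a driver could pull the start of
 * its device's VPD into a local buffer; a negative return is an errno,
 * otherwise the number of bytes read.
 */
#if 0
static ssize_t example_dump_vpd(struct pci_dev *dev)
{
	u8 buf[64];

	return pci_read_vpd(dev, 0, sizeof(buf), buf);
}
#endif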

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);

static noinline void pci_wait_ucfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_ucfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&pci_lock);
		schedule();
		spin_lock_irq(&pci_lock);
	} while (dev->block_ucfg_access);
	__remove_wait_queue(&pci_ucfg_wait, &wait);
}

#define PCI_USER_READ_CONFIG(size,type) \
int pci_user_read_config_##size \
	(struct pci_dev *dev, int pos, type *val) \
{ \
	int ret = 0; \
	u32 data = -1; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	spin_lock_irq(&pci_lock); \
	if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
	ret = dev->bus->ops->read(dev->bus, dev->devfn, \
					pos, sizeof(type), &data); \
	spin_unlock_irq(&pci_lock); \
	*val = (type)data; \
	return ret; \
}

#define PCI_USER_WRITE_CONFIG(size,type) \
int pci_user_write_config_##size \
	(struct pci_dev *dev, int pos, type val) \
{ \
	int ret = -EIO; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	spin_lock_irq(&pci_lock); \
	if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
	ret = dev->bus->ops->write(dev->bus, dev->devfn, \
					pos, sizeof(type), val); \
	spin_unlock_irq(&pci_lock); \
	return ret; \
}

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
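
/*
 * Illustrative sketch (hypothetical): the pci_user_* accessors above may
 * sleep in pci_wait_ucfg() while access is blocked, so they must only be
 * called from process context, e.g. a sysfs or procfs handler.
 */
#if 0
static int example_user_read_command(struct pci_dev *dev, u16 *cmd)
{
	might_sleep();	/* may block until access is unblocked */
	return pci_user_read_config_word(dev, PCI_COMMAND, cmd);
}
#endif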

/* VPD access through PCI 2.2+ VPD capability */

#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)

struct pci_vpd_pci22 {
	struct pci_vpd base;
	struct mutex lock;
	u16 flag;
	bool busy;
	u8 cap;
};

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware.  Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 */
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	unsigned long timeout = jiffies + HZ/20 + 2;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	for (;;) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret)
			return ret;
		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = false;
			return 0;
		}
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		if (fatal_signal_pending(current))
			return -EINTR;
		if (!cond_resched())
			udelay(10);
	}
}

static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
				  void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* Latch the dword-aligned address; the hardware sets the
		 * F bit once the read data is available. */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = true;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/* Copy the dword out a byte at a time (low byte first),
		 * skipping leading bytes when pos is unaligned. */
		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
				   const void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble a little-endian dword from the buffer. */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		/* Writing the address with the F bit set starts the write;
		 * the hardware clears the bit when it completes. */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;
		vpd->busy = true;
		vpd->flag = 0;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;
		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static void pci_vpd_pci22_release(struct pci_dev *dev)
{
	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}

static const struct pci_vpd_ops pci_vpd_pci22_ops = {
	.read = pci_vpd_pci22_read,
	.write = pci_vpd_pci22_write,
	.release = pci_vpd_pci22_release,
};

int pci_vpd_pci22_init(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;
	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->base.len = PCI_VPD_PCI22_SIZE;
	vpd->base.ops = &pci_vpd_pci22_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = false;
	dev->vpd = &vpd->base;
	return 0;
}

/**
 * pci_vpd_truncate - Set available Vital Product Data size
 * @dev: pci device struct
 * @size: available memory in bytes
 *
 * Adjust size of available VPD area.
 */
int pci_vpd_truncate(struct pci_dev *dev, size_t size)
{
	if (!dev->vpd)
		return -EINVAL;

	/* limited by the access method */
	if (size > dev->vpd->len)
		return -EINVAL;

	dev->vpd->len = size;
	if (dev->vpd->attr)
		dev->vpd->attr->size = size;

	return 0;
}
EXPORT_SYMBOL(pci_vpd_truncate);
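
/*
 * Illustrative sketch (hypothetical): a driver that knows its device
 * implements less VPD than the capability advertises, say 1 KB, could
 * clamp the size at probe time so userspace cannot read past the end.
 */
#if 0
static void example_clamp_vpd(struct pci_dev *dev)
{
	pci_vpd_truncate(dev, 1024);
}
#endif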

/**
 * pci_block_user_cfg_access - Block userspace PCI config reads/writes
 * @dev: pci device struct
 *
 * When user access is blocked, any reads or writes to config space will
 * sleep until access is unblocked again.  We don't allow nesting of
 * block/unblock calls.
 */
void pci_block_user_cfg_access(struct pci_dev *dev)
{
	unsigned long flags;
	int was_blocked;

	spin_lock_irqsave(&pci_lock, flags);
	was_blocked = dev->block_ucfg_access;
	dev->block_ucfg_access = 1;
	spin_unlock_irqrestore(&pci_lock, flags);

	/* If we BUG() inside the pci_lock, we're guaranteed to hose
	 * the machine */
	BUG_ON(was_blocked);
}
EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);

/**
 * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows userspace PCI config accesses to resume.
 */
void pci_unblock_user_cfg_access(struct pci_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_lock, flags);

	/* This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above. */
	WARN_ON(!dev->block_ucfg_access);

	dev->block_ucfg_access = 0;
	wake_up_all(&pci_ucfg_wait);
	spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
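
/*
 * Illustrative sketch (hypothetical): block/unblock must be strictly
 * paired, without nesting, around a region where config space is unsafe
 * to touch, such as running BIST.
 */
#if 0
static void example_run_bist(struct pci_dev *dev)
{
	pci_block_user_cfg_access(dev);
	/* ... start BIST and poll for completion ... */
	pci_unblock_user_cfg_access(dev);
}
#endif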