ixp4xx_qmgr.c

/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <mach/qmgr.h>

struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[HALF_QUEUES])(void *pdev);
static void *irq_pdevs[HALF_QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif
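
/*
 * Typical use by a client driver (an illustrative sketch only, not part of
 * this file): the queue number TX_DONE_QUEUE, the tx_done_handler() callback
 * and the dev pointer are hypothetical; the QUEUE_IRQ_SRC_* interrupt-source
 * constants and the qmgr_put_entry()/qmgr_get_entry() accessors are declared
 * in <mach/qmgr.h> (not shown here).
 *
 *      qmgr_request_queue(TX_DONE_QUEUE, 64, 0, 0, "%s TX-done", dev->name);
 *      qmgr_set_irq(TX_DONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
 *                   tx_done_handler, dev);
 *      qmgr_enable_irq(TX_DONE_QUEUE);
 *      ...
 *      qmgr_put_entry(TX_DONE_QUEUE, phys_addr);     enqueue an entry
 *      phys_addr = qmgr_get_entry(TX_DONE_QUEUE);    dequeue, 0 when empty
 *      ...
 *      qmgr_disable_irq(TX_DONE_QUEUE);
 *      qmgr_release_queue(TX_DONE_QUEUE);
 */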
void qmgr_set_irq(unsigned int queue, int src,
                  void (*handler)(void *pdev), void *pdev)
{
        u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */
        int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
        unsigned long flags;

        src &= 7;
        spin_lock_irqsave(&qmgr_lock, flags);
        __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg);
        irq_handlers[queue] = handler;
        irq_pdevs[queue] = pdev;
        spin_unlock_irqrestore(&qmgr_lock, flags);
}
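
/*
 * First queue manager interrupt: acknowledge the status bits for queues
 * 0..HALF_QUEUES-1 and call the registered per-queue handlers.
 */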
static irqreturn_t qmgr_irq1(int irq, void *pdev)
{
        int i;
        u32 val = __raw_readl(&qmgr_regs->irqstat[0]);
        __raw_writel(val, &qmgr_regs->irqstat[0]); /* ACK */

        for (i = 0; i < HALF_QUEUES; i++)
                if (val & (1 << i))
                        irq_handlers[i](irq_pdevs[i]);

        return val ? IRQ_HANDLED : 0;
}

void qmgr_enable_irq(unsigned int queue)
{
        unsigned long flags;

        spin_lock_irqsave(&qmgr_lock, flags);
        __raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue),
                     &qmgr_regs->irqen[0]);
        spin_unlock_irqrestore(&qmgr_lock, flags);
}

void qmgr_disable_irq(unsigned int queue)
{
        unsigned long flags;

        spin_lock_irqsave(&qmgr_lock, flags);
        __raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
                     &qmgr_regs->irqen[0]);
        __raw_writel(1 << queue, &qmgr_regs->irqstat[0]); /* clear */
        spin_unlock_irqrestore(&qmgr_lock, flags);
}
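
/* Shift the 128-bit SRAM allocation mask left by one 16-dword page. */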
static inline void shift_mask(u32 *mask)
{
        mask[3] = mask[3] << 1 | mask[2] >> 31;
        mask[2] = mask[2] << 1 | mask[1] >> 31;
        mask[1] = mask[1] << 1 | mask[0] >> 31;
        mask[0] <<= 1;
}
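
/*
 * Reserve SRAM for a queue and write its configuration word. Allocation is
 * a first-fit search over the 128-page bitmap (one bit per 16-dword page);
 * the first four pages are reserved at init time.
 */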
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
                       unsigned int nearly_empty_watermark,
                       unsigned int nearly_full_watermark,
                       const char *desc_format, const char *name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
                         unsigned int nearly_empty_watermark,
                         unsigned int nearly_full_watermark)
#endif
{
        u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
        int err;

        if (queue >= HALF_QUEUES)
                return -ERANGE;

        if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
                return -EINVAL;

        switch (len) {
        case 16:
                cfg = 0 << 24;
                mask[0] = 0x1;
                break;
        case 32:
                cfg = 1 << 24;
                mask[0] = 0x3;
                break;
        case 64:
                cfg = 2 << 24;
                mask[0] = 0xF;
                break;
        case 128:
                cfg = 3 << 24;
                mask[0] = 0xFF;
                break;
        default:
                return -EINVAL;
        }

        cfg |= nearly_empty_watermark << 26;
        cfg |= nearly_full_watermark << 29;
        len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
        mask[1] = mask[2] = mask[3] = 0;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        spin_lock_irq(&qmgr_lock);
        if (__raw_readl(&qmgr_regs->sram[queue])) {
                err = -EBUSY;
                goto err;
        }

        while (1) {
                if (!(used_sram_bitmap[0] & mask[0]) &&
                    !(used_sram_bitmap[1] & mask[1]) &&
                    !(used_sram_bitmap[2] & mask[2]) &&
                    !(used_sram_bitmap[3] & mask[3]))
                        break; /* found free space */

                addr++;
                shift_mask(mask);
                if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
                        printk(KERN_ERR "qmgr: no free SRAM space for"
                               " queue %i\n", queue);
                        err = -ENOMEM;
                        goto err;
                }
        }

        used_sram_bitmap[0] |= mask[0];
        used_sram_bitmap[1] |= mask[1];
        used_sram_bitmap[2] |= mask[2];
        used_sram_bitmap[3] |= mask[3];
        __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
        snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
                 desc_format, name);
        printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
               qmgr_queue_descs[queue], queue, addr);
#endif
        spin_unlock_irq(&qmgr_lock);
        return 0;

err:
        spin_unlock_irq(&qmgr_lock);
        module_put(THIS_MODULE);
        return err;
}
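
/*
 * Undo qmgr_request_queue(): clear the queue's configuration word, return
 * its SRAM pages to the bitmap and warn if entries were still queued.
 */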
void qmgr_release_queue(unsigned int queue)
{
        u32 cfg, addr, mask[4];

        BUG_ON(queue >= HALF_QUEUES); /* not in valid range */

        spin_lock_irq(&qmgr_lock);
        cfg = __raw_readl(&qmgr_regs->sram[queue]);
        addr = (cfg >> 14) & 0xFF;
        BUG_ON(!addr); /* not requested */

        switch ((cfg >> 24) & 3) {
        case 0: mask[0] = 0x1; break;
        case 1: mask[0] = 0x3; break;
        case 2: mask[0] = 0xF; break;
        case 3: mask[0] = 0xFF; break;
        }

        mask[1] = mask[2] = mask[3] = 0;

        while (addr--)
                shift_mask(mask);

#if DEBUG_QMGR
        printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
               qmgr_queue_descs[queue], queue);
        qmgr_queue_descs[queue][0] = '\x0';
#endif
        __raw_writel(0, &qmgr_regs->sram[queue]);

        used_sram_bitmap[0] &= ~mask[0];
        used_sram_bitmap[1] &= ~mask[1];
        used_sram_bitmap[2] &= ~mask[2];
        used_sram_bitmap[3] &= ~mask[3];
        irq_handlers[queue] = NULL; /* catch IRQ bugs */
        spin_unlock_irq(&qmgr_lock);

        module_put(THIS_MODULE);

        while ((addr = qmgr_get_entry(queue)))
                printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
                       queue, addr);
}
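
/*
 * Map and reset the queue manager registers, claim the first interrupt line
 * and mark the SRAM pages used for hardware configuration as allocated.
 */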
static int qmgr_init(void)
{
        int i, err;

        mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
                                     IXP4XX_QMGR_REGION_SIZE,
                                     "IXP4xx Queue Manager");
        if (mem_res == NULL)
                return -EBUSY;

        qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
        if (qmgr_regs == NULL) {
                err = -ENOMEM;
                goto error_map;
        }

        /* reset qmgr registers */
        for (i = 0; i < 4; i++) {
                __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
                __raw_writel(0, &qmgr_regs->irqsrc[i]);
        }
        for (i = 0; i < 2; i++) {
                __raw_writel(0, &qmgr_regs->stat2[i]);
                __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
                __raw_writel(0, &qmgr_regs->irqen[i]);
        }
        for (i = 0; i < QUEUES; i++)
                __raw_writel(0, &qmgr_regs->sram[i]);

        err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq1, 0,
                          "IXP4xx Queue Manager", NULL);
        if (err) {
                printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
                       IRQ_IXP4XX_QM1);
                goto error_irq;
        }

        used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
        spin_lock_init(&qmgr_lock);

        printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
        return 0;

error_irq:
        iounmap(qmgr_regs);
error_map:
        release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
        return err;
}

static void qmgr_remove(void)
{
        free_irq(IRQ_IXP4XX_QM1, NULL);
        synchronize_irq(IRQ_IXP4XX_QM1);
        iounmap(qmgr_regs);
        release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}

module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);