sun_esp.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657
  1. /* sun_esp.c: ESP front-end for Sparc SBUS systems.
  2. *
  3. * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
  4. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"
#define DRV_MODULE_NAME "sun_esp"
#define PFX DRV_MODULE_NAME ": "	/* prefix for printk messages */
#define DRV_VERSION "1.100"
#define DRV_MODULE_RELDATE "August 27, 2008"

/* Accessors for the DVMA companion chip registers.  Both macros expect
 * a variable named 'esp' (struct esp *) in the calling scope with
 * ->dma_regs already mapped by esp_sbus_setup_dma().
 */
#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))
/* DVMA chip revisions, decoded from the device-ID field of the DMA CSR
 * in esp_sbus_setup_dma().
 */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};
  37. static int __devinit esp_sbus_setup_dma(struct esp *esp,
  38. struct of_device *dma_of)
  39. {
  40. esp->dma = dma_of;
  41. esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
  42. resource_size(&dma_of->resource[0]),
  43. "espdma");
  44. if (!esp->dma_regs)
  45. return -ENOMEM;
  46. switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
  47. case DMA_VERS0:
  48. esp->dmarev = dvmarev0;
  49. break;
  50. case DMA_ESCV1:
  51. esp->dmarev = dvmaesc1;
  52. break;
  53. case DMA_VERS1:
  54. esp->dmarev = dvmarev1;
  55. break;
  56. case DMA_VERS2:
  57. esp->dmarev = dvmarev2;
  58. break;
  59. case DMA_VERHME:
  60. esp->dmarev = dvmahme;
  61. break;
  62. case DMA_VERSPLUS:
  63. esp->dmarev = dvmarevplus;
  64. break;
  65. }
  66. return 0;
  67. }
  68. static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
  69. {
  70. struct of_device *op = esp->dev;
  71. struct resource *res;
  72. /* On HME, two reg sets exist, first is DVMA,
  73. * second is ESP registers.
  74. */
  75. if (hme)
  76. res = &op->resource[1];
  77. else
  78. res = &op->resource[0];
  79. esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
  80. if (!esp->regs)
  81. return -ENOMEM;
  82. return 0;
  83. }
  84. static int __devinit esp_sbus_map_command_block(struct esp *esp)
  85. {
  86. struct of_device *op = esp->dev;
  87. esp->command_block = dma_alloc_coherent(&op->dev, 16,
  88. &esp->command_block_dma,
  89. GFP_ATOMIC);
  90. if (!esp->command_block)
  91. return -ENOMEM;
  92. return 0;
  93. }
  94. static int __devinit esp_sbus_register_irq(struct esp *esp)
  95. {
  96. struct Scsi_Host *host = esp->host;
  97. struct of_device *op = esp->dev;
  98. host->irq = op->irqs[0];
  99. return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
  100. }
  101. static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
  102. {
  103. struct of_device *op = esp->dev;
  104. struct device_node *dp;
  105. dp = op->node;
  106. esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
  107. if (esp->scsi_id != 0xff)
  108. goto done;
  109. esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
  110. if (esp->scsi_id != 0xff)
  111. goto done;
  112. esp->scsi_id = of_getintprop_default(espdma->node,
  113. "scsi-initiator-id", 7);
  114. done:
  115. esp->host->this_id = esp->scsi_id;
  116. esp->scsi_id_mask = (1 << esp->scsi_id);
  117. }
  118. static void __devinit esp_get_differential(struct esp *esp)
  119. {
  120. struct of_device *op = esp->dev;
  121. struct device_node *dp;
  122. dp = op->node;
  123. if (of_find_property(dp, "differential", NULL))
  124. esp->flags |= ESP_FLAG_DIFFERENTIAL;
  125. else
  126. esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
  127. }
  128. static void __devinit esp_get_clock_params(struct esp *esp)
  129. {
  130. struct of_device *op = esp->dev;
  131. struct device_node *bus_dp, *dp;
  132. int fmhz;
  133. dp = op->node;
  134. bus_dp = dp->parent;
  135. fmhz = of_getintprop_default(dp, "clock-frequency", 0);
  136. if (fmhz == 0)
  137. fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);
  138. esp->cfreq = fmhz;
  139. }
  140. static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
  141. {
  142. struct device_node *dma_dp = dma_of->node;
  143. struct of_device *op = esp->dev;
  144. struct device_node *dp;
  145. u8 bursts, val;
  146. dp = op->node;
  147. bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
  148. val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
  149. if (val != 0xff)
  150. bursts &= val;
  151. val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
  152. if (val != 0xff)
  153. bursts &= val;
  154. if (bursts == 0xff ||
  155. (bursts & DMA_BURST16) == 0 ||
  156. (bursts & DMA_BURST32) == 0)
  157. bursts = (DMA_BURST32 - 1);
  158. esp->bursts = bursts;
  159. }
/* Gather all the OpenFirmware properties this driver cares about. */
static void __devinit esp_sbus_get_props(struct esp *esp, struct of_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}
  167. static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
  168. {
  169. sbus_writeb(val, esp->regs + (reg * 4UL));
  170. }
  171. static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
  172. {
  173. return sbus_readb(esp->regs + (reg * 4UL));
  174. }
  175. static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
  176. size_t sz, int dir)
  177. {
  178. struct of_device *op = esp->dev;
  179. return dma_map_single(&op->dev, buf, sz, dir);
  180. }
  181. static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
  182. int num_sg, int dir)
  183. {
  184. struct of_device *op = esp->dev;
  185. return dma_map_sg(&op->dev, sg, num_sg, dir);
  186. }
  187. static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
  188. size_t sz, int dir)
  189. {
  190. struct of_device *op = esp->dev;
  191. dma_unmap_single(&op->dev, addr, sz, dir);
  192. }
  193. static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
  194. int num_sg, int dir)
  195. {
  196. struct of_device *op = esp->dev;
  197. dma_unmap_sg(&op->dev, sg, num_sg, dir);
  198. }
  199. static int sbus_esp_irq_pending(struct esp *esp)
  200. {
  201. if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
  202. return 1;
  203. return 0;
  204. }
/* Bring the DVMA engine into a known state and program its burst size
 * and clocking configuration according to the chip revision.
 */
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct of_device *op;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	op = esp->dev;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state by pulsing the SCSI reset
	 * bit (HME gets its own full reset sequence below).
	 */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		/* The HME CSR is programmed wholesale from a soft copy
		 * kept in prev_hme_dmacsr, rather than read-modify-write.
		 */
		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);
		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		/* Bounded busy-wait for any pending read to clear. */
		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		/* Enable 3-clock mode on anything newer than ESP100. */
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		/* Rev3 runs in 2-clock mode; prefer 32-byte bursts
		 * when available.
		 */
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
/* Wait for the DVMA FIFO to drain to memory after a transfer.
 *
 * HME needs no manual drain (we return immediately).  On rev3 and ESC1
 * the drain is already underway when DMA_FIFO_ISDRAIN is set; other
 * revisions must have the drain started explicitly.
 */
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	/* Bounded busy-wait for the drain to complete. */
	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}
/* Invalidate any state the DVMA engine holds from a previous transfer
 * before the next one is programmed.
 */
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		/* HME: reset the SCSI side, then rewrite the cached CSR
		 * with DMA and write-direction disabled.
		 */
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		/* Bounded wait for any pending read, then pulse the
		 * FIFO-invalidate bit with the engine disabled.
		 */
		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}
/* Load the transfer count into the ESP, program the DVMA engine with
 * the buffer address/direction, and issue the ESP command.  The
 * command must have its DMA bit set.
 */
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		/* FASHME takes extra transfer-count bytes via FAS_RLO /
		 * FAS_RHI; the top byte is written as zero here.
		 */
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		/* Build the new CSR from the soft copy; count and
		 * address are programmed before the CSR write that
		 * enables the engine.
		 */
		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			/* NOTE(review): ESC1 apparently wants the count
			 * padded out to a page boundary (+16 slack) --
			 * looks like errata handling, confirm against
			 * ESC1 documentation.
			 */
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}
  381. static int sbus_esp_dma_error(struct esp *esp)
  382. {
  383. u32 csr = dma_read32(DMA_CSR);
  384. if (csr & DMA_HNDL_ERROR)
  385. return 1;
  386. return 0;
  387. }
/* Hooks handed to the esp_scsi core library. */
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8 = sbus_esp_write8,
	.esp_read8 = sbus_esp_read8,
	.map_single = sbus_esp_map_single,
	.map_sg = sbus_esp_map_sg,
	.unmap_single = sbus_esp_unmap_single,
	.unmap_sg = sbus_esp_unmap_sg,
	.irq_pending = sbus_esp_irq_pending,
	.reset_dma = sbus_esp_reset_dma,
	.dma_drain = sbus_esp_dma_drain,
	.dma_invalidate = sbus_esp_dma_invalidate,
	.send_dma_cmd = sbus_esp_send_dma_cmd,
	.dma_error = sbus_esp_dma_error,
};
  402. static int __devinit esp_sbus_probe_one(struct of_device *op,
  403. struct of_device *espdma,
  404. int hme)
  405. {
  406. struct scsi_host_template *tpnt = &scsi_esp_template;
  407. struct Scsi_Host *host;
  408. struct esp *esp;
  409. int err;
  410. host = scsi_host_alloc(tpnt, sizeof(struct esp));
  411. err = -ENOMEM;
  412. if (!host)
  413. goto fail;
  414. host->max_id = (hme ? 16 : 8);
  415. esp = shost_priv(host);
  416. esp->host = host;
  417. esp->dev = op;
  418. esp->ops = &sbus_esp_ops;
  419. if (hme)
  420. esp->flags |= ESP_FLAG_WIDE_CAPABLE;
  421. err = esp_sbus_setup_dma(esp, espdma);
  422. if (err < 0)
  423. goto fail_unlink;
  424. err = esp_sbus_map_regs(esp, hme);
  425. if (err < 0)
  426. goto fail_unlink;
  427. err = esp_sbus_map_command_block(esp);
  428. if (err < 0)
  429. goto fail_unmap_regs;
  430. err = esp_sbus_register_irq(esp);
  431. if (err < 0)
  432. goto fail_unmap_command_block;
  433. esp_sbus_get_props(esp, espdma);
  434. /* Before we try to touch the ESP chip, ESC1 dma can
  435. * come up with the reset bit set, so make sure that
  436. * is clear first.
  437. */
  438. if (esp->dmarev == dvmaesc1) {
  439. u32 val = dma_read32(DMA_CSR);
  440. dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
  441. }
  442. dev_set_drvdata(&op->dev, esp);
  443. err = scsi_esp_register(esp, &op->dev);
  444. if (err)
  445. goto fail_free_irq;
  446. return 0;
  447. fail_free_irq:
  448. free_irq(host->irq, esp);
  449. fail_unmap_command_block:
  450. dma_free_coherent(&op->dev, 16,
  451. esp->command_block,
  452. esp->command_block_dma);
  453. fail_unmap_regs:
  454. of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
  455. fail_unlink:
  456. scsi_host_put(host);
  457. fail:
  458. return err;
  459. }
  460. static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match)
  461. {
  462. struct device_node *dma_node = NULL;
  463. struct device_node *dp = op->node;
  464. struct of_device *dma_of = NULL;
  465. int hme = 0;
  466. if (dp->parent &&
  467. (!strcmp(dp->parent->name, "espdma") ||
  468. !strcmp(dp->parent->name, "dma")))
  469. dma_node = dp->parent;
  470. else if (!strcmp(dp->name, "SUNW,fas")) {
  471. dma_node = op->node;
  472. hme = 1;
  473. }
  474. if (dma_node)
  475. dma_of = of_find_device_by_node(dma_node);
  476. if (!dma_of)
  477. return -ENODEV;
  478. return esp_sbus_probe_one(op, dma_of, hme);
  479. }
  480. static int __devexit esp_sbus_remove(struct of_device *op)
  481. {
  482. struct esp *esp = dev_get_drvdata(&op->dev);
  483. struct of_device *dma_of = esp->dma;
  484. unsigned int irq = esp->host->irq;
  485. bool is_hme;
  486. u32 val;
  487. scsi_esp_unregister(esp);
  488. /* Disable interrupts. */
  489. val = dma_read32(DMA_CSR);
  490. dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
  491. free_irq(irq, esp);
  492. is_hme = (esp->dmarev == dvmahme);
  493. dma_free_coherent(&op->dev, 16,
  494. esp->command_block,
  495. esp->command_block_dma);
  496. of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
  497. SBUS_ESP_REG_SIZE);
  498. of_iounmap(&dma_of->resource[0], esp->dma_regs,
  499. resource_size(&dma_of->resource[0]));
  500. scsi_host_put(esp->host);
  501. dev_set_drvdata(&op->dev, NULL);
  502. return 0;
  503. }
/* OpenFirmware node names this driver binds to. */
static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);
/* OF platform driver glue binding esp_match nodes to probe/remove. */
static struct of_platform_driver esp_sbus_driver = {
	.name = "esp",
	.match_table = esp_match,
	.probe = esp_sbus_probe,
	.remove = __devexit_p(esp_sbus_remove),
};
/* Module entry: register the driver on the OF bus. */
static int __init sunesp_init(void)
{
	return of_register_driver(&esp_sbus_driver, &of_bus_type);
}
/* Module exit: unregister the driver from the OF bus. */
static void __exit sunesp_exit(void)
{
	of_unregister_driver(&esp_sbus_driver);
}
/* Module metadata and entry/exit point registration. */
MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);