sun_esp.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661
  1. /* sun_esp.c: ESP front-end for Sparc SBUS systems.
  2. *
  3. * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/types.h>
  7. #include <linux/delay.h>
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <linux/init.h>
  11. #include <asm/irq.h>
  12. #include <asm/io.h>
  13. #include <asm/dma.h>
  14. #include <asm/sbus.h>
  15. #include <scsi/scsi_host.h>
  16. #include "esp_scsi.h"
#define DRV_MODULE_NAME "sun_esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "1.000"
#define DRV_MODULE_RELDATE "April 19, 2007"

/* Accessors for the DVMA controller registers.  Both expand an
 * implicit 'esp' local, so they may only be used inside functions
 * that have a 'struct esp *esp' in scope.
 */
#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))
/* DVMA chip revisions, as decoded from the DMA_DEVICE_ID field of
 * DMA_CSR in esp_sbus_setup_dma().  dvmahme is the FAS366/HME flavor.
 */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};
  35. static int __devinit esp_sbus_setup_dma(struct esp *esp,
  36. struct of_device *dma_of)
  37. {
  38. esp->dma = dma_of;
  39. esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
  40. resource_size(&dma_of->resource[0]),
  41. "espdma");
  42. if (!esp->dma_regs)
  43. return -ENOMEM;
  44. switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
  45. case DMA_VERS0:
  46. esp->dmarev = dvmarev0;
  47. break;
  48. case DMA_ESCV1:
  49. esp->dmarev = dvmaesc1;
  50. break;
  51. case DMA_VERS1:
  52. esp->dmarev = dvmarev1;
  53. break;
  54. case DMA_VERS2:
  55. esp->dmarev = dvmarev2;
  56. break;
  57. case DMA_VERHME:
  58. esp->dmarev = dvmahme;
  59. break;
  60. case DMA_VERSPLUS:
  61. esp->dmarev = dvmarevplus;
  62. break;
  63. }
  64. return 0;
  65. }
  66. static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
  67. {
  68. struct sbus_dev *sdev = esp->dev;
  69. struct resource *res;
  70. /* On HME, two reg sets exist, first is DVMA,
  71. * second is ESP registers.
  72. */
  73. if (hme)
  74. res = &sdev->resource[1];
  75. else
  76. res = &sdev->resource[0];
  77. esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
  78. if (!esp->regs)
  79. return -ENOMEM;
  80. return 0;
  81. }
  82. static int __devinit esp_sbus_map_command_block(struct esp *esp)
  83. {
  84. struct sbus_dev *sdev = esp->dev;
  85. esp->command_block = sbus_alloc_consistent(&sdev->ofdev.dev, 16,
  86. &esp->command_block_dma);
  87. if (!esp->command_block)
  88. return -ENOMEM;
  89. return 0;
  90. }
  91. static int __devinit esp_sbus_register_irq(struct esp *esp)
  92. {
  93. struct Scsi_Host *host = esp->host;
  94. struct sbus_dev *sdev = esp->dev;
  95. host->irq = sdev->irqs[0];
  96. return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
  97. }
  98. static void __devinit esp_get_scsi_id(struct esp *esp)
  99. {
  100. struct sbus_dev *sdev = esp->dev;
  101. struct device_node *dp = sdev->ofdev.node;
  102. esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
  103. if (esp->scsi_id != 0xff)
  104. goto done;
  105. esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
  106. if (esp->scsi_id != 0xff)
  107. goto done;
  108. if (!sdev->bus) {
  109. /* SUN4 */
  110. esp->scsi_id = 7;
  111. goto done;
  112. }
  113. esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
  114. "scsi-initiator-id", 7);
  115. done:
  116. esp->host->this_id = esp->scsi_id;
  117. esp->scsi_id_mask = (1 << esp->scsi_id);
  118. }
  119. static void __devinit esp_get_differential(struct esp *esp)
  120. {
  121. struct sbus_dev *sdev = esp->dev;
  122. struct device_node *dp = sdev->ofdev.node;
  123. if (of_find_property(dp, "differential", NULL))
  124. esp->flags |= ESP_FLAG_DIFFERENTIAL;
  125. else
  126. esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
  127. }
  128. static void __devinit esp_get_clock_params(struct esp *esp)
  129. {
  130. struct sbus_dev *sdev = esp->dev;
  131. struct device_node *dp = sdev->ofdev.node;
  132. struct device_node *bus_dp;
  133. int fmhz;
  134. bus_dp = NULL;
  135. if (sdev != NULL && sdev->bus != NULL)
  136. bus_dp = sdev->bus->ofdev.node;
  137. fmhz = of_getintprop_default(dp, "clock-frequency", 0);
  138. if (fmhz == 0)
  139. fmhz = (!bus_dp) ? 0 :
  140. of_getintprop_default(bus_dp, "clock-frequency", 0);
  141. esp->cfreq = fmhz;
  142. }
  143. static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
  144. {
  145. struct device_node *dma_dp = dma_of->node;
  146. struct sbus_dev *sdev = esp->dev;
  147. struct device_node *dp;
  148. u8 bursts, val;
  149. dp = sdev->ofdev.node;
  150. bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
  151. val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
  152. if (val != 0xff)
  153. bursts &= val;
  154. if (sdev->bus) {
  155. u8 val = of_getintprop_default(sdev->bus->ofdev.node,
  156. "burst-sizes", 0xff);
  157. if (val != 0xff)
  158. bursts &= val;
  159. }
  160. if (bursts == 0xff ||
  161. (bursts & DMA_BURST16) == 0 ||
  162. (bursts & DMA_BURST32) == 0)
  163. bursts = (DMA_BURST32 - 1);
  164. esp->bursts = bursts;
  165. }
  166. static void __devinit esp_sbus_get_props(struct esp *esp, struct of_device *espdma)
  167. {
  168. esp_get_scsi_id(esp);
  169. esp_get_differential(esp);
  170. esp_get_clock_params(esp);
  171. esp_get_bursts(esp, espdma);
  172. }
  173. static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
  174. {
  175. sbus_writeb(val, esp->regs + (reg * 4UL));
  176. }
  177. static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
  178. {
  179. return sbus_readb(esp->regs + (reg * 4UL));
  180. }
  181. static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
  182. size_t sz, int dir)
  183. {
  184. struct sbus_dev *sdev = esp->dev;
  185. return sbus_map_single(&sdev->ofdev.dev, buf, sz, dir);
  186. }
  187. static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
  188. int num_sg, int dir)
  189. {
  190. struct sbus_dev *sdev = esp->dev;
  191. return sbus_map_sg(&sdev->ofdev.dev, sg, num_sg, dir);
  192. }
  193. static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
  194. size_t sz, int dir)
  195. {
  196. struct sbus_dev *sdev = esp->dev;
  197. sbus_unmap_single(&sdev->ofdev.dev, addr, sz, dir);
  198. }
  199. static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
  200. int num_sg, int dir)
  201. {
  202. struct sbus_dev *sdev = esp->dev;
  203. sbus_unmap_sg(&sdev->ofdev.dev, sg, num_sg, dir);
  204. }
  205. static int sbus_esp_irq_pending(struct esp *esp)
  206. {
  207. if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
  208. return 1;
  209. return 0;
  210. }
  211. static void sbus_esp_reset_dma(struct esp *esp)
  212. {
  213. int can_do_burst16, can_do_burst32, can_do_burst64;
  214. int can_do_sbus64, lim;
  215. u32 val;
  216. can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
  217. can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
  218. can_do_burst64 = 0;
  219. can_do_sbus64 = 0;
  220. if (sbus_can_dma_64bit(esp->dev))
  221. can_do_sbus64 = 1;
  222. if (sbus_can_burst64(esp->sdev))
  223. can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
  224. /* Put the DVMA into a known state. */
  225. if (esp->dmarev != dvmahme) {
  226. val = dma_read32(DMA_CSR);
  227. dma_write32(val | DMA_RST_SCSI, DMA_CSR);
  228. dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
  229. }
  230. switch (esp->dmarev) {
  231. case dvmahme:
  232. dma_write32(DMA_RESET_FAS366, DMA_CSR);
  233. dma_write32(DMA_RST_SCSI, DMA_CSR);
  234. esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
  235. DMA_SCSI_DISAB | DMA_INT_ENAB);
  236. esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
  237. DMA_BRST_SZ);
  238. if (can_do_burst64)
  239. esp->prev_hme_dmacsr |= DMA_BRST64;
  240. else if (can_do_burst32)
  241. esp->prev_hme_dmacsr |= DMA_BRST32;
  242. if (can_do_sbus64) {
  243. esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
  244. sbus_set_sbus64(esp->dev, esp->bursts);
  245. }
  246. lim = 1000;
  247. while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
  248. if (--lim == 0) {
  249. printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
  250. "will not clear!\n",
  251. esp->host->unique_id);
  252. break;
  253. }
  254. udelay(1);
  255. }
  256. dma_write32(0, DMA_CSR);
  257. dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
  258. dma_write32(0, DMA_ADDR);
  259. break;
  260. case dvmarev2:
  261. if (esp->rev != ESP100) {
  262. val = dma_read32(DMA_CSR);
  263. dma_write32(val | DMA_3CLKS, DMA_CSR);
  264. }
  265. break;
  266. case dvmarev3:
  267. val = dma_read32(DMA_CSR);
  268. val &= ~DMA_3CLKS;
  269. val |= DMA_2CLKS;
  270. if (can_do_burst32) {
  271. val &= ~DMA_BRST_SZ;
  272. val |= DMA_BRST32;
  273. }
  274. dma_write32(val, DMA_CSR);
  275. break;
  276. case dvmaesc1:
  277. val = dma_read32(DMA_CSR);
  278. val |= DMA_ADD_ENABLE;
  279. val &= ~DMA_BCNT_ENAB;
  280. if (!can_do_burst32 && can_do_burst16) {
  281. val |= DMA_ESC_BURST;
  282. } else {
  283. val &= ~(DMA_ESC_BURST);
  284. }
  285. dma_write32(val, DMA_CSR);
  286. break;
  287. default:
  288. break;
  289. }
  290. /* Enable interrupts. */
  291. val = dma_read32(DMA_CSR);
  292. dma_write32(val | DMA_INT_ENAB, DMA_CSR);
  293. }
/* Wait for the DVMA FIFO to drain to memory after a transfer.
 * HME needs no drain.  On rev3/ESC1 the drain apparently starts on
 * its own, so DMA_FIFO_STDRAIN is only kicked on the other
 * revisions -- NOTE(review): inferred from the guard below, confirm
 * against DVMA documentation.  Polls up to ~1ms before giving up.
 */
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;		/* nothing buffered */

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}
/* Discard whatever state the DVMA still holds for the last transfer.
 * HME: reset the SCSI side and reprogram the CSR from the cached
 * image with ENABLE/ST_WRITE cleared.  Older revisions: wait out any
 * pending read (up to ~1ms), then pulse DMA_FIFO_INV.
 */
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		/* Pulse the FIFO-invalidate bit: set, write, clear, write. */
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}
/* Program one DMA transfer and issue 'cmd' to the ESP chip.  Only
 * DMA commands are legal here (BUG_ON below).  The HME/FAS366 path
 * issues the ESP command first and then loads the full CSR image
 * kept in prev_hme_dmacsr; older DVMA revisions set direction and
 * enable in the live CSR before issuing the command.
 */
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	/* Transfer count: low and middle bytes for every chip... */
	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		/* ...plus the high byte on FASHME. */
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			/* ESC1 gets the count rounded up to a page
			 * boundary past the buffer end; the extra 16
			 * looks like slop -- NOTE(review): confirm
			 * against ESC DVMA documentation.
			 */
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}
  385. static int sbus_esp_dma_error(struct esp *esp)
  386. {
  387. u32 csr = dma_read32(DMA_CSR);
  388. if (csr & DMA_HNDL_ERROR)
  389. return 1;
  390. return 0;
  391. }
/* Hooks handed to the generic esp_scsi core for this front-end. */
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	= sbus_esp_write8,
	.esp_read8	= sbus_esp_read8,
	.map_single	= sbus_esp_map_single,
	.map_sg		= sbus_esp_map_sg,
	.unmap_single	= sbus_esp_unmap_single,
	.unmap_sg	= sbus_esp_unmap_sg,
	.irq_pending	= sbus_esp_irq_pending,
	.reset_dma	= sbus_esp_reset_dma,
	.dma_drain	= sbus_esp_dma_drain,
	.dma_invalidate	= sbus_esp_dma_invalidate,
	.send_dma_cmd	= sbus_esp_send_dma_cmd,
	.dma_error	= sbus_esp_dma_error,
};
/* Bring up one ESP instance: allocate the Scsi_Host, wire up the
 * SBUS-specific state, map registers, DMA and the command block,
 * grab the IRQ, read firmware properties, and register with the
 * esp_scsi core.  Returns 0 or a negative errno; on failure the
 * goto ladder releases everything acquired so far in reverse order.
 * NOTE(review): the DVMA register mapping made by
 * esp_sbus_setup_dma() is not unmapped on the later failure paths.
 */
static int __devinit esp_sbus_probe_one(struct device *dev,
					struct sbus_dev *esp_dev,
					struct of_device *espdma,
					struct sbus_bus *sbus,
					int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);	/* HME/FAS366 is wide SCSI */
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = esp_dev;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&esp_dev->ofdev.dev, esp);

	err = scsi_esp_register(esp, dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	sbus_free_consistent(&esp_dev->ofdev.dev, 16,
			     esp->command_block,
			     esp->command_block_dma);
fail_unmap_regs:
	sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}
/* Driver probe entry.  Locate the DVMA node that pairs with this
 * ESP: an "espdma"/"dma" parent node means an external DVMA; a
 * "SUNW,fas" device is HME, whose DVMA registers live on the device
 * node itself.  Without a DVMA we cannot drive the chip (-ENODEV).
 */
static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
	struct device_node *dma_node = NULL;
	struct device_node *dp = dev->node;
	struct of_device *dma_of = NULL;
	int hme = 0;

	if (dp->parent &&
	    (!strcmp(dp->parent->name, "espdma") ||
	     !strcmp(dp->parent->name, "dma")))
		dma_node = dp->parent;
	else if (!strcmp(dp->name, "SUNW,fas")) {
		dma_node = sdev->ofdev.node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	return esp_sbus_probe_one(&dev->dev, sdev, dma_of,
				  sdev->bus, hme);
}
/* Driver remove entry: mirror of esp_sbus_probe_one() in reverse.
 * The esp softc was stashed in drvdata at probe time.
 */
static int __devexit esp_sbus_remove(struct of_device *dev)
{
	struct esp *esp = dev_get_drvdata(&dev->dev);
	struct sbus_dev *sdev = esp->dev;
	struct of_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts before releasing the IRQ. */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);
	sbus_free_consistent(&sdev->ofdev.dev, 16,
			     esp->command_block,
			     esp->command_block_dma);
	sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	return 0;
}
/* OBP names this chip family appears under; order is match priority. */
static struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);
/* OF platform driver glue binding esp_match to probe/remove. */
static struct of_platform_driver esp_sbus_driver = {
	.name		= "esp",
	.match_table	= esp_match,
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};
/* Register the driver on the SBUS bus type at module load... */
static int __init sunesp_init(void)
{
	return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
}

/* ...and unregister it at module unload. */
static void __exit sunesp_exit(void)
{
	of_unregister_driver(&esp_sbus_driver);
}
/* Module metadata and entry points. */
MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);