/* parport_ip32.c */
  1. /* Low-level parallel port routines for built-in port on SGI IP32
  2. *
  3. * Author: Arnaud Giersch <arnaud.giersch@free.fr>
  4. *
  5. * Based on parport_pc.c by
  6. * Phil Blundell, Tim Waugh, Jose Renau, David Campbell,
  7. * Andrea Arcangeli, et al.
  8. *
  9. * Thanks to Ilya A. Volynets-Evenbakh for his help.
  10. *
  11. * Copyright (C) 2005, 2006 Arnaud Giersch.
  12. *
  13. * This program is free software; you can redistribute it and/or modify it
  14. * under the terms of the GNU General Public License as published by the Free
  15. * Software Foundation; either version 2 of the License, or (at your option)
  16. * any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful, but WITHOUT
  19. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  20. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  21. * more details.
  22. *
  23. * You should have received a copy of the GNU General Public License along
  24. * with this program; if not, write to the Free Software Foundation, Inc., 59
  25. * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  26. */
  27. /* Current status:
  28. *
  29. * Basic SPP and PS2 modes are supported.
  30. * Support for parallel port IRQ is present.
  31. * Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are
  32. * supported.
  33. * SPP/ECP FIFO can be driven in PIO or DMA mode. PIO mode can work with
  34. * or without interrupt support.
  35. *
  36. * Hardware ECP mode is not fully implemented (ecp_read_data and
  37. * ecp_write_addr are actually missing).
  38. *
  39. * To do:
  40. *
  41. * Fully implement ECP mode.
  42. * EPP and ECP mode need to be tested. I currently do not own any
  43. * peripheral supporting these extended mode, and cannot test them.
  44. * If DMA mode works well, decide if support for PIO FIFO modes should be
  45. * dropped.
  46. * Use the io{read,write} family functions when they become available in
  47. * the linux-mips.org tree. Note: the MIPS specific functions readsb()
  48. * and writesb() are to be translated by ioread8_rep() and iowrite8_rep()
  49. * respectively.
  50. */
  51. /* The built-in parallel port on the SGI O2 workstation (a.k.a. IP32) is an
  52. * IEEE 1284 parallel port driven by a Texas Instrument TL16PIR552PH chip[1].
  53. * This chip supports SPP, bidirectional, EPP and ECP modes. It has a 16 byte
  54. * FIFO buffer and supports DMA transfers.
  55. *
  56. * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html
  57. *
  58. * Theoretically, we could simply use the parport_pc module. It is however
  59. * not so simple. The parport_pc code assumes that the parallel port
  60. * registers are port-mapped. On the O2, they are memory-mapped.
  61. * Furthermore, each register is replicated on 256 consecutive addresses (as
  62. * it is for the built-in serial ports on the same chip).
  63. */
  64. /*--- Some configuration defines ---------------------------------------*/
  65. /* DEBUG_PARPORT_IP32
  66. * 0 disable debug
  67. * 1 standard level: pr_debug1 is enabled
  68. * 2 parport_ip32_dump_state is enabled
  69. * >=3 verbose level: pr_debug is enabled
  70. */
  71. #if !defined(DEBUG_PARPORT_IP32)
  72. # define DEBUG_PARPORT_IP32 0 /* 0 (disabled) for production */
  73. #endif
  74. /*----------------------------------------------------------------------*/
  75. /* Setup DEBUG macros. This is done before any includes, just in case we
  76. * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3.
  77. */
  78. #if DEBUG_PARPORT_IP32 == 1
  79. # warning DEBUG_PARPORT_IP32 == 1
  80. #elif DEBUG_PARPORT_IP32 == 2
  81. # warning DEBUG_PARPORT_IP32 == 2
  82. #elif DEBUG_PARPORT_IP32 >= 3
  83. # warning DEBUG_PARPORT_IP32 >= 3
  84. # if !defined(DEBUG)
  85. # define DEBUG /* enable pr_debug() in kernel.h */
  86. # endif
  87. #endif
  88. #include <linux/completion.h>
  89. #include <linux/delay.h>
  90. #include <linux/dma-mapping.h>
  91. #include <linux/err.h>
  92. #include <linux/init.h>
  93. #include <linux/interrupt.h>
  94. #include <linux/jiffies.h>
  95. #include <linux/kernel.h>
  96. #include <linux/module.h>
  97. #include <linux/parport.h>
  98. #include <linux/sched.h>
  99. #include <linux/spinlock.h>
  100. #include <linux/stddef.h>
  101. #include <linux/types.h>
  102. #include <asm/io.h>
  103. #include <asm/ip32/ip32_ints.h>
  104. #include <asm/ip32/mace.h>
  105. /*--- Global variables -------------------------------------------------*/
  106. /* Verbose probing on by default for debugging. */
  107. #if DEBUG_PARPORT_IP32 >= 1
  108. # define DEFAULT_VERBOSE_PROBING 1
  109. #else
  110. # define DEFAULT_VERBOSE_PROBING 0
  111. #endif
  112. /* Default prefix for printk */
  113. #define PPIP32 "parport_ip32: "
  114. /*
  115. * These are the module parameters:
  116. * @features: bit mask of features to enable/disable
  117. * (all enabled by default)
  118. * @verbose_probing: log chit-chat during initialization
  119. */
  120. #define PARPORT_IP32_ENABLE_IRQ (1U << 0)
  121. #define PARPORT_IP32_ENABLE_DMA (1U << 1)
  122. #define PARPORT_IP32_ENABLE_SPP (1U << 2)
  123. #define PARPORT_IP32_ENABLE_EPP (1U << 3)
  124. #define PARPORT_IP32_ENABLE_ECP (1U << 4)
  125. static unsigned int features = ~0U;
  126. static int verbose_probing = DEFAULT_VERBOSE_PROBING;
  127. /* We do not support more than one port. */
  128. static struct parport *this_port = NULL;
  129. /* Timing constants for FIFO modes. */
  130. #define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */
  131. #define FIFO_POLLING_INTERVAL 50 /* microseconds */
  132. /*--- I/O register definitions -----------------------------------------*/
  133. /**
  134. * struct parport_ip32_regs - virtual addresses of parallel port registers
  135. * @data: Data Register
  136. * @dsr: Device Status Register
  137. * @dcr: Device Control Register
  138. * @eppAddr: EPP Address Register
  139. * @eppData0: EPP Data Register 0
  140. * @eppData1: EPP Data Register 1
  141. * @eppData2: EPP Data Register 2
  142. * @eppData3: EPP Data Register 3
  143. * @ecpAFifo: ECP Address FIFO
  144. * @fifo: General FIFO register. The same address is used for:
  145. * - cFifo, the Parallel Port DATA FIFO
  146. * - ecpDFifo, the ECP Data FIFO
  147. * - tFifo, the ECP Test FIFO
  148. * @cnfgA: Configuration Register A
  149. * @cnfgB: Configuration Register B
  150. * @ecr: Extended Control Register
  151. */
struct parport_ip32_regs {
	void __iomem *data;	/* Data Register */
	void __iomem *dsr;	/* Device Status Register */
	void __iomem *dcr;	/* Device Control Register */
	void __iomem *eppAddr;	/* EPP Address Register */
	void __iomem *eppData0;	/* EPP Data Register 0 */
	void __iomem *eppData1;	/* EPP Data Register 1 */
	void __iomem *eppData2;	/* EPP Data Register 2 */
	void __iomem *eppData3;	/* EPP Data Register 3 */
	void __iomem *ecpAFifo;	/* ECP Address FIFO */
	void __iomem *fifo;	/* shared address: cFifo / ecpDFifo / tFifo */
	void __iomem *cnfgA;	/* Configuration Register A */
	void __iomem *cnfgB;	/* Configuration Register B */
	void __iomem *ecr;	/* Extended Control Register */
};
  167. /* Device Status Register */
  168. #define DSR_nBUSY (1U << 7) /* PARPORT_STATUS_BUSY */
  169. #define DSR_nACK (1U << 6) /* PARPORT_STATUS_ACK */
  170. #define DSR_PERROR (1U << 5) /* PARPORT_STATUS_PAPEROUT */
  171. #define DSR_SELECT (1U << 4) /* PARPORT_STATUS_SELECT */
  172. #define DSR_nFAULT (1U << 3) /* PARPORT_STATUS_ERROR */
  173. #define DSR_nPRINT (1U << 2) /* specific to TL16PIR552 */
  174. /* #define DSR_reserved (1U << 1) */
  175. #define DSR_TIMEOUT (1U << 0) /* EPP timeout */
  176. /* Device Control Register */
  177. /* #define DCR_reserved (1U << 7) | (1U << 6) */
  178. #define DCR_DIR (1U << 5) /* direction */
  179. #define DCR_IRQ (1U << 4) /* interrupt on nAck */
  180. #define DCR_SELECT (1U << 3) /* PARPORT_CONTROL_SELECT */
  181. #define DCR_nINIT (1U << 2) /* PARPORT_CONTROL_INIT */
  182. #define DCR_AUTOFD (1U << 1) /* PARPORT_CONTROL_AUTOFD */
  183. #define DCR_STROBE (1U << 0) /* PARPORT_CONTROL_STROBE */
  184. /* ECP Configuration Register A */
  185. #define CNFGA_IRQ (1U << 7)
  186. #define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4))
  187. #define CNFGA_ID_SHIFT 4
  188. #define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT)
  189. #define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT)
  190. #define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT)
  191. /* #define CNFGA_reserved (1U << 3) */
  192. #define CNFGA_nBYTEINTRANS (1U << 2)
  193. #define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0))
  194. /* ECP Configuration Register B */
  195. #define CNFGB_COMPRESS (1U << 7)
  196. #define CNFGB_INTRVAL (1U << 6)
  197. #define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3))
  198. #define CNFGB_IRQ_SHIFT 3
  199. #define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0))
  200. #define CNFGB_DMA_SHIFT 0
  201. /* Extended Control Register */
  202. #define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5))
  203. #define ECR_MODE_SHIFT 5
  204. #define ECR_MODE_SPP (00U << ECR_MODE_SHIFT)
  205. #define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT)
  206. #define ECR_MODE_PPF (02U << ECR_MODE_SHIFT)
  207. #define ECR_MODE_ECP (03U << ECR_MODE_SHIFT)
  208. #define ECR_MODE_EPP (04U << ECR_MODE_SHIFT)
  209. /* #define ECR_MODE_reserved (05U << ECR_MODE_SHIFT) */
  210. #define ECR_MODE_TST (06U << ECR_MODE_SHIFT)
  211. #define ECR_MODE_CFG (07U << ECR_MODE_SHIFT)
  212. #define ECR_nERRINTR (1U << 4)
  213. #define ECR_DMAEN (1U << 3)
  214. #define ECR_SERVINTR (1U << 2)
  215. #define ECR_F_FULL (1U << 1)
  216. #define ECR_F_EMPTY (1U << 0)
  217. /*--- Private data -----------------------------------------------------*/
  218. /**
  219. * enum parport_ip32_irq_mode - operation mode of interrupt handler
  220. * @PARPORT_IP32_IRQ_FWD: forward interrupt to the upper parport layer
  221. * @PARPORT_IP32_IRQ_HERE: interrupt is handled locally
  222. */
  223. enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE };
  224. /**
  225. * struct parport_ip32_private - private stuff for &struct parport
  226. * @regs: register addresses
  227. * @dcr_cache: cached contents of DCR
  228. * @dcr_writable: bit mask of writable DCR bits
  229. * @pword: number of bytes per PWord
  230. * @fifo_depth: number of PWords that FIFO will hold
  231. * @readIntrThreshold: minimum number of PWords we can read
  232. * if we get an interrupt
  233. * @writeIntrThreshold: minimum number of PWords we can write
  234. * if we get an interrupt
  235. * @irq_mode: operation mode of interrupt handler for this port
  236. * @irq_complete: mutex used to wait for an interrupt to occur
  237. */
struct parport_ip32_private {
	struct parport_ip32_regs regs;		/* register addresses */
	unsigned int dcr_cache;			/* cached contents of DCR */
	unsigned int dcr_writable;		/* bit mask of writable DCR bits */
	unsigned int pword;			/* number of bytes per PWord */
	unsigned int fifo_depth;		/* number of PWords that FIFO will hold */
	unsigned int readIntrThreshold;		/* min PWords readable per interrupt */
	unsigned int writeIntrThreshold;	/* min PWords writable per interrupt */
	enum parport_ip32_irq_mode irq_mode;	/* operation mode of the IRQ handler */
	struct completion irq_complete;		/* used to wait for an interrupt */
};
  249. /*--- Debug code -------------------------------------------------------*/
  250. /*
  251. * pr_debug1 - print debug messages
  252. *
  253. * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1
  254. */
  255. #if DEBUG_PARPORT_IP32 >= 1
  256. # define pr_debug1(...) printk(KERN_DEBUG __VA_ARGS__)
  257. #else /* DEBUG_PARPORT_IP32 < 1 */
  258. # define pr_debug1(...) do { } while (0)
  259. #endif
  260. /*
  261. * pr_trace, pr_trace1 - trace function calls
  262. * @p: pointer to &struct parport
  263. * @fmt: printk format string
  264. * @...: parameters for format string
  265. *
  266. * Macros used to trace function calls. The given string is formatted after
  267. * function name. pr_trace() uses pr_debug(), and pr_trace1() uses
  268. * pr_debug1(). __pr_trace() is the low-level macro and is not to be used
  269. * directly.
  270. */
  271. #define __pr_trace(pr, p, fmt, ...) \
  272. pr("%s: %s" fmt "\n", \
  273. ({ const struct parport *__p = (p); \
  274. __p ? __p->name : "parport_ip32"; }), \
  275. __func__ , ##__VA_ARGS__)
  276. #define pr_trace(p, fmt, ...) __pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
  277. #define pr_trace1(p, fmt, ...) __pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
  278. /*
  279. * __pr_probe, pr_probe - print message if @verbose_probing is true
  280. * @p: pointer to &struct parport
  281. * @fmt: printk format string
  282. * @...: parameters for format string
  283. *
  284. * For new lines, use pr_probe(). Use __pr_probe() for continued lines.
  285. */
  286. #define __pr_probe(...) \
  287. do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
  288. #define pr_probe(p, fmt, ...) \
  289. __pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
  290. /*
  291. * parport_ip32_dump_state - print register status of parport
  292. * @p: pointer to &struct parport
  293. * @str: string to add in message
  294. * @show_ecp_config: shall we dump ECP configuration registers too?
  295. *
  296. * This function is only here for debugging purpose, and should be used with
  297. * care. Reading the parallel port registers may have undesired side effects.
  298. * Especially if @show_ecp_config is true, the parallel port is reset.
  299. * This function is only defined if %DEBUG_PARPORT_IP32 >= 2.
  300. */
  301. #if DEBUG_PARPORT_IP32 >= 2
/* Dump the port registers to the kernel log for debugging.  Reading the
 * registers has side effects; with @show_ecp_config the port mode is
 * temporarily switched (see kerneldoc above).  Debug builds only. */
static void parport_ip32_dump_state(struct parport *p, char *str,
				    unsigned int show_ecp_config)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int i;
	printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
	{
		/* Decode ECR: current mode plus individual status bits. */
		static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF",
						     "ECP", "EPP", "???",
						     "TST", "CFG"};
		unsigned int ecr = readb(priv->regs.ecr);
		printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
		printk(" %s",
		       ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
		if (ecr & ECR_nERRINTR)
			printk(",nErrIntrEn");
		if (ecr & ECR_DMAEN)
			printk(",dmaEn");
		if (ecr & ECR_SERVINTR)
			printk(",serviceIntr");
		if (ecr & ECR_F_FULL)
			printk(",f_full");
		if (ecr & ECR_F_EMPTY)
			printk(",f_empty");
		printk("\n");
	}
	if (show_ecp_config) {
		/* cnfgA/cnfgB are only readable in CFG mode: switch to CFG
		 * (going through PS2), read them, then restore the saved
		 * ECR value.  This is why the caller is warned that the
		 * port state is disturbed. */
		unsigned int oecr, cnfgA, cnfgB;
		oecr = readb(priv->regs.ecr);
		writeb(ECR_MODE_PS2, priv->regs.ecr);
		writeb(ECR_MODE_CFG, priv->regs.ecr);
		cnfgA = readb(priv->regs.cnfgA);
		cnfgB = readb(priv->regs.cnfgB);
		writeb(ECR_MODE_PS2, priv->regs.ecr);
		writeb(oecr, priv->regs.ecr);
		printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
		printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
		switch (cnfgA & CNFGA_ID_MASK) {
		case CNFGA_ID_8:
			printk(",8 bits");
			break;
		case CNFGA_ID_16:
			printk(",16 bits");
			break;
		case CNFGA_ID_32:
			printk(",32 bits");
			break;
		default:
			printk(",unknown ID");
			break;
		}
		if (!(cnfgA & CNFGA_nBYTEINTRANS))
			printk(",ByteInTrans");
		if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
			printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
			       ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
		printk("\n");
		printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
		printk(" irq=%u,dma=%u",
		       (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
		       (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
		printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
		if (cnfgB & CNFGB_COMPRESS)
			printk(",compress");
		printk("\n");
	}
	/* Dump DCR twice: i == 0 reads the hardware register, i == 1 uses
	 * the software cache (priv->dcr_cache). */
	for (i = 0; i < 2; i++) {
		unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
		printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
		       i ? "soft" : "hard", dcr);
		printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
		if (dcr & DCR_IRQ)
			printk(",ackIntEn");
		if (!(dcr & DCR_SELECT))
			printk(",nSelectIn");
		if (dcr & DCR_nINIT)
			printk(",nInit");
		if (!(dcr & DCR_AUTOFD))
			printk(",nAutoFD");
		if (!(dcr & DCR_STROBE))
			printk(",nStrobe");
		printk("\n");
	}
/* sep yields ' ' before the first flag printed, ',' between the rest. */
#define sep (f++ ? ',' : ' ')
	{
		unsigned int f = 0;
		unsigned int dsr = readb(priv->regs.dsr);
		printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
		if (!(dsr & DSR_nBUSY))
			printk("%cBusy", sep);
		if (dsr & DSR_nACK)
			printk("%cnAck", sep);
		if (dsr & DSR_PERROR)
			printk("%cPError", sep);
		if (dsr & DSR_SELECT)
			printk("%cSelect", sep);
		if (dsr & DSR_nFAULT)
			printk("%cnFault", sep);
		if (!(dsr & DSR_nPRINT))
			printk("%c(Print)", sep);
		if (dsr & DSR_TIMEOUT)
			printk("%cTimeout", sep);
		printk("\n");
	}
#undef sep
}
  408. #else /* DEBUG_PARPORT_IP32 < 2 */
  409. #define parport_ip32_dump_state(...) do { } while (0)
  410. #endif
  411. /*
  412. * CHECK_EXTRA_BITS - track and log extra bits
  413. * @p: pointer to &struct parport
  414. * @b: byte to inspect
  415. * @m: bit mask of authorized bits
  416. *
  417. * This is used to track and log extra bits that should not be there in
  418. * parport_ip32_write_control() and parport_ip32_frob_control(). It is only
  419. * defined if %DEBUG_PARPORT_IP32 >= 1.
  420. */
  421. #if DEBUG_PARPORT_IP32 >= 1
  422. #define CHECK_EXTRA_BITS(p, b, m) \
  423. do { \
  424. unsigned int __b = (b), __m = (m); \
  425. if (__b & ~__m) \
  426. pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \
  427. "0x%02x/0x%02x\n", \
  428. (p)->name, __func__, #b, __b, __m); \
  429. } while (0)
  430. #else /* DEBUG_PARPORT_IP32 < 1 */
  431. #define CHECK_EXTRA_BITS(...) do { } while (0)
  432. #endif
  433. /*--- IP32 parallel port DMA operations --------------------------------*/
  434. /**
  435. * struct parport_ip32_dma_data - private data needed for DMA operation
  436. * @dir: DMA direction (from or to device)
  437. * @buf: buffer physical address
  438. * @len: buffer length
  439. * @next: address of next bytes to DMA transfer
  440. * @left: number of bytes remaining
  441. * @ctx: next context to write (0: context_a; 1: context_b)
  442. * @irq_on: are the DMA IRQs currently enabled?
  443. * @lock: spinlock to protect access to the structure
  444. */
struct parport_ip32_dma_data {
	enum dma_data_direction dir;	/* DMA direction (from or to device) */
	dma_addr_t buf;			/* buffer physical address */
	dma_addr_t next;		/* address of next bytes to DMA transfer */
	size_t len;			/* buffer length */
	size_t left;			/* number of bytes remaining */
	unsigned int ctx;		/* next context to write (0: a, 1: b) */
	unsigned int irq_on;		/* are the DMA IRQs currently enabled? */
	spinlock_t lock;		/* protects access to this structure */
};
/* Single global DMA state: the driver supports only one port. */
static struct parport_ip32_dma_data parport_ip32_dma;
  456. /**
  457. * parport_ip32_dma_setup_context - setup next DMA context
  458. * @limit: maximum data size for the context
  459. *
  460. * The alignment constraints must be verified in caller function, and the
  461. * parameter @limit must be set accordingly.
  462. */
/* Program the next MACE DMA context (A or B, alternating) with up to
 * @limit bytes of the pending transfer; disables the context IRQs once
 * the whole buffer has been queued.  Called under IRQ context and from
 * parport_ip32_dma_start(), hence the irqsave locking. */
static void parport_ip32_dma_setup_context(unsigned int limit)
{
	unsigned long flags;
	spin_lock_irqsave(&parport_ip32_dma.lock, flags);
	if (parport_ip32_dma.left > 0) {
		/* Note: ctxreg is "volatile" here only because
		 * mace->perif.ctrl.parport.context_a and context_b are
		 * "volatile". */
		volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
			&mace->perif.ctrl.parport.context_a :
			&mace->perif.ctrl.parport.context_b;
		u64 count;
		u64 ctxval;
		if (parport_ip32_dma.left <= limit) {
			/* Final chunk: flag it so the engine knows the
			 * transfer ends here. */
			count = parport_ip32_dma.left;
			ctxval = MACEPAR_CONTEXT_LASTFLAG;
		} else {
			count = limit;
			ctxval = 0;
		}
		pr_trace(NULL,
			 "(%u): 0x%04x:0x%04x, %u -> %u%s",
			 limit,
			 (unsigned int)parport_ip32_dma.buf,
			 (unsigned int)parport_ip32_dma.next,
			 (unsigned int)count,
			 parport_ip32_dma.ctx, ctxval ? "*" : "");
		ctxval |= parport_ip32_dma.next &
			MACEPAR_CONTEXT_BASEADDR_MASK;
		/* The hardware encodes the length as count - 1. */
		ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &
			MACEPAR_CONTEXT_DATALEN_MASK;
		writeq(ctxval, ctxreg);
		/* Advance bookkeeping and flip to the other context. */
		parport_ip32_dma.next += count;
		parport_ip32_dma.left -= count;
		parport_ip32_dma.ctx ^= 1U;
	}
	/* If there is nothing more to send, disable IRQs to avoid to
	 * face an IRQ storm which can lock the machine. Disable them
	 * only once. */
	if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
		pr_debug(PPIP32 "IRQ off (ctx)\n");
		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
}
  510. /**
  511. * parport_ip32_dma_interrupt - DMA interrupt handler
  512. * @irq: interrupt number
  513. * @dev_id: unused
  514. */
  515. static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id)
  516. {
  517. if (parport_ip32_dma.left)
  518. pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
  519. parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
  520. return IRQ_HANDLED;
  521. }
  522. #if DEBUG_PARPORT_IP32
/* Debug-only interrupt handler: just traces the interrupt number.
 * NOTE(review): presumably hooked to the MACE "merr" (error) interrupt;
 * the request_irq() call is not visible in this chunk — confirm there. */
static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
{
	pr_trace1(NULL, "(%d)", irq);
	return IRQ_HANDLED;
}
  528. #endif
  529. /**
  530. * parport_ip32_dma_start - begins a DMA transfer
  531. * @dir: DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
  532. * @addr: pointer to data buffer
  533. * @count: buffer size
  534. *
  535. * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
  536. * correctly balanced.
  537. */
/* Begin a DMA transfer of @count bytes at @addr in direction @dir.
 * Only DMA_TO_DEVICE is implemented (enforced by BUG_ON below).
 * Always returns 0.  Must be balanced by parport_ip32_dma_stop(). */
static int parport_ip32_dma_start(enum dma_data_direction dir,
				  void *addr, size_t count)
{
	unsigned int limit;
	u64 ctrl;
	pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);
	/* FIXME - add support for DMA_FROM_DEVICE. In this case, buffer must
	 * be 64 bytes aligned. */
	BUG_ON(dir != DMA_TO_DEVICE);
	/* Reset DMA controller */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* DMA IRQs should normally be enabled */
	if (!parport_ip32_dma.irq_on) {
		WARN_ON(1);
		enable_irq(MACEISA_PAR_CTXA_IRQ);
		enable_irq(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 1;
	}
	/* Prepare DMA pointers */
	parport_ip32_dma.dir = dir;
	/* NOTE(review): dma_map_single() is called with a NULL device and
	 * its result is not checked with dma_mapping_error(); both need
	 * attention for modern DMA-API usage — confirm against the target
	 * kernel version before changing. */
	parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir);
	parport_ip32_dma.len = count;
	parport_ip32_dma.next = parport_ip32_dma.buf;
	parport_ip32_dma.left = parport_ip32_dma.len;
	parport_ip32_dma.ctx = 0;
	/* Setup DMA direction and first two contexts */
	ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* Single transfer should not cross a 4K page boundary: the first
	 * context is limited to the distance to the next boundary. */
	limit = MACEPAR_CONTEXT_DATA_BOUND -
		(parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
	parport_ip32_dma_setup_context(limit);
	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
	/* Real start of DMA transfer */
	ctrl |= MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	return 0;
}
  577. /**
  578. * parport_ip32_dma_stop - ends a running DMA transfer
  579. *
  580. * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
  581. * correctly balanced.
  582. */
/* Stop a running DMA transfer: quiesce the context IRQs, halt the MACE
 * DMA engine, recover the untransferred byte count into
 * parport_ip32_dma.left, then reset the engine, re-enable IRQs and
 * unmap the buffer.  Must balance a parport_ip32_dma_start() call. */
static void parport_ip32_dma_stop(void)
{
	u64 ctx_a;
	u64 ctx_b;
	u64 ctrl;
	u64 diag;
	size_t res[2]; /* {[0] = res_a, [1] = res_b} */
	pr_trace(NULL, "()");
	/* Disable IRQs */
	spin_lock_irq(&parport_ip32_dma.lock);
	if (parport_ip32_dma.irq_on) {
		pr_debug(PPIP32 "IRQ off (stop)\n");
		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irq(&parport_ip32_dma.lock);
	/* Force IRQ synchronization, even if the IRQs were disabled
	 * elsewhere. */
	synchronize_irq(MACEISA_PAR_CTXA_IRQ);
	synchronize_irq(MACEISA_PAR_CTXB_IRQ);
	/* Stop DMA transfer */
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* Adjust residue (parport_ip32_dma.left) */
	ctx_a = readq(&mace->perif.ctrl.parport.context_a);
	ctx_b = readq(&mace->perif.ctrl.parport.context_b);
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	diag = readq(&mace->perif.ctrl.parport.diagnostic);
	/* Each still-valid context holds DATALEN + 1 pending bytes (the
	 * hardware stores length minus one, see setup_context). */
	res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
		1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
		1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	/* If a transfer was in flight, the diagnostic counter supersedes
	 * the context register for whichever context was in use. */
	if (diag & MACEPAR_DIAG_DMACTIVE)
		res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
			1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
			     MACEPAR_DIAG_CTRSHIFT);
	parport_ip32_dma.left += res[0] + res[1];
	/* Reset DMA controller, and re-enable IRQs */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	pr_debug(PPIP32 "IRQ on (stop)\n");
	enable_irq(MACEISA_PAR_CTXA_IRQ);
	enable_irq(MACEISA_PAR_CTXB_IRQ);
	parport_ip32_dma.irq_on = 1;
	/* NOTE(review): NULL device passed to dma_unmap_single(), matching
	 * the dma_map_single() call in parport_ip32_dma_start(). */
	dma_unmap_single(NULL, parport_ip32_dma.buf, parport_ip32_dma.len,
			 parport_ip32_dma.dir);
}
  636. /**
  637. * parport_ip32_dma_get_residue - get residue from last DMA transfer
  638. *
  639. * Returns the number of bytes remaining from last DMA transfer.
  640. */
  641. static inline size_t parport_ip32_dma_get_residue(void)
  642. {
  643. return parport_ip32_dma.left;
  644. }
  645. /**
  646. * parport_ip32_dma_register - initialize DMA engine
  647. *
  648. * Returns zero for success.
  649. */
  650. static int parport_ip32_dma_register(void)
  651. {
  652. int err;
  653. spin_lock_init(&parport_ip32_dma.lock);
  654. parport_ip32_dma.irq_on = 1;
  655. /* Reset DMA controller */
  656. writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);
  657. /* Request IRQs */
  658. err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
  659. 0, "parport_ip32", NULL);
  660. if (err)
  661. goto fail_a;
  662. err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
  663. 0, "parport_ip32", NULL);
  664. if (err)
  665. goto fail_b;
  666. #if DEBUG_PARPORT_IP32
  667. /* FIXME - what is this IRQ for? */
  668. err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
  669. 0, "parport_ip32", NULL);
  670. if (err)
  671. goto fail_merr;
  672. #endif
  673. return 0;
  674. #if DEBUG_PARPORT_IP32
  675. fail_merr:
  676. free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
  677. #endif
  678. fail_b:
  679. free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
  680. fail_a:
  681. return err;
  682. }
  683. /**
  684. * parport_ip32_dma_unregister - release and free resources for DMA engine
  685. */
  686. static void parport_ip32_dma_unregister(void)
  687. {
  688. #if DEBUG_PARPORT_IP32
  689. free_irq(MACEISA_PAR_MERR_IRQ, NULL);
  690. #endif
  691. free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
  692. free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
  693. }
  694. /*--- Interrupt handlers and associates --------------------------------*/
  695. /**
  696. * parport_ip32_wakeup - wakes up code waiting for an interrupt
  697. * @p: pointer to &struct parport
  698. */
  699. static inline void parport_ip32_wakeup(struct parport *p)
  700. {
  701. struct parport_ip32_private * const priv = p->physport->private_data;
  702. complete(&priv->irq_complete);
  703. }
  704. /**
  705. * parport_ip32_interrupt - interrupt handler
  706. * @irq: interrupt number
  707. * @dev_id: pointer to &struct parport
  708. *
  709. * Caught interrupts are forwarded to the upper parport layer if IRQ_mode is
  710. * %PARPORT_IP32_IRQ_FWD.
  711. */
  712. static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id)
  713. {
  714. struct parport * const p = dev_id;
  715. struct parport_ip32_private * const priv = p->physport->private_data;
  716. enum parport_ip32_irq_mode irq_mode = priv->irq_mode;
  717. switch (irq_mode) {
  718. case PARPORT_IP32_IRQ_FWD:
  719. return parport_irq_handler(irq, dev_id);
  720. case PARPORT_IP32_IRQ_HERE:
  721. parport_ip32_wakeup(p);
  722. break;
  723. }
  724. return IRQ_HANDLED;
  725. }
  726. /*--- Some utility function to manipulate ECR register -----------------*/
  727. /**
  728. * parport_ip32_read_econtrol - read contents of the ECR register
  729. * @p: pointer to &struct parport
  730. */
  731. static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
  732. {
  733. struct parport_ip32_private * const priv = p->physport->private_data;
  734. return readb(priv->regs.ecr);
  735. }
  736. /**
  737. * parport_ip32_write_econtrol - write new contents to the ECR register
  738. * @p: pointer to &struct parport
  739. * @c: new value to write
  740. */
  741. static inline void parport_ip32_write_econtrol(struct parport *p,
  742. unsigned int c)
  743. {
  744. struct parport_ip32_private * const priv = p->physport->private_data;
  745. writeb(c, priv->regs.ecr);
  746. }
  747. /**
  748. * parport_ip32_frob_econtrol - change bits from the ECR register
  749. * @p: pointer to &struct parport
  750. * @mask: bit mask of bits to change
  751. * @val: new value for changed bits
  752. *
  753. * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits
  754. * in @val, and write the result to the ECR.
  755. */
  756. static inline void parport_ip32_frob_econtrol(struct parport *p,
  757. unsigned int mask,
  758. unsigned int val)
  759. {
  760. unsigned int c;
  761. c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
  762. parport_ip32_write_econtrol(p, c);
  763. }
  764. /**
  765. * parport_ip32_set_mode - change mode of ECP port
  766. * @p: pointer to &struct parport
  767. * @mode: new mode to write in ECR
  768. *
  769. * ECR is reset in a sane state (interrupts and DMA disabled), and placed in
  770. * mode @mode. Go through PS2 mode if needed.
  771. */
  772. static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
  773. {
  774. unsigned int omode;
  775. mode &= ECR_MODE_MASK;
  776. omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
  777. if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
  778. || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
  779. /* We have to go through PS2 mode */
  780. unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
  781. parport_ip32_write_econtrol(p, ecr);
  782. }
  783. parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
  784. }
  785. /*--- Basic functions needed for parport -------------------------------*/
  786. /**
  787. * parport_ip32_read_data - return current contents of the DATA register
  788. * @p: pointer to &struct parport
  789. */
  790. static inline unsigned char parport_ip32_read_data(struct parport *p)
  791. {
  792. struct parport_ip32_private * const priv = p->physport->private_data;
  793. return readb(priv->regs.data);
  794. }
  795. /**
  796. * parport_ip32_write_data - set new contents for the DATA register
  797. * @p: pointer to &struct parport
  798. * @d: new value to write
  799. */
  800. static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
  801. {
  802. struct parport_ip32_private * const priv = p->physport->private_data;
  803. writeb(d, priv->regs.data);
  804. }
  805. /**
  806. * parport_ip32_read_status - return current contents of the DSR register
  807. * @p: pointer to &struct parport
  808. */
  809. static inline unsigned char parport_ip32_read_status(struct parport *p)
  810. {
  811. struct parport_ip32_private * const priv = p->physport->private_data;
  812. return readb(priv->regs.dsr);
  813. }
  814. /**
  815. * __parport_ip32_read_control - return cached contents of the DCR register
  816. * @p: pointer to &struct parport
  817. */
  818. static inline unsigned int __parport_ip32_read_control(struct parport *p)
  819. {
  820. struct parport_ip32_private * const priv = p->physport->private_data;
  821. return priv->dcr_cache; /* use soft copy */
  822. }
  823. /**
  824. * __parport_ip32_write_control - set new contents for the DCR register
  825. * @p: pointer to &struct parport
  826. * @c: new value to write
  827. */
  828. static inline void __parport_ip32_write_control(struct parport *p,
  829. unsigned int c)
  830. {
  831. struct parport_ip32_private * const priv = p->physport->private_data;
  832. CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
  833. c &= priv->dcr_writable; /* only writable bits */
  834. writeb(c, priv->regs.dcr);
  835. priv->dcr_cache = c; /* update soft copy */
  836. }
  837. /**
  838. * __parport_ip32_frob_control - change bits from the DCR register
  839. * @p: pointer to &struct parport
  840. * @mask: bit mask of bits to change
  841. * @val: new value for changed bits
  842. *
  843. * This is equivalent to read from the DCR, mask out the bits in @mask,
  844. * exclusive-or with the bits in @val, and write the result to the DCR.
  845. * Actually, the cached contents of the DCR is used.
  846. */
  847. static inline void __parport_ip32_frob_control(struct parport *p,
  848. unsigned int mask,
  849. unsigned int val)
  850. {
  851. unsigned int c;
  852. c = (__parport_ip32_read_control(p) & ~mask) ^ val;
  853. __parport_ip32_write_control(p, c);
  854. }
  855. /**
  856. * parport_ip32_read_control - return cached contents of the DCR register
  857. * @p: pointer to &struct parport
  858. *
  859. * The return value is masked so as to only return the value of %DCR_STROBE,
  860. * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  861. */
  862. static inline unsigned char parport_ip32_read_control(struct parport *p)
  863. {
  864. const unsigned int rm =
  865. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  866. return __parport_ip32_read_control(p) & rm;
  867. }
  868. /**
  869. * parport_ip32_write_control - set new contents for the DCR register
  870. * @p: pointer to &struct parport
  871. * @c: new value to write
  872. *
  873. * The value is masked so as to only change the value of %DCR_STROBE,
  874. * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  875. */
  876. static inline void parport_ip32_write_control(struct parport *p,
  877. unsigned char c)
  878. {
  879. const unsigned int wm =
  880. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  881. CHECK_EXTRA_BITS(p, c, wm);
  882. __parport_ip32_frob_control(p, wm, c & wm);
  883. }
  884. /**
  885. * parport_ip32_frob_control - change bits from the DCR register
  886. * @p: pointer to &struct parport
  887. * @mask: bit mask of bits to change
  888. * @val: new value for changed bits
  889. *
  890. * This differs from __parport_ip32_frob_control() in that it only allows to
  891. * change the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  892. */
  893. static inline unsigned char parport_ip32_frob_control(struct parport *p,
  894. unsigned char mask,
  895. unsigned char val)
  896. {
  897. const unsigned int wm =
  898. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  899. CHECK_EXTRA_BITS(p, mask, wm);
  900. CHECK_EXTRA_BITS(p, val, wm);
  901. __parport_ip32_frob_control(p, mask & wm, val & wm);
  902. return parport_ip32_read_control(p);
  903. }
  904. /**
  905. * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK
  906. * @p: pointer to &struct parport
  907. */
  908. static inline void parport_ip32_disable_irq(struct parport *p)
  909. {
  910. __parport_ip32_frob_control(p, DCR_IRQ, 0);
  911. }
  912. /**
  913. * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK
  914. * @p: pointer to &struct parport
  915. */
  916. static inline void parport_ip32_enable_irq(struct parport *p)
  917. {
  918. __parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
  919. }
  920. /**
  921. * parport_ip32_data_forward - enable host-to-peripheral communications
  922. * @p: pointer to &struct parport
  923. *
  924. * Enable the data line drivers, for 8-bit host-to-peripheral communications.
  925. */
  926. static inline void parport_ip32_data_forward(struct parport *p)
  927. {
  928. __parport_ip32_frob_control(p, DCR_DIR, 0);
  929. }
  930. /**
  931. * parport_ip32_data_reverse - enable peripheral-to-host communications
  932. * @p: pointer to &struct parport
  933. *
  934. * Place the data bus in a high impedance state, if @p->modes has the
  935. * PARPORT_MODE_TRISTATE bit set.
  936. */
  937. static inline void parport_ip32_data_reverse(struct parport *p)
  938. {
  939. __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
  940. }
  941. /**
  942. * parport_ip32_init_state - for core parport code
  943. * @dev: pointer to &struct pardevice
  944. * @s: pointer to &struct parport_state to initialize
  945. */
  946. static void parport_ip32_init_state(struct pardevice *dev,
  947. struct parport_state *s)
  948. {
  949. s->u.ip32.dcr = DCR_SELECT | DCR_nINIT;
  950. s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
  951. }
  952. /**
  953. * parport_ip32_save_state - for core parport code
  954. * @p: pointer to &struct parport
  955. * @s: pointer to &struct parport_state to save state to
  956. */
  957. static void parport_ip32_save_state(struct parport *p,
  958. struct parport_state *s)
  959. {
  960. s->u.ip32.dcr = __parport_ip32_read_control(p);
  961. s->u.ip32.ecr = parport_ip32_read_econtrol(p);
  962. }
  963. /**
  964. * parport_ip32_restore_state - for core parport code
  965. * @p: pointer to &struct parport
  966. * @s: pointer to &struct parport_state to restore state from
  967. */
  968. static void parport_ip32_restore_state(struct parport *p,
  969. struct parport_state *s)
  970. {
  971. parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
  972. parport_ip32_write_econtrol(p, s->u.ip32.ecr);
  973. __parport_ip32_write_control(p, s->u.ip32.dcr);
  974. }
  975. /*--- EPP mode functions -----------------------------------------------*/
  976. /**
  977. * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode
  978. * @p: pointer to &struct parport
  979. *
  980. * Returns 1 if the Timeout bit is clear, and 0 otherwise.
  981. */
  982. static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
  983. {
  984. struct parport_ip32_private * const priv = p->physport->private_data;
  985. unsigned int cleared;
  986. if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
  987. cleared = 1;
  988. else {
  989. unsigned int r;
  990. /* To clear timeout some chips require double read */
  991. parport_ip32_read_status(p);
  992. r = parport_ip32_read_status(p);
  993. /* Some reset by writing 1 */
  994. writeb(r | DSR_TIMEOUT, priv->regs.dsr);
  995. /* Others by writing 0 */
  996. writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);
  997. r = parport_ip32_read_status(p);
  998. cleared = !(r & DSR_TIMEOUT);
  999. }
  1000. pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
  1001. return cleared;
  1002. }
  1003. /**
  1004. * parport_ip32_epp_read - generic EPP read function
  1005. * @eppreg: I/O register to read from
  1006. * @p: pointer to &struct parport
  1007. * @buf: buffer to store read data
  1008. * @len: length of buffer @buf
  1009. * @flags: may be PARPORT_EPP_FAST
  1010. */
  1011. static size_t parport_ip32_epp_read(void __iomem *eppreg,
  1012. struct parport *p, void *buf,
  1013. size_t len, int flags)
  1014. {
  1015. struct parport_ip32_private * const priv = p->physport->private_data;
  1016. size_t got;
  1017. parport_ip32_set_mode(p, ECR_MODE_EPP);
  1018. parport_ip32_data_reverse(p);
  1019. parport_ip32_write_control(p, DCR_nINIT);
  1020. if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
  1021. readsb(eppreg, buf, len);
  1022. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1023. parport_ip32_clear_epp_timeout(p);
  1024. return -EIO;
  1025. }
  1026. got = len;
  1027. } else {
  1028. u8 *bufp = buf;
  1029. for (got = 0; got < len; got++) {
  1030. *bufp++ = readb(eppreg);
  1031. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1032. parport_ip32_clear_epp_timeout(p);
  1033. break;
  1034. }
  1035. }
  1036. }
  1037. parport_ip32_data_forward(p);
  1038. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1039. return got;
  1040. }
  1041. /**
  1042. * parport_ip32_epp_write - generic EPP write function
  1043. * @eppreg: I/O register to write to
  1044. * @p: pointer to &struct parport
  1045. * @buf: buffer of data to write
  1046. * @len: length of buffer @buf
  1047. * @flags: may be PARPORT_EPP_FAST
  1048. */
  1049. static size_t parport_ip32_epp_write(void __iomem *eppreg,
  1050. struct parport *p, const void *buf,
  1051. size_t len, int flags)
  1052. {
  1053. struct parport_ip32_private * const priv = p->physport->private_data;
  1054. size_t written;
  1055. parport_ip32_set_mode(p, ECR_MODE_EPP);
  1056. parport_ip32_data_forward(p);
  1057. parport_ip32_write_control(p, DCR_nINIT);
  1058. if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
  1059. writesb(eppreg, buf, len);
  1060. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1061. parport_ip32_clear_epp_timeout(p);
  1062. return -EIO;
  1063. }
  1064. written = len;
  1065. } else {
  1066. const u8 *bufp = buf;
  1067. for (written = 0; written < len; written++) {
  1068. writeb(*bufp++, eppreg);
  1069. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1070. parport_ip32_clear_epp_timeout(p);
  1071. break;
  1072. }
  1073. }
  1074. }
  1075. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1076. return written;
  1077. }
  1078. /**
  1079. * parport_ip32_epp_read_data - read a block of data in EPP mode
  1080. * @p: pointer to &struct parport
  1081. * @buf: buffer to store read data
  1082. * @len: length of buffer @buf
  1083. * @flags: may be PARPORT_EPP_FAST
  1084. */
  1085. static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
  1086. size_t len, int flags)
  1087. {
  1088. struct parport_ip32_private * const priv = p->physport->private_data;
  1089. return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
  1090. }
  1091. /**
  1092. * parport_ip32_epp_write_data - write a block of data in EPP mode
  1093. * @p: pointer to &struct parport
  1094. * @buf: buffer of data to write
  1095. * @len: length of buffer @buf
  1096. * @flags: may be PARPORT_EPP_FAST
  1097. */
  1098. static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
  1099. size_t len, int flags)
  1100. {
  1101. struct parport_ip32_private * const priv = p->physport->private_data;
  1102. return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
  1103. }
  1104. /**
  1105. * parport_ip32_epp_read_addr - read a block of addresses in EPP mode
  1106. * @p: pointer to &struct parport
  1107. * @buf: buffer to store read data
  1108. * @len: length of buffer @buf
  1109. * @flags: may be PARPORT_EPP_FAST
  1110. */
  1111. static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
  1112. size_t len, int flags)
  1113. {
  1114. struct parport_ip32_private * const priv = p->physport->private_data;
  1115. return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
  1116. }
  1117. /**
  1118. * parport_ip32_epp_write_addr - write a block of addresses in EPP mode
  1119. * @p: pointer to &struct parport
  1120. * @buf: buffer of data to write
  1121. * @len: length of buffer @buf
  1122. * @flags: may be PARPORT_EPP_FAST
  1123. */
  1124. static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
  1125. size_t len, int flags)
  1126. {
  1127. struct parport_ip32_private * const priv = p->physport->private_data;
  1128. return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
  1129. }
  1130. /*--- ECP mode functions (FIFO) ----------------------------------------*/
  1131. /**
  1132. * parport_ip32_fifo_wait_break - check if the waiting function should return
  1133. * @p: pointer to &struct parport
  1134. * @expire: timeout expiring date, in jiffies
  1135. *
  1136. * parport_ip32_fifo_wait_break() checks if the waiting function should return
  1137. * immediately or not. The break conditions are:
  1138. * - expired timeout;
  1139. * - a pending signal;
  1140. * - nFault asserted low.
  1141. * This function also calls cond_resched().
  1142. */
  1143. static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
  1144. unsigned long expire)
  1145. {
  1146. cond_resched();
  1147. if (time_after(jiffies, expire)) {
  1148. pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
  1149. return 1;
  1150. }
  1151. if (signal_pending(current)) {
  1152. pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
  1153. return 1;
  1154. }
  1155. if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
  1156. pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
  1157. return 1;
  1158. }
  1159. return 0;
  1160. }
  1161. /**
  1162. * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
  1163. * @p: pointer to &struct parport
  1164. *
  1165. * Returns the number of bytes that can safely be written in the FIFO. A
  1166. * return value of zero means that the calling function should terminate as
  1167. * fast as possible.
  1168. */
  1169. static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
  1170. {
  1171. struct parport_ip32_private * const priv = p->physport->private_data;
  1172. struct parport * const physport = p->physport;
  1173. unsigned long expire;
  1174. unsigned int count;
  1175. unsigned int ecr;
  1176. expire = jiffies + physport->cad->timeout;
  1177. count = 0;
  1178. while (1) {
  1179. if (parport_ip32_fifo_wait_break(p, expire))
  1180. break;
  1181. /* Check FIFO state. We do nothing when the FIFO is nor full,
  1182. * nor empty. It appears that the FIFO full bit is not always
  1183. * reliable, the FIFO state is sometimes wrongly reported, and
  1184. * the chip gets confused if we give it another byte. */
  1185. ecr = parport_ip32_read_econtrol(p);
  1186. if (ecr & ECR_F_EMPTY) {
  1187. /* FIFO is empty, fill it up */
  1188. count = priv->fifo_depth;
  1189. break;
  1190. }
  1191. /* Wait a moment... */
  1192. udelay(FIFO_POLLING_INTERVAL);
  1193. } /* while (1) */
  1194. return count;
  1195. }
  1196. /**
  1197. * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
  1198. * @p: pointer to &struct parport
  1199. *
  1200. * Returns the number of bytes that can safely be written in the FIFO. A
  1201. * return value of zero means that the calling function should terminate as
  1202. * fast as possible.
  1203. */
  1204. static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
  1205. {
  1206. static unsigned int lost_interrupt = 0;
  1207. struct parport_ip32_private * const priv = p->physport->private_data;
  1208. struct parport * const physport = p->physport;
  1209. unsigned long nfault_timeout;
  1210. unsigned long expire;
  1211. unsigned int count;
  1212. unsigned int ecr;
  1213. nfault_timeout = min((unsigned long)physport->cad->timeout,
  1214. msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
  1215. expire = jiffies + physport->cad->timeout;
  1216. count = 0;
  1217. while (1) {
  1218. if (parport_ip32_fifo_wait_break(p, expire))
  1219. break;
  1220. /* Initialize mutex used to take interrupts into account */
  1221. INIT_COMPLETION(priv->irq_complete);
  1222. /* Enable serviceIntr */
  1223. parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
  1224. /* Enabling serviceIntr while the FIFO is empty does not
  1225. * always generate an interrupt, so check for emptiness
  1226. * now. */
  1227. ecr = parport_ip32_read_econtrol(p);
  1228. if (!(ecr & ECR_F_EMPTY)) {
  1229. /* FIFO is not empty: wait for an interrupt or a
  1230. * timeout to occur */
  1231. wait_for_completion_interruptible_timeout(
  1232. &priv->irq_complete, nfault_timeout);
  1233. ecr = parport_ip32_read_econtrol(p);
  1234. if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
  1235. && !lost_interrupt) {
  1236. printk(KERN_WARNING PPIP32
  1237. "%s: lost interrupt in %s\n",
  1238. p->name, __func__);
  1239. lost_interrupt = 1;
  1240. }
  1241. }
  1242. /* Disable serviceIntr */
  1243. parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);
  1244. /* Check FIFO state */
  1245. if (ecr & ECR_F_EMPTY) {
  1246. /* FIFO is empty, fill it up */
  1247. count = priv->fifo_depth;
  1248. break;
  1249. } else if (ecr & ECR_SERVINTR) {
  1250. /* FIFO is not empty, but we know that can safely push
  1251. * writeIntrThreshold bytes into it */
  1252. count = priv->writeIntrThreshold;
  1253. break;
  1254. }
  1255. /* FIFO is not empty, and we did not get any interrupt.
  1256. * Either it's time to check for nFault, or a signal is
  1257. * pending. This is verified in
  1258. * parport_ip32_fifo_wait_break(), so we continue the loop. */
  1259. } /* while (1) */
  1260. return count;
  1261. }
  1262. /**
  1263. * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
  1264. * @p: pointer to &struct parport
  1265. * @buf: buffer of data to write
  1266. * @len: length of buffer @buf
  1267. *
  1268. * Uses PIO to write the contents of the buffer @buf into the parallel port
  1269. * FIFO. Returns the number of bytes that were actually written. It can work
  1270. * with or without the help of interrupts. The parallel port must be
  1271. * correctly initialized before calling parport_ip32_fifo_write_block_pio().
  1272. */
  1273. static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
  1274. const void *buf, size_t len)
  1275. {
  1276. struct parport_ip32_private * const priv = p->physport->private_data;
  1277. const u8 *bufp = buf;
  1278. size_t left = len;
  1279. priv->irq_mode = PARPORT_IP32_IRQ_HERE;
  1280. while (left > 0) {
  1281. unsigned int count;
  1282. count = (p->irq == PARPORT_IRQ_NONE) ?
  1283. parport_ip32_fwp_wait_polling(p) :
  1284. parport_ip32_fwp_wait_interrupt(p);
  1285. if (count == 0)
  1286. break; /* Transmission should be stopped */
  1287. if (count > left)
  1288. count = left;
  1289. if (count == 1) {
  1290. writeb(*bufp, priv->regs.fifo);
  1291. bufp++, left--;
  1292. } else {
  1293. writesb(priv->regs.fifo, bufp, count);
  1294. bufp += count, left -= count;
  1295. }
  1296. }
  1297. priv->irq_mode = PARPORT_IP32_IRQ_FWD;
  1298. return len - left;
  1299. }
  1300. /**
  1301. * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
  1302. * @p: pointer to &struct parport
  1303. * @buf: buffer of data to write
  1304. * @len: length of buffer @buf
  1305. *
  1306. * Uses DMA to write the contents of the buffer @buf into the parallel port
  1307. * FIFO. Returns the number of bytes that were actually written. The
  1308. * parallel port must be correctly initialized before calling
  1309. * parport_ip32_fifo_write_block_dma().
  1310. */
  1311. static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
  1312. const void *buf, size_t len)
  1313. {
  1314. struct parport_ip32_private * const priv = p->physport->private_data;
  1315. struct parport * const physport = p->physport;
  1316. unsigned long nfault_timeout;
  1317. unsigned long expire;
  1318. size_t written;
  1319. unsigned int ecr;
  1320. priv->irq_mode = PARPORT_IP32_IRQ_HERE;
  1321. parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
  1322. INIT_COMPLETION(priv->irq_complete);
  1323. parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
  1324. nfault_timeout = min((unsigned long)physport->cad->timeout,
  1325. msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
  1326. expire = jiffies + physport->cad->timeout;
  1327. while (1) {
  1328. if (parport_ip32_fifo_wait_break(p, expire))
  1329. break;
  1330. wait_for_completion_interruptible_timeout(&priv->irq_complete,
  1331. nfault_timeout);
  1332. ecr = parport_ip32_read_econtrol(p);
  1333. if (ecr & ECR_SERVINTR)
  1334. break; /* DMA transfer just finished */
  1335. }
  1336. parport_ip32_dma_stop();
  1337. written = len - parport_ip32_dma_get_residue();
  1338. priv->irq_mode = PARPORT_IP32_IRQ_FWD;
  1339. return written;
  1340. }
  1341. /**
  1342. * parport_ip32_fifo_write_block - write a block of data
  1343. * @p: pointer to &struct parport
  1344. * @buf: buffer of data to write
  1345. * @len: length of buffer @buf
  1346. *
  1347. * Uses PIO or DMA to write the contents of the buffer @buf into the parallel
  1348. * p FIFO. Returns the number of bytes that were actually written.
  1349. */
  1350. static size_t parport_ip32_fifo_write_block(struct parport *p,
  1351. const void *buf, size_t len)
  1352. {
  1353. size_t written = 0;
  1354. if (len)
  1355. /* FIXME - Maybe some threshold value should be set for @len
  1356. * under which we revert to PIO mode? */
  1357. written = (p->modes & PARPORT_MODE_DMA) ?
  1358. parport_ip32_fifo_write_block_dma(p, buf, len) :
  1359. parport_ip32_fifo_write_block_pio(p, buf, len);
  1360. return written;
  1361. }
  1362. /**
  1363. * parport_ip32_drain_fifo - wait for FIFO to empty
  1364. * @p: pointer to &struct parport
  1365. * @timeout: timeout, in jiffies
  1366. *
  1367. * This function waits for FIFO to empty. It returns 1 when FIFO is empty, or
  1368. * 0 if the timeout @timeout is reached before, or if a signal is pending.
  1369. */
  1370. static unsigned int parport_ip32_drain_fifo(struct parport *p,
  1371. unsigned long timeout)
  1372. {
  1373. unsigned long expire = jiffies + timeout;
  1374. unsigned int polling_interval;
  1375. unsigned int counter;
  1376. /* Busy wait for approx. 200us */
  1377. for (counter = 0; counter < 40; counter++) {
  1378. if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
  1379. break;
  1380. if (time_after(jiffies, expire))
  1381. break;
  1382. if (signal_pending(current))
  1383. break;
  1384. udelay(5);
  1385. }
  1386. /* Poll slowly. Polling interval starts with 1 millisecond, and is
  1387. * increased exponentially until 128. */
  1388. polling_interval = 1; /* msecs */
  1389. while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
  1390. if (time_after_eq(jiffies, expire))
  1391. break;
  1392. msleep_interruptible(polling_interval);
  1393. if (signal_pending(current))
  1394. break;
  1395. if (polling_interval < 128)
  1396. polling_interval *= 2;
  1397. }
  1398. return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
  1399. }
  1400. /**
  1401. * parport_ip32_get_fifo_residue - reset FIFO
  1402. * @p: pointer to &struct parport
  1403. * @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
  1404. *
  1405. * This function resets FIFO, and returns the number of bytes remaining in it.
  1406. */
  1407. static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
  1408. unsigned int mode)
  1409. {
  1410. struct parport_ip32_private * const priv = p->physport->private_data;
  1411. unsigned int residue;
  1412. unsigned int cnfga;
  1413. /* FIXME - We are missing one byte if the printer is off-line. I
  1414. * don't know how to detect this. It looks that the full bit is not
  1415. * always reliable. For the moment, the problem is avoided in most
  1416. * cases by testing for BUSY in parport_ip32_compat_write_data().
  1417. */
  1418. if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
  1419. residue = 0;
  1420. else {
  1421. pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);
  1422. /* Stop all transfers.
  1423. *
  1424. * Microsoft's document instructs to drive DCR_STROBE to 0,
  1425. * but it doesn't work (at least in Compatibility mode, not
  1426. * tested in ECP mode). Switching directly to Test mode (as
  1427. * in parport_pc) is not an option: it does confuse the port,
  1428. * ECP service interrupts are no more working after that. A
  1429. * hard reset is then needed to revert to a sane state.
  1430. *
  1431. * Let's hope that the FIFO is really stuck and that the
  1432. * peripheral doesn't wake up now.
  1433. */
  1434. parport_ip32_frob_control(p, DCR_STROBE, 0);
  1435. /* Fill up FIFO */
  1436. for (residue = priv->fifo_depth; residue > 0; residue--) {
  1437. if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
  1438. break;
  1439. writeb(0x00, priv->regs.fifo);
  1440. }
  1441. }
  1442. if (residue)
  1443. pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
  1444. p->name, residue,
  1445. (residue == 1) ? " was" : "s were");
  1446. /* Now reset the FIFO */
  1447. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1448. /* Host recovery for ECP mode */
  1449. if (mode == ECR_MODE_ECP) {
  1450. parport_ip32_data_reverse(p);
  1451. parport_ip32_frob_control(p, DCR_nINIT, 0);
  1452. if (parport_wait_peripheral(p, DSR_PERROR, 0))
  1453. pr_debug1(PPIP32 "%s: PEerror timeout 1 in %s\n",
  1454. p->name, __func__);
  1455. parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
  1456. parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
  1457. if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
  1458. pr_debug1(PPIP32 "%s: PEerror timeout 2 in %s\n",
  1459. p->name, __func__);
  1460. }
  1461. /* Adjust residue if needed */
  1462. parport_ip32_set_mode(p, ECR_MODE_CFG);
  1463. cnfga = readb(priv->regs.cnfgA);
  1464. if (!(cnfga & CNFGA_nBYTEINTRANS)) {
  1465. pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
  1466. p->name, cnfga);
  1467. pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
  1468. p->name);
  1469. residue++;
  1470. }
  1471. /* Don't care about partial PWords since we do not support
  1472. * PWord != 1 byte. */
  1473. /* Back to forward PS2 mode. */
  1474. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1475. parport_ip32_data_forward(p);
  1476. return residue;
  1477. }
  1478. /**
  1479. * parport_ip32_compat_write_data - write a block of data in SPP mode
  1480. * @p: pointer to &struct parport
  1481. * @buf: buffer of data to write
  1482. * @len: length of buffer @buf
  1483. * @flags: ignored
  1484. */
  1485. static size_t parport_ip32_compat_write_data(struct parport *p,
  1486. const void *buf, size_t len,
  1487. int flags)
  1488. {
  1489. static unsigned int ready_before = 1;
  1490. struct parport_ip32_private * const priv = p->physport->private_data;
  1491. struct parport * const physport = p->physport;
  1492. size_t written = 0;
  1493. /* Special case: a timeout of zero means we cannot call schedule().
  1494. * Also if O_NONBLOCK is set then use the default implementation. */
  1495. if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
  1496. return parport_ieee1284_write_compat(p, buf, len, flags);
  1497. /* Reset FIFO, go in forward mode, and disable ackIntEn */
  1498. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1499. parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
  1500. parport_ip32_data_forward(p);
  1501. parport_ip32_disable_irq(p);
  1502. parport_ip32_set_mode(p, ECR_MODE_PPF);
  1503. physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
  1504. /* Wait for peripheral to become ready */
  1505. if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
  1506. DSR_nBUSY | DSR_nFAULT)) {
  1507. /* Avoid to flood the logs */
  1508. if (ready_before)
  1509. printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
  1510. p->name, __func__);
  1511. ready_before = 0;
  1512. goto stop;
  1513. }
  1514. ready_before = 1;
  1515. written = parport_ip32_fifo_write_block(p, buf, len);
  1516. /* Wait FIFO to empty. Timeout is proportional to FIFO_depth. */
  1517. parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
  1518. /* Check for a potential residue */
  1519. written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);
  1520. /* Then, wait for BUSY to get low. */
  1521. if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
  1522. printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
  1523. p->name, __func__);
  1524. stop:
  1525. /* Reset FIFO */
  1526. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1527. physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  1528. return written;
  1529. }
  1530. /*
  1531. * FIXME - Insert here parport_ip32_ecp_read_data().
  1532. */
  1533. /**
  1534. * parport_ip32_ecp_write_data - write a block of data in ECP mode
  1535. * @p: pointer to &struct parport
  1536. * @buf: buffer of data to write
  1537. * @len: length of buffer @buf
  1538. * @flags: ignored
  1539. */
  1540. static size_t parport_ip32_ecp_write_data(struct parport *p,
  1541. const void *buf, size_t len,
  1542. int flags)
  1543. {
  1544. static unsigned int ready_before = 1;
  1545. struct parport_ip32_private * const priv = p->physport->private_data;
  1546. struct parport * const physport = p->physport;
  1547. size_t written = 0;
  1548. /* Special case: a timeout of zero means we cannot call schedule().
  1549. * Also if O_NONBLOCK is set then use the default implementation. */
  1550. if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
  1551. return parport_ieee1284_ecp_write_data(p, buf, len, flags);
  1552. /* Negotiate to forward mode if necessary. */
  1553. if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
  1554. /* Event 47: Set nInit high. */
  1555. parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
  1556. DCR_nINIT | DCR_AUTOFD);
  1557. /* Event 49: PError goes high. */
  1558. if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
  1559. printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s",
  1560. p->name, __func__);
  1561. physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
  1562. return 0;
  1563. }
  1564. }
  1565. /* Reset FIFO, go in forward mode, and disable ackIntEn */
  1566. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1567. parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
  1568. parport_ip32_data_forward(p);
  1569. parport_ip32_disable_irq(p);
  1570. parport_ip32_set_mode(p, ECR_MODE_ECP);
  1571. physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
  1572. /* Wait for peripheral to become ready */
  1573. if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
  1574. DSR_nBUSY | DSR_nFAULT)) {
  1575. /* Avoid to flood the logs */
  1576. if (ready_before)
  1577. printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
  1578. p->name, __func__);
  1579. ready_before = 0;
  1580. goto stop;
  1581. }
  1582. ready_before = 1;
  1583. written = parport_ip32_fifo_write_block(p, buf, len);
  1584. /* Wait FIFO to empty. Timeout is proportional to FIFO_depth. */
  1585. parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
  1586. /* Check for a potential residue */
  1587. written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);
  1588. /* Then, wait for BUSY to get low. */
  1589. if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
  1590. printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
  1591. p->name, __func__);
  1592. stop:
  1593. /* Reset FIFO */
  1594. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1595. physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  1596. return written;
  1597. }
  1598. /*
  1599. * FIXME - Insert here parport_ip32_ecp_write_addr().
  1600. */
/*--- Default parport operations ---------------------------------------*/

/* Template operation table.  Marked __initdata because it is only read at
 * probe time: parport_ip32_probe_port() copies it into a kmalloc'ed
 * structure and then overrides individual entries (compat/EPP/ECP) with
 * hardware-accelerated implementations depending on detected features.
 * The FIFO/EPP/ECP entries below default to the generic software
 * IEEE 1284 implementations. */
static __initdata struct parport_operations parport_ip32_ops = {
	.write_data		= parport_ip32_write_data,
	.read_data		= parport_ip32_read_data,
	.write_control		= parport_ip32_write_control,
	.read_control		= parport_ip32_read_control,
	.frob_control		= parport_ip32_frob_control,
	.read_status		= parport_ip32_read_status,
	.enable_irq		= parport_ip32_enable_irq,
	.disable_irq		= parport_ip32_disable_irq,
	.data_forward		= parport_ip32_data_forward,
	.data_reverse		= parport_ip32_data_reverse,
	.init_state		= parport_ip32_init_state,
	.save_state		= parport_ip32_save_state,
	.restore_state		= parport_ip32_restore_state,
	.epp_write_data		= parport_ieee1284_epp_write_data,
	.epp_read_data		= parport_ieee1284_epp_read_data,
	.epp_write_addr		= parport_ieee1284_epp_write_addr,
	.epp_read_addr		= parport_ieee1284_epp_read_addr,
	.ecp_write_data		= parport_ieee1284_ecp_write_data,
	.ecp_read_data		= parport_ieee1284_ecp_read_data,
	.ecp_write_addr		= parport_ieee1284_ecp_write_addr,
	.compat_write_data	= parport_ieee1284_write_compat,
	.nibble_read_data	= parport_ieee1284_read_nibble,
	.byte_read_data		= parport_ieee1284_read_byte,
	.owner			= THIS_MODULE,
};
  1628. /*--- Device detection -------------------------------------------------*/
  1629. /**
  1630. * parport_ip32_ecp_supported - check for an ECP port
  1631. * @p: pointer to the &parport structure
  1632. *
  1633. * Returns 1 if an ECP port is found, and 0 otherwise. This function actually
  1634. * checks if an Extended Control Register seems to be present. On successful
  1635. * return, the port is placed in SPP mode.
  1636. */
  1637. static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
  1638. {
  1639. struct parport_ip32_private * const priv = p->physport->private_data;
  1640. unsigned int ecr;
  1641. ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
  1642. writeb(ecr, priv->regs.ecr);
  1643. if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
  1644. goto fail;
  1645. pr_probe(p, "Found working ECR register\n");
  1646. parport_ip32_set_mode(p, ECR_MODE_SPP);
  1647. parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
  1648. return 1;
  1649. fail:
  1650. pr_probe(p, "ECR register not found\n");
  1651. return 0;
  1652. }
  1653. /**
  1654. * parport_ip32_fifo_supported - check for FIFO parameters
  1655. * @p: pointer to the &parport structure
  1656. *
  1657. * Check for FIFO parameters of an Extended Capabilities Port. Returns 1 on
  1658. * success, and 0 otherwise. Adjust FIFO parameters in the parport structure.
  1659. * On return, the port is placed in SPP mode.
  1660. */
  1661. static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
  1662. {
  1663. struct parport_ip32_private * const priv = p->physport->private_data;
  1664. unsigned int configa, configb;
  1665. unsigned int pword;
  1666. unsigned int i;
  1667. /* Configuration mode */
  1668. parport_ip32_set_mode(p, ECR_MODE_CFG);
  1669. configa = readb(priv->regs.cnfgA);
  1670. configb = readb(priv->regs.cnfgB);
  1671. /* Find out PWord size */
  1672. switch (configa & CNFGA_ID_MASK) {
  1673. case CNFGA_ID_8:
  1674. pword = 1;
  1675. break;
  1676. case CNFGA_ID_16:
  1677. pword = 2;
  1678. break;
  1679. case CNFGA_ID_32:
  1680. pword = 4;
  1681. break;
  1682. default:
  1683. pr_probe(p, "Unknown implementation ID: 0x%0x\n",
  1684. (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT);
  1685. goto fail;
  1686. break;
  1687. }
  1688. if (pword != 1) {
  1689. pr_probe(p, "Unsupported PWord size: %u\n", pword);
  1690. goto fail;
  1691. }
  1692. priv->pword = pword;
  1693. pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);
  1694. /* Check for compression support */
  1695. writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB);
  1696. if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS)
  1697. pr_probe(p, "Hardware compression detected (unsupported)\n");
  1698. writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB);
  1699. /* Reset FIFO and go in test mode (no interrupt, no DMA) */
  1700. parport_ip32_set_mode(p, ECR_MODE_TST);
  1701. /* FIFO must be empty now */
  1702. if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
  1703. pr_probe(p, "FIFO not reset\n");
  1704. goto fail;
  1705. }
  1706. /* Find out FIFO depth. */
  1707. priv->fifo_depth = 0;
  1708. for (i = 0; i < 1024; i++) {
  1709. if (readb(priv->regs.ecr) & ECR_F_FULL) {
  1710. /* FIFO full */
  1711. priv->fifo_depth = i;
  1712. break;
  1713. }
  1714. writeb((u8)i, priv->regs.fifo);
  1715. }
  1716. if (i >= 1024) {
  1717. pr_probe(p, "Can't fill FIFO\n");
  1718. goto fail;
  1719. }
  1720. if (!priv->fifo_depth) {
  1721. pr_probe(p, "Can't get FIFO depth\n");
  1722. goto fail;
  1723. }
  1724. pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);
  1725. /* Enable interrupts */
  1726. parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
  1727. /* Find out writeIntrThreshold: number of PWords we know we can write
  1728. * if we get an interrupt. */
  1729. priv->writeIntrThreshold = 0;
  1730. for (i = 0; i < priv->fifo_depth; i++) {
  1731. if (readb(priv->regs.fifo) != (u8)i) {
  1732. pr_probe(p, "Invalid data in FIFO\n");
  1733. goto fail;
  1734. }
  1735. if (!priv->writeIntrThreshold
  1736. && readb(priv->regs.ecr) & ECR_SERVINTR)
  1737. /* writeIntrThreshold reached */
  1738. priv->writeIntrThreshold = i + 1;
  1739. if (i + 1 < priv->fifo_depth
  1740. && readb(priv->regs.ecr) & ECR_F_EMPTY) {
  1741. /* FIFO empty before the last byte? */
  1742. pr_probe(p, "Data lost in FIFO\n");
  1743. goto fail;
  1744. }
  1745. }
  1746. if (!priv->writeIntrThreshold) {
  1747. pr_probe(p, "Can't get writeIntrThreshold\n");
  1748. goto fail;
  1749. }
  1750. pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);
  1751. /* FIFO must be empty now */
  1752. if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
  1753. pr_probe(p, "Can't empty FIFO\n");
  1754. goto fail;
  1755. }
  1756. /* Reset FIFO */
  1757. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1758. /* Set reverse direction (must be in PS2 mode) */
  1759. parport_ip32_data_reverse(p);
  1760. /* Test FIFO, no interrupt, no DMA */
  1761. parport_ip32_set_mode(p, ECR_MODE_TST);
  1762. /* Enable interrupts */
  1763. parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
  1764. /* Find out readIntrThreshold: number of PWords we can read if we get
  1765. * an interrupt. */
  1766. priv->readIntrThreshold = 0;
  1767. for (i = 0; i < priv->fifo_depth; i++) {
  1768. writeb(0xaa, priv->regs.fifo);
  1769. if (readb(priv->regs.ecr) & ECR_SERVINTR) {
  1770. /* readIntrThreshold reached */
  1771. priv->readIntrThreshold = i + 1;
  1772. break;
  1773. }
  1774. }
  1775. if (!priv->readIntrThreshold) {
  1776. pr_probe(p, "Can't get readIntrThreshold\n");
  1777. goto fail;
  1778. }
  1779. pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);
  1780. /* Reset ECR */
  1781. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1782. parport_ip32_data_forward(p);
  1783. parport_ip32_set_mode(p, ECR_MODE_SPP);
  1784. return 1;
  1785. fail:
  1786. priv->fifo_depth = 0;
  1787. parport_ip32_set_mode(p, ECR_MODE_SPP);
  1788. return 0;
  1789. }
  1790. /*--- Initialization code ----------------------------------------------*/
  1791. /**
  1792. * parport_ip32_make_isa_registers - compute (ISA) register addresses
  1793. * @regs: pointer to &struct parport_ip32_regs to fill
  1794. * @base: base address of standard and EPP registers
  1795. * @base_hi: base address of ECP registers
  1796. * @regshift: how much to shift register offset by
  1797. *
  1798. * Compute register addresses, according to the ISA standard. The addresses
  1799. * of the standard and EPP registers are computed from address @base. The
  1800. * addresses of the ECP registers are computed from address @base_hi.
  1801. */
  1802. static void __init
  1803. parport_ip32_make_isa_registers(struct parport_ip32_regs *regs,
  1804. void __iomem *base, void __iomem *base_hi,
  1805. unsigned int regshift)
  1806. {
  1807. #define r_base(offset) ((u8 __iomem *)base + ((offset) << regshift))
  1808. #define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
  1809. *regs = (struct parport_ip32_regs){
  1810. .data = r_base(0),
  1811. .dsr = r_base(1),
  1812. .dcr = r_base(2),
  1813. .eppAddr = r_base(3),
  1814. .eppData0 = r_base(4),
  1815. .eppData1 = r_base(5),
  1816. .eppData2 = r_base(6),
  1817. .eppData3 = r_base(7),
  1818. .ecpAFifo = r_base(0),
  1819. .fifo = r_base_hi(0),
  1820. .cnfgA = r_base_hi(0),
  1821. .cnfgB = r_base_hi(1),
  1822. .ecr = r_base_hi(2)
  1823. };
  1824. #undef r_base_hi
  1825. #undef r_base
  1826. }
  1827. /**
  1828. * parport_ip32_probe_port - probe and register IP32 built-in parallel port
  1829. *
  1830. * Returns the new allocated &parport structure. On error, an error code is
  1831. * encoded in return value with the ERR_PTR function.
  1832. */
  1833. static __init struct parport *parport_ip32_probe_port(void)
  1834. {
  1835. struct parport_ip32_regs regs;
  1836. struct parport_ip32_private *priv = NULL;
  1837. struct parport_operations *ops = NULL;
  1838. struct parport *p = NULL;
  1839. int err;
  1840. parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
  1841. &mace->isa.ecp1284, 8 /* regshift */);
  1842. ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
  1843. priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL);
  1844. p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
  1845. if (ops == NULL || priv == NULL || p == NULL) {
  1846. err = -ENOMEM;
  1847. goto fail;
  1848. }
  1849. p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
  1850. p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
  1851. p->private_data = priv;
  1852. *ops = parport_ip32_ops;
  1853. *priv = (struct parport_ip32_private){
  1854. .regs = regs,
  1855. .dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT |
  1856. DCR_AUTOFD | DCR_STROBE,
  1857. .irq_mode = PARPORT_IP32_IRQ_FWD,
  1858. };
  1859. init_completion(&priv->irq_complete);
  1860. /* Probe port. */
  1861. if (!parport_ip32_ecp_supported(p)) {
  1862. err = -ENODEV;
  1863. goto fail;
  1864. }
  1865. parport_ip32_dump_state(p, "begin init", 0);
  1866. /* We found what looks like a working ECR register. Simply assume
  1867. * that all modes are correctly supported. Enable basic modes. */
  1868. p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
  1869. p->modes |= PARPORT_MODE_TRISTATE;
  1870. if (!parport_ip32_fifo_supported(p)) {
  1871. printk(KERN_WARNING PPIP32
  1872. "%s: error: FIFO disabled\n", p->name);
  1873. /* Disable hardware modes depending on a working FIFO. */
  1874. features &= ~PARPORT_IP32_ENABLE_SPP;
  1875. features &= ~PARPORT_IP32_ENABLE_ECP;
  1876. /* DMA is not needed if FIFO is not supported. */
  1877. features &= ~PARPORT_IP32_ENABLE_DMA;
  1878. }
  1879. /* Request IRQ */
  1880. if (features & PARPORT_IP32_ENABLE_IRQ) {
  1881. int irq = MACEISA_PARALLEL_IRQ;
  1882. if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
  1883. printk(KERN_WARNING PPIP32
  1884. "%s: error: IRQ disabled\n", p->name);
  1885. /* DMA cannot work without interrupts. */
  1886. features &= ~PARPORT_IP32_ENABLE_DMA;
  1887. } else {
  1888. pr_probe(p, "Interrupt support enabled\n");
  1889. p->irq = irq;
  1890. priv->dcr_writable |= DCR_IRQ;
  1891. }
  1892. }
  1893. /* Allocate DMA resources */
  1894. if (features & PARPORT_IP32_ENABLE_DMA) {
  1895. if (parport_ip32_dma_register())
  1896. printk(KERN_WARNING PPIP32
  1897. "%s: error: DMA disabled\n", p->name);
  1898. else {
  1899. pr_probe(p, "DMA support enabled\n");
  1900. p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
  1901. p->modes |= PARPORT_MODE_DMA;
  1902. }
  1903. }
  1904. if (features & PARPORT_IP32_ENABLE_SPP) {
  1905. /* Enable compatibility FIFO mode */
  1906. p->ops->compat_write_data = parport_ip32_compat_write_data;
  1907. p->modes |= PARPORT_MODE_COMPAT;
  1908. pr_probe(p, "Hardware support for SPP mode enabled\n");
  1909. }
  1910. if (features & PARPORT_IP32_ENABLE_EPP) {
  1911. /* Set up access functions to use EPP hardware. */
  1912. p->ops->epp_read_data = parport_ip32_epp_read_data;
  1913. p->ops->epp_write_data = parport_ip32_epp_write_data;
  1914. p->ops->epp_read_addr = parport_ip32_epp_read_addr;
  1915. p->ops->epp_write_addr = parport_ip32_epp_write_addr;
  1916. p->modes |= PARPORT_MODE_EPP;
  1917. pr_probe(p, "Hardware support for EPP mode enabled\n");
  1918. }
  1919. if (features & PARPORT_IP32_ENABLE_ECP) {
  1920. /* Enable ECP FIFO mode */
  1921. p->ops->ecp_write_data = parport_ip32_ecp_write_data;
  1922. /* FIXME - not implemented */
  1923. /* p->ops->ecp_read_data = parport_ip32_ecp_read_data; */
  1924. /* p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */
  1925. p->modes |= PARPORT_MODE_ECP;
  1926. pr_probe(p, "Hardware support for ECP mode enabled\n");
  1927. }
  1928. /* Initialize the port with sensible values */
  1929. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1930. parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
  1931. parport_ip32_data_forward(p);
  1932. parport_ip32_disable_irq(p);
  1933. parport_ip32_write_data(p, 0x00);
  1934. parport_ip32_dump_state(p, "end init", 0);
  1935. /* Print out what we found */
  1936. printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
  1937. p->name, p->base, p->base_hi);
  1938. if (p->irq != PARPORT_IRQ_NONE)
  1939. printk(", irq %d", p->irq);
  1940. printk(" [");
  1941. #define printmode(x) if (p->modes & PARPORT_MODE_##x) \
  1942. printk("%s%s", f++ ? "," : "", #x)
  1943. {
  1944. unsigned int f = 0;
  1945. printmode(PCSPP);
  1946. printmode(TRISTATE);
  1947. printmode(COMPAT);
  1948. printmode(EPP);
  1949. printmode(ECP);
  1950. printmode(DMA);
  1951. }
  1952. #undef printmode
  1953. printk("]\n");
  1954. parport_announce_port(p);
  1955. return p;
  1956. fail:
  1957. if (p)
  1958. parport_put_port(p);
  1959. kfree(priv);
  1960. kfree(ops);
  1961. return ERR_PTR(err);
  1962. }
  1963. /**
  1964. * parport_ip32_unregister_port - unregister a parallel port
  1965. * @p: pointer to the &struct parport
  1966. *
  1967. * Unregisters a parallel port and free previously allocated resources
  1968. * (memory, IRQ, ...).
  1969. */
  1970. static __exit void parport_ip32_unregister_port(struct parport *p)
  1971. {
  1972. struct parport_ip32_private * const priv = p->physport->private_data;
  1973. struct parport_operations *ops = p->ops;
  1974. parport_remove_port(p);
  1975. if (p->modes & PARPORT_MODE_DMA)
  1976. parport_ip32_dma_unregister();
  1977. if (p->irq != PARPORT_IRQ_NONE)
  1978. free_irq(p->irq, p);
  1979. parport_put_port(p);
  1980. kfree(priv);
  1981. kfree(ops);
  1982. }
  1983. /**
  1984. * parport_ip32_init - module initialization function
  1985. */
  1986. static int __init parport_ip32_init(void)
  1987. {
  1988. pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
  1989. pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__);
  1990. this_port = parport_ip32_probe_port();
  1991. return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
  1992. }
  1993. /**
  1994. * parport_ip32_exit - module termination function
  1995. */
  1996. static void __exit parport_ip32_exit(void)
  1997. {
  1998. parport_ip32_unregister_port(this_port);
  1999. }
/*--- Module stuff -----------------------------------------------------*/
MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>");
MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.6");		/* update in parport_ip32_init() too */

module_init(parport_ip32_init);
module_exit(parport_ip32_exit);

/* Module parameters: exposed read-only through sysfs (S_IRUGO). */
module_param(verbose_probing, bool, S_IRUGO);
MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");

/* Feature mask consulted (and possibly narrowed) during probing. */
module_param(features, uint, S_IRUGO);
MODULE_PARM_DESC(features,
		 "Bit mask of features to enable"
		 ", bit 0: IRQ support"
		 ", bit 1: DMA support"
		 ", bit 2: hardware SPP mode"
		 ", bit 3: hardware EPP mode"
		 ", bit 4: hardware ECP mode");
  2017. /*--- Inform (X)Emacs about preferred coding style ---------------------*/
  2018. /*
  2019. * Local Variables:
  2020. * mode: c
  2021. * c-file-style: "linux"
  2022. * indent-tabs-mode: t
  2023. * tab-width: 8
  2024. * fill-column: 78
  2025. * ispell-local-dictionary: "american"
  2026. * End:
  2027. */