parport_ip32.c
/* Low-level parallel port routines for built-in port on SGI IP32
 *
 * Author: Arnaud Giersch <arnaud.giersch@free.fr>
 *
 * Based on parport_pc.c by
 *	Phil Blundell, Tim Waugh, Jose Renau, David Campbell,
 *	Andrea Arcangeli, et al.
 *
 * Thanks to Ilya A. Volynets-Evenbakh for his help.
 *
 * Copyright (C) 2005, 2006 Arnaud Giersch.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/* Current status:
 *
 *	Basic SPP and PS2 modes are supported.
 *	Support for parallel port IRQ is present.
 *	Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are
 *	supported.
 *	SPP/ECP FIFO can be driven in PIO or DMA mode.  PIO mode can work with
 *	or without interrupt support.
 *
 *	Hardware ECP mode is not fully implemented (ecp_read_data and
 *	ecp_write_addr are actually missing).
 *
 * To do:
 *
 *	Fully implement ECP mode.
 *	EPP and ECP modes need to be tested.  I currently do not own any
 *	peripheral supporting these extended modes, and cannot test them.
 *	If DMA mode works well, decide if support for PIO FIFO modes should be
 *	dropped.
 *	Use the io{read,write} family of functions when they become available
 *	in the linux-mips.org tree.  Note: the MIPS-specific functions readsb()
 *	and writesb() are to be translated to ioread8_rep() and iowrite8_rep()
 *	respectively.
 */
/* The built-in parallel port on the SGI O2 workstation (a.k.a. IP32) is an
 * IEEE 1284 parallel port driven by a Texas Instruments TL16PIR552PH chip[1].
 * This chip supports SPP, bidirectional, EPP and ECP modes.  It has a 16-byte
 * FIFO buffer and supports DMA transfers.
 *
 * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html
 *
 * Theoretically, we could simply use the parport_pc module.  It is, however,
 * not so simple.  The parport_pc code assumes that the parallel port
 * registers are port-mapped.  On the O2, they are memory-mapped.
 * Furthermore, each register is replicated on 256 consecutive addresses (as
 * it is for the built-in serial ports on the same chip).
 */
/*--- Some configuration defines ---------------------------------------*/

/* DEBUG_PARPORT_IP32
 *	0	disable debug
 *	1	standard level: pr_debug1 is enabled
 *	2	parport_ip32_dump_state is enabled
 *	>=3	verbose level: pr_debug is enabled
 */
#if !defined(DEBUG_PARPORT_IP32)
#	define DEBUG_PARPORT_IP32  0	/* 0 (disabled) for production */
#endif

/*----------------------------------------------------------------------*/

/* Setup DEBUG macros.  This is done before any includes, just in case we
 * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3.
 */
#if DEBUG_PARPORT_IP32 == 1
#	warning DEBUG_PARPORT_IP32 == 1
#elif DEBUG_PARPORT_IP32 == 2
#	warning DEBUG_PARPORT_IP32 == 2
#elif DEBUG_PARPORT_IP32 >= 3
#	warning DEBUG_PARPORT_IP32 >= 3
#	if !defined(DEBUG)
#		define DEBUG	/* enable pr_debug() in kernel.h */
#	endif
#endif

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/ip32/ip32_ints.h>
#include <asm/ip32/mace.h>

/*--- Global variables -------------------------------------------------*/

/* Verbose probing on by default for debugging. */
#if DEBUG_PARPORT_IP32 >= 1
#	define DEFAULT_VERBOSE_PROBING	1
#else
#	define DEFAULT_VERBOSE_PROBING	0
#endif

/* Default prefix for printk */
#define PPIP32 "parport_ip32: "

/*
 * These are the module parameters:
 * @features:		bit mask of features to enable/disable
 *			(all enabled by default)
 * @verbose_probing:	log chit-chat during initialization
 */
#define PARPORT_IP32_ENABLE_IRQ	(1U << 0)
#define PARPORT_IP32_ENABLE_DMA	(1U << 1)
#define PARPORT_IP32_ENABLE_SPP	(1U << 2)
#define PARPORT_IP32_ENABLE_EPP	(1U << 3)
#define PARPORT_IP32_ENABLE_ECP	(1U << 4)
static unsigned int features = ~0U;
static int verbose_probing = DEFAULT_VERBOSE_PROBING;

/* We do not support more than one port. */
static struct parport *this_port = NULL;

/* Timing constants for FIFO modes. */
#define FIFO_NFAULT_TIMEOUT	100	/* milliseconds */
#define FIFO_POLLING_INTERVAL	50	/* microseconds */

/*--- I/O register definitions -----------------------------------------*/

/**
 * struct parport_ip32_regs - virtual addresses of parallel port registers
 * @data:	Data Register
 * @dsr:	Device Status Register
 * @dcr:	Device Control Register
 * @eppAddr:	EPP Address Register
 * @eppData0:	EPP Data Register 0
 * @eppData1:	EPP Data Register 1
 * @eppData2:	EPP Data Register 2
 * @eppData3:	EPP Data Register 3
 * @ecpAFifo:	ECP Address FIFO
 * @fifo:	General FIFO register.  The same address is used for:
 *		- cFifo, the Parallel Port DATA FIFO
 *		- ecpDFifo, the ECP Data FIFO
 *		- tFifo, the ECP Test FIFO
 * @cnfgA:	Configuration Register A
 * @cnfgB:	Configuration Register B
 * @ecr:	Extended Control Register
 */
struct parport_ip32_regs {
	void __iomem *data;
	void __iomem *dsr;
	void __iomem *dcr;
	void __iomem *eppAddr;
	void __iomem *eppData0;
	void __iomem *eppData1;
	void __iomem *eppData2;
	void __iomem *eppData3;
	void __iomem *ecpAFifo;
	void __iomem *fifo;
	void __iomem *cnfgA;
	void __iomem *cnfgB;
	void __iomem *ecr;
};
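
/* Because each register is replicated on 256 consecutive addresses, the
 * virtual addresses above can all be derived from a single remapped base by
 * stepping in 256-byte increments.  A minimal illustrative sketch, not part
 * of the driver: the helper name and the `base' argument (as returned by an
 * ioremap() of the port) are hypothetical, and only the classic
 * data/status/control ordering is shown:
 *
 *	static void example_fill_regs(struct parport_ip32_regs *regs,
 *				      void __iomem *base)
 *	{
 *		regs->data = base + 0 * 256;
 *		regs->dsr  = base + 1 * 256;
 *		regs->dcr  = base + 2 * 256;
 *		(the EPP and ECP registers follow the same pattern)
 *	}
 */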

/* Device Status Register */
#define DSR_nBUSY		(1U << 7)	/* PARPORT_STATUS_BUSY */
#define DSR_nACK		(1U << 6)	/* PARPORT_STATUS_ACK */
#define DSR_PERROR		(1U << 5)	/* PARPORT_STATUS_PAPEROUT */
#define DSR_SELECT		(1U << 4)	/* PARPORT_STATUS_SELECT */
#define DSR_nFAULT		(1U << 3)	/* PARPORT_STATUS_ERROR */
#define DSR_nPRINT		(1U << 2)	/* specific to TL16PIR552 */
/* #define DSR_reserved		(1U << 1) */
#define DSR_TIMEOUT		(1U << 0)	/* EPP timeout */

/* Device Control Register */
/* #define DCR_reserved		(1U << 7) | (1U << 6) */
#define DCR_DIR			(1U << 5)	/* direction */
#define DCR_IRQ			(1U << 4)	/* interrupt on nAck */
#define DCR_SELECT		(1U << 3)	/* PARPORT_CONTROL_SELECT */
#define DCR_nINIT		(1U << 2)	/* PARPORT_CONTROL_INIT */
#define DCR_AUTOFD		(1U << 1)	/* PARPORT_CONTROL_AUTOFD */
#define DCR_STROBE		(1U << 0)	/* PARPORT_CONTROL_STROBE */

/* ECP Configuration Register A */
#define CNFGA_IRQ		(1U << 7)
#define CNFGA_ID_MASK		((1U << 6) | (1U << 5) | (1U << 4))
#define CNFGA_ID_SHIFT		4
#define CNFGA_ID_16		(00U << CNFGA_ID_SHIFT)
#define CNFGA_ID_8		(01U << CNFGA_ID_SHIFT)
#define CNFGA_ID_32		(02U << CNFGA_ID_SHIFT)
/* #define CNFGA_reserved	(1U << 3) */
#define CNFGA_nBYTEINTRANS	(1U << 2)
#define CNFGA_PWORDLEFT		((1U << 1) | (1U << 0))

/* ECP Configuration Register B */
#define CNFGB_COMPRESS		(1U << 7)
#define CNFGB_INTRVAL		(1U << 6)
#define CNFGB_IRQ_MASK		((1U << 5) | (1U << 4) | (1U << 3))
#define CNFGB_IRQ_SHIFT		3
#define CNFGB_DMA_MASK		((1U << 2) | (1U << 1) | (1U << 0))
#define CNFGB_DMA_SHIFT		0

/* Extended Control Register */
#define ECR_MODE_MASK		((1U << 7) | (1U << 6) | (1U << 5))
#define ECR_MODE_SHIFT		5
#define ECR_MODE_SPP		(00U << ECR_MODE_SHIFT)
#define ECR_MODE_PS2		(01U << ECR_MODE_SHIFT)
#define ECR_MODE_PPF		(02U << ECR_MODE_SHIFT)
#define ECR_MODE_ECP		(03U << ECR_MODE_SHIFT)
#define ECR_MODE_EPP		(04U << ECR_MODE_SHIFT)
/* #define ECR_MODE_reserved	(05U << ECR_MODE_SHIFT) */
#define ECR_MODE_TST		(06U << ECR_MODE_SHIFT)
#define ECR_MODE_CFG		(07U << ECR_MODE_SHIFT)
#define ECR_nERRINTR		(1U << 4)
#define ECR_DMAEN		(1U << 3)
#define ECR_SERVINTR		(1U << 2)
#define ECR_F_FULL		(1U << 1)
#define ECR_F_EMPTY		(1U << 0)

/*--- Private data -----------------------------------------------------*/

/**
 * enum parport_ip32_irq_mode - operation mode of interrupt handler
 * @PARPORT_IP32_IRQ_FWD:	forward interrupt to the upper parport layer
 * @PARPORT_IP32_IRQ_HERE:	interrupt is handled locally
 */
enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE };

/**
 * struct parport_ip32_private - private stuff for &struct parport
 * @regs:		register addresses
 * @dcr_cache:		cached contents of DCR
 * @dcr_writable:	bit mask of writable DCR bits
 * @pword:		number of bytes per PWord
 * @fifo_depth:		number of PWords that FIFO will hold
 * @readIntrThreshold:	minimum number of PWords we can read
 *			if we get an interrupt
 * @writeIntrThreshold:	minimum number of PWords we can write
 *			if we get an interrupt
 * @irq_mode:		operation mode of interrupt handler for this port
 * @irq_complete:	completion used to wait for an interrupt to occur
 */
struct parport_ip32_private {
	struct parport_ip32_regs	regs;
	unsigned int			dcr_cache;
	unsigned int			dcr_writable;
	unsigned int			pword;
	unsigned int			fifo_depth;
	unsigned int			readIntrThreshold;
	unsigned int			writeIntrThreshold;
	enum parport_ip32_irq_mode	irq_mode;
	struct completion		irq_complete;
};

/*--- Debug code -------------------------------------------------------*/

/*
 * pr_debug1 - print debug messages
 *
 * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1
 */
#if DEBUG_PARPORT_IP32 >= 1
#	define pr_debug1(...)	printk(KERN_DEBUG __VA_ARGS__)
#else /* DEBUG_PARPORT_IP32 < 1 */
#	define pr_debug1(...)	do { } while (0)
#endif

/*
 * pr_trace, pr_trace1 - trace function calls
 * @p:		pointer to &struct parport
 * @fmt:	printk format string
 * @...:	parameters for format string
 *
 * Macros used to trace function calls.  The given string is formatted after
 * the function name.  pr_trace() uses pr_debug(), and pr_trace1() uses
 * pr_debug1().  __pr_trace() is the low-level macro and is not to be used
 * directly.
 */
#define __pr_trace(pr, p, fmt, ...)					\
	pr("%s: %s" fmt "\n",						\
	   ({ const struct parport *__p = (p);				\
		__p ? __p->name : "parport_ip32"; }),			\
	   __func__ , ##__VA_ARGS__)
#define pr_trace(p, fmt, ...)	__pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
#define pr_trace1(p, fmt, ...)	__pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)

/*
 * __pr_probe, pr_probe - print message if @verbose_probing is true
 * @p:		pointer to &struct parport
 * @fmt:	printk format string
 * @...:	parameters for format string
 *
 * For new lines, use pr_probe().  Use __pr_probe() for continued lines.
 */
#define __pr_probe(...)							\
	do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
#define pr_probe(p, fmt, ...)						\
	__pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)

/*
 * parport_ip32_dump_state - print register status of parport
 * @p:			pointer to &struct parport
 * @str:		string to add in message
 * @show_ecp_config:	shall we dump ECP configuration registers too?
 *
 * This function is only here for debugging purposes, and should be used with
 * care.  Reading the parallel port registers may have undesired side effects.
 * Especially if @show_ecp_config is true, the parallel port is reset.
 * This function is only defined if %DEBUG_PARPORT_IP32 >= 2.
 */
#if DEBUG_PARPORT_IP32 >= 2
static void parport_ip32_dump_state(struct parport *p, char *str,
				    unsigned int show_ecp_config)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int i;

	printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
	{
		static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF",
						     "ECP", "EPP", "???",
						     "TST", "CFG"};
		unsigned int ecr = readb(priv->regs.ecr);
		printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
		printk(" %s",
		       ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
		if (ecr & ECR_nERRINTR)
			printk(",nErrIntrEn");
		if (ecr & ECR_DMAEN)
			printk(",dmaEn");
		if (ecr & ECR_SERVINTR)
			printk(",serviceIntr");
		if (ecr & ECR_F_FULL)
			printk(",f_full");
		if (ecr & ECR_F_EMPTY)
			printk(",f_empty");
		printk("\n");
	}
	if (show_ecp_config) {
		unsigned int oecr, cnfgA, cnfgB;
		oecr = readb(priv->regs.ecr);
		writeb(ECR_MODE_PS2, priv->regs.ecr);
		writeb(ECR_MODE_CFG, priv->regs.ecr);
		cnfgA = readb(priv->regs.cnfgA);
		cnfgB = readb(priv->regs.cnfgB);
		writeb(ECR_MODE_PS2, priv->regs.ecr);
		writeb(oecr, priv->regs.ecr);
		printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
		printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
		switch (cnfgA & CNFGA_ID_MASK) {
		case CNFGA_ID_8:
			printk(",8 bits");
			break;
		case CNFGA_ID_16:
			printk(",16 bits");
			break;
		case CNFGA_ID_32:
			printk(",32 bits");
			break;
		default:
			printk(",unknown ID");
			break;
		}
		if (!(cnfgA & CNFGA_nBYTEINTRANS))
			printk(",ByteInTrans");
		if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
			printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
			       ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
		printk("\n");
		printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
		printk(" irq=%u,dma=%u",
		       (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
		       (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
		printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
		if (cnfgB & CNFGB_COMPRESS)
			printk(",compress");
		printk("\n");
	}
	for (i = 0; i < 2; i++) {
		unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
		printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
		       i ? "soft" : "hard", dcr);
		printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
		if (dcr & DCR_IRQ)
			printk(",ackIntEn");
		if (!(dcr & DCR_SELECT))
			printk(",nSelectIn");
		if (dcr & DCR_nINIT)
			printk(",nInit");
		if (!(dcr & DCR_AUTOFD))
			printk(",nAutoFD");
		if (!(dcr & DCR_STROBE))
			printk(",nStrobe");
		printk("\n");
	}
#define sep (f++ ? ',' : ' ')
	{
		unsigned int f = 0;
		unsigned int dsr = readb(priv->regs.dsr);
		printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
		if (!(dsr & DSR_nBUSY))
			printk("%cBusy", sep);
		if (dsr & DSR_nACK)
			printk("%cnAck", sep);
		if (dsr & DSR_PERROR)
			printk("%cPError", sep);
		if (dsr & DSR_SELECT)
			printk("%cSelect", sep);
		if (dsr & DSR_nFAULT)
			printk("%cnFault", sep);
		if (!(dsr & DSR_nPRINT))
			printk("%c(Print)", sep);
		if (dsr & DSR_TIMEOUT)
			printk("%cTimeout", sep);
		printk("\n");
	}
#undef sep
}
#else /* DEBUG_PARPORT_IP32 < 2 */
#define parport_ip32_dump_state(...)	do { } while (0)
#endif

/*
 * CHECK_EXTRA_BITS - track and log extra bits
 * @p:	pointer to &struct parport
 * @b:	byte to inspect
 * @m:	bit mask of authorized bits
 *
 * This is used to track and log extra bits that should not be there in
 * parport_ip32_write_control() and parport_ip32_frob_control().  It is only
 * defined if %DEBUG_PARPORT_IP32 >= 1.
 */
#if DEBUG_PARPORT_IP32 >= 1
#define CHECK_EXTRA_BITS(p, b, m)					\
	do {								\
		unsigned int __b = (b), __m = (m);			\
		if (__b & ~__m)						\
			pr_debug1(PPIP32 "%s: extra bits in %s(%s): "	\
				  "0x%02x/0x%02x\n",			\
				  (p)->name, __func__, #b, __b, __m);	\
	} while (0)
#else /* DEBUG_PARPORT_IP32 < 1 */
#define CHECK_EXTRA_BITS(...)	do { } while (0)
#endif

/*--- IP32 parallel port DMA operations --------------------------------*/

/**
 * struct parport_ip32_dma_data - private data needed for DMA operation
 * @dir:	DMA direction (from or to device)
 * @buf:	buffer physical address
 * @len:	buffer length
 * @next:	address of next bytes to DMA transfer
 * @left:	number of bytes remaining
 * @ctx:	next context to write (0: context_a; 1: context_b)
 * @irq_on:	are the DMA IRQs currently enabled?
 * @lock:	spinlock to protect access to the structure
 */
struct parport_ip32_dma_data {
	enum dma_data_direction		dir;
	dma_addr_t			buf;
	dma_addr_t			next;
	size_t				len;
	size_t				left;
	unsigned int			ctx;
	unsigned int			irq_on;
	spinlock_t			lock;
};
static struct parport_ip32_dma_data parport_ip32_dma;

/**
 * parport_ip32_dma_setup_context - setup next DMA context
 * @limit:	maximum data size for the context
 *
 * The alignment constraints must be verified in the calling function, and
 * the parameter @limit must be set accordingly.
 */
static void parport_ip32_dma_setup_context(unsigned int limit)
{
	unsigned long flags;

	spin_lock_irqsave(&parport_ip32_dma.lock, flags);
	if (parport_ip32_dma.left > 0) {
		/* Note: ctxreg is "volatile" here only because
		 * mace->perif.ctrl.parport.context_a and context_b are
		 * "volatile". */
		volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
			&mace->perif.ctrl.parport.context_a :
			&mace->perif.ctrl.parport.context_b;
		u64 count;
		u64 ctxval;
		if (parport_ip32_dma.left <= limit) {
			count = parport_ip32_dma.left;
			ctxval = MACEPAR_CONTEXT_LASTFLAG;
		} else {
			count = limit;
			ctxval = 0;
		}

		pr_trace(NULL,
			 "(%u): 0x%04x:0x%04x, %u -> %u%s",
			 limit,
			 (unsigned int)parport_ip32_dma.buf,
			 (unsigned int)parport_ip32_dma.next,
			 (unsigned int)count,
			 parport_ip32_dma.ctx, ctxval ? "*" : "");

		ctxval |= parport_ip32_dma.next &
			MACEPAR_CONTEXT_BASEADDR_MASK;
		ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &
			MACEPAR_CONTEXT_DATALEN_MASK;
		writeq(ctxval, ctxreg);
		parport_ip32_dma.next += count;
		parport_ip32_dma.left -= count;
		parport_ip32_dma.ctx ^= 1U;
	}
	/* If there is nothing more to send, disable IRQs to avoid an
	 * IRQ storm which can lock up the machine.  Disable them only
	 * once. */
	if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
		pr_debug(PPIP32 "IRQ off (ctx)\n");
		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
}
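
/* A context must not cross a 4 KB boundary in the source buffer, so the
 * caller derives @limit from the current transfer position.  This is the
 * computation used by parport_ip32_dma_start() below, shown here for
 * reference:
 *
 *	limit = MACEPAR_CONTEXT_DATA_BOUND -
 *		(parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
 *	parport_ip32_dma_setup_context(limit);
 */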

/**
 * parport_ip32_dma_interrupt - DMA interrupt handler
 * @irq:	interrupt number
 * @dev_id:	unused
 * @regs:	pointer to &struct pt_regs
 */
static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id,
					      struct pt_regs *regs)
{
	if (parport_ip32_dma.left)
		pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
	return IRQ_HANDLED;
}

#if DEBUG_PARPORT_IP32
static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id,
					       struct pt_regs *regs)
{
	pr_trace1(NULL, "(%d)", irq);
	return IRQ_HANDLED;
}
#endif

/**
 * parport_ip32_dma_start - begins a DMA transfer
 * @dir:	DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @addr:	pointer to data buffer
 * @count:	buffer size
 *
 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
 * correctly balanced.
 */
static int parport_ip32_dma_start(enum dma_data_direction dir,
				  void *addr, size_t count)
{
	unsigned int limit;
	u64 ctrl;

	pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);

	/* FIXME - add support for DMA_FROM_DEVICE.  In this case, buffer must
	 * be 64 bytes aligned. */
	BUG_ON(dir != DMA_TO_DEVICE);

	/* Reset DMA controller */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	/* DMA IRQs should normally be enabled */
	if (!parport_ip32_dma.irq_on) {
		WARN_ON(1);
		enable_irq(MACEISA_PAR_CTXA_IRQ);
		enable_irq(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 1;
	}

	/* Prepare DMA pointers */
	parport_ip32_dma.dir = dir;
	parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir);
	parport_ip32_dma.len = count;
	parport_ip32_dma.next = parport_ip32_dma.buf;
	parport_ip32_dma.left = parport_ip32_dma.len;
	parport_ip32_dma.ctx = 0;

	/* Setup DMA direction and first two contexts */
	ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* Single transfer should not cross a 4K page boundary */
	limit = MACEPAR_CONTEXT_DATA_BOUND -
		(parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
	parport_ip32_dma_setup_context(limit);
	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);

	/* Real start of DMA transfer */
	ctrl |= MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	return 0;
}

/**
 * parport_ip32_dma_stop - ends a running DMA transfer
 *
 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
 * correctly balanced.
 */
static void parport_ip32_dma_stop(void)
{
	u64 ctx_a;
	u64 ctx_b;
	u64 ctrl;
	u64 diag;
	size_t res[2];	/* {[0] = res_a, [1] = res_b} */

	pr_trace(NULL, "()");

	/* Disable IRQs */
	spin_lock_irq(&parport_ip32_dma.lock);
	if (parport_ip32_dma.irq_on) {
		pr_debug(PPIP32 "IRQ off (stop)\n");
		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irq(&parport_ip32_dma.lock);
	/* Force IRQ synchronization, even if the IRQs were disabled
	 * elsewhere. */
	synchronize_irq(MACEISA_PAR_CTXA_IRQ);
	synchronize_irq(MACEISA_PAR_CTXB_IRQ);

	/* Stop DMA transfer */
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	/* Adjust residue (parport_ip32_dma.left) */
	ctx_a = readq(&mace->perif.ctrl.parport.context_a);
	ctx_b = readq(&mace->perif.ctrl.parport.context_b);
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	diag = readq(&mace->perif.ctrl.parport.diagnostic);
	res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
		1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
		1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	if (diag & MACEPAR_DIAG_DMACTIVE)
		res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
			1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
			     MACEPAR_DIAG_CTRSHIFT);
	parport_ip32_dma.left += res[0] + res[1];

	/* Reset DMA controller, and re-enable IRQs */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	pr_debug(PPIP32 "IRQ on (stop)\n");
	enable_irq(MACEISA_PAR_CTXA_IRQ);
	enable_irq(MACEISA_PAR_CTXB_IRQ);
	parport_ip32_dma.irq_on = 1;

	dma_unmap_single(NULL, parport_ip32_dma.buf, parport_ip32_dma.len,
			 parport_ip32_dma.dir);
}

/**
 * parport_ip32_dma_get_residue - get residue from last DMA transfer
 *
 * Returns the number of bytes remaining from last DMA transfer.
 */
static inline size_t parport_ip32_dma_get_residue(void)
{
	return parport_ip32_dma.left;
}
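
/* A balanced transfer therefore looks roughly like the sketch below; the
 * residue is only meaningful once the transfer has been stopped.  The real
 * caller is parport_ip32_fifo_write_block_dma(), later in this file:
 *
 *	parport_ip32_dma_start(DMA_TO_DEVICE, buf, len);
 *	(enable DMA in the ECR, then wait for completion or a timeout)
 *	parport_ip32_dma_stop();
 *	written = len - parport_ip32_dma_get_residue();
 */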

/**
 * parport_ip32_dma_register - initialize DMA engine
 *
 * Returns zero for success.
 */
static int parport_ip32_dma_register(void)
{
	int err;

	spin_lock_init(&parport_ip32_dma.lock);
	parport_ip32_dma.irq_on = 1;

	/* Reset DMA controller */
	writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);

	/* Request IRQs */
	err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_a;
	err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_b;
#if DEBUG_PARPORT_IP32
	/* FIXME - what is this IRQ for? */
	err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_merr;
#endif
	return 0;

#if DEBUG_PARPORT_IP32
fail_merr:
	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
#endif
fail_b:
	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
fail_a:
	return err;
}

/**
 * parport_ip32_dma_unregister - release and free resources for DMA engine
 */
static void parport_ip32_dma_unregister(void)
{
#if DEBUG_PARPORT_IP32
	free_irq(MACEISA_PAR_MERR_IRQ, NULL);
#endif
	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
}

/*--- Interrupt handlers and associates --------------------------------*/

/**
 * parport_ip32_wakeup - wakes up code waiting for an interrupt
 * @p:	pointer to &struct parport
 */
static inline void parport_ip32_wakeup(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	complete(&priv->irq_complete);
}

/**
 * parport_ip32_interrupt - interrupt handler
 * @irq:	interrupt number
 * @dev_id:	pointer to &struct parport
 * @regs:	pointer to &struct pt_regs
 *
 * Caught interrupts are forwarded to the upper parport layer if @irq_mode is
 * %PARPORT_IP32_IRQ_FWD.
 */
static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id,
					  struct pt_regs *regs)
{
	struct parport * const p = dev_id;
	struct parport_ip32_private * const priv = p->physport->private_data;
	enum parport_ip32_irq_mode irq_mode = priv->irq_mode;
	switch (irq_mode) {
	case PARPORT_IP32_IRQ_FWD:
		parport_generic_irq(irq, p, regs);
		break;
	case PARPORT_IP32_IRQ_HERE:
		parport_ip32_wakeup(p);
		break;
	}
	return IRQ_HANDLED;
}

/*--- Some utility functions to manipulate the ECR register ------------*/

/**
 * parport_ip32_read_econtrol - read contents of the ECR register
 * @p:	pointer to &struct parport
 */
static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return readb(priv->regs.ecr);
}

/**
 * parport_ip32_write_econtrol - write new contents to the ECR register
 * @p:	pointer to &struct parport
 * @c:	new value to write
 */
static inline void parport_ip32_write_econtrol(struct parport *p,
					       unsigned int c)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	writeb(c, priv->regs.ecr);
}

/**
 * parport_ip32_frob_econtrol - change bits from the ECR register
 * @p:		pointer to &struct parport
 * @mask:	bit mask of bits to change
 * @val:	new value for changed bits
 *
 * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits
 * in @val, and write the result to the ECR.
 */
static inline void parport_ip32_frob_econtrol(struct parport *p,
					      unsigned int mask,
					      unsigned int val)
{
	unsigned int c;
	c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
	parport_ip32_write_econtrol(p, c);
}

/**
 * parport_ip32_set_mode - change mode of ECP port
 * @p:		pointer to &struct parport
 * @mode:	new mode to write in ECR
 *
 * ECR is reset to a sane state (interrupts and DMA disabled), and placed in
 * mode @mode.  Go through PS2 mode if needed.
 */
static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
{
	unsigned int omode;

	mode &= ECR_MODE_MASK;
	omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
	if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
	      || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
		/* We have to go through PS2 mode */
		unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
		parport_ip32_write_econtrol(p, ecr);
	}
	parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
}
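
/* A minimal usage sketch: a FIFO transfer typically selects the FIFO mode
 * first and unmasks serviceIntr separately, since parport_ip32_set_mode()
 * always leaves nErrIntrEn and serviceIntr set:
 *
 *	parport_ip32_set_mode(p, ECR_MODE_PPF);
 *	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
 */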

/*--- Basic functions needed for parport -------------------------------*/

/**
 * parport_ip32_read_data - return current contents of the DATA register
 * @p:	pointer to &struct parport
 */
static inline unsigned char parport_ip32_read_data(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return readb(priv->regs.data);
}

/**
 * parport_ip32_write_data - set new contents for the DATA register
 * @p:	pointer to &struct parport
 * @d:	new value to write
 */
static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	writeb(d, priv->regs.data);
}

/**
 * parport_ip32_read_status - return current contents of the DSR register
 * @p:	pointer to &struct parport
 */
static inline unsigned char parport_ip32_read_status(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return readb(priv->regs.dsr);
}

/**
 * __parport_ip32_read_control - return cached contents of the DCR register
 * @p:	pointer to &struct parport
 */
static inline unsigned int __parport_ip32_read_control(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return priv->dcr_cache;		/* use soft copy */
}

/**
 * __parport_ip32_write_control - set new contents for the DCR register
 * @p:	pointer to &struct parport
 * @c:	new value to write
 */
static inline void __parport_ip32_write_control(struct parport *p,
						unsigned int c)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
	c &= priv->dcr_writable;	/* only writable bits */
	writeb(c, priv->regs.dcr);
	priv->dcr_cache = c;		/* update soft copy */
}

/**
 * __parport_ip32_frob_control - change bits from the DCR register
 * @p:		pointer to &struct parport
 * @mask:	bit mask of bits to change
 * @val:	new value for changed bits
 *
 * This is equivalent to reading from the DCR, masking out the bits in @mask,
 * exclusive-or'ing with the bits in @val, and writing the result to the DCR.
 * In practice, the cached contents of the DCR are used.
 */
static inline void __parport_ip32_frob_control(struct parport *p,
					       unsigned int mask,
					       unsigned int val)
{
	unsigned int c;
	c = (__parport_ip32_read_control(p) & ~mask) ^ val;
	__parport_ip32_write_control(p, c);
}

/**
 * parport_ip32_read_control - return cached contents of the DCR register
 * @p:	pointer to &struct parport
 *
 * The return value is masked so as to only return the value of %DCR_STROBE,
 * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
 */
static inline unsigned char parport_ip32_read_control(struct parport *p)
{
	const unsigned int rm =
		DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
	return __parport_ip32_read_control(p) & rm;
}

/**
 * parport_ip32_write_control - set new contents for the DCR register
 * @p:	pointer to &struct parport
 * @c:	new value to write
 *
 * The value is masked so as to only change the value of %DCR_STROBE,
 * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
 */
static inline void parport_ip32_write_control(struct parport *p,
					      unsigned char c)
{
	const unsigned int wm =
		DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
	CHECK_EXTRA_BITS(p, c, wm);
	__parport_ip32_frob_control(p, wm, c & wm);
}

/**
 * parport_ip32_frob_control - change bits from the DCR register
 * @p:		pointer to &struct parport
 * @mask:	bit mask of bits to change
 * @val:	new value for changed bits
 *
 * This differs from __parport_ip32_frob_control() in that it only allows
 * changing the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and
 * %DCR_SELECT.
 */
static inline unsigned char parport_ip32_frob_control(struct parport *p,
						      unsigned char mask,
						      unsigned char val)
{
	const unsigned int wm =
		DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
	CHECK_EXTRA_BITS(p, mask, wm);
	CHECK_EXTRA_BITS(p, val, wm);
	__parport_ip32_frob_control(p, mask & wm, val & wm);
	return parport_ip32_read_control(p);
}
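
/* A minimal usage sketch of the mask/value semantics: the first call sets
 * the STROBE control bit, the second clears it, leaving the other DCR bits
 * untouched:
 *
 *	parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
 *	parport_ip32_frob_control(p, DCR_STROBE, 0);
 */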
  910. /**
  911. * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK
  912. * @p: pointer to &struct parport
  913. */
  914. static inline void parport_ip32_disable_irq(struct parport *p)
  915. {
  916. __parport_ip32_frob_control(p, DCR_IRQ, 0);
  917. }
  918. /**
  919. * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK
  920. * @p: pointer to &struct parport
  921. */
  922. static inline void parport_ip32_enable_irq(struct parport *p)
  923. {
  924. __parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
  925. }
  926. /**
  927. * parport_ip32_data_forward - enable host-to-peripheral communications
  928. * @p: pointer to &struct parport
  929. *
  930. * Enable the data line drivers, for 8-bit host-to-peripheral communications.
  931. */
  932. static inline void parport_ip32_data_forward(struct parport *p)
  933. {
  934. __parport_ip32_frob_control(p, DCR_DIR, 0);
  935. }
  936. /**
  937. * parport_ip32_data_reverse - enable peripheral-to-host communications
  938. * @p: pointer to &struct parport
  939. *
  940. * Place the data bus in a high impedance state, if @p->modes has the
  941. * PARPORT_MODE_TRISTATE bit set.
  942. */
  943. static inline void parport_ip32_data_reverse(struct parport *p)
  944. {
  945. __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
  946. }
  947. /**
  948. * parport_ip32_init_state - for core parport code
  949. * @dev: pointer to &struct pardevice
  950. * @s: pointer to &struct parport_state to initialize
  951. */
  952. static void parport_ip32_init_state(struct pardevice *dev,
  953. struct parport_state *s)
  954. {
  955. s->u.ip32.dcr = DCR_SELECT | DCR_nINIT;
  956. s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
  957. }
  958. /**
  959. * parport_ip32_save_state - for core parport code
  960. * @p: pointer to &struct parport
  961. * @s: pointer to &struct parport_state to save state to
  962. */
  963. static void parport_ip32_save_state(struct parport *p,
  964. struct parport_state *s)
  965. {
  966. s->u.ip32.dcr = __parport_ip32_read_control(p);
  967. s->u.ip32.ecr = parport_ip32_read_econtrol(p);
  968. }
  969. /**
  970. * parport_ip32_restore_state - for core parport code
  971. * @p: pointer to &struct parport
  972. * @s: pointer to &struct parport_state to restore state from
  973. */
  974. static void parport_ip32_restore_state(struct parport *p,
  975. struct parport_state *s)
  976. {
  977. parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
  978. parport_ip32_write_econtrol(p, s->u.ip32.ecr);
  979. __parport_ip32_write_control(p, s->u.ip32.dcr);
  980. }
  981. /*--- EPP mode functions -----------------------------------------------*/
  982. /**
  983. * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode
  984. * @p: pointer to &struct parport
  985. *
  986. * Returns 1 if the Timeout bit is clear, and 0 otherwise.
  987. */
  988. static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
  989. {
  990. struct parport_ip32_private * const priv = p->physport->private_data;
  991. unsigned int cleared;
  992. if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
  993. cleared = 1;
  994. else {
  995. unsigned int r;
  996. /* To clear timeout some chips require double read */
  997. parport_ip32_read_status(p);
  998. r = parport_ip32_read_status(p);
  999. /* Some reset by writing 1 */
  1000. writeb(r | DSR_TIMEOUT, priv->regs.dsr);
  1001. /* Others by writing 0 */
  1002. writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);
  1003. r = parport_ip32_read_status(p);
  1004. cleared = !(r & DSR_TIMEOUT);
  1005. }
  1006. pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
  1007. return cleared;
  1008. }
  1009. /**
  1010. * parport_ip32_epp_read - generic EPP read function
  1011. * @eppreg: I/O register to read from
  1012. * @p: pointer to &struct parport
  1013. * @buf: buffer to store read data
  1014. * @len: length of buffer @buf
  1015. * @flags: may be PARPORT_EPP_FAST
  1016. */
  1017. static size_t parport_ip32_epp_read(void __iomem *eppreg,
  1018. struct parport *p, void *buf,
  1019. size_t len, int flags)
  1020. {
  1021. struct parport_ip32_private * const priv = p->physport->private_data;
  1022. size_t got;
  1023. parport_ip32_set_mode(p, ECR_MODE_EPP);
  1024. parport_ip32_data_reverse(p);
  1025. parport_ip32_write_control(p, DCR_nINIT);
  1026. if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
  1027. readsb(eppreg, buf, len);
  1028. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1029. parport_ip32_clear_epp_timeout(p);
  1030. return -EIO;
  1031. }
  1032. got = len;
  1033. } else {
  1034. u8 *bufp = buf;
  1035. for (got = 0; got < len; got++) {
  1036. *bufp++ = readb(eppreg);
  1037. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1038. parport_ip32_clear_epp_timeout(p);
  1039. break;
  1040. }
  1041. }
  1042. }
  1043. parport_ip32_data_forward(p);
  1044. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1045. return got;
  1046. }
  1047. /**
  1048. * parport_ip32_epp_write - generic EPP write function
  1049. * @eppreg: I/O register to write to
  1050. * @p: pointer to &struct parport
  1051. * @buf: buffer of data to write
  1052. * @len: length of buffer @buf
  1053. * @flags: may be PARPORT_EPP_FAST
  1054. */
  1055. static size_t parport_ip32_epp_write(void __iomem *eppreg,
  1056. struct parport *p, const void *buf,
  1057. size_t len, int flags)
  1058. {
  1059. struct parport_ip32_private * const priv = p->physport->private_data;
  1060. size_t written;
  1061. parport_ip32_set_mode(p, ECR_MODE_EPP);
  1062. parport_ip32_data_forward(p);
  1063. parport_ip32_write_control(p, DCR_nINIT);
  1064. if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
  1065. writesb(eppreg, buf, len);
  1066. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1067. parport_ip32_clear_epp_timeout(p);
  1068. return -EIO;
  1069. }
  1070. written = len;
  1071. } else {
  1072. const u8 *bufp = buf;
  1073. for (written = 0; written < len; written++) {
  1074. writeb(*bufp++, eppreg);
  1075. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1076. parport_ip32_clear_epp_timeout(p);
  1077. break;
  1078. }
  1079. }
  1080. }
  1081. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1082. return written;
  1083. }
  1084. /**
  1085. * parport_ip32_epp_read_data - read a block of data in EPP mode
  1086. * @p: pointer to &struct parport
  1087. * @buf: buffer to store read data
  1088. * @len: length of buffer @buf
  1089. * @flags: may be PARPORT_EPP_FAST
  1090. */
  1091. static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
  1092. size_t len, int flags)
  1093. {
  1094. struct parport_ip32_private * const priv = p->physport->private_data;
  1095. return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
  1096. }
  1097. /**
  1098. * parport_ip32_epp_write_data - write a block of data in EPP mode
  1099. * @p: pointer to &struct parport
  1100. * @buf: buffer of data to write
  1101. * @len: length of buffer @buf
  1102. * @flags: may be PARPORT_EPP_FAST
  1103. */
  1104. static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
  1105. size_t len, int flags)
  1106. {
  1107. struct parport_ip32_private * const priv = p->physport->private_data;
  1108. return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
  1109. }
  1110. /**
  1111. * parport_ip32_epp_read_addr - read a block of addresses in EPP mode
  1112. * @p: pointer to &struct parport
  1113. * @buf: buffer to store read data
  1114. * @len: length of buffer @buf
  1115. * @flags: may be PARPORT_EPP_FAST
  1116. */
  1117. static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
  1118. size_t len, int flags)
  1119. {
  1120. struct parport_ip32_private * const priv = p->physport->private_data;
  1121. return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
  1122. }
  1123. /**
  1124. * parport_ip32_epp_write_addr - write a block of addresses in EPP mode
  1125. * @p: pointer to &struct parport
  1126. * @buf: buffer of data to write
  1127. * @len: length of buffer @buf
  1128. * @flags: may be PARPORT_EPP_FAST
  1129. */
  1130. static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
  1131. size_t len, int flags)
  1132. {
  1133. struct parport_ip32_private * const priv = p->physport->private_data;
  1134. return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
  1135. }
  1136. /*--- ECP mode functions (FIFO) ----------------------------------------*/
  1137. /**
  1138. * parport_ip32_fifo_wait_break - check if the waiting function should return
  1139. * @p: pointer to &struct parport
  1140. * @expire: timeout expiring date, in jiffies
  1141. *
  1142. * parport_ip32_fifo_wait_break() checks if the waiting function should return
  1143. * immediately or not. The break conditions are:
  1144. * - expired timeout;
  1145. * - a pending signal;
  1146. * - nFault asserted low.
  1147. * This function also calls cond_resched().
  1148. */
  1149. static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
  1150. unsigned long expire)
  1151. {
  1152. cond_resched();
  1153. if (time_after(jiffies, expire)) {
  1154. pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
  1155. return 1;
  1156. }
  1157. if (signal_pending(current)) {
  1158. pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
  1159. return 1;
  1160. }
  1161. if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
  1162. pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
  1163. return 1;
  1164. }
  1165. return 0;
  1166. }
  1167. /**
  1168. * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
  1169. * @p: pointer to &struct parport
  1170. *
  1171. * Returns the number of bytes that can safely be written in the FIFO. A
  1172. * return value of zero means that the calling function should terminate as
  1173. * fast as possible.
  1174. */
  1175. static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
  1176. {
  1177. struct parport_ip32_private * const priv = p->physport->private_data;
  1178. struct parport * const physport = p->physport;
  1179. unsigned long expire;
  1180. unsigned int count;
  1181. unsigned int ecr;
  1182. expire = jiffies + physport->cad->timeout;
  1183. count = 0;
  1184. while (1) {
  1185. if (parport_ip32_fifo_wait_break(p, expire))
  1186. break;
  1187. /* Check FIFO state. We do nothing when the FIFO is nor full,
  1188. * nor empty. It appears that the FIFO full bit is not always
  1189. * reliable, the FIFO state is sometimes wrongly reported, and
  1190. * the chip gets confused if we give it another byte. */
  1191. ecr = parport_ip32_read_econtrol(p);
  1192. if (ecr & ECR_F_EMPTY) {
  1193. /* FIFO is empty, fill it up */
  1194. count = priv->fifo_depth;
  1195. break;
  1196. }
  1197. /* Wait a moment... */
  1198. udelay(FIFO_POLLING_INTERVAL);
  1199. } /* while (1) */
  1200. return count;
  1201. }

/**
 * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
 * @p: pointer to &struct parport
 *
 * Returns the number of bytes that can safely be written in the FIFO.  A
 * return value of zero means that the calling function should terminate as
 * fast as possible.
 */
static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
{
	static unsigned int lost_interrupt = 0;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long nfault_timeout;
	unsigned long expire;
	unsigned int count;
	unsigned int ecr;

	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	expire = jiffies + physport->cad->timeout;
	count = 0;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;

		/* Initialize completion used to take interrupts into
		 * account */
		INIT_COMPLETION(priv->irq_complete);

		/* Enable serviceIntr */
		parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

		/* Enabling serviceIntr while the FIFO is empty does not
		 * always generate an interrupt, so check for emptiness
		 * now. */
		ecr = parport_ip32_read_econtrol(p);
		if (!(ecr & ECR_F_EMPTY)) {
			/* FIFO is not empty: wait for an interrupt or a
			 * timeout to occur */
			wait_for_completion_interruptible_timeout(
				&priv->irq_complete, nfault_timeout);
			ecr = parport_ip32_read_econtrol(p);
			if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
			    && !lost_interrupt) {
				printk(KERN_WARNING PPIP32
				       "%s: lost interrupt in %s\n",
				       p->name, __func__);
				lost_interrupt = 1;
			}
		}

		/* Disable serviceIntr */
		parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);

		/* Check FIFO state */
		if (ecr & ECR_F_EMPTY) {
			/* FIFO is empty, fill it up */
			count = priv->fifo_depth;
			break;
		} else if (ecr & ECR_SERVINTR) {
			/* FIFO is not empty, but we know that we can safely
			 * push writeIntrThreshold bytes into it */
			count = priv->writeIntrThreshold;
			break;
		}

		/* FIFO is not empty, and we did not get any interrupt.
		 * Either it's time to check for nFault, or a signal is
		 * pending.  This is verified in
		 * parport_ip32_fifo_wait_break(), so we continue the loop. */
	} /* while (1) */

	return count;
}

/**
 * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses PIO to write the contents of the buffer @buf into the parallel port
 * FIFO.  Returns the number of bytes that were actually written.  It can
 * work with or without the help of interrupts.  The parallel port must be
 * correctly initialized before calling parport_ip32_fifo_write_block_pio().
 */
static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	const u8 *bufp = buf;
	size_t left = len;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	while (left > 0) {
		unsigned int count;

		count = (p->irq == PARPORT_IRQ_NONE) ?
			parport_ip32_fwp_wait_polling(p) :
			parport_ip32_fwp_wait_interrupt(p);
		if (count == 0)
			break;	/* Transmission should be stopped */
		if (count > left)
			count = left;
		if (count == 1) {
			writeb(*bufp, priv->regs.fifo);
			bufp++, left--;
		} else {
			writesb(priv->regs.fifo, bufp, count);
			bufp += count, left -= count;
		}
	}

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return len - left;
}

/**
 * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses DMA to write the contents of the buffer @buf into the parallel port
 * FIFO.  Returns the number of bytes that were actually written.  The
 * parallel port must be correctly initialized before calling
 * parport_ip32_fifo_write_block_dma().
 */
static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long nfault_timeout;
	unsigned long expire;
	size_t written;
	unsigned int ecr;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
	INIT_COMPLETION(priv->irq_complete);
	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);

	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	expire = jiffies + physport->cad->timeout;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;
		wait_for_completion_interruptible_timeout(&priv->irq_complete,
							  nfault_timeout);
		ecr = parport_ip32_read_econtrol(p);
		if (ecr & ECR_SERVINTR)
			break;	/* DMA transfer just finished */
	}
	parport_ip32_dma_stop();
	written = len - parport_ip32_dma_get_residue();

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return written;
}

/**
 * parport_ip32_fifo_write_block - write a block of data
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses PIO or DMA to write the contents of the buffer @buf into the parallel
 * port FIFO.  Returns the number of bytes that were actually written.
 */
static size_t parport_ip32_fifo_write_block(struct parport *p,
					    const void *buf, size_t len)
{
	size_t written = 0;

	if (len)
		/* FIXME - Maybe some threshold value should be set for @len
		 * under which we revert to PIO mode? */
		written = (p->modes & PARPORT_MODE_DMA) ?
			parport_ip32_fifo_write_block_dma(p, buf, len) :
			parport_ip32_fifo_write_block_pio(p, buf, len);

	return written;
}

/**
 * parport_ip32_drain_fifo - wait for FIFO to empty
 * @p: pointer to &struct parport
 * @timeout: timeout, in jiffies
 *
 * This function waits for the FIFO to empty.  It returns 1 when the FIFO is
 * empty, or 0 if the timeout @timeout is reached before that, or if a signal
 * is pending.
 */
static unsigned int parport_ip32_drain_fifo(struct parport *p,
					    unsigned long timeout)
{
	unsigned long expire = jiffies + timeout;
	unsigned int polling_interval;
	unsigned int counter;

	/* Busy wait for approx. 200us */
	for (counter = 0; counter < 40; counter++) {
		if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
			break;
		if (time_after(jiffies, expire))
			break;
		if (signal_pending(current))
			break;
		udelay(5);
	}
	/* Poll slowly.  The polling interval starts at 1 millisecond and is
	 * increased exponentially up to 128 milliseconds. */
	polling_interval = 1; /* msecs */
	while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
		if (time_after_eq(jiffies, expire))
			break;
		msleep_interruptible(polling_interval);
		if (signal_pending(current))
			break;
		if (polling_interval < 128)
			polling_interval *= 2;
	}

	return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
}

/**
 * parport_ip32_get_fifo_residue - reset FIFO and get residue
 * @p: pointer to &struct parport
 * @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
 *
 * This function resets the FIFO and returns the number of bytes remaining
 * in it.
 */
static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
						  unsigned int mode)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int residue;
	unsigned int cnfga;

	/* FIXME - We are missing one byte if the printer is off-line.  I
	 * don't know how to detect this.  It looks like the full bit is not
	 * always reliable.  For the moment, the problem is avoided in most
	 * cases by testing for BUSY in parport_ip32_compat_write_data().
	 */
	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
		residue = 0;
	else {
		pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);

		/* Stop all transfers.
		 *
		 * Microsoft's document instructs us to drive DCR_STROBE to 0,
		 * but it doesn't work (at least in Compatibility mode, not
		 * tested in ECP mode).  Switching directly to Test mode (as
		 * in parport_pc) is not an option: it confuses the port, and
		 * ECP service interrupts no longer work after that.  A hard
		 * reset is then needed to revert to a sane state.
		 *
		 * Let's hope that the FIFO is really stuck and that the
		 * peripheral doesn't wake up now.
		 */
		parport_ip32_frob_control(p, DCR_STROBE, 0);

		/* Fill up FIFO */
		for (residue = priv->fifo_depth; residue > 0; residue--) {
			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
				break;
			writeb(0x00, priv->regs.fifo);
		}
	}
	if (residue)
		pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
			  p->name, residue,
			  (residue == 1) ? " was" : "s were");

	/* Now reset the FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);

	/* Host recovery for ECP mode */
	if (mode == ECR_MODE_ECP) {
		parport_ip32_data_reverse(p);
		parport_ip32_frob_control(p, DCR_nINIT, 0);
		if (parport_wait_peripheral(p, DSR_PERROR, 0))
			pr_debug1(PPIP32 "%s: PError timeout 1 in %s\n",
				  p->name, __func__);
		parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
		parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
			pr_debug1(PPIP32 "%s: PError timeout 2 in %s\n",
				  p->name, __func__);
	}

	/* Adjust residue if needed */
	parport_ip32_set_mode(p, ECR_MODE_CFG);
	cnfga = readb(priv->regs.cnfgA);
	if (!(cnfga & CNFGA_nBYTEINTRANS)) {
		pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
			  p->name, cnfga);
		pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
			  p->name);
		residue++;
	}

	/* Don't care about partial PWords since we do not support
	 * PWord != 1 byte. */

	/* Back to forward PS2 mode. */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_data_forward(p);

	return residue;
}

/**
 * parport_ip32_compat_write_data - write a block of data in SPP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 */
static size_t parport_ip32_compat_write_data(struct parport *p,
					     const void *buf, size_t len,
					     int flags)
{
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_write_compat(p, buf, len, flags);

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_PPF);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				    DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid flooding the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait for the FIFO to empty.  The timeout is proportional to the
	 * FIFO depth. */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}

/*
 * FIXME - Insert here parport_ip32_ecp_read_data().
 */

/**
 * parport_ip32_ecp_write_data - write a block of data in ECP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 */
static size_t parport_ip32_ecp_write_data(struct parport *p,
					  const void *buf, size_t len,
					  int flags)
{
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_ecp_write_data(p, buf, len, flags);

	/* Negotiate to forward mode if necessary. */
	if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
		/* Event 47: Set nInit high. */
		parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
					  DCR_nINIT | DCR_AUTOFD);

		/* Event 49: PError goes high. */
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
			printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
			       p->name, __func__);
			physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
			return 0;
		}
	}

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_ECP);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				    DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid flooding the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait for the FIFO to empty.  The timeout is proportional to the
	 * FIFO depth. */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}

/*
 * FIXME - Insert here parport_ip32_ecp_write_addr().
 */

/*--- Default parport operations ---------------------------------------*/
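
/*
 * Note (added for clarity, based on parport_ip32_probe_port() below): this
 * table deliberately points the EPP, ECP and compatibility hooks at the
 * generic parport_ieee1284_* software implementations.  The FIFO-accelerated
 * parport_ip32_* versions are patched into the port's ops by the probe code
 * only when the corresponding bit in the "features" module parameter is set
 * (and, for SPP/ECP, when the FIFO probe succeeds).
 */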
static __initdata struct parport_operations parport_ip32_ops = {
	.write_data = parport_ip32_write_data,
	.read_data = parport_ip32_read_data,
	.write_control = parport_ip32_write_control,
	.read_control = parport_ip32_read_control,
	.frob_control = parport_ip32_frob_control,
	.read_status = parport_ip32_read_status,
	.enable_irq = parport_ip32_enable_irq,
	.disable_irq = parport_ip32_disable_irq,
	.data_forward = parport_ip32_data_forward,
	.data_reverse = parport_ip32_data_reverse,
	.init_state = parport_ip32_init_state,
	.save_state = parport_ip32_save_state,
	.restore_state = parport_ip32_restore_state,
	.epp_write_data = parport_ieee1284_epp_write_data,
	.epp_read_data = parport_ieee1284_epp_read_data,
	.epp_write_addr = parport_ieee1284_epp_write_addr,
	.epp_read_addr = parport_ieee1284_epp_read_addr,
	.ecp_write_data = parport_ieee1284_ecp_write_data,
	.ecp_read_data = parport_ieee1284_ecp_read_data,
	.ecp_write_addr = parport_ieee1284_ecp_write_addr,
	.compat_write_data = parport_ieee1284_write_compat,
	.nibble_read_data = parport_ieee1284_read_nibble,
	.byte_read_data = parport_ieee1284_read_byte,
	.owner = THIS_MODULE,
};

/*--- Device detection -------------------------------------------------*/

/**
 * parport_ip32_ecp_supported - check for an ECP port
 * @p: pointer to the &parport structure
 *
 * Returns 1 if an ECP port is found, and 0 otherwise.  This function
 * actually checks if an Extended Control Register seems to be present.  On
 * successful return, the port is placed in SPP mode.
 */
static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int ecr;

	ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
	writeb(ecr, priv->regs.ecr);
	if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
		goto fail;

	pr_probe(p, "Found working ECR register\n");
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	return 1;

fail:
	pr_probe(p, "ECR register not found\n");
	return 0;
}

/**
 * parport_ip32_fifo_supported - check for FIFO parameters
 * @p: pointer to the &parport structure
 *
 * Checks for the FIFO parameters of an Extended Capabilities Port and
 * adjusts the FIFO parameters in the parport structure accordingly.  Returns
 * 1 on success, and 0 otherwise.  On return, the port is placed in SPP mode.
 */
static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int configa, configb;
	unsigned int pword;
	unsigned int i;

	/* Configuration mode */
	parport_ip32_set_mode(p, ECR_MODE_CFG);
	configa = readb(priv->regs.cnfgA);
	configb = readb(priv->regs.cnfgB);

	/* Find out PWord size */
	switch (configa & CNFGA_ID_MASK) {
	case CNFGA_ID_8:
		pword = 1;
		break;
	case CNFGA_ID_16:
		pword = 2;
		break;
	case CNFGA_ID_32:
		pword = 4;
		break;
	default:
		pr_probe(p, "Unknown implementation ID: 0x%0x\n",
			 (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT);
		goto fail;
		break;
	}
	if (pword != 1) {
		pr_probe(p, "Unsupported PWord size: %u\n", pword);
		goto fail;
	}
	priv->pword = pword;
	pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);

	/* Check for compression support */
	writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB);
	if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS)
		pr_probe(p, "Hardware compression detected (unsupported)\n");
	writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB);

	/* Reset FIFO and go in test mode (no interrupt, no DMA) */
	parport_ip32_set_mode(p, ECR_MODE_TST);

	/* FIFO must be empty now */
	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
		pr_probe(p, "FIFO not reset\n");
		goto fail;
	}

	/* Find out FIFO depth. */
	priv->fifo_depth = 0;
	for (i = 0; i < 1024; i++) {
		if (readb(priv->regs.ecr) & ECR_F_FULL) {
			/* FIFO full */
			priv->fifo_depth = i;
			break;
		}
		writeb((u8)i, priv->regs.fifo);
	}
	if (i >= 1024) {
		pr_probe(p, "Can't fill FIFO\n");
		goto fail;
	}
	if (!priv->fifo_depth) {
		pr_probe(p, "Can't get FIFO depth\n");
		goto fail;
	}
	pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);

	/* Enable interrupts */
	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

	/* Find out writeIntrThreshold: number of PWords we know we can write
	 * if we get an interrupt. */
	priv->writeIntrThreshold = 0;
	for (i = 0; i < priv->fifo_depth; i++) {
		if (readb(priv->regs.fifo) != (u8)i) {
			pr_probe(p, "Invalid data in FIFO\n");
			goto fail;
		}
		if (!priv->writeIntrThreshold
		    && readb(priv->regs.ecr) & ECR_SERVINTR)
			/* writeIntrThreshold reached */
			priv->writeIntrThreshold = i + 1;
		if (i + 1 < priv->fifo_depth
		    && readb(priv->regs.ecr) & ECR_F_EMPTY) {
			/* FIFO empty before the last byte? */
			pr_probe(p, "Data lost in FIFO\n");
			goto fail;
		}
	}
	if (!priv->writeIntrThreshold) {
		pr_probe(p, "Can't get writeIntrThreshold\n");
		goto fail;
	}
	pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);

	/* FIFO must be empty now */
	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
		pr_probe(p, "Can't empty FIFO\n");
		goto fail;
	}

	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	/* Set reverse direction (must be in PS2 mode) */
	parport_ip32_data_reverse(p);
	/* Test FIFO, no interrupt, no DMA */
	parport_ip32_set_mode(p, ECR_MODE_TST);
	/* Enable interrupts */
	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

	/* Find out readIntrThreshold: number of PWords we can read if we get
	 * an interrupt. */
	priv->readIntrThreshold = 0;
	for (i = 0; i < priv->fifo_depth; i++) {
		writeb(0xaa, priv->regs.fifo);
		if (readb(priv->regs.ecr) & ECR_SERVINTR) {
			/* readIntrThreshold reached */
			priv->readIntrThreshold = i + 1;
			break;
		}
	}
	if (!priv->readIntrThreshold) {
		pr_probe(p, "Can't get readIntrThreshold\n");
		goto fail;
	}
	pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);

	/* Reset ECR */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_data_forward(p);
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	return 1;

fail:
	priv->fifo_depth = 0;
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	return 0;
}

/*--- Initialization code ----------------------------------------------*/

/**
 * parport_ip32_make_isa_registers - compute (ISA) register addresses
 * @regs: pointer to &struct parport_ip32_regs to fill
 * @base: base address of standard and EPP registers
 * @base_hi: base address of ECP registers
 * @regshift: how much to shift register offset by
 *
 * Compute register addresses, according to the ISA standard.  The addresses
 * of the standard and EPP registers are computed from address @base.  The
 * addresses of the ECP registers are computed from address @base_hi.
 */
static void __init
parport_ip32_make_isa_registers(struct parport_ip32_regs *regs,
				void __iomem *base, void __iomem *base_hi,
				unsigned int regshift)
{
#define r_base(offset)    ((u8 __iomem *)base    + ((offset) << regshift))
#define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
	*regs = (struct parport_ip32_regs){
		.data = r_base(0),
		.dsr = r_base(1),
		.dcr = r_base(2),
		.eppAddr = r_base(3),
		.eppData0 = r_base(4),
		.eppData1 = r_base(5),
		.eppData2 = r_base(6),
		.eppData3 = r_base(7),
		.ecpAFifo = r_base(0),
		.fifo = r_base_hi(0),
		.cnfgA = r_base_hi(0),
		.cnfgB = r_base_hi(1),
		.ecr = r_base_hi(2)
	};
#undef r_base_hi
#undef r_base
}
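
/*
 * Layout note (derived from the probe code below, not from hardware
 * documentation): parport_ip32_probe_port() calls this helper with
 * regshift = 8, so on the MACE chip consecutive ISA-style registers end up
 * 256 bytes apart, e.g. data at base + 0x000, dsr at base + 0x100 and dcr at
 * base + 0x200, with the ECP registers spaced the same way from base_hi.
 */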

/**
 * parport_ip32_probe_port - probe and register IP32 built-in parallel port
 *
 * Returns the newly allocated &parport structure.  On error, an error code
 * is encoded in the return value with the ERR_PTR() function.
 */
static __init struct parport *parport_ip32_probe_port(void)
{
	struct parport_ip32_regs regs;
	struct parport_ip32_private *priv = NULL;
	struct parport_operations *ops = NULL;
	struct parport *p = NULL;
	int err;

	parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
					&mace->isa.ecp1284, 8 /* regshift */);

	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
	priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL);
	p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
	if (ops == NULL || priv == NULL || p == NULL) {
		err = -ENOMEM;
		goto fail;
	}
	p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
	p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
	p->private_data = priv;

	*ops = parport_ip32_ops;
	*priv = (struct parport_ip32_private){
		.regs = regs,
		.dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT |
				DCR_AUTOFD | DCR_STROBE,
		.irq_mode = PARPORT_IP32_IRQ_FWD,
	};
	init_completion(&priv->irq_complete);

	/* Probe port. */
	if (!parport_ip32_ecp_supported(p)) {
		err = -ENODEV;
		goto fail;
	}
	parport_ip32_dump_state(p, "begin init", 0);

	/* We found what looks like a working ECR register.  Simply assume
	 * that all modes are correctly supported.  Enable basic modes. */
	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
	p->modes |= PARPORT_MODE_TRISTATE;

	if (!parport_ip32_fifo_supported(p)) {
		printk(KERN_WARNING PPIP32
		       "%s: error: FIFO disabled\n", p->name);
		/* Disable hardware modes depending on a working FIFO. */
		features &= ~PARPORT_IP32_ENABLE_SPP;
		features &= ~PARPORT_IP32_ENABLE_ECP;
		/* DMA is not needed if FIFO is not supported. */
		features &= ~PARPORT_IP32_ENABLE_DMA;
	}

	/* Request IRQ */
	if (features & PARPORT_IP32_ENABLE_IRQ) {
		int irq = MACEISA_PARALLEL_IRQ;
		if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
			printk(KERN_WARNING PPIP32
			       "%s: error: IRQ disabled\n", p->name);
			/* DMA cannot work without interrupts. */
			features &= ~PARPORT_IP32_ENABLE_DMA;
		} else {
			pr_probe(p, "Interrupt support enabled\n");
			p->irq = irq;
			priv->dcr_writable |= DCR_IRQ;
		}
	}

	/* Allocate DMA resources */
	if (features & PARPORT_IP32_ENABLE_DMA) {
		if (parport_ip32_dma_register())
			printk(KERN_WARNING PPIP32
			       "%s: error: DMA disabled\n", p->name);
		else {
			pr_probe(p, "DMA support enabled\n");
			p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
			p->modes |= PARPORT_MODE_DMA;
		}
	}

	if (features & PARPORT_IP32_ENABLE_SPP) {
		/* Enable compatibility FIFO mode */
		p->ops->compat_write_data = parport_ip32_compat_write_data;
		p->modes |= PARPORT_MODE_COMPAT;
		pr_probe(p, "Hardware support for SPP mode enabled\n");
	}
	if (features & PARPORT_IP32_ENABLE_EPP) {
		/* Set up access functions to use EPP hardware. */
		p->ops->epp_read_data = parport_ip32_epp_read_data;
		p->ops->epp_write_data = parport_ip32_epp_write_data;
		p->ops->epp_read_addr = parport_ip32_epp_read_addr;
		p->ops->epp_write_addr = parport_ip32_epp_write_addr;
		p->modes |= PARPORT_MODE_EPP;
		pr_probe(p, "Hardware support for EPP mode enabled\n");
	}
	if (features & PARPORT_IP32_ENABLE_ECP) {
		/* Enable ECP FIFO mode */
		p->ops->ecp_write_data = parport_ip32_ecp_write_data;
		/* FIXME - not implemented */
		/* p->ops->ecp_read_data = parport_ip32_ecp_read_data; */
		/* p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */
		p->modes |= PARPORT_MODE_ECP;
		pr_probe(p, "Hardware support for ECP mode enabled\n");
	}

	/* Initialize the port with sensible values */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_write_data(p, 0x00);
	parport_ip32_dump_state(p, "end init", 0);

	/* Print out what we found */
	printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
	       p->name, p->base, p->base_hi);
	if (p->irq != PARPORT_IRQ_NONE)
		printk(", irq %d", p->irq);
	printk(" [");
#define printmode(x)	if (p->modes & PARPORT_MODE_##x)	\
				printk("%s%s", f++ ? "," : "", #x)
	{
		unsigned int f = 0;
		printmode(PCSPP);
		printmode(TRISTATE);
		printmode(COMPAT);
		printmode(EPP);
		printmode(ECP);
		printmode(DMA);
	}
#undef printmode
	printk("]\n");

	parport_announce_port(p);
	return p;

fail:
	if (p)
		parport_put_port(p);
	kfree(priv);
	kfree(ops);
	return ERR_PTR(err);
}

/**
 * parport_ip32_unregister_port - unregister a parallel port
 * @p: pointer to the &struct parport
 *
 * Unregisters a parallel port and frees previously allocated resources
 * (memory, IRQ, ...).
 */
static __exit void parport_ip32_unregister_port(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport_operations *ops = p->ops;

	parport_remove_port(p);
	if (p->modes & PARPORT_MODE_DMA)
		parport_ip32_dma_unregister();
	if (p->irq != PARPORT_IRQ_NONE)
		free_irq(p->irq, p);
	parport_put_port(p);
	kfree(priv);
	kfree(ops);
}

/**
 * parport_ip32_init - module initialization function
 */
static int __init parport_ip32_init(void)
{
	pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
	pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__);
	this_port = parport_ip32_probe_port();
	return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
}

/**
 * parport_ip32_exit - module termination function
 */
static void __exit parport_ip32_exit(void)
{
	parport_ip32_unregister_port(this_port);
}

/*--- Module stuff -----------------------------------------------------*/

MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>");
MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.6");	/* update in parport_ip32_init() too */

module_init(parport_ip32_init);
module_exit(parport_ip32_exit);

module_param(verbose_probing, bool, S_IRUGO);
MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");

module_param(features, uint, S_IRUGO);
MODULE_PARM_DESC(features,
		 "Bit mask of features to enable"
		 ", bit 0: IRQ support"
		 ", bit 1: DMA support"
		 ", bit 2: hardware SPP mode"
		 ", bit 3: hardware EPP mode"
		 ", bit 4: hardware ECP mode");
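
/*
 * Usage sketch (an assumed invocation, not taken from the original source):
 * with the bit assignments documented above, loading the module with e.g.
 *
 *	modprobe parport_ip32 features=0x1f verbose_probing=1
 *
 * would request all five hardware features (IRQ, DMA, SPP, EPP, ECP) plus
 * verbose probing, while features=0x01 would keep only IRQ support.
 */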

/*--- Inform (X)Emacs about preferred coding style ---------------------*/

/*
 * Local Variables:
 * mode: c
 * c-file-style: "linux"
 * indent-tabs-mode: t
 * tab-width: 8
 * fill-column: 78
 * ispell-local-dictionary: "american"
 * End:
 */