/*
 * Device driver for the via-cuda on Apple Powermacs.
 *
 * The VIA (versatile interface adapter) interfaces to the CUDA,
 * a 6805 microprocessor core which controls the ADB (Apple Desktop
 * Bus) which connects to the keyboard and mouse.  The CUDA also
 * controls system power and the RTC (real time clock) chip.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
#else
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/machw.h>
#include <asm/mac_via.h>
#endif
#include <asm/io.h>
#include <asm/system.h>
#include <linux/init.h>

static volatile unsigned char __iomem *via;
static DEFINE_SPINLOCK(cuda_lock);

/* VIA registers - spaced 0x200 bytes apart */
#define RS      0x200   /* skip between registers */
#define B       0       /* B-side data */
#define A       RS      /* A-side data */
#define DIRB    (2*RS)  /* B-side direction (1=output) */
#define DIRA    (3*RS)  /* A-side direction (1=output) */
#define T1CL    (4*RS)  /* Timer 1 ctr/latch (low 8 bits) */
#define T1CH    (5*RS)  /* Timer 1 counter (high 8 bits) */
#define T1LL    (6*RS)  /* Timer 1 latch (low 8 bits) */
#define T1LH    (7*RS)  /* Timer 1 latch (high 8 bits) */
#define T2CL    (8*RS)  /* Timer 2 ctr/latch (low 8 bits) */
#define T2CH    (9*RS)  /* Timer 2 counter (high 8 bits) */
#define SR      (10*RS) /* Shift register */
#define ACR     (11*RS) /* Auxiliary control register */
#define PCR     (12*RS) /* Peripheral control register */
#define IFR     (13*RS) /* Interrupt flag register */
#define IER     (14*RS) /* Interrupt enable register */
#define ANH     (15*RS) /* A-side data, no handshake */

/* Bits in B data register: all active low */
#define TREQ    0x08    /* Transfer request (input) */
#define TACK    0x10    /* Transfer acknowledge (output) */
#define TIP     0x20    /* Transfer in progress (output) */
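
/*
 * Handshake, as implemented below: the host (this driver) holds TIP
 * low for the duration of a transfer and toggles TACK once per byte
 * clocked through the shift register; the CUDA pulls TREQ low when it
 * has data for us and releases it on the last byte of its packet.
 */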

/* Bits in ACR */
#define SR_CTRL 0x1c    /* Shift register control bits */
#define SR_EXT  0x0c    /* Shift on external clock */
#define SR_OUT  0x10    /* Shift out if 1 */

/* Bits in IFR and IER */
#define IER_SET 0x80    /* set bits in IER */
#define IER_CLR 0       /* clear bits in IER */
#define SR_INT  0x04    /* Shift register full/empty */
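
/*
 * Driver state machine, driven by cuda_interrupt() below.  A request
 * goes idle -> sent_first_byte -> sending; if a reply is expected we
 * move to awaiting_reply, then reading and read_done as the CUDA shifts
 * the reply back.  Unsolicited packets from the CUDA take the shorter
 * idle -> reading -> read_done path.
 */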

static enum cuda_state {
    idle,
    sent_first_byte,
    sending,
    reading,
    read_done,
    awaiting_reply
} cuda_state;

static struct adb_request *current_req;
static struct adb_request *last_req;
static unsigned char cuda_rbuf[16];
static unsigned char *reply_ptr;
static int reading_reply;
static int data_index;
static int cuda_irq;
#ifdef CONFIG_PPC
static struct device_node *vias;
#endif
static int cuda_fully_inited;

#ifdef CONFIG_ADB
static int cuda_probe(void);
static int cuda_init(void);
static int cuda_send_request(struct adb_request *req, int sync);
static int cuda_adb_autopoll(int devs);
static int cuda_reset_adb_bus(void);
#endif /* CONFIG_ADB */

static int cuda_init_via(void);
static void cuda_start(void);
static irqreturn_t cuda_interrupt(int irq, void *arg);
static void cuda_input(unsigned char *buf, int nb);
void cuda_poll(void);
static int cuda_write(struct adb_request *req);

int cuda_request(struct adb_request *req,
                 void (*done)(struct adb_request *), int nbytes, ...);

#ifdef CONFIG_ADB
struct adb_driver via_cuda_driver = {
    "CUDA",
    cuda_probe,
    cuda_init,
    cuda_send_request,
    cuda_adb_autopoll,
    cuda_poll,
    cuda_reset_adb_bus
};
#endif /* CONFIG_ADB */

#ifdef CONFIG_PPC
int __init find_via_cuda(void)
{
    struct adb_request req;
    phys_addr_t taddr;
    const u32 *reg;
    int err;

    if (vias != 0)
        return 1;
    vias = of_find_node_by_name(NULL, "via-cuda");
    if (vias == 0)
        return 0;

    reg = of_get_property(vias, "reg", NULL);
    if (reg == NULL) {
        printk(KERN_ERR "via-cuda: No \"reg\" property !\n");
        goto fail;
    }
    taddr = of_translate_address(vias, reg);
    if (taddr == 0) {
        printk(KERN_ERR "via-cuda: Can't translate address !\n");
        goto fail;
    }
    via = ioremap(taddr, 0x2000);
    if (via == NULL) {
        printk(KERN_ERR "via-cuda: Can't map address !\n");
        goto fail;
    }

    cuda_state = idle;
    sys_ctrler = SYS_CTRLER_CUDA;

    err = cuda_init_via();
    if (err) {
        printk(KERN_ERR "cuda_init_via() failed\n");
        via = NULL;
        return 0;
    }

    /* Clear and enable interrupts, but only on PPC. On 68K it's done */
    /* for us by the main VIA driver in arch/m68k/mac/via.c */
    out_8(&via[IFR], 0x7f);             /* clear interrupts by writing 1s */
    out_8(&via[IER], IER_SET|SR_INT);   /* enable interrupt from SR */

    /* enable autopoll */
    cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
    while (!req.complete)
        cuda_poll();

    return 1;

 fail:
    of_node_put(vias);
    vias = NULL;
    return 0;
}
#endif /* CONFIG_PPC */

static int __init via_cuda_start(void)
{
    if (via == NULL)
        return -ENODEV;

#ifdef CONFIG_MAC
    cuda_irq = IRQ_MAC_ADB;
#else /* CONFIG_MAC */
    cuda_irq = irq_of_parse_and_map(vias, 0);
    if (cuda_irq == NO_IRQ) {
        printk(KERN_ERR "via-cuda: can't map interrupts for %s\n",
               vias->full_name);
        return -ENODEV;
    }
#endif /* CONFIG_MAC */

    if (request_irq(cuda_irq, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
        printk(KERN_ERR "via-cuda: can't request irq %d\n", cuda_irq);
        return -EAGAIN;
    }

    printk("Macintosh CUDA driver v0.5 for Unified ADB.\n");

    cuda_fully_inited = 1;
    return 0;
}

device_initcall(via_cuda_start);

#ifdef CONFIG_ADB
static int
cuda_probe(void)
{
#ifdef CONFIG_PPC
    if (sys_ctrler != SYS_CTRLER_CUDA)
        return -ENODEV;
#else
    if (macintosh_config->adb_type != MAC_ADB_CUDA)
        return -ENODEV;
    via = via1;
#endif
    return 0;
}

static int __init
cuda_init(void)
{
#ifdef CONFIG_PPC
    if (via == NULL)
        return -ENODEV;
    return 0;
#else
    int err = cuda_init_via();
    if (err) {
        printk(KERN_ERR "cuda_init_via() failed\n");
        return -ENODEV;
    }
    out_8(&via[IER], IER_SET|SR_INT);   /* enable interrupt from SR */

    return via_cuda_start();
#endif
}
#endif /* CONFIG_ADB */
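
/* Busy-wait for cond, checking every 100us for up to 1000 iterations
 * (roughly 100ms).  On timeout this prints a message and returns -ENXIO
 * from the enclosing function, so it can only be used in functions
 * returning int. */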
#define WAIT_FOR(cond, what) \
    do { \
        int x; \
        for (x = 1000; !(cond); --x) { \
            if (x == 0) { \
                printk("Timeout waiting for " what "\n"); \
                return -ENXIO; \
            } \
            udelay(100); \
        } \
    } while (0)
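
/*
 * Set up the VIA for CUDA transfers: TACK and TIP become (negated)
 * outputs, TREQ an input, and the shift register is clocked externally
 * by the CUDA; then run the sync handshake spelled out in the comments
 * below.
 */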
static int
cuda_init_via(void)
{
    out_8(&via[DIRB], (in_8(&via[DIRB]) | TACK | TIP) & ~TREQ); /* TACK & TIP out */
    out_8(&via[B], in_8(&via[B]) | TACK | TIP);                 /* negate them */
    out_8(&via[ACR], (in_8(&via[ACR]) & ~SR_CTRL) | SR_EXT);    /* SR data in */
    (void)in_8(&via[SR]);                                       /* clear any left-over data */

#ifdef CONFIG_PPC
    out_8(&via[IER], 0x7f);             /* disable interrupts from VIA */
    (void)in_8(&via[IER]);
#else
    out_8(&via[IER], SR_INT);           /* disable SR interrupt from VIA */
#endif

    /* delay 4ms and then clear any pending interrupt */
    mdelay(4);
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);

    /* sync with the CUDA - assert TACK without TIP */
    out_8(&via[B], in_8(&via[B]) & ~TACK);

    /* wait for the CUDA to assert TREQ in response */
    WAIT_FOR((in_8(&via[B]) & TREQ) == 0, "CUDA response to sync");

    /* wait for the interrupt and then clear it */
    WAIT_FOR(in_8(&via[IFR]) & SR_INT, "CUDA response to sync (2)");
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);

    /* finish the sync by negating TACK */
    out_8(&via[B], in_8(&via[B]) | TACK);

    /* wait for the CUDA to negate TREQ and the corresponding interrupt */
    WAIT_FOR(in_8(&via[B]) & TREQ, "CUDA response to sync (3)");
    WAIT_FOR(in_8(&via[IFR]) & SR_INT, "CUDA response to sync (4)");
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);
    out_8(&via[B], in_8(&via[B]) | TIP);        /* should be unnecessary */

    return 0;
}

#ifdef CONFIG_ADB
/* Send an ADB command */
static int
cuda_send_request(struct adb_request *req, int sync)
{
    int i;

    if ((via == NULL) || !cuda_fully_inited) {
        req->complete = 1;
        return -ENXIO;
    }

    req->reply_expected = 1;

    i = cuda_write(req);
    if (i)
        return i;

    if (sync) {
        while (!req->complete)
            cuda_poll();
    }
    return 0;
}

/* Enable/disable autopolling */
static int
cuda_adb_autopoll(int devs)
{
    struct adb_request req;

    if ((via == NULL) || !cuda_fully_inited)
        return -ENXIO;

    cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, (devs? 1: 0));
    while (!req.complete)
        cuda_poll();
    return 0;
}

/* Reset adb bus - how do we do this?? */
static int
cuda_reset_adb_bus(void)
{
    struct adb_request req;

    if ((via == NULL) || !cuda_fully_inited)
        return -ENXIO;

    cuda_request(&req, NULL, 2, ADB_PACKET, 0); /* maybe? */
    while (!req.complete)
        cuda_poll();
    return 0;
}
#endif /* CONFIG_ADB */

/* Construct and send a cuda request */
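/*
 * For example, the autopoll request issued from find_via_cuda() above is
 * built and waited for like this:
 *
 *     struct adb_request req;
 *
 *     cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
 *     while (!req.complete)
 *         cuda_poll();
 *
 * Passing a done callback instead of NULL makes the call asynchronous;
 * the callback is invoked from cuda_interrupt() once the reply arrives.
 */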
int
cuda_request(struct adb_request *req, void (*done)(struct adb_request *),
             int nbytes, ...)
{
    va_list list;
    int i;

    if (via == NULL) {
        req->complete = 1;
        return -ENXIO;
    }

    req->nbytes = nbytes;
    req->done = done;
    va_start(list, nbytes);
    for (i = 0; i < nbytes; ++i)
        req->data[i] = va_arg(list, int);
    va_end(list);
    req->reply_expected = 1;
    return cuda_write(req);
}

static int
cuda_write(struct adb_request *req)
{
    unsigned long flags;

    if (req->nbytes < 2 || req->data[0] > CUDA_PACKET) {
        req->complete = 1;
        return -EINVAL;
    }
    req->next = NULL;
    req->sent = 0;
    req->complete = 0;
    req->reply_len = 0;

    spin_lock_irqsave(&cuda_lock, flags);
    if (current_req != 0) {
        last_req->next = req;
        last_req = req;
    } else {
        current_req = req;
        last_req = req;
        if (cuda_state == idle)
            cuda_start();
    }
    spin_unlock_irqrestore(&cuda_lock, flags);

    return 0;
}

static void
cuda_start(void)
{
    struct adb_request *req;

    /* assert cuda_state == idle */
    /* get the packet to send */
    req = current_req;
    if (req == 0)
        return;
    if ((in_8(&via[B]) & TREQ) == 0)
        return;                 /* a byte is coming in from the CUDA */

    /* set the shift register to shift out and send a byte */
    out_8(&via[ACR], in_8(&via[ACR]) | SR_OUT);
    out_8(&via[SR], req->data[0]);
    out_8(&via[B], in_8(&via[B]) & ~TIP);
    cuda_state = sent_first_byte;
}

void
cuda_poll(void)
{
    /* cuda_interrupt only takes a normal lock, we disable
     * interrupts here to avoid re-entering and thus deadlocking.
     */
    disable_irq(cuda_irq);
    cuda_interrupt(0, NULL);
    enable_irq(cuda_irq);
}

static irqreturn_t
cuda_interrupt(int irq, void *arg)
{
    int status;
    struct adb_request *req = NULL;
    unsigned char ibuf[16];
    int ibuf_len = 0;
    int complete = 0;

    spin_lock(&cuda_lock);

    /* On powermacs, this handler is registered for the VIA IRQ. But it uses
     * just the shift register IRQ -- other VIA interrupt sources are disabled.
     * On m68k macs, the VIA IRQ sources are dispatched individually. Unless
     * we are polling, the shift register IRQ flag has already been cleared.
     */
#ifdef CONFIG_MAC
    if (!arg)
#endif
    {
        if ((in_8(&via[IFR]) & SR_INT) == 0) {
            spin_unlock(&cuda_lock);
            return IRQ_NONE;
        } else {
            out_8(&via[IFR], SR_INT);
        }
    }
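
    /* status gathers the active (low) TIP and TREQ lines together with
     * SR_OUT from the ACR, so the cases below can compare it against
     * the combination of signals they expect to see. */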
    status = (~in_8(&via[B]) & (TIP|TREQ)) | (in_8(&via[ACR]) & SR_OUT);
    /* printk("cuda_interrupt: state=%d status=%x\n", cuda_state, status); */

    switch (cuda_state) {
    case idle:
        /* CUDA has sent us the first byte of data - unsolicited */
        if (status != TREQ)
            printk("cuda: state=idle, status=%x\n", status);
        (void)in_8(&via[SR]);
        out_8(&via[B], in_8(&via[B]) & ~TIP);
        cuda_state = reading;
        reply_ptr = cuda_rbuf;
        reading_reply = 0;
        break;

    case awaiting_reply:
        /* CUDA has sent us the first byte of data of a reply */
        if (status != TREQ)
            printk("cuda: state=awaiting_reply, status=%x\n", status);
        (void)in_8(&via[SR]);
        out_8(&via[B], in_8(&via[B]) & ~TIP);
        cuda_state = reading;
        reply_ptr = current_req->reply;
        reading_reply = 1;
        break;
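
    /* A collision: the CUDA asserted TREQ to send us something just as
     * we pushed our first byte out.  We back off and let the incoming
     * packet through; the request stays at the head of the queue, so
     * cuda_start() should send it again once that transfer finishes. */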
    case sent_first_byte:
        if (status == TREQ + TIP + SR_OUT) {
            /* collision */
            out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
            (void)in_8(&via[SR]);
            out_8(&via[B], in_8(&via[B]) | TIP | TACK);
            cuda_state = idle;
        } else {
            /* assert status == TIP + SR_OUT */
            if (status != TIP + SR_OUT)
                printk("cuda: state=sent_first_byte status=%x\n", status);
            out_8(&via[SR], current_req->data[1]);
            out_8(&via[B], in_8(&via[B]) ^ TACK);
            data_index = 2;
            cuda_state = sending;
        }
        break;

    case sending:
        req = current_req;
        if (data_index >= req->nbytes) {
            out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
            (void)in_8(&via[SR]);
            out_8(&via[B], in_8(&via[B]) | TACK | TIP);
            req->sent = 1;
            if (req->reply_expected) {
                cuda_state = awaiting_reply;
            } else {
                current_req = req->next;
                complete = 1;
                /* not sure about this */
                cuda_state = idle;
                cuda_start();
            }
        } else {
            out_8(&via[SR], req->data[data_index++]);
            out_8(&via[B], in_8(&via[B]) ^ TACK);
        }
        break;

    case reading:
        *reply_ptr++ = in_8(&via[SR]);
        if (status == TIP) {
            /* that's all folks */
            out_8(&via[B], in_8(&via[B]) | TACK | TIP);
            cuda_state = read_done;
        } else {
            /* assert status == TIP | TREQ */
            if (status != TIP + TREQ)
                printk("cuda: state=reading status=%x\n", status);
            out_8(&via[B], in_8(&via[B]) ^ TACK);
        }
        break;

    case read_done:
        (void)in_8(&via[SR]);
        if (reading_reply) {
            req = current_req;
            req->reply_len = reply_ptr - req->reply;
            if (req->data[0] == ADB_PACKET) {
                /* Have to adjust the reply from ADB commands */
                if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) {
                    /* the 0x2 bit indicates no response */
                    req->reply_len = 0;
                } else {
                    /* leave just the command and result bytes in the reply */
                    req->reply_len -= 2;
                    memmove(req->reply, req->reply + 2, req->reply_len);
                }
            }
            current_req = req->next;
            complete = 1;
        } else {
            /* This is tricky. We must break the spinlock to call
             * cuda_input. However, doing so means we might get
             * re-entered from another CPU getting an interrupt
             * or calling cuda_poll(). I ended up using the stack
             * (it's only for 16 bytes) and moving the actual
             * call to cuda_input to outside of the lock.
             */
            ibuf_len = reply_ptr - cuda_rbuf;
            memcpy(ibuf, cuda_rbuf, ibuf_len);
        }
        if (status == TREQ) {
            out_8(&via[B], in_8(&via[B]) & ~TIP);
            cuda_state = reading;
            reply_ptr = cuda_rbuf;
            reading_reply = 0;
        } else {
            cuda_state = idle;
            cuda_start();
        }
        break;

    default:
        printk("cuda_interrupt: unknown cuda_state %d?\n", cuda_state);
    }
    spin_unlock(&cuda_lock);
    if (complete && req) {
        void (*done)(struct adb_request *) = req->done;
        mb();
        req->complete = 1;
        /* Here, we assume that if the request has a done member, the
         * struct request will survive to setting req->complete to 1
         */
        if (done)
            (*done)(req);
    }
    if (ibuf_len)
        cuda_input(ibuf, ibuf_len);
    return IRQ_HANDLED;
}
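
/*
 * Process a packet that the CUDA sent on its own rather than as a reply
 * to one of our requests.  buf[0] is the packet type; for ADB_PACKET,
 * buf[1] carries flag bits (0x40 appears to mark an autopolled packet)
 * and the ADB data proper starts at buf[2].
 */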
static void
cuda_input(unsigned char *buf, int nb)
{
    int i;

    switch (buf[0]) {
    case ADB_PACKET:
#ifdef CONFIG_XMON
        if (nb == 5 && buf[2] == 0x2c) {
            extern int xmon_wants_key, xmon_adb_keycode;
            if (xmon_wants_key) {
                xmon_adb_keycode = buf[3];
                return;
            }
        }
#endif /* CONFIG_XMON */
#ifdef CONFIG_ADB
        adb_input(buf+2, nb-2, buf[1] & 0x40);
#endif /* CONFIG_ADB */
        break;

    default:
        printk("data from cuda (%d bytes):", nb);
        for (i = 0; i < nb; ++i)
            printk(" %.2x", buf[i]);
        printk("\n");
    }
}