pcilynx.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554
  1. /*
  2. * pcilynx.c - Texas Instruments PCILynx driver
  3. * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
  4. * Stephan Linz <linz@mazet.de>
  5. * Manfred Weihs <weihs@ict.tuwien.ac.at>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software Foundation,
  19. * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  20. */
  21. /*
  22. * Contributions:
  23. *
  24. * Manfred Weihs <weihs@ict.tuwien.ac.at>
  25. * reading bus info block (containing GUID) from serial
  26. * eeprom via i2c and storing it in config ROM
  27. * Reworked code for initiating bus resets
  28. * (long, short, with or without hold-off)
  29. * Enhancements in async and iso send code
  30. */
  31. #include <linux/kernel.h>
  32. #include <linux/slab.h>
  33. #include <linux/interrupt.h>
  34. #include <linux/wait.h>
  35. #include <linux/errno.h>
  36. #include <linux/module.h>
  37. #include <linux/moduleparam.h>
  38. #include <linux/init.h>
  39. #include <linux/pci.h>
  40. #include <linux/fs.h>
  41. #include <linux/poll.h>
  42. #include <linux/kdev_t.h>
  43. #include <linux/dma-mapping.h>
  44. #include <asm/byteorder.h>
  45. #include <asm/atomic.h>
  46. #include <asm/io.h>
  47. #include <asm/uaccess.h>
  48. #include <asm/irq.h>
  49. #include "csr1212.h"
  50. #include "ieee1394.h"
  51. #include "ieee1394_types.h"
  52. #include "hosts.h"
  53. #include "ieee1394_core.h"
  54. #include "highlevel.h"
  55. #include "pcilynx.h"
  56. #include <linux/i2c.h>
  57. #include <linux/i2c-algo-bit.h>
/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
/* print card specific information */
#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)

/* Debug variants: compiled to real printk calls only when verbose
 * debugging is configured, otherwise they expand to no-ops. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
#else
#define PRINT_GD(level, fmt, args...) do {} while (0)
#define PRINTD(level, card, fmt, args...) do {} while (0)
#endif
/* Module Parameters */
/* When non-zero, do not read the serial EEPROM over i2c; a generic bus
 * info block is used instead (read-only parameter, see description). */
static int skip_eeprom;
module_param(skip_eeprom, int, 0444);
MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");

static struct hpsb_host_driver lynx_driver;
/* Sequential id handed out to each card this driver binds to. */
static unsigned int card_id;
  75. /*
  76. * I2C stuff
  77. */
  78. /* the i2c stuff was inspired by i2c-philips-par.c */
  79. static void bit_setscl(void *data, int state)
  80. {
  81. if (state) {
  82. ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
  83. } else {
  84. ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
  85. }
  86. reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
  87. }
  88. static void bit_setsda(void *data, int state)
  89. {
  90. if (state) {
  91. ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
  92. } else {
  93. ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
  94. }
  95. reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
  96. }
  97. static int bit_getscl(void *data)
  98. {
  99. return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
  100. }
  101. static int bit_getsda(void *data)
  102. {
  103. return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
  104. }
/* Bit-banging callbacks and timing parameters handed to the
 * i2c-algo-bit framework for talking to the serial EEPROM. */
static struct i2c_algo_bit_data bit_data = {
	.setsda			= bit_setsda,
	.setscl			= bit_setscl,
	.getsda			= bit_getsda,
	.getscl			= bit_getscl,
	.udelay			= 5,
	.timeout		= 100,
};
  113. /*
  114. * PCL handling functions.
  115. */
  116. static pcl_t alloc_pcl(struct ti_lynx *lynx)
  117. {
  118. u8 m;
  119. int i, j;
  120. spin_lock(&lynx->lock);
  121. /* FIXME - use ffz() to make this readable */
  122. for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
  123. m = lynx->pcl_bmap[i];
  124. for (j = 0; j < 8; j++) {
  125. if (m & 1<<j) {
  126. continue;
  127. }
  128. m |= 1<<j;
  129. lynx->pcl_bmap[i] = m;
  130. spin_unlock(&lynx->lock);
  131. return 8 * i + j;
  132. }
  133. }
  134. spin_unlock(&lynx->lock);
  135. return -1;
  136. }
#if 0
/* Return a PCL slot to the bitmap.  Negative ids (alloc_pcl() failure
 * values) are silently ignored; freeing an unallocated slot is logged. */
static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
{
	int off, bit;

	off = pclid / 8;
	bit = pclid % 8;

	if (pclid < 0) {
		return;
	}

	spin_lock(&lynx->lock);
	if (lynx->pcl_bmap[off] & 1<<bit) {
		lynx->pcl_bmap[off] &= ~(1<<bit);
	} else {
		PRINT(KERN_ERR, lynx->id,
		      "attempted to free unallocated PCL %d", pclid);
	}
	spin_unlock(&lynx->lock);
}

/* functions useful for debugging */

/* Dump all fields of an in-memory PCL to the kernel log. */
static void pretty_print_pcl(const struct ti_pcl *pcl)
{
	int i;

	printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
	       pcl->next, pcl->user_data, pcl->pcl_status,
	       pcl->remaining_transfer_count, pcl->next_data_buffer);

	printk("PCL");
	for (i=0; i<13; i++) {
		printk(" c%x:%08x d%x:%08x",
		       i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
		if (!(i & 0x3) && (i != 12)) printk("\nPCL");
	}
	printk("\n");
}

/* Fetch PCL @pclid from card-local RAM and pretty-print it. */
static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
{
	struct ti_pcl pcl;

	get_pcl(lynx, pclid, &pcl);
	pretty_print_pcl(&pcl);
}
#endif
  177. /***********************************
  178. * IEEE-1394 functionality section *
  179. ***********************************/
  180. static int get_phy_reg(struct ti_lynx *lynx, int addr)
  181. {
  182. int retval;
  183. int i = 0;
  184. unsigned long flags;
  185. if (addr > 15) {
  186. PRINT(KERN_ERR, lynx->id,
  187. "%s: PHY register address %d out of range",
  188. __func__, addr);
  189. return -1;
  190. }
  191. spin_lock_irqsave(&lynx->phy_reg_lock, flags);
  192. reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
  193. do {
  194. retval = reg_read(lynx, LINK_PHY);
  195. if (i > 10000) {
  196. PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
  197. __func__);
  198. retval = -1;
  199. break;
  200. }
  201. i++;
  202. } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
  203. reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
  204. spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
  205. if (retval != -1) {
  206. return retval & 0xff;
  207. } else {
  208. return -1;
  209. }
  210. }
  211. static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
  212. {
  213. unsigned long flags;
  214. if (addr > 15) {
  215. PRINT(KERN_ERR, lynx->id,
  216. "%s: PHY register address %d out of range", __func__, addr);
  217. return -1;
  218. }
  219. if (val > 0xff) {
  220. PRINT(KERN_ERR, lynx->id,
  221. "%s: PHY register value %d out of range", __func__, val);
  222. return -1;
  223. }
  224. spin_lock_irqsave(&lynx->phy_reg_lock, flags);
  225. reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
  226. | LINK_PHY_WDATA(val));
  227. spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
  228. return 0;
  229. }
  230. static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
  231. {
  232. int reg;
  233. if (page > 7) {
  234. PRINT(KERN_ERR, lynx->id,
  235. "%s: PHY page %d out of range", __func__, page);
  236. return -1;
  237. }
  238. reg = get_phy_reg(lynx, 7);
  239. if (reg != -1) {
  240. reg &= 0x1f;
  241. reg |= (page << 5);
  242. set_phy_reg(lynx, 7, reg);
  243. return 0;
  244. } else {
  245. return -1;
  246. }
  247. }
#if 0 /* not needed at this time */
/* Select PHY register port @port (0..15) by rewriting the low nibble of
 * PHY register 7 while keeping the page-select bits. */
static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
{
	int reg;

	if (port > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY port %d out of range", __func__, port);
		return -1;
	}

	reg = get_phy_reg(lynx, 7);
	if (reg != -1) {
		reg &= 0xf0;
		reg |= port;
		set_phy_reg(lynx, 7, reg);
		return 0;
	} else {
		return -1;
	}
}
#endif
  268. static u32 get_phy_vendorid(struct ti_lynx *lynx)
  269. {
  270. u32 pvid = 0;
  271. sel_phy_reg_page(lynx, 1);
  272. pvid |= (get_phy_reg(lynx, 10) << 16);
  273. pvid |= (get_phy_reg(lynx, 11) << 8);
  274. pvid |= get_phy_reg(lynx, 12);
  275. PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
  276. return pvid;
  277. }
  278. static u32 get_phy_productid(struct ti_lynx *lynx)
  279. {
  280. u32 id = 0;
  281. sel_phy_reg_page(lynx, 1);
  282. id |= (get_phy_reg(lynx, 13) << 16);
  283. id |= (get_phy_reg(lynx, 14) << 8);
  284. id |= get_phy_reg(lynx, 15);
  285. PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
  286. return id;
  287. }
/*
 * Build a self ID quadlet for the local node from PHY registers 0..6.
 * Needed because pre-1394a PHYs do not put their own self ID into the
 * received self ID stream; handle_selfid() injects the result at the
 * right place.  Returns the quadlet already converted to bus order.
 */
static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
				     struct hpsb_host *host)
{
	quadlet_t lsid;
	char phyreg[7];
	int i;

	/* reg 0 was cached by the interrupt handler; fetch the rest */
	phyreg[0] = lynx->phy_reg0;
	for (i = 1; i < 7; i++) {
		phyreg[i] = get_phy_reg(lynx, i);
	}

	/* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
	   more than 3 ports on the PHY anyway. */

	lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
	lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
	lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
	if (!hpsb_disable_irm)
		lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
	/* lsid |= 1 << 11; *//* set contender (hack) */
	lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */

	/* encode the state of each PHY port into 2-bit fields */
	for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
		if (phyreg[3 + i] & 0x4) {
			lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
				<< (6 - i*2);
		} else {
			lsid |= 1 << (6 - i*2);
		}
	}

	cpu_to_be32s(&lsid);
	PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
	return lsid;
}
/*
 * Process the self ID quadlets received after a bus reset and report
 * them to the ieee1394 core.  For pre-1394a PHYs a locally generated
 * self ID is inserted at its proper position in the stream (sorted by
 * phy id).  Finishes by re-enabling reception/transmission unless
 * another bus reset started meanwhile.
 */
static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
{
	quadlet_t *q = lynx->rcv_page;
	int phyid, isroot, size;
	quadlet_t lsid = 0;
	int i;

	/* bail out while the reset state is incomplete (no PHY reg 0
	 * snapshot or no selfid data yet) */
	if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;

	size = lynx->selfid_size;
	phyid = lynx->phy_reg0;

	/* byte-swap the received quadlets in place (at most 16 bytes) */
	i = (size > 16 ? 16 : size) / 4 - 1;
	while (i >= 0) {
		cpu_to_be32s(&q[i]);
		i--;
	}

	if (!lynx->phyic.reg_1394a) {
		lsid = generate_own_selfid(lynx, host);
	}

	/* PHY reg 0 layout: bits 7..2 = phy id, bit 1 = root */
	isroot = (phyid & 2) != 0;
	phyid >>= 2;
	PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
	      phyid, (isroot ? "root" : "not root"));
	reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);

	/* empty stream: deliver only our generated self ID */
	if (!lynx->phyic.reg_1394a && !size) {
		hpsb_selfid_received(host, lsid);
	}

	while (size > 0) {
		struct selfid *sid = (struct selfid *)q;

		/* inject our self ID just before the packet of the next
		 * higher phy id, keeping the stream sorted */
		if (!lynx->phyic.reg_1394a && !sid->extended
		    && (sid->phy_id == (phyid + 1))) {
			hpsb_selfid_received(host, lsid);
		}

		/* each self ID arrives as quadlet + bit-inverted quadlet */
		if (q[0] == ~q[1]) {
			PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
			      q[0]);
			hpsb_selfid_received(host, q[0]);
		} else {
			PRINT(KERN_INFO, lynx->id,
			      "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
		}
		q += 2;
		size -= 8;
	}

	/* not yet delivered: we hold the highest phy id, so ours goes last */
	if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
		hpsb_selfid_received(host, lsid);
	}

	hpsb_selfid_complete(host, phyid, isroot);

	if (host->in_bus_reset) return; /* in bus reset again */

	if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think, we need this here
	reg_set_bits(lynx, LINK_CONTROL,
		     LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
		     | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
}
/* This must be called with the respective queue_lock held. */
/*
 * Take the first packet off the send queue, build a transmit PCL for it
 * (buffer 0 = header, buffer 1 = optional payload) and start the DMA
 * channel.  The packet moves to pcl_queue while it is in flight; the
 * interrupt handler unmaps it and reports completion.
 */
static void send_next(struct ti_lynx *lynx, int what)
{
	struct ti_pcl pcl;
	struct lynx_send_data *d;
	struct hpsb_packet *packet;

#if 0 /* has been removed from ieee1394 core */
	d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
#else
	d = &lynx->async;
#endif
	/* only one packet may be in the transmit PCL at a time */
	if (!list_empty(&d->pcl_queue)) {
		PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
		BUG();
	}

	packet = driver_packet(d->queue.next);
	list_move_tail(&packet->driver_list, &d->pcl_queue);

	/* map header (and payload, if any) for device-directed DMA */
	d->header_dma = pci_map_single(lynx->dev, packet->header,
				       packet->header_size, PCI_DMA_TODEVICE);
	if (packet->data_size) {
		d->data_dma = pci_map_single(lynx->dev, packet->data,
					     packet->data_size,
					     PCI_DMA_TODEVICE);
	} else {
		d->data_dma = 0;
	}

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.pcl_status = 0;
	pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[0].pointer = d->header_dma;
	pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
	pcl.buffer[1].pointer = d->data_dma;

	/* choose the transmit command by packet type (raw = unformatted) */
	switch (packet->type) {
	case hpsb_async:
		pcl.buffer[0].control |= PCL_CMD_XMT;
		break;
#if 0 /* has been removed from ieee1394 core */
	case hpsb_iso:
		pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
		break;
#endif
	case hpsb_raw:
		pcl.buffer[0].control |= PCL_CMD_UNFXMT;
		break;
	}

	put_pcl(lynx, d->pcl, &pcl);
	run_pcl(lynx, d->pcl_start, d->channel);
}
/* called from subsystem core */
/*
 * Queue @packet for transmission.  Returns 0 on success, -EOVERFLOW if
 * the payload exceeds the 4 KB hardware limit, -EINVAL for unsupported
 * packet types.  If no packet is currently in the transmit PCL, DMA is
 * started immediately via send_next().
 */
static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_lynx *lynx = host->hostdata;
	struct lynx_send_data *d;
	unsigned long flags;

	if (packet->data_size >= 4096) {
		PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
		      packet->data_size);
		return -EOVERFLOW;
	}

	switch (packet->type) {
	case hpsb_async:
	case hpsb_raw:
		d = &lynx->async;
		break;
#if 0 /* has been removed from ieee1394 core */
	case hpsb_iso:
		d = &lynx->iso_send;
		break;
#endif
	default:
		PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
		      packet->type);
		return -EINVAL;
	}

	/* quadlet payloads travel in header[3] and need bus byte order */
	if (packet->tcode == TCODE_WRITEQ
	    || packet->tcode == TCODE_READQ_RESPONSE) {
		cpu_to_be32s(&packet->header[3]);
	}

	spin_lock_irqsave(&d->queue_lock, flags);

	list_add_tail(&packet->driver_list, &d->queue);
	/* kick off DMA only when nothing is in flight */
	if (list_empty(&d->pcl_queue))
		send_next(lynx, packet->type);

	spin_unlock_irqrestore(&d->queue_lock, flags);

	return 0;
}
/* called from subsystem core */
/*
 * Dispatch a host-controller command from the ieee1394 core.
 *
 * @cmd: devctl command (bus reset variants, cycle counter get/set, bus
 *       id, cycle master on/off, cancel queued requests).
 * @arg: command-specific argument.
 *
 * Returns 0 on success (or the counter value for GET_CYCLE_COUNTER),
 * -1 on failure.
 *
 * Note: the SHORT_RESET* cases intentionally fall through to their
 * LONG_RESET* counterparts when the PHY predates 1394a and cannot do an
 * arbitrated short reset.
 */
static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_lynx *lynx = host->hostdata;
	int retval = 0;
	struct hpsb_packet *packet;
	LIST_HEAD(packet_list);
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		/* a bus reset is already pending - nothing to do */
		if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
			retval = 0;
			break;
		}

		switch (arg) {
		case SHORT_RESET:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");

				/* invalidate cached selfid state; refreshed
				 * by the bus reset interrupt handler */
				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				/* clear the root hold-off bit first */
				if (phy_reg & 0x80) {
					phy_reg &= ~0x80;
					set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg &= ~0x80;
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				/* make sure the root hold-off bit is set */
				if (!(phy_reg & 0x80)) {
					phy_reg |= 0x80;
					set_phy_reg(lynx, 1, phy_reg); /* set RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0xc0;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
			break;
		default:
			PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
			retval = -1;
		}

		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(lynx, CYCLE_TIMER);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(lynx, CYCLE_TIMER, arg);
		break;

	case SET_BUS_ID:
		/* keep the node id bits, replace the bus id bits */
		reg_write(lynx, LINK_ID,
			  (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			reg_set_bits(lynx, LINK_CONTROL,
				     LINK_CONTROL_CYCMASTER);
		} else {
			reg_clear_bits(lynx, LINK_CONTROL,
				       LINK_CONTROL_CYCMASTER);
		}
		break;

	case CANCEL_REQUESTS:
		spin_lock_irqsave(&lynx->async.queue_lock, flags);

		/* stop the async send channel, then fail every queued
		 * packet; a packet already in the PCL needs its DMA
		 * mappings torn down and its real ack reported */
		reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
		list_splice_init(&lynx->async.queue, &packet_list);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
			PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
		} else {
			struct ti_pcl pcl;
			u32 ack;

			PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_ABORTED;
			}
			hpsb_packet_sent(host, packet, ack);
		}

		while (!list_empty(&packet_list)) {
			packet = driver_packet(packet_list.next);
			list_del_init(&packet->driver_list);
			hpsb_packet_sent(host, packet, ACKX_ABORTED);
		}

		break;
#if 0 /* has been removed from ieee1394 core */
	case ISO_LISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		if (lynx->iso_rcv.chan_count++ == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  DMA_WORD1_CMP_ENABLE_MASTER);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;

	case ISO_UNLISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		if (--lynx->iso_rcv.chan_count == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  0);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;
#endif
	default:
		PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
		retval = -1;
	}

	return retval;
}
  679. /***************************************
  680. * IEEE-1394 functionality section END *
  681. ***************************************/
  682. /********************************************************
  683. * Global stuff (interrupt handler, init/shutdown code) *
  684. ********************************************************/
  685. static irqreturn_t lynx_irq_handler(int irq, void *dev_id)
  686. {
  687. struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
  688. struct hpsb_host *host = lynx->host;
  689. u32 intmask;
  690. u32 linkint;
  691. linkint = reg_read(lynx, LINK_INT_STATUS);
  692. intmask = reg_read(lynx, PCI_INT_STATUS);
  693. if (!(intmask & PCI_INT_INT_PEND))
  694. return IRQ_NONE;
  695. PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
  696. linkint);
  697. reg_write(lynx, LINK_INT_STATUS, linkint);
  698. reg_write(lynx, PCI_INT_STATUS, intmask);
  699. if (intmask & PCI_INT_1394) {
  700. if (linkint & LINK_INT_PHY_TIMEOUT) {
  701. PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
  702. }
  703. if (linkint & LINK_INT_PHY_BUSRESET) {
  704. PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
  705. lynx->selfid_size = -1;
  706. lynx->phy_reg0 = -1;
  707. if (!host->in_bus_reset)
  708. hpsb_bus_reset(host);
  709. }
  710. if (linkint & LINK_INT_PHY_REG_RCVD) {
  711. u32 reg;
  712. spin_lock(&lynx->phy_reg_lock);
  713. reg = reg_read(lynx, LINK_PHY);
  714. spin_unlock(&lynx->phy_reg_lock);
  715. if (!host->in_bus_reset) {
  716. PRINT(KERN_INFO, lynx->id,
  717. "phy reg received without reset");
  718. } else if (reg & 0xf00) {
  719. PRINT(KERN_INFO, lynx->id,
  720. "unsolicited phy reg %d received",
  721. (reg >> 8) & 0xf);
  722. } else {
  723. lynx->phy_reg0 = reg & 0xff;
  724. handle_selfid(lynx, host);
  725. }
  726. }
  727. if (linkint & LINK_INT_ISO_STUCK) {
  728. PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
  729. }
  730. if (linkint & LINK_INT_ASYNC_STUCK) {
  731. PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
  732. }
  733. if (linkint & LINK_INT_SENT_REJECT) {
  734. PRINT(KERN_INFO, lynx->id, "sent reject");
  735. }
  736. if (linkint & LINK_INT_TX_INVALID_TC) {
  737. PRINT(KERN_INFO, lynx->id, "invalid transaction code");
  738. }
  739. if (linkint & LINK_INT_GRF_OVERFLOW) {
  740. /* flush FIFO if overflow happens during reset */
  741. if (host->in_bus_reset)
  742. reg_write(lynx, FIFO_CONTROL,
  743. FIFO_CONTROL_GRF_FLUSH);
  744. PRINT(KERN_INFO, lynx->id, "GRF overflow");
  745. }
  746. if (linkint & LINK_INT_ITF_UNDERFLOW) {
  747. PRINT(KERN_INFO, lynx->id, "ITF underflow");
  748. }
  749. if (linkint & LINK_INT_ATF_UNDERFLOW) {
  750. PRINT(KERN_INFO, lynx->id, "ATF underflow");
  751. }
  752. }
  753. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
  754. PRINTD(KERN_DEBUG, lynx->id, "iso receive");
  755. spin_lock(&lynx->iso_rcv.lock);
  756. lynx->iso_rcv.stat[lynx->iso_rcv.next] =
  757. reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
  758. lynx->iso_rcv.used++;
  759. lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
  760. if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
  761. || !lynx->iso_rcv.chan_count) {
  762. PRINTD(KERN_DEBUG, lynx->id, "stopped");
  763. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
  764. }
  765. run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
  766. CHANNEL_ISO_RCV);
  767. spin_unlock(&lynx->iso_rcv.lock);
  768. tasklet_schedule(&lynx->iso_rcv.tq);
  769. }
  770. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
  771. PRINTD(KERN_DEBUG, lynx->id, "async sent");
  772. spin_lock(&lynx->async.queue_lock);
  773. if (list_empty(&lynx->async.pcl_queue)) {
  774. spin_unlock(&lynx->async.queue_lock);
  775. PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
  776. } else {
  777. struct ti_pcl pcl;
  778. u32 ack;
  779. struct hpsb_packet *packet;
  780. get_pcl(lynx, lynx->async.pcl, &pcl);
  781. packet = driver_packet(lynx->async.pcl_queue.next);
  782. list_del_init(&packet->driver_list);
  783. pci_unmap_single(lynx->dev, lynx->async.header_dma,
  784. packet->header_size, PCI_DMA_TODEVICE);
  785. if (packet->data_size) {
  786. pci_unmap_single(lynx->dev, lynx->async.data_dma,
  787. packet->data_size, PCI_DMA_TODEVICE);
  788. }
  789. if (!list_empty(&lynx->async.queue)) {
  790. send_next(lynx, hpsb_async);
  791. }
  792. spin_unlock(&lynx->async.queue_lock);
  793. if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
  794. if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
  795. ack = (pcl.pcl_status >> 15) & 0xf;
  796. PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
  797. ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
  798. } else {
  799. ack = (pcl.pcl_status >> 15) & 0xf;
  800. }
  801. } else {
  802. PRINT(KERN_INFO, lynx->id, "async packet was not completed");
  803. ack = ACKX_SEND_ERROR;
  804. }
  805. hpsb_packet_sent(host, packet, ack);
  806. }
  807. }
  808. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
  809. PRINTD(KERN_DEBUG, lynx->id, "iso sent");
  810. spin_lock(&lynx->iso_send.queue_lock);
  811. if (list_empty(&lynx->iso_send.pcl_queue)) {
  812. spin_unlock(&lynx->iso_send.queue_lock);
  813. PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
  814. } else {
  815. struct ti_pcl pcl;
  816. u32 ack;
  817. struct hpsb_packet *packet;
  818. get_pcl(lynx, lynx->iso_send.pcl, &pcl);
  819. packet = driver_packet(lynx->iso_send.pcl_queue.next);
  820. list_del_init(&packet->driver_list);
  821. pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
  822. packet->header_size, PCI_DMA_TODEVICE);
  823. if (packet->data_size) {
  824. pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
  825. packet->data_size, PCI_DMA_TODEVICE);
  826. }
  827. #if 0 /* has been removed from ieee1394 core */
  828. if (!list_empty(&lynx->iso_send.queue)) {
  829. send_next(lynx, hpsb_iso);
  830. }
  831. #endif
  832. spin_unlock(&lynx->iso_send.queue_lock);
  833. if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
  834. if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
  835. ack = (pcl.pcl_status >> 15) & 0xf;
  836. PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
  837. ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
  838. } else {
  839. ack = (pcl.pcl_status >> 15) & 0xf;
  840. }
  841. } else {
  842. PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
  843. ack = ACKX_SEND_ERROR;
  844. }
  845. hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
  846. }
  847. }
  848. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
  849. /* general receive DMA completed */
  850. int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
  851. PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
  852. stat & 0x1fff);
  853. if (stat & DMA_CHAN_STAT_SELFID) {
  854. lynx->selfid_size = stat & 0x1fff;
  855. handle_selfid(lynx, host);
  856. } else {
  857. quadlet_t *q_data = lynx->rcv_page;
  858. if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
  859. || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
  860. cpu_to_be32s(q_data + 3);
  861. }
  862. hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
  863. }
  864. run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
  865. }
  866. return IRQ_HANDLED;
  867. }
/*
 * Bottom half (tasklet) for the isochronous receive DMA channel.
 *
 * Drains all completed iso receive buffer slots: each used slot is
 * either handed to the ieee1394 core via hpsb_packet_received() or, if
 * the saved DMA status carries PCIERR/PKTERR, reported and dropped.
 * Afterwards the channel's word-compare master enable is restored if
 * any iso channels are still in use (chan_count != 0).
 */
static void iso_rcv_bh(struct ti_lynx *lynx)
{
unsigned int idx;
quadlet_t *data;
unsigned long flags;
spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
while (lynx->iso_rcv.used) {
idx = lynx->iso_rcv.last;
/* Drop the lock while delivering the packet; the interrupt handler
 * only advances .next and .used, while .last is advanced only here,
 * so slot idx stays stable during delivery. */
spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
+ (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
/* High half of quadlet 0 is the packet's data length; it plus the
 * 4-byte header must match the DMA transfer count kept in the low
 * 13 bits of the saved channel status. */
if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
PRINT(KERN_ERR, lynx->id,
"iso length mismatch 0x%08x/0x%08x", *data,
lynx->iso_rcv.stat[idx]);
}
if (lynx->iso_rcv.stat[idx]
& (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
PRINT(KERN_INFO, lynx->id,
"iso receive error on %d to 0x%p", idx, data);
} else {
hpsb_packet_received(lynx->host, data,
lynx->iso_rcv.stat[idx] & 0x1fff,
0);
}
/* retake the lock before updating the ring bookkeeping */
spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
lynx->iso_rcv.used--;
}
/* re-arm receive matching only while some iso channel is allocated */
if (lynx->iso_rcv.chan_count) {
reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
DMA_WORD1_CMP_ENABLE_MASTER);
}
spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
}
/*
 * PCI remove callback, also used as the teardown path for probe
 * failures (via the FAIL() macro in add_card()).
 *
 * lynx->state acts as a high-water mark of how far initialization got;
 * every switch case intentionally FALLS THROUGH to the cases below it,
 * so a fully initialized card (is_host) runs all cleanup steps while a
 * partially initialized one starts further down the ladder.
 */
static void remove_card(struct pci_dev *dev)
{
struct ti_lynx *lynx;
struct device *lynx_dev;
int i;
lynx = pci_get_drvdata(dev);
if (!lynx) return;
pci_set_drvdata(dev, NULL);
/* pin the host device so put_device() below is the last reference drop */
lynx_dev = get_device(&lynx->host->device);
switch (lynx->state) {
case is_host:
reg_write(lynx, PCI_INT_ENABLE, 0);
hpsb_remove_host(lynx->host);
/* fall through */
case have_intr:
/* written again in case we arrived here directly (probe failure) */
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->dev->irq, lynx);
/* Disable IRM Contender and LCtrl */
if (lynx->phyic.reg_1394a)
set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
/* Let all other nodes know to ignore us */
lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
/* fall through */
case have_iomappings:
reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
/* Fix buggy cards with autoboot pin not tied low: */
reg_write(lynx, DMA0_CHAN_CTRL, 0);
iounmap(lynx->registers);
iounmap(lynx->local_rom);
iounmap(lynx->local_ram);
iounmap(lynx->aux_port);
/* fall through */
case have_1394_buffers:
for (i = 0; i < ISORCV_PAGES; i++) {
if (lynx->iso_rcv.page[i]) {
pci_free_consistent(lynx->dev, PAGE_SIZE,
lynx->iso_rcv.page[i],
lynx->iso_rcv.page_dma[i]);
}
}
pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
lynx->rcv_page_dma);
/* fall through */
case have_aux_buf:
case have_pcl_mem:
pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
lynx->pcl_mem_dma);
/* fall through */
case clear:
/* do nothing - already freed */
;
}
/* NOTE(review): the tasklet is only initialized once state reaches
 * have_intr in add_card(); killing it in earlier states relies on the
 * hostdata being zero-initialized by hpsb_alloc_host() - confirm. */
tasklet_kill(&lynx->iso_rcv.tq);
if (lynx_dev)
put_device(lynx_dev);
}
  954. static int __devinit add_card(struct pci_dev *dev,
  955. const struct pci_device_id *devid_is_unused)
  956. {
  957. #define FAIL(fmt, args...) do { \
  958. PRINT_G(KERN_ERR, fmt , ## args); \
  959. remove_card(dev); \
  960. return error; \
  961. } while (0)
  962. char irq_buf[16];
  963. struct hpsb_host *host;
  964. struct ti_lynx *lynx; /* shortcut to currently handled device */
  965. struct ti_pcl pcl;
  966. u32 *pcli;
  967. int i;
  968. int error;
  969. error = -ENXIO;
  970. if (pci_set_dma_mask(dev, DMA_BIT_MASK(32)))
  971. FAIL("DMA address limits not supported for PCILynx hardware");
  972. if (pci_enable_device(dev))
  973. FAIL("failed to enable PCILynx hardware");
  974. pci_set_master(dev);
  975. error = -ENOMEM;
  976. host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
  977. if (!host) FAIL("failed to allocate control structure memory");
  978. lynx = host->hostdata;
  979. lynx->id = card_id++;
  980. lynx->dev = dev;
  981. lynx->state = clear;
  982. lynx->host = host;
  983. host->pdev = dev;
  984. pci_set_drvdata(dev, lynx);
  985. spin_lock_init(&lynx->lock);
  986. spin_lock_init(&lynx->phy_reg_lock);
  987. lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
  988. &lynx->pcl_mem_dma);
  989. if (lynx->pcl_mem != NULL) {
  990. lynx->state = have_pcl_mem;
  991. PRINT(KERN_INFO, lynx->id,
  992. "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
  993. lynx->pcl_mem);
  994. } else {
  995. FAIL("failed to allocate PCL memory area");
  996. }
  997. lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
  998. &lynx->rcv_page_dma);
  999. if (lynx->rcv_page == NULL) {
  1000. FAIL("failed to allocate receive buffer");
  1001. }
  1002. lynx->state = have_1394_buffers;
  1003. for (i = 0; i < ISORCV_PAGES; i++) {
  1004. lynx->iso_rcv.page[i] =
  1005. pci_alloc_consistent(dev, PAGE_SIZE,
  1006. &lynx->iso_rcv.page_dma[i]);
  1007. if (lynx->iso_rcv.page[i] == NULL) {
  1008. FAIL("failed to allocate iso receive buffers");
  1009. }
  1010. }
  1011. lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
  1012. PCILYNX_MAX_REGISTER);
  1013. lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
  1014. lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
  1015. lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
  1016. PCILYNX_MAX_MEMORY);
  1017. lynx->state = have_iomappings;
  1018. if (lynx->registers == NULL) {
  1019. FAIL("failed to remap registers - card not accessible");
  1020. }
  1021. reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
  1022. /* Fix buggy cards with autoboot pin not tied low: */
  1023. reg_write(lynx, DMA0_CHAN_CTRL, 0);
  1024. sprintf (irq_buf, "%d", dev->irq);
  1025. if (!request_irq(dev->irq, lynx_irq_handler, IRQF_SHARED,
  1026. PCILYNX_DRIVER_NAME, lynx)) {
  1027. PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
  1028. lynx->state = have_intr;
  1029. } else {
  1030. FAIL("failed to allocate shared interrupt %s", irq_buf);
  1031. }
  1032. /* alloc_pcl return values are not checked, it is expected that the
  1033. * provided PCL space is sufficient for the initial allocations */
  1034. lynx->rcv_pcl = alloc_pcl(lynx);
  1035. lynx->rcv_pcl_start = alloc_pcl(lynx);
  1036. lynx->async.pcl = alloc_pcl(lynx);
  1037. lynx->async.pcl_start = alloc_pcl(lynx);
  1038. lynx->iso_send.pcl = alloc_pcl(lynx);
  1039. lynx->iso_send.pcl_start = alloc_pcl(lynx);
  1040. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1041. lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
  1042. }
  1043. lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
  1044. /* all allocations successful - simple init stuff follows */
  1045. reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
  1046. tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
  1047. (unsigned long)lynx);
  1048. spin_lock_init(&lynx->iso_rcv.lock);
  1049. spin_lock_init(&lynx->async.queue_lock);
  1050. lynx->async.channel = CHANNEL_ASYNC_SEND;
  1051. spin_lock_init(&lynx->iso_send.queue_lock);
  1052. lynx->iso_send.channel = CHANNEL_ISO_SEND;
  1053. PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
  1054. "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
  1055. lynx->local_ram, lynx->aux_port);
  1056. /* now, looking for PHY register set */
  1057. if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
  1058. lynx->phyic.reg_1394a = 1;
  1059. PRINT(KERN_INFO, lynx->id,
  1060. "found 1394a conform PHY (using extended register set)");
  1061. lynx->phyic.vendor = get_phy_vendorid(lynx);
  1062. lynx->phyic.product = get_phy_productid(lynx);
  1063. } else {
  1064. lynx->phyic.reg_1394a = 0;
  1065. PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
  1066. }
  1067. lynx->selfid_size = -1;
  1068. lynx->phy_reg0 = -1;
  1069. INIT_LIST_HEAD(&lynx->async.queue);
  1070. INIT_LIST_HEAD(&lynx->async.pcl_queue);
  1071. INIT_LIST_HEAD(&lynx->iso_send.queue);
  1072. INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
  1073. pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
  1074. put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
  1075. pcl.next = PCL_NEXT_INVALID;
  1076. pcl.async_error_next = PCL_NEXT_INVALID;
  1077. pcl.buffer[0].control = PCL_CMD_RCV | 16;
  1078. #ifndef __BIG_ENDIAN
  1079. pcl.buffer[0].control |= PCL_BIGENDIAN;
  1080. #endif
  1081. pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
  1082. pcl.buffer[0].pointer = lynx->rcv_page_dma;
  1083. pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
  1084. put_pcl(lynx, lynx->rcv_pcl, &pcl);
  1085. pcl.next = pcl_bus(lynx, lynx->async.pcl);
  1086. pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
  1087. put_pcl(lynx, lynx->async.pcl_start, &pcl);
  1088. pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
  1089. pcl.async_error_next = PCL_NEXT_INVALID;
  1090. put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
  1091. pcl.next = PCL_NEXT_INVALID;
  1092. pcl.async_error_next = PCL_NEXT_INVALID;
  1093. pcl.buffer[0].control = PCL_CMD_RCV | 4;
  1094. #ifndef __BIG_ENDIAN
  1095. pcl.buffer[0].control |= PCL_BIGENDIAN;
  1096. #endif
  1097. pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
  1098. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1099. int page = i / ISORCV_PER_PAGE;
  1100. int sec = i % ISORCV_PER_PAGE;
  1101. pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
  1102. + sec * MAX_ISORCV_SIZE;
  1103. pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
  1104. put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
  1105. }
  1106. pcli = (u32 *)&pcl;
  1107. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1108. pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
  1109. }
  1110. put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
  1111. /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
  1112. reg_write(lynx, FIFO_SIZES, 0x003030a0);
  1113. /* 20 byte threshold before triggering PCI transfer */
  1114. reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
  1115. /* threshold on both send FIFOs before transmitting:
  1116. FIFO size - cache line size - 1 */
  1117. i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
  1118. i = 0x30 - i - 1;
  1119. reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
  1120. reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
  1121. reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
  1122. | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
  1123. | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
  1124. | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
  1125. | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
  1126. | LINK_INT_ATF_UNDERFLOW);
  1127. reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
  1128. reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
  1129. reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
  1130. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
  1131. DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
  1132. | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
  1133. | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
  1134. run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
  1135. reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
  1136. reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
  1137. reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
  1138. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
  1139. run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
  1140. reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
  1141. | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
  1142. | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
  1143. | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
  1144. if (!lynx->phyic.reg_1394a) {
  1145. if (!hpsb_disable_irm) {
  1146. /* attempt to enable contender bit -FIXME- would this
  1147. * work elsewhere? */
  1148. reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
  1149. reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
  1150. }
  1151. } else {
  1152. /* set the contender (if appropriate) and LCtrl bit in the
  1153. * extended PHY register set. (Should check that PHY_02_EXTENDED
  1154. * is set in register 2?)
  1155. */
  1156. i = get_phy_reg(lynx, 4);
  1157. i |= PHY_04_LCTRL;
  1158. if (hpsb_disable_irm)
  1159. i &= ~PHY_04_CONTENDER;
  1160. else
  1161. i |= PHY_04_CONTENDER;
  1162. if (i != -1) set_phy_reg(lynx, 4, i);
  1163. }
  1164. if (!skip_eeprom)
  1165. {
  1166. /* needed for i2c communication with serial eeprom */
  1167. struct i2c_adapter *i2c_ad;
  1168. struct i2c_algo_bit_data i2c_adapter_data;
  1169. error = -ENOMEM;
  1170. i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
  1171. if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
  1172. strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
  1173. i2c_adapter_data = bit_data;
  1174. i2c_ad->algo_data = &i2c_adapter_data;
  1175. i2c_adapter_data.data = lynx;
  1176. i2c_ad->dev.parent = &dev->dev;
  1177. PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
  1178. reg_read(lynx, SERIAL_EEPROM_CONTROL));
  1179. /* reset hardware to sane state */
  1180. lynx->i2c_driven_state = 0x00000070;
  1181. reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
  1182. if (i2c_bit_add_bus(i2c_ad) < 0)
  1183. {
  1184. kfree(i2c_ad);
  1185. error = -ENXIO;
  1186. FAIL("unable to register i2c");
  1187. }
  1188. else
  1189. {
  1190. /* do i2c stuff */
  1191. unsigned char i2c_cmd = 0x10;
  1192. struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
  1193. { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
  1194. };
  1195. /* we use i2c_transfer because we have no i2c_client
  1196. at hand */
  1197. if (i2c_transfer(i2c_ad, msg, 2) < 0) {
  1198. PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
  1199. } else {
  1200. PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
  1201. /* FIXME: probably we shoud rewrite the max_rec, max_ROM(1394a),
  1202. * generation(1394a) and link_spd(1394a) field and recalculate
  1203. * the CRC */
  1204. for (i = 0; i < 5 ; i++)
  1205. PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
  1206. i, be32_to_cpu(lynx->bus_info_block[i]));
  1207. /* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
  1208. if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
  1209. (lynx->bus_info_block[1] == IEEE1394_BUSID_MAGIC))
  1210. {
  1211. PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
  1212. } else {
  1213. kfree(i2c_ad);
  1214. error = -ENXIO;
  1215. FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
  1216. }
  1217. }
  1218. i2c_del_adapter(i2c_ad);
  1219. kfree(i2c_ad);
  1220. }
  1221. }
  1222. host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
  1223. host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
  1224. host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
  1225. host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
  1226. if (!lynx->phyic.reg_1394a)
  1227. host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
  1228. else
  1229. host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
  1230. if (hpsb_add_host(host)) {
  1231. error = -ENOMEM;
  1232. FAIL("Failed to register host with highlevel");
  1233. }
  1234. lynx->state = is_host;
  1235. return 0;
  1236. #undef FAIL
  1237. }
  1238. static struct pci_device_id pci_table[] = {
  1239. {
  1240. .vendor = PCI_VENDOR_ID_TI,
  1241. .device = PCI_DEVICE_ID_TI_PCILYNX,
  1242. .subvendor = PCI_ANY_ID,
  1243. .subdevice = PCI_ANY_ID,
  1244. },
  1245. { } /* Terminating entry */
  1246. };
/* Glue between the PCI core and this driver: probe/remove callbacks
 * plus the device ID table above. */
static struct pci_driver lynx_pci_driver = {
.name = PCILYNX_DRIVER_NAME,
.id_table = pci_table,
.probe = add_card,
.remove = remove_card,
};
/* ieee1394 core host driver operations.  Only packet transmission and
 * device control are implemented here; config ROM handling and iso
 * control are left NULL (core defaults / unsupported). */
static struct hpsb_host_driver lynx_driver = {
.owner = THIS_MODULE,
.name = PCILYNX_DRIVER_NAME,
.set_hw_config_rom = NULL,
.transmit_packet = lynx_transmit,
.devctl = lynx_devctl,
.isoctl = NULL,
};
  1261. MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
  1262. MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
  1263. MODULE_LICENSE("GPL");
  1264. MODULE_SUPPORTED_DEVICE("pcilynx");
  1265. MODULE_DEVICE_TABLE(pci, pci_table);
  1266. static int __init pcilynx_init(void)
  1267. {
  1268. int ret;
  1269. ret = pci_register_driver(&lynx_pci_driver);
  1270. if (ret < 0) {
  1271. PRINT_G(KERN_ERR, "PCI module init failed");
  1272. return ret;
  1273. }
  1274. return 0;
  1275. }
/* Module exit point: unregister the PCI driver; the PCI core then
 * invokes remove_card() for every bound device. */
static void __exit pcilynx_cleanup(void)
{
pci_unregister_driver(&lynx_pci_driver);
}
  1280. module_init(pcilynx_init);
  1281. module_exit(pcilynx_cleanup);