/*
 * pcilynx.c - Texas Instruments PCILynx driver
 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
 *                         Stephan Linz <linz@mazet.de>
 *                         Manfred Weihs <weihs@ict.tuwien.ac.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Contributions:
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *	reading bus info block (containing GUID) from serial
 *	eeprom via i2c and storing it in config ROM
 *	Reworked code for initiating bus resets
 *	(long, short, with or without hold-off)
 *	Enhancements in async and iso send code
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "pcilynx.h"

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
/* print card specific information */
#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
#else
#define PRINT_GD(level, fmt, args...) do {} while (0)
#define PRINTD(level, card, fmt, args...) do {} while (0)
#endif


/* Module Parameters */
static int skip_eeprom;
module_param(skip_eeprom, int, 0444);
MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");


static struct hpsb_host_driver lynx_driver;
static unsigned int card_id;


/*
 * I2C stuff
 */

/* the i2c stuff was inspired by i2c-philips-par.c */
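/*
 * The serial EEPROM hangs off two GPIO-style bits in the SERIAL_EEPROM_CONTROL
 * register: SCL is driven through bit 0x40 and SDA through bit 0x10.  The
 * currently driven state is cached in lynx->i2c_driven_state so that one line
 * can be toggled without disturbing the other bits of the register.
 */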
static void bit_setscl(void *data, int state)
{
	if (state) {
		((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
	} else {
		((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
	}
	reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
}

static void bit_setsda(void *data, int state)
{
	if (state) {
		((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
	} else {
		((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
	}
	reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
}

static int bit_getscl(void *data)
{
	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
}

static int bit_getsda(void *data)
{
	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
}

static int bit_reg(struct i2c_client *client)
{
	return 0;
}

static int bit_unreg(struct i2c_client *client)
{
	return 0;
}

static struct i2c_algo_bit_data bit_data = {
	.setsda		= bit_setsda,
	.setscl		= bit_setscl,
	.getsda		= bit_getsda,
	.getscl		= bit_getscl,
	.udelay		= 5,
	.mdelay		= 5,
	.timeout	= 100,
};

static struct i2c_adapter bit_ops = {
	.id			= 0xAA, //FIXME: probably we should get an id in i2c-id.h
	.client_register	= bit_reg,
	.client_unregister	= bit_unreg,
	.name			= "PCILynx I2C",
};


/*
 * PCL handling functions.
 */
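/* Allocate a PCL (Packet Control List) slot by scanning the pcl_bmap bitmap
 * for a clear bit.  Returns the PCL number, or -1 if no slot is free. */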
static pcl_t alloc_pcl(struct ti_lynx *lynx)
{
	u8 m;
	int i, j;

	spin_lock(&lynx->lock);
	/* FIXME - use ffz() to make this readable */
	for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
		m = lynx->pcl_bmap[i];
		for (j = 0; j < 8; j++) {
			if (m & 1<<j) {
				continue;
			}
			m |= 1<<j;
			lynx->pcl_bmap[i] = m;
			spin_unlock(&lynx->lock);
			return 8 * i + j;
		}
	}
	spin_unlock(&lynx->lock);

	return -1;
}


#if 0
static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
{
	int off, bit;

	off = pclid / 8;
	bit = pclid % 8;

	if (pclid < 0) {
		return;
	}

	spin_lock(&lynx->lock);
	if (lynx->pcl_bmap[off] & 1<<bit) {
		lynx->pcl_bmap[off] &= ~(1<<bit);
	} else {
		PRINT(KERN_ERR, lynx->id,
		      "attempted to free unallocated PCL %d", pclid);
	}
	spin_unlock(&lynx->lock);
}

/* functions useful for debugging */
static void pretty_print_pcl(const struct ti_pcl *pcl)
{
	int i;

	printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
	       pcl->next, pcl->user_data, pcl->pcl_status,
	       pcl->remaining_transfer_count, pcl->next_data_buffer);

	printk("PCL");
	for (i=0; i<13; i++) {
		printk(" c%x:%08x d%x:%08x",
		       i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
		if (!(i & 0x3) && (i != 12)) printk("\nPCL");
	}
	printk("\n");
}

static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
{
	struct ti_pcl pcl;

	get_pcl(lynx, pclid, &pcl);
	pretty_print_pcl(&pcl);
}
#endif


/***********************************
 * IEEE-1394 functionality section *
 ***********************************/
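/* Read a PHY register through the LINK_PHY register: issue a read request and
 * poll until the reply for the requested address arrives, giving up after
 * 10000 iterations. */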
static int get_phy_reg(struct ti_lynx *lynx, int addr)
{
	int retval;
	int i = 0;
	unsigned long flags;

	if (addr > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register address %d out of range",
		      __FUNCTION__, addr);
		return -1;
	}

	spin_lock_irqsave(&lynx->phy_reg_lock, flags);

	reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
	do {
		retval = reg_read(lynx, LINK_PHY);

		if (i > 10000) {
			PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
			      __FUNCTION__);
			retval = -1;
			break;
		}
		i++;
	} while ((retval & 0xf00) != LINK_PHY_RADDR(addr));

	reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
	spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);

	if (retval != -1) {
		return retval & 0xff;
	} else {
		return -1;
	}
}

static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
{
	unsigned long flags;

	if (addr > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register address %d out of range", __FUNCTION__, addr);
		return -1;
	}

	if (val > 0xff) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register value %d out of range", __FUNCTION__, val);
		return -1;
	}

	spin_lock_irqsave(&lynx->phy_reg_lock, flags);

	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
		  | LINK_PHY_WDATA(val));

	spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);

	return 0;
}

static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
{
	int reg;

	if (page > 7) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY page %d out of range", __FUNCTION__, page);
		return -1;
	}

	reg = get_phy_reg(lynx, 7);
	if (reg != -1) {
		reg &= 0x1f;
		reg |= (page << 5);
		set_phy_reg(lynx, 7, reg);
		return 0;
	} else {
		return -1;
	}
}

#if 0 /* not needed at this time */
static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
{
	int reg;

	if (port > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY port %d out of range", __FUNCTION__, port);
		return -1;
	}

	reg = get_phy_reg(lynx, 7);
	if (reg != -1) {
		reg &= 0xf0;
		reg |= port;
		set_phy_reg(lynx, 7, reg);
		return 0;
	} else {
		return -1;
	}
}
#endif

static u32 get_phy_vendorid(struct ti_lynx *lynx)
{
	u32 pvid = 0;

	sel_phy_reg_page(lynx, 1);

	pvid |= (get_phy_reg(lynx, 10) << 16);
	pvid |= (get_phy_reg(lynx, 11) << 8);
	pvid |= get_phy_reg(lynx, 12);

	PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);

	return pvid;
}

static u32 get_phy_productid(struct ti_lynx *lynx)
{
	u32 id = 0;

	sel_phy_reg_page(lynx, 1);

	id |= (get_phy_reg(lynx, 13) << 16);
	id |= (get_phy_reg(lynx, 14) << 8);
	id |= get_phy_reg(lynx, 15);

	PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);

	return id;
}
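/* Build a self-ID quadlet for the local node out of PHY registers 0-6.  Only
 * used with old (pre-1394a) PHYs, which apparently do not deliver their own
 * self-ID packet in the received self-ID stream; handle_selfid() inserts the
 * generated quadlet at the appropriate place. */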
static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
				     struct hpsb_host *host)
{
	quadlet_t lsid;
	char phyreg[7];
	int i;

	phyreg[0] = lynx->phy_reg0;
	for (i = 1; i < 7; i++) {
		phyreg[i] = get_phy_reg(lynx, i);
	}

	/* FIXME? We assume a TSB21LV03A phy here.  This code doesn't support
	   more than 3 ports on the PHY anyway. */

	lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
	lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
	lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
	if (!hpsb_disable_irm)
		lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
	/* lsid |= 1 << 11; *//* set contender (hack) */
	lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */

	for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
		if (phyreg[3 + i] & 0x4) {
			lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
				<< (6 - i*2);
		} else {
			lsid |= 1 << (6 - i*2);
		}
	}

	cpu_to_be32s(&lsid);
	PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
	return lsid;
}
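/* Evaluate the self-ID packets collected in lynx->rcv_page after a bus reset:
 * byte-swap them, report each consistent self-ID to the ieee1394 core (adding
 * the locally generated one for old PHYs) and finally re-enable reception and
 * transmission on the link. */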
static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
{
	quadlet_t *q = lynx->rcv_page;
	int phyid, isroot, size;
	quadlet_t lsid = 0;
	int i;

	if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;

	size = lynx->selfid_size;
	phyid = lynx->phy_reg0;

	i = (size > 16 ? 16 : size) / 4 - 1;
	while (i >= 0) {
		cpu_to_be32s(&q[i]);
		i--;
	}

	if (!lynx->phyic.reg_1394a) {
		lsid = generate_own_selfid(lynx, host);
	}

	isroot = (phyid & 2) != 0;
	phyid >>= 2;
	PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
	      phyid, (isroot ? "root" : "not root"));
	reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);

	if (!lynx->phyic.reg_1394a && !size) {
		hpsb_selfid_received(host, lsid);
	}

	while (size > 0) {
		struct selfid *sid = (struct selfid *)q;

		if (!lynx->phyic.reg_1394a && !sid->extended
		    && (sid->phy_id == (phyid + 1))) {
			hpsb_selfid_received(host, lsid);
		}

		if (q[0] == ~q[1]) {
			PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
			      q[0]);
			hpsb_selfid_received(host, q[0]);
		} else {
			PRINT(KERN_INFO, lynx->id,
			      "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
		}
		q += 2;
		size -= 8;
	}

	if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
		hpsb_selfid_received(host, lsid);
	}

	hpsb_selfid_complete(host, phyid, isroot);

	if (host->in_bus_reset) return; /* in bus reset again */

	if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think we need this here
	reg_set_bits(lynx, LINK_CONTROL,
		     LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
		     | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
}


/* This must be called with the respective queue_lock held. */
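/* Take the first packet off d->queue, move it to d->pcl_queue, map header and
 * (optional) data for DMA and build a two-buffer transmit PCL, which is then
 * started on the channel's DMA engine. */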
static void send_next(struct ti_lynx *lynx, int what)
{
	struct ti_pcl pcl;
	struct lynx_send_data *d;
	struct hpsb_packet *packet;

	d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
	if (!list_empty(&d->pcl_queue)) {
		PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
		BUG();
	}

	packet = driver_packet(d->queue.next);
	list_move_tail(&packet->driver_list, &d->pcl_queue);

	d->header_dma = pci_map_single(lynx->dev, packet->header,
				       packet->header_size, PCI_DMA_TODEVICE);
	if (packet->data_size) {
		d->data_dma = pci_map_single(lynx->dev, packet->data,
					     packet->data_size,
					     PCI_DMA_TODEVICE);
	} else {
		d->data_dma = 0;
	}

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.pcl_status = 0;
	pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[0].pointer = d->header_dma;
	pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
	pcl.buffer[1].pointer = d->data_dma;

	switch (packet->type) {
	case hpsb_async:
		pcl.buffer[0].control |= PCL_CMD_XMT;
		break;
	case hpsb_iso:
		pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
		break;
	case hpsb_raw:
		pcl.buffer[0].control |= PCL_CMD_UNFXMT;
		break;
	}

	put_pcl(lynx, d->pcl, &pcl);
	run_pcl(lynx, d->pcl_start, d->channel);
}


/* called from subsystem core */
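/* Queue a packet for transmission.  Quadlet payloads of write requests and
 * read responses are byte-swapped here; transmission is only kicked off
 * immediately if the channel's transmit PCL is currently idle. */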
static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_lynx *lynx = host->hostdata;
	struct lynx_send_data *d;
	unsigned long flags;

	if (packet->data_size >= 4096) {
		PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
		      packet->data_size);
		return -EOVERFLOW;
	}

	switch (packet->type) {
	case hpsb_async:
	case hpsb_raw:
		d = &lynx->async;
		break;
	case hpsb_iso:
		d = &lynx->iso_send;
		break;
	default:
		PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
		      packet->type);
		return -EINVAL;
	}

	if (packet->tcode == TCODE_WRITEQ
	    || packet->tcode == TCODE_READQ_RESPONSE) {
		cpu_to_be32s(&packet->header[3]);
	}

	spin_lock_irqsave(&d->queue_lock, flags);

	list_add_tail(&packet->driver_list, &d->queue);
	if (list_empty(&d->pcl_queue))
		send_next(lynx, packet->type);

	spin_unlock_irqrestore(&d->queue_lock, flags);

	return 0;
}


/* called from subsystem core */
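/* Control requests from the ieee1394 core.  The various bus reset flavours
 * are implemented by setting IBR or ISBR (and optionally RHB) in the PHY
 * registers; CANCEL_REQUESTS aborts the whole async send queue, including a
 * packet that may already sit in the transmit PCL. */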
static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_lynx *lynx = host->hostdata;
	int retval = 0;
	struct hpsb_packet *packet;
	LIST_HEAD(packet_list);
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
			retval = 0;
			break;
		}

		switch (arg) {
		case SHORT_RESET:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				if (phy_reg & 0x80) {
					phy_reg &= ~0x80;
					set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg &= ~0x80;
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				if (!(phy_reg & 0x80)) {
					phy_reg |= 0x80;
					set_phy_reg(lynx, 1, phy_reg); /* set RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0xc0;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
			break;
		default:
			PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(lynx, CYCLE_TIMER);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(lynx, CYCLE_TIMER, arg);
		break;

	case SET_BUS_ID:
		reg_write(lynx, LINK_ID,
			  (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			reg_set_bits(lynx, LINK_CONTROL,
				     LINK_CONTROL_CYCMASTER);
		} else {
			reg_clear_bits(lynx, LINK_CONTROL,
				       LINK_CONTROL_CYCMASTER);
		}
		break;

	case CANCEL_REQUESTS:
		spin_lock_irqsave(&lynx->async.queue_lock, flags);

		reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
		list_splice(&lynx->async.queue, &packet_list);
		INIT_LIST_HEAD(&lynx->async.queue);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
			PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_ABORTED;
			}
			hpsb_packet_sent(host, packet, ack);
		}

		while (!list_empty(&packet_list)) {
			packet = driver_packet(packet_list.next);
			list_del_init(&packet->driver_list);
			hpsb_packet_sent(host, packet, ACKX_ABORTED);
		}

		break;

	case ISO_LISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		if (lynx->iso_rcv.chan_count++ == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  DMA_WORD1_CMP_ENABLE_MASTER);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;

	case ISO_UNLISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		if (--lynx->iso_rcv.chan_count == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  0);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;

	default:
		PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
		retval = -1;
	}

	return retval;
}


/***************************************
 * IEEE-1394 functionality section END *
 ***************************************/


/********************************************************
 * Global stuff (interrupt handler, init/shutdown code) *
 ********************************************************/
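/* Interrupt handler: acknowledge the PCI and link interrupt status, report
 * link events, and complete the DMA work of the iso receive, async send,
 * iso send and async receive channels. */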
static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
				    struct pt_regs *regs_are_unused)
{
	struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
	struct hpsb_host *host = lynx->host;
	u32 intmask;
	u32 linkint;

	linkint = reg_read(lynx, LINK_INT_STATUS);
	intmask = reg_read(lynx, PCI_INT_STATUS);

	if (!(intmask & PCI_INT_INT_PEND))
		return IRQ_NONE;

	PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
	       linkint);

	reg_write(lynx, LINK_INT_STATUS, linkint);
	reg_write(lynx, PCI_INT_STATUS, intmask);

	if (intmask & PCI_INT_1394) {
		if (linkint & LINK_INT_PHY_TIMEOUT) {
			PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
		}
		if (linkint & LINK_INT_PHY_BUSRESET) {
			PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			if (!host->in_bus_reset)
				hpsb_bus_reset(host);
		}
		if (linkint & LINK_INT_PHY_REG_RCVD) {
			u32 reg;

			spin_lock(&lynx->phy_reg_lock);
			reg = reg_read(lynx, LINK_PHY);
			spin_unlock(&lynx->phy_reg_lock);

			if (!host->in_bus_reset) {
				PRINT(KERN_INFO, lynx->id,
				      "phy reg received without reset");
			} else if (reg & 0xf00) {
				PRINT(KERN_INFO, lynx->id,
				      "unsolicited phy reg %d received",
				      (reg >> 8) & 0xf);
			} else {
				lynx->phy_reg0 = reg & 0xff;
				handle_selfid(lynx, host);
			}
		}
		if (linkint & LINK_INT_ISO_STUCK) {
			PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
		}
		if (linkint & LINK_INT_ASYNC_STUCK) {
			PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
		}
		if (linkint & LINK_INT_SENT_REJECT) {
			PRINT(KERN_INFO, lynx->id, "sent reject");
		}
		if (linkint & LINK_INT_TX_INVALID_TC) {
			PRINT(KERN_INFO, lynx->id, "invalid transaction code");
		}
		if (linkint & LINK_INT_GRF_OVERFLOW) {
			/* flush FIFO if overflow happens during reset */
			if (host->in_bus_reset)
				reg_write(lynx, FIFO_CONTROL,
					  FIFO_CONTROL_GRF_FLUSH);
			PRINT(KERN_INFO, lynx->id, "GRF overflow");
		}
		if (linkint & LINK_INT_ITF_UNDERFLOW) {
			PRINT(KERN_INFO, lynx->id, "ITF underflow");
		}
		if (linkint & LINK_INT_ATF_UNDERFLOW) {
			PRINT(KERN_INFO, lynx->id, "ATF underflow");
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
		PRINTD(KERN_DEBUG, lynx->id, "iso receive");

		spin_lock(&lynx->iso_rcv.lock);

		lynx->iso_rcv.stat[lynx->iso_rcv.next] =
			reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));

		lynx->iso_rcv.used++;
		lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;

		if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
		    || !lynx->iso_rcv.chan_count) {
			PRINTD(KERN_DEBUG, lynx->id, "stopped");
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
		}

		run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
			    CHANNEL_ISO_RCV);

		spin_unlock(&lynx->iso_rcv.lock);

		tasklet_schedule(&lynx->iso_rcv.tq);
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
		PRINTD(KERN_DEBUG, lynx->id, "async sent");
		spin_lock(&lynx->async.queue_lock);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock(&lynx->async.queue_lock);
			PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			if (!list_empty(&lynx->async.queue)) {
				send_next(lynx, hpsb_async);
			}

			spin_unlock(&lynx->async.queue_lock);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_SEND_ERROR;
			}
			hpsb_packet_sent(host, packet, ack);
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
		PRINTD(KERN_DEBUG, lynx->id, "iso sent");
		spin_lock(&lynx->iso_send.queue_lock);

		if (list_empty(&lynx->iso_send.pcl_queue)) {
			spin_unlock(&lynx->iso_send.queue_lock);
			PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			get_pcl(lynx, lynx->iso_send.pcl, &pcl);

			packet = driver_packet(lynx->iso_send.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			if (!list_empty(&lynx->iso_send.queue)) {
				send_next(lynx, hpsb_iso);
			}

			spin_unlock(&lynx->iso_send.queue_lock);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
				ack = ACKX_SEND_ERROR;
			}
			hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
		/* general receive DMA completed */
		int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));

		PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
		       stat & 0x1fff);

		if (stat & DMA_CHAN_STAT_SELFID) {
			lynx->selfid_size = stat & 0x1fff;
			handle_selfid(lynx, host);
		} else {
			quadlet_t *q_data = lynx->rcv_page;
			if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
			    || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
				cpu_to_be32s(q_data + 3);
			}
			hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
		}

		run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
	}

	return IRQ_HANDLED;
}
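/* Tasklet bottom half for isochronous receive: pass each filled receive
 * buffer to the ieee1394 core after checking the reported length and error
 * bits, then re-enable the word compare unit if there are still listeners. */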
static void iso_rcv_bh(struct ti_lynx *lynx)
{
	unsigned int idx;
	quadlet_t *data;
	unsigned long flags;

	spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

	while (lynx->iso_rcv.used) {
		idx = lynx->iso_rcv.last;
		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);

		data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
			+ (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;

		if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
			PRINT(KERN_ERR, lynx->id,
			      "iso length mismatch 0x%08x/0x%08x", *data,
			      lynx->iso_rcv.stat[idx]);
		}

		if (lynx->iso_rcv.stat[idx]
		    & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
			PRINT(KERN_INFO, lynx->id,
			      "iso receive error on %d to 0x%p", idx, data);
		} else {
			hpsb_packet_received(lynx->host, data,
					     lynx->iso_rcv.stat[idx] & 0x1fff,
					     0);
		}

		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
		lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
		lynx->iso_rcv.used--;
	}

	if (lynx->iso_rcv.chan_count) {
		reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
			  DMA_WORD1_CMP_ENABLE_MASTER);
	}
	spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
}
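/* Tear the card down in reverse order of initialization.  The switch on
 * lynx->state intentionally falls through: each state label undoes its own
 * setup step and continues with the cleanup for all earlier states. */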
static void remove_card(struct pci_dev *dev)
{
	struct ti_lynx *lynx;
	struct device *lynx_dev;
	int i;

	lynx = pci_get_drvdata(dev);
	if (!lynx) return;
	pci_set_drvdata(dev, NULL);

	lynx_dev = get_device(&lynx->host->device);

	switch (lynx->state) {
	case is_host:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		hpsb_remove_host(lynx->host);
	case have_intr:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		free_irq(lynx->dev->irq, lynx);

		/* Disable IRM Contender and LCtrl */
		if (lynx->phyic.reg_1394a)
			set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));

		/* Let all other nodes know to ignore us */
		lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
	case have_iomappings:
		reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
		/* Fix buggy cards with autoboot pin not tied low: */
		reg_write(lynx, DMA0_CHAN_CTRL, 0);
		iounmap(lynx->registers);
		iounmap(lynx->local_rom);
		iounmap(lynx->local_ram);
		iounmap(lynx->aux_port);
	case have_1394_buffers:
		for (i = 0; i < ISORCV_PAGES; i++) {
			if (lynx->iso_rcv.page[i]) {
				pci_free_consistent(lynx->dev, PAGE_SIZE,
						    lynx->iso_rcv.page[i],
						    lynx->iso_rcv.page_dma[i]);
			}
		}
		pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
				    lynx->rcv_page_dma);
	case have_aux_buf:
	case have_pcl_mem:
		pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
				    lynx->pcl_mem_dma);
	case clear:
		/* do nothing - already freed */
		;
	}

	tasklet_kill(&lynx->iso_rcv.tq);

	if (lynx_dev)
		put_device(lynx_dev);
}
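/* Probe routine: allocate DMA-able buffers and I/O mappings, request the
 * interrupt, build the PCL programs for the receive and send channels and
 * initialize the link and PHY; unless skip_eeprom is set, the bus info block
 * is read from the serial EEPROM via bit-banged i2c before the host is
 * registered with the ieee1394 core. */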
static int __devinit add_card(struct pci_dev *dev,
			      const struct pci_device_id *devid_is_unused)
{
#define FAIL(fmt, args...) do { \
	PRINT_G(KERN_ERR, fmt , ## args); \
	remove_card(dev); \
	return error; \
	} while (0)

	char irq_buf[16];
	struct hpsb_host *host;
	struct ti_lynx *lynx; /* shortcut to currently handled device */
	struct ti_pcl pcl;
	u32 *pcli;
	int i;
	int error;

	error = -ENXIO;

	if (pci_set_dma_mask(dev, DMA_32BIT_MASK))
		FAIL("DMA address limits not supported for PCILynx hardware");
	if (pci_enable_device(dev))
		FAIL("failed to enable PCILynx hardware");
	pci_set_master(dev);

	error = -ENOMEM;

	host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
	if (!host) FAIL("failed to allocate control structure memory");

	lynx = host->hostdata;
	lynx->id = card_id++;
	lynx->dev = dev;
	lynx->state = clear;
	lynx->host = host;
	host->pdev = dev;
	pci_set_drvdata(dev, lynx);

	spin_lock_init(&lynx->lock);
	spin_lock_init(&lynx->phy_reg_lock);

	lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
					     &lynx->pcl_mem_dma);

	if (lynx->pcl_mem != NULL) {
		lynx->state = have_pcl_mem;
		PRINT(KERN_INFO, lynx->id,
		      "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
		      lynx->pcl_mem);
	} else {
		FAIL("failed to allocate PCL memory area");
	}

	lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
					      &lynx->rcv_page_dma);
	if (lynx->rcv_page == NULL) {
		FAIL("failed to allocate receive buffer");
	}
	lynx->state = have_1394_buffers;

	for (i = 0; i < ISORCV_PAGES; i++) {
		lynx->iso_rcv.page[i] =
			pci_alloc_consistent(dev, PAGE_SIZE,
					     &lynx->iso_rcv.page_dma[i]);
		if (lynx->iso_rcv.page[i] == NULL) {
			FAIL("failed to allocate iso receive buffers");
		}
	}

	lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
					  PCILYNX_MAX_REGISTER);
	lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
	lynx->aux_port  = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
	lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
				  PCILYNX_MAX_MEMORY);
	lynx->state = have_iomappings;

	if (lynx->registers == NULL) {
		FAIL("failed to remap registers - card not accessible");
	}

	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
	/* Fix buggy cards with autoboot pin not tied low: */
	reg_write(lynx, DMA0_CHAN_CTRL, 0);

	sprintf (irq_buf, "%d", dev->irq);

	if (!request_irq(dev->irq, lynx_irq_handler, IRQF_SHARED,
			 PCILYNX_DRIVER_NAME, lynx)) {
		PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
		lynx->state = have_intr;
	} else {
		FAIL("failed to allocate shared interrupt %s", irq_buf);
	}

	/* alloc_pcl return values are not checked, it is expected that the
	 * provided PCL space is sufficient for the initial allocations */
	lynx->rcv_pcl = alloc_pcl(lynx);
	lynx->rcv_pcl_start = alloc_pcl(lynx);
	lynx->async.pcl = alloc_pcl(lynx);
	lynx->async.pcl_start = alloc_pcl(lynx);
	lynx->iso_send.pcl = alloc_pcl(lynx);
	lynx->iso_send.pcl_start = alloc_pcl(lynx);

	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
	}
	lynx->iso_rcv.pcl_start = alloc_pcl(lynx);

	/* all allocations successful - simple init stuff follows */

	reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

	tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
		     (unsigned long)lynx);

	spin_lock_init(&lynx->iso_rcv.lock);

	spin_lock_init(&lynx->async.queue_lock);
	lynx->async.channel = CHANNEL_ASYNC_SEND;
	spin_lock_init(&lynx->iso_send.queue_lock);
	lynx->iso_send.channel = CHANNEL_ISO_SEND;

	PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
	      "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
	      lynx->local_ram, lynx->aux_port);

	/* now, looking for PHY register set */
	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
		lynx->phyic.reg_1394a = 1;
		PRINT(KERN_INFO, lynx->id,
		      "found 1394a conform PHY (using extended register set)");
		lynx->phyic.vendor = get_phy_vendorid(lynx);
		lynx->phyic.product = get_phy_productid(lynx);
	} else {
		lynx->phyic.reg_1394a = 0;
		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
	}

	lynx->selfid_size = -1;
	lynx->phy_reg0 = -1;

	INIT_LIST_HEAD(&lynx->async.queue);
	INIT_LIST_HEAD(&lynx->async.pcl_queue);
	INIT_LIST_HEAD(&lynx->iso_send.queue);
	INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);

	pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
	put_pcl(lynx, lynx->rcv_pcl_start, &pcl);

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;

	pcl.buffer[0].control = PCL_CMD_RCV | 16;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[1].control = PCL_LAST_BUFF | 4080;

	pcl.buffer[0].pointer = lynx->rcv_page_dma;
	pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
	put_pcl(lynx, lynx->rcv_pcl, &pcl);

	pcl.next = pcl_bus(lynx, lynx->async.pcl);
	pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
	put_pcl(lynx, lynx->async.pcl_start, &pcl);

	pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
	pcl.async_error_next = PCL_NEXT_INVALID;
	put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.buffer[0].control = PCL_CMD_RCV | 4;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[1].control = PCL_LAST_BUFF | 2044;

	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		int page = i / ISORCV_PER_PAGE;
		int sec = i % ISORCV_PER_PAGE;

		pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
			+ sec * MAX_ISORCV_SIZE;
		pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
		put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
	}

	pcli = (u32 *)&pcl;
	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
	}
	put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);

	/* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
	reg_write(lynx, FIFO_SIZES, 0x003030a0);
	/* 20 byte threshold before triggering PCI transfer */
	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
	/* threshold on both send FIFOs before transmitting:
	   FIFO size - cache line size - 1 */
	i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
	i = 0x30 - i - 1;
	reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);

	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);

	reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
		  | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
		  | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
		  | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
		  | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
		  | LINK_INT_ATF_UNDERFLOW);

	reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
	reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
	reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
	reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
		  DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
		  | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
		  | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);

	run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);

	reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
	reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
	reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
	reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);

	run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);

	reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
		  | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
		  | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
		  | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);

	if (!lynx->phyic.reg_1394a) {
		if (!hpsb_disable_irm) {
			/* attempt to enable contender bit -FIXME- would this
			 * work elsewhere? */
			reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
			reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
		}
	} else {
		/* set the contender (if appropriate) and LCtrl bit in the
		 * extended PHY register set. (Should check that PHY_02_EXTENDED
		 * is set in register 2?)
		 */
		i = get_phy_reg(lynx, 4);
		i |= PHY_04_LCTRL;
		if (hpsb_disable_irm)
			i &= ~PHY_04_CONTENDER;
		else
			i |= PHY_04_CONTENDER;
		if (i != -1) set_phy_reg(lynx, 4, i);
	}

	if (!skip_eeprom)
	{
		/* needed for i2c communication with serial eeprom */
		struct i2c_adapter *i2c_ad;
		struct i2c_algo_bit_data i2c_adapter_data;

		error = -ENOMEM;
		i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
		if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");

		memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
		i2c_adapter_data = bit_data;
		i2c_ad->algo_data = &i2c_adapter_data;
		i2c_adapter_data.data = lynx;

		PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
		       reg_read(lynx, SERIAL_EEPROM_CONTROL));

		/* reset hardware to sane state */
		lynx->i2c_driven_state = 0x00000070;
		reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);

		if (i2c_bit_add_bus(i2c_ad) < 0)
		{
			kfree(i2c_ad);
			error = -ENXIO;
			FAIL("unable to register i2c");
		}
		else
		{
			/* do i2c stuff */
			unsigned char i2c_cmd = 0x10;
			struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
						  { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
						};

			/* we use i2c_transfer because i2c_smbus_read_block_data does not work properly
			   and we do it more efficiently in one transaction rather than using several reads */
			if (i2c_transfer(i2c_ad, msg, 2) < 0) {
				PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
			} else {
				int i;

				PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
				/* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
				 * generation(1394a) and link_spd(1394a) field and recalculate
				 * the CRC */

				for (i = 0; i < 5 ; i++)
					PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
					       i, be32_to_cpu(lynx->bus_info_block[i]));

				/* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
				if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
				    (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
				{
					PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
				} else {
					kfree(i2c_ad);
					error = -ENXIO;
					FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
				}
			}

			i2c_bit_del_bus(i2c_ad);
			kfree(i2c_ad);
		}
	}

	host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
	host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
	host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
	host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
	if (!lynx->phyic.reg_1394a)
		host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
	else
		host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;

	if (hpsb_add_host(host)) {
		error = -ENOMEM;
		FAIL("Failed to register host with highlevel");
	}

	lynx->state = is_host;

	return 0;
#undef FAIL
}
static struct pci_device_id pci_table[] = {
	{
		.vendor    = PCI_VENDOR_ID_TI,
		.device    = PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ } /* Terminating entry */
};

static struct pci_driver lynx_pci_driver = {
	.name     = PCILYNX_DRIVER_NAME,
	.id_table = pci_table,
	.probe    = add_card,
	.remove   = remove_card,
};

static struct hpsb_host_driver lynx_driver = {
	.owner             = THIS_MODULE,
	.name              = PCILYNX_DRIVER_NAME,
	.set_hw_config_rom = NULL,
	.transmit_packet   = lynx_transmit,
	.devctl            = lynx_devctl,
	.isoctl            = NULL,
};

MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("pcilynx");
MODULE_DEVICE_TABLE(pci, pci_table);

static int __init pcilynx_init(void)
{
	int ret;

	ret = pci_register_driver(&lynx_pci_driver);
	if (ret < 0) {
		PRINT_G(KERN_ERR, "PCI module init failed");
		return ret;
	}

	return 0;
}

static void __exit pcilynx_cleanup(void)
{
	pci_unregister_driver(&lynx_pci_driver);
}

module_init(pcilynx_init);
module_exit(pcilynx_cleanup);