/*
 * pcilynx.c - Texas Instruments PCILynx driver
 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
 *                         Stephan Linz <linz@mazet.de>
 *                         Manfred Weihs <weihs@ict.tuwien.ac.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Contributions:
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *        reading bus info block (containing GUID) from serial
 *            eeprom via i2c and storing it in config ROM
 *        Reworked code for initiating bus resets
 *            (long, short, with or without hold-off)
 *        Enhancements in async and iso send code
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/bitops.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "pcilynx.h"

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
/* print card specific information */
#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
/* verbose-debug variants of the above; compiled to no-ops otherwise */
#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
#else
#define PRINT_GD(level, fmt, args...) do {} while (0)
#define PRINTD(level, card, fmt, args...) do {} while (0)
#endif

/* Module Parameters */
/* If non-zero, a generic bus info block is used instead of reading the
 * card's serial EEPROM over i2c. */
static int skip_eeprom = 0;
module_param(skip_eeprom, int, 0444);
MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");

static struct hpsb_host_driver lynx_driver;
static unsigned int card_id;	/* sequential id assigned to each probed card */
/*
 * I2C stuff
 */

/* the i2c stuff was inspired by i2c-philips-par.c */
  80. static void bit_setscl(void *data, int state)
  81. {
  82. if (state) {
  83. ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
  84. } else {
  85. ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
  86. }
  87. reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
  88. }
  89. static void bit_setsda(void *data, int state)
  90. {
  91. if (state) {
  92. ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
  93. } else {
  94. ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
  95. }
  96. reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
  97. }
  98. static int bit_getscl(void *data)
  99. {
  100. return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
  101. }
  102. static int bit_getsda(void *data)
  103. {
  104. return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
  105. }
/* i2c client registration hook; nothing to do for this adapter. */
static int bit_reg(struct i2c_client *client)
{
	return 0;
}
/* i2c client unregistration hook; nothing to do for this adapter. */
static int bit_unreg(struct i2c_client *client)
{
	return 0;
}
/* Bit-banging callbacks and timing parameters for the i2c-algo-bit engine.
 * The .data pointer (the struct ti_lynx *) is filled in at probe time. */
static struct i2c_algo_bit_data bit_data = {
	.setsda			= bit_setsda,
	.setscl			= bit_setscl,
	.getsda			= bit_getsda,
	.getscl			= bit_getscl,
	.udelay			= 5,	/* clock half-period, microseconds */
	.mdelay			= 5,
	.timeout		= 100,
};
/* The i2c adapter exposed for reading the serial EEPROM. */
static struct i2c_adapter bit_ops = {
	.id			= 0xAA, //FIXME: probably we should get an id in i2c-id.h
	.client_register	= bit_reg,
	.client_unregister	= bit_unreg,
	.name			= "PCILynx I2C",
};
/*
 * PCL handling functions.
 */
  132. static pcl_t alloc_pcl(struct ti_lynx *lynx)
  133. {
  134. u8 m;
  135. int i, j;
  136. spin_lock(&lynx->lock);
  137. /* FIXME - use ffz() to make this readable */
  138. for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
  139. m = lynx->pcl_bmap[i];
  140. for (j = 0; j < 8; j++) {
  141. if (m & 1<<j) {
  142. continue;
  143. }
  144. m |= 1<<j;
  145. lynx->pcl_bmap[i] = m;
  146. spin_unlock(&lynx->lock);
  147. return 8 * i + j;
  148. }
  149. }
  150. spin_unlock(&lynx->lock);
  151. return -1;
  152. }
#if 0
/* Return a previously allocated PCL slot to the bitmap.  Logs an error
 * if the slot was not actually allocated. */
static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
{
	int off, bit;

	off = pclid / 8;
	bit = pclid % 8;

	/* negative ids are rejected before the bitmap is touched */
	if (pclid < 0) {
		return;
	}

	spin_lock(&lynx->lock);
	if (lynx->pcl_bmap[off] & 1<<bit) {
		lynx->pcl_bmap[off] &= ~(1<<bit);
	} else {
		PRINT(KERN_ERR, lynx->id,
		      "attempted to free unallocated PCL %d", pclid);
	}
	spin_unlock(&lynx->lock);
}

/* functions useful for debugging */

/* Dump every field of a PCL (header words plus the 13 buffer
 * control/pointer pairs) to the kernel log. */
static void pretty_print_pcl(const struct ti_pcl *pcl)
{
	int i;

	printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
	       pcl->next, pcl->user_data, pcl->pcl_status,
	       pcl->remaining_transfer_count, pcl->next_data_buffer);

	printk("PCL");
	for (i=0; i<13; i++) {
		printk(" c%x:%08x d%x:%08x",
		       i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
		if (!(i & 0x3) && (i != 12)) printk("\nPCL");
	}
	printk("\n");
}

/* Fetch a PCL from card memory by id and dump it. */
static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
{
	struct ti_pcl pcl;

	get_pcl(lynx, pclid, &pcl);
	pretty_print_pcl(&pcl);
}
#endif
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/
/*
 * Read a PHY register through the LINK_PHY interface.
 *
 * Issues a read command, then busy-waits until the address field of the
 * returned data matches the requested register (with a runaway guard of
 * ~10000 polls).  Returns the 8-bit register value, or -1 on an invalid
 * address or poll timeout.
 */
static int get_phy_reg(struct ti_lynx *lynx, int addr)
{
	int retval;
	int i = 0;
	unsigned long flags;

	if (addr > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register address %d out of range",
		      __FUNCTION__, addr);
		return -1;
	}

	spin_lock_irqsave(&lynx->phy_reg_lock, flags);

	reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
	do {
		retval = reg_read(lynx, LINK_PHY);

		if (i > 10000) {
			PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
			      __FUNCTION__);
			retval = -1;
			break;
		}
		i++;
	/* loop until the register address echoed in bits 8-11 matches */
	} while ((retval & 0xf00) != LINK_PHY_RADDR(addr));

	/* acknowledge the "phy register received" interrupt we just caused */
	reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
	spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);

	if (retval != -1) {
		return retval & 0xff;
	} else {
		return -1;
	}
}
  227. static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
  228. {
  229. unsigned long flags;
  230. if (addr > 15) {
  231. PRINT(KERN_ERR, lynx->id,
  232. "%s: PHY register address %d out of range", __FUNCTION__, addr);
  233. return -1;
  234. }
  235. if (val > 0xff) {
  236. PRINT(KERN_ERR, lynx->id,
  237. "%s: PHY register value %d out of range", __FUNCTION__, val);
  238. return -1;
  239. }
  240. spin_lock_irqsave(&lynx->phy_reg_lock, flags);
  241. reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
  242. | LINK_PHY_WDATA(val));
  243. spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
  244. return 0;
  245. }
  246. static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
  247. {
  248. int reg;
  249. if (page > 7) {
  250. PRINT(KERN_ERR, lynx->id,
  251. "%s: PHY page %d out of range", __FUNCTION__, page);
  252. return -1;
  253. }
  254. reg = get_phy_reg(lynx, 7);
  255. if (reg != -1) {
  256. reg &= 0x1f;
  257. reg |= (page << 5);
  258. set_phy_reg(lynx, 7, reg);
  259. return 0;
  260. } else {
  261. return -1;
  262. }
  263. }
#if 0 /* not needed at this time */
/* Select a PHY register port (low nibble of PHY register 7).
 * Returns 0 on success, -1 on bad port number or failed PHY read. */
static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
{
	int reg;

	if (port > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY port %d out of range", __FUNCTION__, port);
		return -1;
	}

	reg = get_phy_reg(lynx, 7);
	if (reg != -1) {
		/* keep the page select bits, replace the port select bits */
		reg &= 0xf0;
		reg |= port;
		set_phy_reg(lynx, 7, reg);
		return 0;
	} else {
		return -1;
	}
}
#endif
  284. static u32 get_phy_vendorid(struct ti_lynx *lynx)
  285. {
  286. u32 pvid = 0;
  287. sel_phy_reg_page(lynx, 1);
  288. pvid |= (get_phy_reg(lynx, 10) << 16);
  289. pvid |= (get_phy_reg(lynx, 11) << 8);
  290. pvid |= get_phy_reg(lynx, 12);
  291. PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
  292. return pvid;
  293. }
  294. static u32 get_phy_productid(struct ti_lynx *lynx)
  295. {
  296. u32 id = 0;
  297. sel_phy_reg_page(lynx, 1);
  298. id |= (get_phy_reg(lynx, 13) << 16);
  299. id |= (get_phy_reg(lynx, 14) << 8);
  300. id |= get_phy_reg(lynx, 15);
  301. PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
  302. return id;
  303. }
/*
 * Construct this node's own selfid packet from PHY registers 0..6.
 *
 * Needed for pre-1394a PHYs, which do not deliver the node's own selfid
 * in the receive FIFO.  Returns the big-endian selfid quadlet.
 */
static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
				     struct hpsb_host *host)
{
	quadlet_t lsid;
	char phyreg[7];
	int i;

	phyreg[0] = lynx->phy_reg0;
	for (i = 1; i < 7; i++) {
		phyreg[i] = get_phy_reg(lynx, i);
	}

	/* FIXME? We assume a TSB21LV03A phy here.  This code doesn't support
	   more than 3 ports on the PHY anyway. */

	lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);	/* selfid marker + phy id */
	lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
	lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
	if (!hpsb_disable_irm)
		lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
	/* lsid |= 1 << 11; *//* set contender (hack) */
	lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */

	/* encode the state of each PHY port (two bits per port) */
	for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
		if (phyreg[3 + i] & 0x4) {
			lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
				<< (6 - i*2);
		} else {
			lsid |= 1 << (6 - i*2);
		}
	}

	cpu_to_be32s(&lsid);
	PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
	return lsid;
}
/*
 * Process the selfid packets collected in the receive page after a bus
 * reset: byte-swap them, hand each one to the core, splice in our own
 * generated selfid for pre-1394a PHYs, and finish by re-enabling link
 * reception/transmission.
 */
static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
{
	quadlet_t *q = lynx->rcv_page;
	int phyid, isroot, size;
	quadlet_t lsid = 0;
	int i;

	/* both values are set by the bus reset path; bail if either is
	 * still pending */
	if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;

	size = lynx->selfid_size;
	phyid = lynx->phy_reg0;

	/* convert at most the first 16 bytes of selfid data to big endian */
	i = (size > 16 ? 16 : size) / 4 - 1;
	while (i >= 0) {
		cpu_to_be32s(&q[i]);
		i--;
	}

	/* old PHYs don't report our own selfid, so synthesize one */
	if (!lynx->phyic.reg_1394a) {
		lsid = generate_own_selfid(lynx, host);
	}

	isroot = (phyid & 2) != 0;
	phyid >>= 2;
	PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
	      phyid, (isroot ? "root" : "not root"));
	reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);

	/* no packets received at all: our own selfid is the only one */
	if (!lynx->phyic.reg_1394a && !size) {
		hpsb_selfid_received(host, lsid);
	}

	while (size > 0) {
		struct selfid *sid = (struct selfid *)q;

		/* insert our generated selfid in phy-id order */
		if (!lynx->phyic.reg_1394a && !sid->extended
		    && (sid->phy_id == (phyid + 1))) {
			hpsb_selfid_received(host, lsid);
		}

		/* each selfid is followed by its bit-inverted check quadlet */
		if (q[0] == ~q[1]) {
			PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
			      q[0]);
			hpsb_selfid_received(host, q[0]);
		} else {
			PRINT(KERN_INFO, lynx->id,
			      "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
		}
		q += 2;
		size -= 8;
	}

	/* as root we have the highest phy id, so our selfid comes last */
	if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
		hpsb_selfid_received(host, lsid);
	}

	hpsb_selfid_complete(host, phyid, isroot);

	if (host->in_bus_reset) return; /* in bus reset again */

	if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think, we need this here
	reg_set_bits(lynx, LINK_CONTROL,
		     LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
		     | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
}
/* This must be called with the respective queue_lock held. */
/*
 * Take the next packet off the async or iso-send software queue, DMA-map
 * its header (and data, if any), build the transmit PCL and start the
 * DMA channel.  Exactly one packet may be in flight per channel; a
 * nonempty pcl_queue here is a driver bug.
 */
static void send_next(struct ti_lynx *lynx, int what)
{
	struct ti_pcl pcl;
	struct lynx_send_data *d;
	struct hpsb_packet *packet;

	d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
	if (!list_empty(&d->pcl_queue)) {
		PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
		BUG();
	}

	/* move the packet from the pending queue to the in-flight queue */
	packet = driver_packet(d->queue.next);
	list_move_tail(&packet->driver_list, &d->pcl_queue);

	d->header_dma = pci_map_single(lynx->dev, packet->header,
				       packet->header_size, PCI_DMA_TODEVICE);
	if (packet->data_size) {
		d->data_dma = pci_map_single(lynx->dev, packet->data,
					     packet->data_size,
					     PCI_DMA_TODEVICE);
	} else {
		d->data_dma = 0;
	}

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.pcl_status = 0;
	/* buffer 0 carries the header; speed code lives in bits 14-15 */
	pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[0].pointer = d->header_dma;
	/* buffer 1 carries the (possibly empty) payload */
	pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
	pcl.buffer[1].pointer = d->data_dma;

	switch (packet->type) {
	case hpsb_async:
		pcl.buffer[0].control |= PCL_CMD_XMT;
		break;
	case hpsb_iso:
		pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
		break;
	case hpsb_raw:
		pcl.buffer[0].control |= PCL_CMD_UNFXMT;
		break;
	}

	put_pcl(lynx, d->pcl, &pcl);
	run_pcl(lynx, d->pcl_start, d->channel);
}
/* called from subsystem core */
/*
 * Queue a packet for transmission on the appropriate channel (async for
 * async/raw packets, iso-send for iso packets) and kick off the DMA if
 * the channel is currently idle.  Returns 0, -EOVERFLOW for oversized
 * payloads, or -EINVAL for unknown packet types.
 */
static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_lynx *lynx = host->hostdata;
	struct lynx_send_data *d;
	unsigned long flags;

	if (packet->data_size >= 4096) {
		PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
		      packet->data_size);
		return -EOVERFLOW;
	}

	switch (packet->type) {
	case hpsb_async:
	case hpsb_raw:
		d = &lynx->async;
		break;
	case hpsb_iso:
		d = &lynx->iso_send;
		break;
	default:
		PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
		      packet->type);
		return -EINVAL;
	}

	/* quadlet payloads travel in the header and must be big endian */
	if (packet->tcode == TCODE_WRITEQ
	    || packet->tcode == TCODE_READQ_RESPONSE) {
		cpu_to_be32s(&packet->header[3]);
	}

	spin_lock_irqsave(&d->queue_lock, flags);

	list_add_tail(&packet->driver_list, &d->queue);
	/* channel idle: start this packet immediately */
	if (list_empty(&d->pcl_queue))
		send_next(lynx, packet->type);

	spin_unlock_irqrestore(&d->queue_lock, flags);

	return 0;
}
/* called from subsystem core */
/*
 * Host controller control operations: bus resets (long/short, with or
 * without force-root), cycle counter access, bus id, cycle master
 * on/off, cancelling queued async requests and iso channel (un)listen.
 * Returns a command-specific value, or -1 for errors/unknown commands.
 */
static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_lynx *lynx = host->hostdata;
	int retval = 0;
	struct hpsb_packet *packet;
	LIST_HEAD(packet_list);
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		/* a bus reset is already in progress, nothing to do */
		if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
			retval = 0;
			break;
		}

		switch (arg) {
		case SHORT_RESET:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");

				/* invalidate cached selfid state; refilled by
				 * the reset interrupt path */
				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* set IBR (RHB left as-is) */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				/* drop root hold-off before resetting */
				if (phy_reg & 0x80) {
					phy_reg &= ~0x80;
					set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg &= ~0x80;
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				/* assert root hold-off before resetting */
				if (!(phy_reg & 0x80)) {
					phy_reg |= 0x80;
					set_phy_reg(lynx, 1, phy_reg); /* set RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0xc0;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
			break;
		default:
			PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
			retval = -1;
		}

		break;
	case GET_CYCLE_COUNTER:
		retval = reg_read(lynx, CYCLE_TIMER);
		break;
	case SET_CYCLE_COUNTER:
		reg_write(lynx, CYCLE_TIMER, arg);
		break;
	case SET_BUS_ID:
		/* keep the node id bits, replace the bus id bits */
		reg_write(lynx, LINK_ID,
			  (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
		break;
	case ACT_CYCLE_MASTER:
		if (arg) {
			reg_set_bits(lynx, LINK_CONTROL,
				     LINK_CONTROL_CYCMASTER);
		} else {
			reg_clear_bits(lynx, LINK_CONTROL,
				       LINK_CONTROL_CYCMASTER);
		}
		break;
	case CANCEL_REQUESTS:
		spin_lock_irqsave(&lynx->async.queue_lock, flags);

		/* stop the async send DMA channel and detach the pending
		 * queue for later completion with ACKX_ABORTED */
		reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
		list_splice(&lynx->async.queue, &packet_list);
		INIT_LIST_HEAD(&lynx->async.queue);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
			PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);

			/* derive the ack code from the PCL status the
			 * hardware left behind */
			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_ABORTED;
			}
			hpsb_packet_sent(host, packet, ack);
		}

		/* complete all detached pending packets as aborted */
		while (!list_empty(&packet_list)) {
			packet = driver_packet(packet_list.next);
			list_del_init(&packet->driver_list);
			hpsb_packet_sent(host, packet, ACKX_ABORTED);
		}

		break;
	case ISO_LISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		/* first listener enables iso receive comparison matching */
		if (lynx->iso_rcv.chan_count++ == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  DMA_WORD1_CMP_ENABLE_MASTER);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;
	case ISO_UNLISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		/* last listener gone: disable iso receive matching */
		if (--lynx->iso_rcv.chan_count == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  0);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;
	default:
		PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
		retval = -1;
	}

	return retval;
}
/***************************************
 * IEEE-1394 functionality section END *
 ***************************************/

/********************************************************
 * Global stuff (interrupt handler, init/shutdown code) *
 ********************************************************/
  693. static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
  694. struct pt_regs *regs_are_unused)
  695. {
  696. struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
  697. struct hpsb_host *host = lynx->host;
  698. u32 intmask;
  699. u32 linkint;
  700. linkint = reg_read(lynx, LINK_INT_STATUS);
  701. intmask = reg_read(lynx, PCI_INT_STATUS);
  702. if (!(intmask & PCI_INT_INT_PEND))
  703. return IRQ_NONE;
  704. PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
  705. linkint);
  706. reg_write(lynx, LINK_INT_STATUS, linkint);
  707. reg_write(lynx, PCI_INT_STATUS, intmask);
  708. if (intmask & PCI_INT_1394) {
  709. if (linkint & LINK_INT_PHY_TIMEOUT) {
  710. PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
  711. }
  712. if (linkint & LINK_INT_PHY_BUSRESET) {
  713. PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
  714. lynx->selfid_size = -1;
  715. lynx->phy_reg0 = -1;
  716. if (!host->in_bus_reset)
  717. hpsb_bus_reset(host);
  718. }
  719. if (linkint & LINK_INT_PHY_REG_RCVD) {
  720. u32 reg;
  721. spin_lock(&lynx->phy_reg_lock);
  722. reg = reg_read(lynx, LINK_PHY);
  723. spin_unlock(&lynx->phy_reg_lock);
  724. if (!host->in_bus_reset) {
  725. PRINT(KERN_INFO, lynx->id,
  726. "phy reg received without reset");
  727. } else if (reg & 0xf00) {
  728. PRINT(KERN_INFO, lynx->id,
  729. "unsolicited phy reg %d received",
  730. (reg >> 8) & 0xf);
  731. } else {
  732. lynx->phy_reg0 = reg & 0xff;
  733. handle_selfid(lynx, host);
  734. }
  735. }
  736. if (linkint & LINK_INT_ISO_STUCK) {
  737. PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
  738. }
  739. if (linkint & LINK_INT_ASYNC_STUCK) {
  740. PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
  741. }
  742. if (linkint & LINK_INT_SENT_REJECT) {
  743. PRINT(KERN_INFO, lynx->id, "sent reject");
  744. }
  745. if (linkint & LINK_INT_TX_INVALID_TC) {
  746. PRINT(KERN_INFO, lynx->id, "invalid transaction code");
  747. }
  748. if (linkint & LINK_INT_GRF_OVERFLOW) {
  749. /* flush FIFO if overflow happens during reset */
  750. if (host->in_bus_reset)
  751. reg_write(lynx, FIFO_CONTROL,
  752. FIFO_CONTROL_GRF_FLUSH);
  753. PRINT(KERN_INFO, lynx->id, "GRF overflow");
  754. }
  755. if (linkint & LINK_INT_ITF_UNDERFLOW) {
  756. PRINT(KERN_INFO, lynx->id, "ITF underflow");
  757. }
  758. if (linkint & LINK_INT_ATF_UNDERFLOW) {
  759. PRINT(KERN_INFO, lynx->id, "ATF underflow");
  760. }
  761. }
  762. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
  763. PRINTD(KERN_DEBUG, lynx->id, "iso receive");
  764. spin_lock(&lynx->iso_rcv.lock);
  765. lynx->iso_rcv.stat[lynx->iso_rcv.next] =
  766. reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
  767. lynx->iso_rcv.used++;
  768. lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
  769. if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
  770. || !lynx->iso_rcv.chan_count) {
  771. PRINTD(KERN_DEBUG, lynx->id, "stopped");
  772. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
  773. }
  774. run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
  775. CHANNEL_ISO_RCV);
  776. spin_unlock(&lynx->iso_rcv.lock);
  777. tasklet_schedule(&lynx->iso_rcv.tq);
  778. }
  779. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
  780. PRINTD(KERN_DEBUG, lynx->id, "async sent");
  781. spin_lock(&lynx->async.queue_lock);
  782. if (list_empty(&lynx->async.pcl_queue)) {
  783. spin_unlock(&lynx->async.queue_lock);
  784. PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
  785. } else {
  786. struct ti_pcl pcl;
  787. u32 ack;
  788. struct hpsb_packet *packet;
  789. get_pcl(lynx, lynx->async.pcl, &pcl);
  790. packet = driver_packet(lynx->async.pcl_queue.next);
  791. list_del_init(&packet->driver_list);
  792. pci_unmap_single(lynx->dev, lynx->async.header_dma,
  793. packet->header_size, PCI_DMA_TODEVICE);
  794. if (packet->data_size) {
  795. pci_unmap_single(lynx->dev, lynx->async.data_dma,
  796. packet->data_size, PCI_DMA_TODEVICE);
  797. }
  798. if (!list_empty(&lynx->async.queue)) {
  799. send_next(lynx, hpsb_async);
  800. }
  801. spin_unlock(&lynx->async.queue_lock);
  802. if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
  803. if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
  804. ack = (pcl.pcl_status >> 15) & 0xf;
  805. PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
  806. ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
  807. } else {
  808. ack = (pcl.pcl_status >> 15) & 0xf;
  809. }
  810. } else {
  811. PRINT(KERN_INFO, lynx->id, "async packet was not completed");
  812. ack = ACKX_SEND_ERROR;
  813. }
  814. hpsb_packet_sent(host, packet, ack);
  815. }
  816. }
  817. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
  818. PRINTD(KERN_DEBUG, lynx->id, "iso sent");
  819. spin_lock(&lynx->iso_send.queue_lock);
  820. if (list_empty(&lynx->iso_send.pcl_queue)) {
  821. spin_unlock(&lynx->iso_send.queue_lock);
  822. PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
  823. } else {
  824. struct ti_pcl pcl;
  825. u32 ack;
  826. struct hpsb_packet *packet;
  827. get_pcl(lynx, lynx->iso_send.pcl, &pcl);
  828. packet = driver_packet(lynx->iso_send.pcl_queue.next);
  829. list_del_init(&packet->driver_list);
  830. pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
  831. packet->header_size, PCI_DMA_TODEVICE);
  832. if (packet->data_size) {
  833. pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
  834. packet->data_size, PCI_DMA_TODEVICE);
  835. }
  836. if (!list_empty(&lynx->iso_send.queue)) {
  837. send_next(lynx, hpsb_iso);
  838. }
  839. spin_unlock(&lynx->iso_send.queue_lock);
  840. if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
  841. if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
  842. ack = (pcl.pcl_status >> 15) & 0xf;
  843. PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
  844. ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
  845. } else {
  846. ack = (pcl.pcl_status >> 15) & 0xf;
  847. }
  848. } else {
  849. PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
  850. ack = ACKX_SEND_ERROR;
  851. }
  852. hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
  853. }
  854. }
  855. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
  856. /* general receive DMA completed */
  857. int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
  858. PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
  859. stat & 0x1fff);
  860. if (stat & DMA_CHAN_STAT_SELFID) {
  861. lynx->selfid_size = stat & 0x1fff;
  862. handle_selfid(lynx, host);
  863. } else {
  864. quadlet_t *q_data = lynx->rcv_page;
  865. if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
  866. || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
  867. cpu_to_be32s(q_data + 3);
  868. }
  869. hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
  870. }
  871. run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
  872. }
  873. return IRQ_HANDLED;
  874. }
/* Tasklet bottom half for the iso receive DMA channel: drains the ring of
 * completed iso receive buffers (entries [last, last+used)) that the
 * interrupt handler recorded, and passes each packet up to the 1394 core.
 */
static void iso_rcv_bh(struct ti_lynx *lynx)
{
	unsigned int idx;
	quadlet_t *data;
	unsigned long flags;

	spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

	while (lynx->iso_rcv.used) {
		idx = lynx->iso_rcv.last;
		/* Drop the lock while delivering the packet upstream; the
		 * irq handler only advances .next/.used, and .last/.used are
		 * re-read under the lock before the next iteration. */
		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);

		data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
			+ (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;

		/* high half of the first quadlet carries the payload length;
		 * it plus 4 should equal the DMA byte count in the low 13
		 * status bits -- mismatch is logged but delivery continues */
		if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
			PRINT(KERN_ERR, lynx->id,
			      "iso length mismatch 0x%08x/0x%08x", *data,
			      lynx->iso_rcv.stat[idx]);
		}

		if (lynx->iso_rcv.stat[idx]
		    & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
			/* PCI or packet error: drop the buffer, just log it */
			PRINT(KERN_INFO, lynx->id,
			      "iso receive error on %d to 0x%p", idx, data);
		} else {
			hpsb_packet_received(lynx->host, data,
					     lynx->iso_rcv.stat[idx] & 0x1fff,
					     0);
		}

		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
		lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
		lynx->iso_rcv.used--;
	}

	/* Re-enable channel-compare matching if any iso channels are still
	 * subscribed (the irq handler disables it when the ring fills up). */
	if (lynx->iso_rcv.chan_count) {
		reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
			  DMA_WORD1_CMP_ENABLE_MASTER);
	}
	spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
}
/* Tear down a card in reverse order of add_card()'s setup.  lynx->state
 * records how far initialization got; the switch deliberately falls
 * through so each case undoes exactly one stage and all earlier ones.
 * Also used by add_card()'s FAIL() macro for partial-setup cleanup.
 */
static void remove_card(struct pci_dev *dev)
{
	struct ti_lynx *lynx;
	struct device *lynx_dev;
	int i;

	lynx = pci_get_drvdata(dev);
	if (!lynx) return;
	pci_set_drvdata(dev, NULL);

	/* hold a reference on the host device until cleanup finishes */
	lynx_dev = get_device(&lynx->host->device);

	switch (lynx->state) {
	case is_host:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		hpsb_remove_host(lynx->host);
		/* fall through */
	case have_intr:
		/* (repeats the interrupt disable: harmless when falling
		 * through from is_host, required when starting here) */
		reg_write(lynx, PCI_INT_ENABLE, 0);
		free_irq(lynx->dev->irq, lynx);

		/* Disable IRM Contender and LCtrl */
		if (lynx->phyic.reg_1394a)
			set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));

		/* Let all other nodes know to ignore us */
		lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
		/* fall through */
	case have_iomappings:
		reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
		/* Fix buggy cards with autoboot pin not tied low: */
		reg_write(lynx, DMA0_CHAN_CTRL, 0);
		iounmap(lynx->registers);
		iounmap(lynx->local_rom);
		iounmap(lynx->local_ram);
		iounmap(lynx->aux_port);
		/* fall through */
	case have_1394_buffers:
		for (i = 0; i < ISORCV_PAGES; i++) {
			if (lynx->iso_rcv.page[i]) {
				pci_free_consistent(lynx->dev, PAGE_SIZE,
						    lynx->iso_rcv.page[i],
						    lynx->iso_rcv.page_dma[i]);
			}
		}
		pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
				    lynx->rcv_page_dma);
		/* fall through */
	case have_aux_buf:
		/* fall through */
	case have_pcl_mem:
		pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
				    lynx->pcl_mem_dma);
		/* fall through */
	case clear:
		/* do nothing - already freed */
		;
	}

	tasklet_kill(&lynx->iso_rcv.tq);

	if (lynx_dev)
		put_device(lynx_dev);
}
  961. static int __devinit add_card(struct pci_dev *dev,
  962. const struct pci_device_id *devid_is_unused)
  963. {
  964. #define FAIL(fmt, args...) do { \
  965. PRINT_G(KERN_ERR, fmt , ## args); \
  966. remove_card(dev); \
  967. return error; \
  968. } while (0)
  969. char irq_buf[16];
  970. struct hpsb_host *host;
  971. struct ti_lynx *lynx; /* shortcut to currently handled device */
  972. struct ti_pcl pcl;
  973. u32 *pcli;
  974. int i;
  975. int error;
  976. error = -ENXIO;
  977. if (pci_set_dma_mask(dev, DMA_32BIT_MASK))
  978. FAIL("DMA address limits not supported for PCILynx hardware");
  979. if (pci_enable_device(dev))
  980. FAIL("failed to enable PCILynx hardware");
  981. pci_set_master(dev);
  982. error = -ENOMEM;
  983. host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
  984. if (!host) FAIL("failed to allocate control structure memory");
  985. lynx = host->hostdata;
  986. lynx->id = card_id++;
  987. lynx->dev = dev;
  988. lynx->state = clear;
  989. lynx->host = host;
  990. host->pdev = dev;
  991. pci_set_drvdata(dev, lynx);
  992. spin_lock_init(&lynx->lock);
  993. spin_lock_init(&lynx->phy_reg_lock);
  994. lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
  995. &lynx->pcl_mem_dma);
  996. if (lynx->pcl_mem != NULL) {
  997. lynx->state = have_pcl_mem;
  998. PRINT(KERN_INFO, lynx->id,
  999. "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
  1000. lynx->pcl_mem);
  1001. } else {
  1002. FAIL("failed to allocate PCL memory area");
  1003. }
  1004. lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
  1005. &lynx->rcv_page_dma);
  1006. if (lynx->rcv_page == NULL) {
  1007. FAIL("failed to allocate receive buffer");
  1008. }
  1009. lynx->state = have_1394_buffers;
  1010. for (i = 0; i < ISORCV_PAGES; i++) {
  1011. lynx->iso_rcv.page[i] =
  1012. pci_alloc_consistent(dev, PAGE_SIZE,
  1013. &lynx->iso_rcv.page_dma[i]);
  1014. if (lynx->iso_rcv.page[i] == NULL) {
  1015. FAIL("failed to allocate iso receive buffers");
  1016. }
  1017. }
  1018. lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
  1019. PCILYNX_MAX_REGISTER);
  1020. lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
  1021. lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
  1022. lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
  1023. PCILYNX_MAX_MEMORY);
  1024. lynx->state = have_iomappings;
  1025. if (lynx->registers == NULL) {
  1026. FAIL("failed to remap registers - card not accessible");
  1027. }
  1028. reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
  1029. /* Fix buggy cards with autoboot pin not tied low: */
  1030. reg_write(lynx, DMA0_CHAN_CTRL, 0);
  1031. #ifndef __sparc__
  1032. sprintf (irq_buf, "%d", dev->irq);
  1033. #else
  1034. sprintf (irq_buf, "%s", __irq_itoa(dev->irq));
  1035. #endif
  1036. if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
  1037. PCILYNX_DRIVER_NAME, lynx)) {
  1038. PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
  1039. lynx->state = have_intr;
  1040. } else {
  1041. FAIL("failed to allocate shared interrupt %s", irq_buf);
  1042. }
  1043. /* alloc_pcl return values are not checked, it is expected that the
  1044. * provided PCL space is sufficient for the initial allocations */
  1045. lynx->rcv_pcl = alloc_pcl(lynx);
  1046. lynx->rcv_pcl_start = alloc_pcl(lynx);
  1047. lynx->async.pcl = alloc_pcl(lynx);
  1048. lynx->async.pcl_start = alloc_pcl(lynx);
  1049. lynx->iso_send.pcl = alloc_pcl(lynx);
  1050. lynx->iso_send.pcl_start = alloc_pcl(lynx);
  1051. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1052. lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
  1053. }
  1054. lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
  1055. /* all allocations successful - simple init stuff follows */
  1056. reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
  1057. tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
  1058. (unsigned long)lynx);
  1059. spin_lock_init(&lynx->iso_rcv.lock);
  1060. spin_lock_init(&lynx->async.queue_lock);
  1061. lynx->async.channel = CHANNEL_ASYNC_SEND;
  1062. spin_lock_init(&lynx->iso_send.queue_lock);
  1063. lynx->iso_send.channel = CHANNEL_ISO_SEND;
  1064. PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
  1065. "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
  1066. lynx->local_ram, lynx->aux_port);
  1067. /* now, looking for PHY register set */
  1068. if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
  1069. lynx->phyic.reg_1394a = 1;
  1070. PRINT(KERN_INFO, lynx->id,
  1071. "found 1394a conform PHY (using extended register set)");
  1072. lynx->phyic.vendor = get_phy_vendorid(lynx);
  1073. lynx->phyic.product = get_phy_productid(lynx);
  1074. } else {
  1075. lynx->phyic.reg_1394a = 0;
  1076. PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
  1077. }
  1078. lynx->selfid_size = -1;
  1079. lynx->phy_reg0 = -1;
  1080. INIT_LIST_HEAD(&lynx->async.queue);
  1081. INIT_LIST_HEAD(&lynx->async.pcl_queue);
  1082. INIT_LIST_HEAD(&lynx->iso_send.queue);
  1083. INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
  1084. pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
  1085. put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
  1086. pcl.next = PCL_NEXT_INVALID;
  1087. pcl.async_error_next = PCL_NEXT_INVALID;
  1088. pcl.buffer[0].control = PCL_CMD_RCV | 16;
  1089. #ifndef __BIG_ENDIAN
  1090. pcl.buffer[0].control |= PCL_BIGENDIAN;
  1091. #endif
  1092. pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
  1093. pcl.buffer[0].pointer = lynx->rcv_page_dma;
  1094. pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
  1095. put_pcl(lynx, lynx->rcv_pcl, &pcl);
  1096. pcl.next = pcl_bus(lynx, lynx->async.pcl);
  1097. pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
  1098. put_pcl(lynx, lynx->async.pcl_start, &pcl);
  1099. pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
  1100. pcl.async_error_next = PCL_NEXT_INVALID;
  1101. put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
  1102. pcl.next = PCL_NEXT_INVALID;
  1103. pcl.async_error_next = PCL_NEXT_INVALID;
  1104. pcl.buffer[0].control = PCL_CMD_RCV | 4;
  1105. #ifndef __BIG_ENDIAN
  1106. pcl.buffer[0].control |= PCL_BIGENDIAN;
  1107. #endif
  1108. pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
  1109. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1110. int page = i / ISORCV_PER_PAGE;
  1111. int sec = i % ISORCV_PER_PAGE;
  1112. pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
  1113. + sec * MAX_ISORCV_SIZE;
  1114. pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
  1115. put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
  1116. }
  1117. pcli = (u32 *)&pcl;
  1118. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1119. pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
  1120. }
  1121. put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
  1122. /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
  1123. reg_write(lynx, FIFO_SIZES, 0x003030a0);
  1124. /* 20 byte threshold before triggering PCI transfer */
  1125. reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
  1126. /* threshold on both send FIFOs before transmitting:
  1127. FIFO size - cache line size - 1 */
  1128. i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
  1129. i = 0x30 - i - 1;
  1130. reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
  1131. reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
  1132. reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
  1133. | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
  1134. | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
  1135. | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
  1136. | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
  1137. | LINK_INT_ATF_UNDERFLOW);
  1138. reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
  1139. reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
  1140. reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
  1141. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
  1142. DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
  1143. | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
  1144. | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
  1145. run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
  1146. reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
  1147. reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
  1148. reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
  1149. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
  1150. run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
  1151. reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
  1152. | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
  1153. | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
  1154. | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
  1155. if (!lynx->phyic.reg_1394a) {
  1156. if (!hpsb_disable_irm) {
  1157. /* attempt to enable contender bit -FIXME- would this
  1158. * work elsewhere? */
  1159. reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
  1160. reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
  1161. }
  1162. } else {
  1163. /* set the contender (if appropriate) and LCtrl bit in the
  1164. * extended PHY register set. (Should check that PHY_02_EXTENDED
  1165. * is set in register 2?)
  1166. */
  1167. i = get_phy_reg(lynx, 4);
  1168. i |= PHY_04_LCTRL;
  1169. if (hpsb_disable_irm)
  1170. i &= !PHY_04_CONTENDER;
  1171. else
  1172. i |= PHY_04_CONTENDER;
  1173. if (i != -1) set_phy_reg(lynx, 4, i);
  1174. }
  1175. if (!skip_eeprom)
  1176. {
  1177. /* needed for i2c communication with serial eeprom */
  1178. struct i2c_adapter *i2c_ad;
  1179. struct i2c_algo_bit_data i2c_adapter_data;
  1180. error = -ENOMEM;
  1181. i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL);
  1182. if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
  1183. memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
  1184. i2c_adapter_data = bit_data;
  1185. i2c_ad->algo_data = &i2c_adapter_data;
  1186. i2c_adapter_data.data = lynx;
  1187. PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
  1188. reg_read(lynx, SERIAL_EEPROM_CONTROL));
  1189. /* reset hardware to sane state */
  1190. lynx->i2c_driven_state = 0x00000070;
  1191. reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
  1192. if (i2c_bit_add_bus(i2c_ad) < 0)
  1193. {
  1194. kfree(i2c_ad);
  1195. error = -ENXIO;
  1196. FAIL("unable to register i2c");
  1197. }
  1198. else
  1199. {
  1200. /* do i2c stuff */
  1201. unsigned char i2c_cmd = 0x10;
  1202. struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
  1203. { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
  1204. };
  1205. #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
  1206. union i2c_smbus_data data;
  1207. if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE,NULL))
  1208. PRINT(KERN_ERR, lynx->id,"eeprom read start has failed");
  1209. else
  1210. {
  1211. u16 addr;
  1212. for (addr=0x00; addr < 0x100; addr++) {
  1213. if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE,& data)) {
  1214. PRINT(KERN_ERR, lynx->id, "unable to read i2c %x", addr);
  1215. break;
  1216. }
  1217. else
  1218. PRINT(KERN_DEBUG, lynx->id,"got serial eeprom data at %x: %x",addr, data.byte);
  1219. }
  1220. }
  1221. #endif
  1222. /* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we
  1223. do it more efficiently in one transaction rather then using several reads */
  1224. if (i2c_transfer(i2c_ad, msg, 2) < 0) {
  1225. PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
  1226. } else {
  1227. int i;
  1228. PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
  1229. /* FIXME: probably we shoud rewrite the max_rec, max_ROM(1394a),
  1230. * generation(1394a) and link_spd(1394a) field and recalculate
  1231. * the CRC */
  1232. for (i = 0; i < 5 ; i++)
  1233. PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
  1234. i, be32_to_cpu(lynx->bus_info_block[i]));
  1235. /* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
  1236. if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
  1237. (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
  1238. {
  1239. PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
  1240. } else {
  1241. kfree(i2c_ad);
  1242. error = -ENXIO;
  1243. FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
  1244. }
  1245. }
  1246. i2c_bit_del_bus(i2c_ad);
  1247. kfree(i2c_ad);
  1248. }
  1249. }
  1250. host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
  1251. host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
  1252. host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
  1253. host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
  1254. if (!lynx->phyic.reg_1394a)
  1255. host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
  1256. else
  1257. host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
  1258. if (hpsb_add_host(host)) {
  1259. error = -ENOMEM;
  1260. FAIL("Failed to register host with highlevel");
  1261. }
  1262. lynx->state = is_host;
  1263. return 0;
  1264. #undef FAIL
  1265. }
/* PCI IDs this driver binds to: any TI PCILynx, regardless of subsystem. */
static struct pci_device_id pci_table[] = {
	{
		.vendor = PCI_VENDOR_ID_TI,
		.device = PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }			/* Terminating entry */
};
/* PCI driver glue: probe/remove are add_card()/remove_card(). */
static struct pci_driver lynx_pci_driver = {
	.name = PCILYNX_DRIVER_NAME,
	.id_table = pci_table,
	.probe = add_card,
	.remove = remove_card,
};
/* 1394 core host driver ops; no hardware config ROM or isoctl support. */
static struct hpsb_host_driver lynx_driver = {
	.owner = THIS_MODULE,
	.name = PCILYNX_DRIVER_NAME,
	.set_hw_config_rom = NULL,
	.transmit_packet = lynx_transmit,
	.devctl = lynx_devctl,
	.isoctl = NULL,
};
/* Module metadata and hotplug device table export. */
MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("pcilynx");
MODULE_DEVICE_TABLE(pci, pci_table);
  1294. static int __init pcilynx_init(void)
  1295. {
  1296. int ret;
  1297. ret = pci_register_driver(&lynx_pci_driver);
  1298. if (ret < 0) {
  1299. PRINT_G(KERN_ERR, "PCI module init failed");
  1300. return ret;
  1301. }
  1302. return 0;
  1303. }
/* Module exit: unregister the PCI driver; the core then calls
 * remove_card() for each bound device. */
static void __exit pcilynx_cleanup(void)
{
	pci_unregister_driver(&lynx_pci_driver);
}
/* Wire the init/exit functions into the module load/unload sequence. */
module_init(pcilynx_init);
module_exit(pcilynx_cleanup);