pcilynx.c 65 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982
  1. /*
  2. * pcilynx.c - Texas Instruments PCILynx driver
  3. * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
  4. * Stephan Linz <linz@mazet.de>
  5. * Manfred Weihs <weihs@ict.tuwien.ac.at>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software Foundation,
  19. * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  20. */
  21. /*
  22. * Contributions:
  23. *
  24. * Manfred Weihs <weihs@ict.tuwien.ac.at>
  25. * reading bus info block (containing GUID) from serial
  26. * eeprom via i2c and storing it in config ROM
  27. * Reworked code for initiating bus resets
  28. * (long, short, with or without hold-off)
  29. * Enhancements in async and iso send code
  30. */
  31. #include <linux/config.h>
  32. #include <linux/kernel.h>
  33. #include <linux/slab.h>
  34. #include <linux/interrupt.h>
  35. #include <linux/wait.h>
  36. #include <linux/errno.h>
  37. #include <linux/module.h>
  38. #include <linux/moduleparam.h>
  39. #include <linux/init.h>
  40. #include <linux/pci.h>
  41. #include <linux/fs.h>
  42. #include <linux/poll.h>
  43. #include <linux/kdev_t.h>
  44. #include <asm/byteorder.h>
  45. #include <asm/atomic.h>
  46. #include <asm/io.h>
  47. #include <asm/uaccess.h>
  48. #include <asm/irq.h>
  49. #include "csr1212.h"
  50. #include "ieee1394.h"
  51. #include "ieee1394_types.h"
  52. #include "hosts.h"
  53. #include "ieee1394_core.h"
  54. #include "highlevel.h"
  55. #include "pcilynx.h"
  56. #include <linux/i2c.h>
  57. #include <linux/i2c-algo-bit.h>
  58. /* print general (card independent) information */
  59. #define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
  60. /* print card specific information */
  61. #define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
  62. #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
  63. #define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
  64. #define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
  65. #else
  66. #define PRINT_GD(level, fmt, args...) do {} while (0)
  67. #define PRINTD(level, card, fmt, args...) do {} while (0)
  68. #endif
  69. /* Module Parameters */
  70. static int skip_eeprom = 0;
  71. module_param(skip_eeprom, int, 0444);
  72. MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
  73. static struct hpsb_host_driver lynx_driver;
  74. static unsigned int card_id;
  75. /*
  76. * I2C stuff
  77. */
  78. /* the i2c stuff was inspired by i2c-philips-par.c */
  79. static void bit_setscl(void *data, int state)
  80. {
  81. if (state) {
  82. ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
  83. } else {
  84. ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
  85. }
  86. reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
  87. }
  88. static void bit_setsda(void *data, int state)
  89. {
  90. if (state) {
  91. ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
  92. } else {
  93. ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
  94. }
  95. reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
  96. }
  97. static int bit_getscl(void *data)
  98. {
  99. return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
  100. }
  101. static int bit_getsda(void *data)
  102. {
  103. return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
  104. }
  105. static int bit_reg(struct i2c_client *client)
  106. {
  107. return 0;
  108. }
  109. static int bit_unreg(struct i2c_client *client)
  110. {
  111. return 0;
  112. }
/* Bit-banging algorithm description handed to i2c-algo-bit.  The .data
 * member (pointing at the struct ti_lynx) is filled in at probe time
 * before the adapter is registered. */
static struct i2c_algo_bit_data bit_data = {
	.setsda			= bit_setsda,
	.setscl			= bit_setscl,
	.getsda			= bit_getsda,
	.getscl			= bit_getscl,
	.udelay			= 5,	/* half-clock delay in us */
	.mdelay			= 5,
	.timeout		= 100,	/* bus-free/clock-stretch timeout */
};
/* Template i2c adapter for the on-board serial EEPROM; copied and
 * completed per card before registration. */
static struct i2c_adapter bit_ops = {
	.id			= 0xAA, //FIXME: probably we should get an id in i2c-id.h
	.client_register	= bit_reg,
	.client_unregister	= bit_unreg,
	.name			= "PCILynx I2C",
};
  128. /*
  129. * PCL handling functions.
  130. */
  131. static pcl_t alloc_pcl(struct ti_lynx *lynx)
  132. {
  133. u8 m;
  134. int i, j;
  135. spin_lock(&lynx->lock);
  136. /* FIXME - use ffz() to make this readable */
  137. for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
  138. m = lynx->pcl_bmap[i];
  139. for (j = 0; j < 8; j++) {
  140. if (m & 1<<j) {
  141. continue;
  142. }
  143. m |= 1<<j;
  144. lynx->pcl_bmap[i] = m;
  145. spin_unlock(&lynx->lock);
  146. return 8 * i + j;
  147. }
  148. }
  149. spin_unlock(&lynx->lock);
  150. return -1;
  151. }
  152. #if 0
  153. static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
  154. {
  155. int off, bit;
  156. off = pclid / 8;
  157. bit = pclid % 8;
  158. if (pclid < 0) {
  159. return;
  160. }
  161. spin_lock(&lynx->lock);
  162. if (lynx->pcl_bmap[off] & 1<<bit) {
  163. lynx->pcl_bmap[off] &= ~(1<<bit);
  164. } else {
  165. PRINT(KERN_ERR, lynx->id,
  166. "attempted to free unallocated PCL %d", pclid);
  167. }
  168. spin_unlock(&lynx->lock);
  169. }
  170. /* functions useful for debugging */
  171. static void pretty_print_pcl(const struct ti_pcl *pcl)
  172. {
  173. int i;
  174. printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
  175. pcl->next, pcl->user_data, pcl->pcl_status,
  176. pcl->remaining_transfer_count, pcl->next_data_buffer);
  177. printk("PCL");
  178. for (i=0; i<13; i++) {
  179. printk(" c%x:%08x d%x:%08x",
  180. i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
  181. if (!(i & 0x3) && (i != 12)) printk("\nPCL");
  182. }
  183. printk("\n");
  184. }
  185. static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
  186. {
  187. struct ti_pcl pcl;
  188. get_pcl(lynx, pclid, &pcl);
  189. pretty_print_pcl(&pcl);
  190. }
  191. #endif
  192. /***********************************
  193. * IEEE-1394 functionality section *
  194. ***********************************/
/* Read a PHY register (0..15) through the LINK_PHY mailbox.
 * Returns the 8-bit register value, or -1 on bad address or when the
 * PHY never echoes the requested address back (runaway poll loop).
 * Serialized against set_phy_reg() by phy_reg_lock. */
static int get_phy_reg(struct ti_lynx *lynx, int addr)
{
	int retval;
	int i = 0;
	unsigned long flags;

	if (addr > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register address %d out of range",
		      __FUNCTION__, addr);
		return -1;
	}

	spin_lock_irqsave(&lynx->phy_reg_lock, flags);

	/* issue the read request, then poll until the returned data word
	 * carries the address we asked for (bits 8-11) */
	reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
	do {
		retval = reg_read(lynx, LINK_PHY);

		if (i > 10000) {
			PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
			      __FUNCTION__);
			retval = -1;
			break;
		}
		i++;
	} while ((retval & 0xf00) != LINK_PHY_RADDR(addr));

	/* acknowledge the "phy register received" interrupt condition */
	reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
	spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);

	if (retval != -1) {
		return retval & 0xff;	/* data is in the low byte */
	} else {
		return -1;
	}
}
  226. static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
  227. {
  228. unsigned long flags;
  229. if (addr > 15) {
  230. PRINT(KERN_ERR, lynx->id,
  231. "%s: PHY register address %d out of range", __FUNCTION__, addr);
  232. return -1;
  233. }
  234. if (val > 0xff) {
  235. PRINT(KERN_ERR, lynx->id,
  236. "%s: PHY register value %d out of range", __FUNCTION__, val);
  237. return -1;
  238. }
  239. spin_lock_irqsave(&lynx->phy_reg_lock, flags);
  240. reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
  241. | LINK_PHY_WDATA(val));
  242. spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
  243. return 0;
  244. }
  245. static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
  246. {
  247. int reg;
  248. if (page > 7) {
  249. PRINT(KERN_ERR, lynx->id,
  250. "%s: PHY page %d out of range", __FUNCTION__, page);
  251. return -1;
  252. }
  253. reg = get_phy_reg(lynx, 7);
  254. if (reg != -1) {
  255. reg &= 0x1f;
  256. reg |= (page << 5);
  257. set_phy_reg(lynx, 7, reg);
  258. return 0;
  259. } else {
  260. return -1;
  261. }
  262. }
  263. #if 0 /* not needed at this time */
  264. static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
  265. {
  266. int reg;
  267. if (port > 15) {
  268. PRINT(KERN_ERR, lynx->id,
  269. "%s: PHY port %d out of range", __FUNCTION__, port);
  270. return -1;
  271. }
  272. reg = get_phy_reg(lynx, 7);
  273. if (reg != -1) {
  274. reg &= 0xf0;
  275. reg |= port;
  276. set_phy_reg(lynx, 7, reg);
  277. return 0;
  278. } else {
  279. return -1;
  280. }
  281. }
  282. #endif
  283. static u32 get_phy_vendorid(struct ti_lynx *lynx)
  284. {
  285. u32 pvid = 0;
  286. sel_phy_reg_page(lynx, 1);
  287. pvid |= (get_phy_reg(lynx, 10) << 16);
  288. pvid |= (get_phy_reg(lynx, 11) << 8);
  289. pvid |= get_phy_reg(lynx, 12);
  290. PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
  291. return pvid;
  292. }
  293. static u32 get_phy_productid(struct ti_lynx *lynx)
  294. {
  295. u32 id = 0;
  296. sel_phy_reg_page(lynx, 1);
  297. id |= (get_phy_reg(lynx, 13) << 16);
  298. id |= (get_phy_reg(lynx, 14) << 8);
  299. id |= get_phy_reg(lynx, 15);
  300. PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
  301. return id;
  302. }
/* Synthesize this node's own self-ID quadlet from PHY registers 0-6.
 * Pre-1394a PHYs do not deliver their own self-ID packet in the
 * receive buffer, so it has to be reconstructed here for the core.
 * Returns the quadlet in big-endian (bus) byte order. */
static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
				     struct hpsb_host *host)
{
	quadlet_t lsid;
	char phyreg[7];
	int i;

	/* reg 0 was cached by the interrupt handler; read the rest now */
	phyreg[0] = lynx->phy_reg0;
	for (i = 1; i < 7; i++) {
		phyreg[i] = get_phy_reg(lynx, i);
	}

	/* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
	   more than 3 ports on the PHY anyway. */

	lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);	/* selfid marker + phy id */
	lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
	lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
	if (!hpsb_disable_irm)
		lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
	/* lsid |= 1 << 11; *//* set contender (hack) */
	lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */

	/* encode the 2-bit port status field for each implemented port */
	for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
		if (phyreg[3 + i] & 0x4) {
			lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
				<< (6 - i*2);
		} else {
			lsid |= 1 << (6 - i*2);
		}
	}

	cpu_to_be32s(&lsid);
	PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
	return lsid;
}
/* Process the self-ID phase after a bus reset: byte-swap the received
 * self-ID quadlets, hand each valid packet to the core, splice in our
 * own generated self-ID when the PHY is pre-1394a, program our node id
 * into LINK_ID and finally re-enable the link.  Called from interrupt
 * context once both phy_reg0 and selfid_size are known. */
static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
{
	quadlet_t *q = lynx->rcv_page;
	int phyid, isroot, size;
	quadlet_t lsid = 0;
	int i;

	/* bail out until the interrupt handler has filled in both values */
	if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;

	size = lynx->selfid_size;
	phyid = lynx->phy_reg0;

	/* byte-swap at most the first 16 bytes of received self-IDs in place */
	i = (size > 16 ? 16 : size) / 4 - 1;
	while (i >= 0) {
		cpu_to_be32s(&q[i]);
		i--;
	}

	/* old PHYs don't report their own self-ID packet - fake one */
	if (!lynx->phyic.reg_1394a) {
		lsid = generate_own_selfid(lynx, host);
	}

	isroot = (phyid & 2) != 0;	/* root flag is bit 1 of phy reg 0 */
	phyid >>= 2;			/* node number in bits 2-7 */
	PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
	      phyid, (isroot ? "root" : "not root"));
	reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);

	/* no packets at all received - our own one is all there is */
	if (!lynx->phyic.reg_1394a && !size) {
		hpsb_selfid_received(host, lsid);
	}

	while (size > 0) {
		struct selfid *sid = (struct selfid *)q;

		/* insert our generated self-ID in phy-id order */
		if (!lynx->phyic.reg_1394a && !sid->extended
		    && (sid->phy_id == (phyid + 1))) {
			hpsb_selfid_received(host, lsid);
		}

		/* each self-ID is sent twice, second quadlet inverted */
		if (q[0] == ~q[1]) {
			PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
			      q[0]);
			hpsb_selfid_received(host, q[0]);
		} else {
			PRINT(KERN_INFO, lynx->id,
			      "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
		}
		q += 2;
		size -= 8;
	}

	/* we are the highest-numbered node: our self-ID comes last */
	if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
		hpsb_selfid_received(host, lsid);
	}

	hpsb_selfid_complete(host, phyid, isroot);

	if (host->in_bus_reset) return; /* in bus reset again */

	if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think, we need this here
	/* re-arm receive/transmit and the cycle timer */
	reg_set_bits(lynx, LINK_CONTROL,
		     LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
		     | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
}
/* Move the packet at the head of the send queue into the (empty) PCL
 * fifo, DMA-map its header and payload, build the transmit PCL and
 * kick off the DMA channel.  'what' selects the async or iso send
 * context.  This must be called with the respective queue_lock held. */
static void send_next(struct ti_lynx *lynx, int what)
{
	struct ti_pcl pcl;
	struct lynx_send_data *d;
	struct hpsb_packet *packet;

	d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
	/* the hardware handles one in-flight packet per channel */
	if (!list_empty(&d->pcl_queue)) {
		PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
		BUG();
	}

	packet = driver_packet(d->queue.next);
	list_move_tail(&packet->driver_list, &d->pcl_queue);

	d->header_dma = pci_map_single(lynx->dev, packet->header,
				       packet->header_size, PCI_DMA_TODEVICE);
	if (packet->data_size) {
		d->data_dma = pci_map_single(lynx->dev, packet->data,
					     packet->data_size,
					     PCI_DMA_TODEVICE);
	} else {
		d->data_dma = 0;
	}

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.pcl_status = 0;
	/* buffer 0: the packet header, at the requested speed */
	pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[0].pointer = d->header_dma;
	/* buffer 1: the (possibly empty) payload terminates the list */
	pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
	pcl.buffer[1].pointer = d->data_dma;

	switch (packet->type) {
	case hpsb_async:
		pcl.buffer[0].control |= PCL_CMD_XMT;
		break;
	case hpsb_iso:
		pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
		break;
	case hpsb_raw:
		pcl.buffer[0].control |= PCL_CMD_UNFXMT;
		break;
	}

	put_pcl(lynx, d->pcl, &pcl);
	run_pcl(lynx, d->pcl_start, d->channel);
}
/* hpsb_host_driver.transmit_packet: queue a packet for transmission.
 * Called from subsystem core.  Returns 0 on success, -EOVERFLOW for
 * oversized payloads, -EINVAL for unknown packet types.  If the send
 * fifo is idle the packet is started immediately via send_next(). */
static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_lynx *lynx = host->hostdata;
	struct lynx_send_data *d;
	unsigned long flags;

	if (packet->data_size >= 4096) {
		PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
		      packet->data_size);
		return -EOVERFLOW;
	}

	switch (packet->type) {
	case hpsb_async:
	case hpsb_raw:
		d = &lynx->async;
		break;
	case hpsb_iso:
		d = &lynx->iso_send;
		break;
	default:
		PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
		      packet->type);
		return -EINVAL;
	}

	/* quadlet payloads live in the header and must be bus byte order */
	if (packet->tcode == TCODE_WRITEQ
	    || packet->tcode == TCODE_READQ_RESPONSE) {
		cpu_to_be32s(&packet->header[3]);
	}

	spin_lock_irqsave(&d->queue_lock, flags);

	list_add_tail(&packet->driver_list, &d->queue);
	/* start immediately when no packet is in flight */
	if (list_empty(&d->pcl_queue))
		send_next(lynx, packet->type);

	spin_unlock_irqrestore(&d->queue_lock, flags);

	return 0;
}
/* hpsb_host_driver.devctl: dispatch host-controller control commands
 * from the subsystem core (bus resets, cycle counter access, bus id,
 * cycle master, request cancellation, iso channel listen masks).
 * Returns a command-specific value, or -1 / negative on failure.
 * NOTE: the inner RESET_BUS switch deliberately falls through from the
 * SHORT_* cases to the LONG_* cases when the PHY is pre-1394a. */
static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_lynx *lynx = host->hostdata;
	int retval = 0;
	struct hpsb_packet *packet;
	LIST_HEAD(packet_list);
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		/* a reset is already in progress - nothing to do */
		if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
			retval = 0;
			break;
		}

		switch (arg) {
		case SHORT_RESET:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;	/* ISBR bit */

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");

				/* invalidate cached selfid state for the new reset */
				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0x40;	/* IBR bit */

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				/* drop the root-hold-off bit first */
				if (phy_reg & 0x80) {
					phy_reg &= ~0x80;
					set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg &= ~0x80;	/* clear RHB */
			phy_reg |= 0x40;	/* set IBR */

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				/* make sure root-hold-off is set before the reset */
				if (!(phy_reg & 0x80)) {
					phy_reg |= 0x80;
					set_phy_reg(lynx, 1, phy_reg); /* set RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;

				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0xc0;	/* set RHB and IBR together */

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;

			set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
			break;
		default:
			PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
			retval = -1;
		}

		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(lynx, CYCLE_TIMER);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(lynx, CYCLE_TIMER, arg);
		break;

	case SET_BUS_ID:
		/* bus id in bits 22-31, keep the node number field */
		reg_write(lynx, LINK_ID,
			  (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			reg_set_bits(lynx, LINK_CONTROL,
				     LINK_CONTROL_CYCMASTER);
		} else {
			reg_clear_bits(lynx, LINK_CONTROL,
				       LINK_CONTROL_CYCMASTER);
		}
		break;

	case CANCEL_REQUESTS:
		spin_lock_irqsave(&lynx->async.queue_lock, flags);

		/* stop the DMA channel, then take all queued packets private */
		reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
		list_splice(&lynx->async.queue, &packet_list);
		INIT_LIST_HEAD(&lynx->async.queue);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
			PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);

			/* derive the ack code the core should see from the
			 * PCL completion status */
			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_ABORTED;
			}
			hpsb_packet_sent(host, packet, ack);
		}

		/* everything still in the software queue is aborted */
		while (!list_empty(&packet_list)) {
			packet = driver_packet(packet_list.next);
			list_del_init(&packet->driver_list);
			hpsb_packet_sent(host, packet, ACKX_ABORTED);
		}

		break;

	case ISO_LISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		/* first listener enables iso receive comparator matching */
		if (lynx->iso_rcv.chan_count++ == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  DMA_WORD1_CMP_ENABLE_MASTER);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;

	case ISO_UNLISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		/* last listener gone - disable iso receive matching */
		if (--lynx->iso_rcv.chan_count == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  0);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;

	default:
		PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
		retval = -1;
	}

	return retval;
}
  686. /***************************************
  687. * IEEE-1394 functionality section END *
  688. ***************************************/
  689. #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
  690. /* VFS functions for local bus / aux device access. Access to those
  691. * is implemented as a character device instead of block devices
  692. * because buffers are not wanted for this. Therefore llseek (from
  693. * VFS) can be used for these char devices with obvious effects.
  694. */
  695. static int mem_open(struct inode*, struct file*);
  696. static int mem_release(struct inode*, struct file*);
  697. static unsigned int aux_poll(struct file*, struct poll_table_struct*);
  698. static loff_t mem_llseek(struct file*, loff_t, int);
  699. static ssize_t mem_read (struct file*, char*, size_t, loff_t*);
  700. static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
/* File operations for the aux-port char device.  The mem_* handlers
 * are shared with the ROM/RAM devices; only poll is aux-specific. */
static struct file_operations aux_ops = {
	.owner =	THIS_MODULE,
	.read =		mem_read,
	.write =	mem_write,
	.poll =		aux_poll,
	.llseek =	mem_llseek,
	.open =		mem_open,
	.release =	mem_release,
};
/* Initialize the dedicated data-memory PCL: terminate the chain and
 * stash the PCL's own bus address in its user_data word.
 * NOTE(review): only .next and .user_data of the on-stack struct
 * ti_pcl are set here before put_pcl() copies it to the card -- the
 * remaining fields are written uninitialized; confirm against
 * put_pcl()/the PCL layout that this is intentional. */
static void aux_setup_pcls(struct ti_lynx *lynx)
{
	struct ti_pcl pcl;

	pcl.next = PCL_NEXT_INVALID;
	pcl.user_data = pcl_bus(lynx, lynx->dmem_pcl);
	put_pcl(lynx, lynx->dmem_pcl, &pcl);
}
  717. static int mem_open(struct inode *inode, struct file *file)
  718. {
  719. int cid = iminor(inode);
  720. enum { t_rom, t_aux, t_ram } type;
  721. struct memdata *md;
  722. if (cid < PCILYNX_MINOR_AUX_START) {
  723. /* just for completeness */
  724. return -ENXIO;
  725. } else if (cid < PCILYNX_MINOR_ROM_START) {
  726. cid -= PCILYNX_MINOR_AUX_START;
  727. if (cid >= num_of_cards || !cards[cid].aux_port)
  728. return -ENXIO;
  729. type = t_aux;
  730. } else if (cid < PCILYNX_MINOR_RAM_START) {
  731. cid -= PCILYNX_MINOR_ROM_START;
  732. if (cid >= num_of_cards || !cards[cid].local_rom)
  733. return -ENXIO;
  734. type = t_rom;
  735. } else {
  736. /* WARNING: Know what you are doing when opening RAM.
  737. * It is currently used inside the driver! */
  738. cid -= PCILYNX_MINOR_RAM_START;
  739. if (cid >= num_of_cards || !cards[cid].local_ram)
  740. return -ENXIO;
  741. type = t_ram;
  742. }
  743. md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
  744. if (md == NULL)
  745. return -ENOMEM;
  746. md->lynx = &cards[cid];
  747. md->cid = cid;
  748. switch (type) {
  749. case t_rom:
  750. md->type = rom;
  751. break;
  752. case t_ram:
  753. md->type = ram;
  754. break;
  755. case t_aux:
  756. atomic_set(&md->aux_intr_last_seen,
  757. atomic_read(&cards[cid].aux_intr_seen));
  758. md->type = aux;
  759. break;
  760. }
  761. file->private_data = md;
  762. return 0;
  763. }
  764. static int mem_release(struct inode *inode, struct file *file)
  765. {
  766. kfree(file->private_data);
  767. return 0;
  768. }
  769. static unsigned int aux_poll(struct file *file, poll_table *pt)
  770. {
  771. struct memdata *md = (struct memdata *)file->private_data;
  772. int cid = md->cid;
  773. unsigned int mask;
  774. /* reading and writing is always allowed */
  775. mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
  776. if (md->type == aux) {
  777. poll_wait(file, &cards[cid].aux_intr_wait, pt);
  778. if (atomic_read(&md->aux_intr_last_seen)
  779. != atomic_read(&cards[cid].aux_intr_seen)) {
  780. mask |= POLLPRI;
  781. atomic_inc(&md->aux_intr_last_seen);
  782. }
  783. }
  784. return mask;
  785. }
  786. loff_t mem_llseek(struct file *file, loff_t offs, int orig)
  787. {
  788. loff_t newoffs;
  789. switch (orig) {
  790. case 0:
  791. newoffs = offs;
  792. break;
  793. case 1:
  794. newoffs = offs + file->f_pos;
  795. break;
  796. case 2:
  797. newoffs = PCILYNX_MAX_MEMORY + 1 + offs;
  798. break;
  799. default:
  800. return -EINVAL;
  801. }
  802. if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) return -EINVAL;
  803. file->f_pos = newoffs;
  804. return newoffs;
  805. }
/*
 * do not DMA if count is too small because this will have a serious impact
 * on performance - the value 2400 was found by experiment and may not work
 * everywhere as good as here - use mem_mindma option for modules to change
 */
/* read-only (0444) module parameter, consulted by mem_read() */
static short mem_mindma = 2400;
module_param(mem_mindma, short, 0444);
MODULE_PARM_DESC(mem_mindma, "Minimum amount of data required to use DMA");
  814. static ssize_t mem_dmaread(struct memdata *md, u32 physbuf, ssize_t count,
  815. int offset)
  816. {
  817. pcltmp_t pcltmp;
  818. struct ti_pcl *pcl;
  819. size_t retval;
  820. int i;
  821. DECLARE_WAITQUEUE(wait, current);
  822. count &= ~3;
  823. count = min(count, 53196);
  824. retval = count;
  825. if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
  826. & DMA_CHAN_CTRL_BUSY) {
  827. PRINT(KERN_WARNING, md->lynx->id, "DMA ALREADY ACTIVE!");
  828. }
  829. reg_write(md->lynx, LBUS_ADDR, md->type | offset);
  830. pcl = edit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
  831. pcl->buffer[0].control = PCL_CMD_LBUS_TO_PCI | min(count, 4092);
  832. pcl->buffer[0].pointer = physbuf;
  833. count -= 4092;
  834. i = 0;
  835. while (count > 0) {
  836. i++;
  837. pcl->buffer[i].control = min(count, 4092);
  838. pcl->buffer[i].pointer = physbuf + i * 4092;
  839. count -= 4092;
  840. }
  841. pcl->buffer[i].control |= PCL_LAST_BUFF;
  842. commit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
  843. set_current_state(TASK_INTERRUPTIBLE);
  844. add_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
  845. run_sub_pcl(md->lynx, md->lynx->dmem_pcl, 2, CHANNEL_LOCALBUS);
  846. schedule();
  847. while (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
  848. & DMA_CHAN_CTRL_BUSY) {
  849. if (signal_pending(current)) {
  850. retval = -EINTR;
  851. break;
  852. }
  853. schedule();
  854. }
  855. reg_write(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS), 0);
  856. remove_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
  857. if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
  858. & DMA_CHAN_CTRL_BUSY) {
  859. PRINT(KERN_ERR, md->lynx->id, "DMA STILL ACTIVE!");
  860. }
  861. return retval;
  862. }
  863. static ssize_t mem_read(struct file *file, char *buffer, size_t count,
  864. loff_t *offset)
  865. {
  866. struct memdata *md = (struct memdata *)file->private_data;
  867. ssize_t bcount;
  868. size_t alignfix;
  869. loff_t off = *offset; /* avoid useless 64bit-arithmetic */
  870. ssize_t retval;
  871. void *membase;
  872. if ((off + count) > PCILYNX_MAX_MEMORY+1) {
  873. count = PCILYNX_MAX_MEMORY+1 - off;
  874. }
  875. if (count == 0 || off > PCILYNX_MAX_MEMORY) {
  876. return -ENOSPC;
  877. }
  878. switch (md->type) {
  879. case rom:
  880. membase = md->lynx->local_rom;
  881. break;
  882. case ram:
  883. membase = md->lynx->local_ram;
  884. break;
  885. case aux:
  886. membase = md->lynx->aux_port;
  887. break;
  888. default:
  889. panic("pcilynx%d: unsupported md->type %d in %s",
  890. md->lynx->id, md->type, __FUNCTION__);
  891. }
  892. down(&md->lynx->mem_dma_mutex);
  893. if (count < mem_mindma) {
  894. memcpy_fromio(md->lynx->mem_dma_buffer, membase+off, count);
  895. goto out;
  896. }
  897. bcount = count;
  898. alignfix = 4 - (off % 4);
  899. if (alignfix != 4) {
  900. if (bcount < alignfix) {
  901. alignfix = bcount;
  902. }
  903. memcpy_fromio(md->lynx->mem_dma_buffer, membase+off,
  904. alignfix);
  905. if (bcount == alignfix) {
  906. goto out;
  907. }
  908. bcount -= alignfix;
  909. off += alignfix;
  910. }
  911. while (bcount >= 4) {
  912. retval = mem_dmaread(md, md->lynx->mem_dma_buffer_dma
  913. + count - bcount, bcount, off);
  914. if (retval < 0) return retval;
  915. bcount -= retval;
  916. off += retval;
  917. }
  918. if (bcount) {
  919. memcpy_fromio(md->lynx->mem_dma_buffer + count - bcount,
  920. membase+off, bcount);
  921. }
  922. out:
  923. retval = copy_to_user(buffer, md->lynx->mem_dma_buffer, count);
  924. up(&md->lynx->mem_dma_mutex);
  925. if (retval) return -EFAULT;
  926. *offset += count;
  927. return count;
  928. }
  929. static ssize_t mem_write(struct file *file, const char *buffer, size_t count,
  930. loff_t *offset)
  931. {
  932. struct memdata *md = (struct memdata *)file->private_data;
  933. if (((*offset) + count) > PCILYNX_MAX_MEMORY+1) {
  934. count = PCILYNX_MAX_MEMORY+1 - *offset;
  935. }
  936. if (count == 0 || *offset > PCILYNX_MAX_MEMORY) {
  937. return -ENOSPC;
  938. }
  939. /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
  940. switch (md->type) {
  941. case aux:
  942. if (copy_from_user(md->lynx->aux_port+(*offset), buffer, count))
  943. return -EFAULT;
  944. break;
  945. case ram:
  946. if (copy_from_user(md->lynx->local_ram+(*offset), buffer, count))
  947. return -EFAULT;
  948. break;
  949. case rom:
  950. /* the ROM may be writeable */
  951. if (copy_from_user(md->lynx->local_rom+(*offset), buffer, count))
  952. return -EFAULT;
  953. break;
  954. }
  955. file->f_pos += count;
  956. return count;
  957. }
  958. #endif /* CONFIG_IEEE1394_PCILYNX_PORTS */
  959. /********************************************************
  960. * Global stuff (interrupt handler, init/shutdown code) *
  961. ********************************************************/
/* Primary interrupt handler for one PCILynx card.  Registered with
 * SA_SHIRQ, so it must detect and decline interrupts that are not ours.
 * Dispatches: aux port / local-bus DMA wake-ups, link-layer events
 * (bus reset, PHY register, FIFO conditions), and DMA-halt completion
 * for the iso receive, async send, iso send and async receive channels. */
static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
                             struct pt_regs *regs_are_unused)
{
	struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
	struct hpsb_host *host = lynx->host;
	u32 intmask;
	u32 linkint;

	linkint = reg_read(lynx, LINK_INT_STATUS);
	intmask = reg_read(lynx, PCI_INT_STATUS);

	/* shared IRQ line: not our interrupt if no pend bit is set */
	if (!(intmask & PCI_INT_INT_PEND))
		return IRQ_NONE;

	PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
	       linkint);

	/* acknowledge everything we read before handling it */
	reg_write(lynx, LINK_INT_STATUS, linkint);
	reg_write(lynx, PCI_INT_STATUS, intmask);

#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
	if (intmask & PCI_INT_AUX_INT) {
		/* note the event for aux_poll() and wake poll()ers */
		atomic_inc(&lynx->aux_intr_seen);
		wake_up_interruptible(&lynx->aux_intr_wait);
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_LOCALBUS)) {
		/* wake mem_dmaread() waiting for its transfer to halt */
		wake_up_interruptible(&lynx->mem_dma_intr_wait);
	}
#endif

	if (intmask & PCI_INT_1394) {
		if (linkint & LINK_INT_PHY_TIMEOUT) {
			PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
		}
		if (linkint & LINK_INT_PHY_BUSRESET) {
			PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
			/* cached self-ID size and PHY reg 0 are stale now */
			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			if (!host->in_bus_reset)
				hpsb_bus_reset(host);
		}
		if (linkint & LINK_INT_PHY_REG_RCVD) {
			u32 reg;

			spin_lock(&lynx->phy_reg_lock);
			reg = reg_read(lynx, LINK_PHY);
			spin_unlock(&lynx->phy_reg_lock);

			if (!host->in_bus_reset) {
				PRINT(KERN_INFO, lynx->id,
				      "phy reg received without reset");
			} else if (reg & 0xf00) {
				PRINT(KERN_INFO, lynx->id,
				      "unsolicited phy reg %d received",
				      (reg >> 8) & 0xf);
			} else {
				/* PHY register 0 arriving during a bus
				 * reset: cache it and process self-IDs */
				lynx->phy_reg0 = reg & 0xff;
				handle_selfid(lynx, host);
			}
		}
		if (linkint & LINK_INT_ISO_STUCK) {
			PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
		}
		if (linkint & LINK_INT_ASYNC_STUCK) {
			PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
		}
		if (linkint & LINK_INT_SENT_REJECT) {
			PRINT(KERN_INFO, lynx->id, "sent reject");
		}
		if (linkint & LINK_INT_TX_INVALID_TC) {
			PRINT(KERN_INFO, lynx->id, "invalid transaction code");
		}
		if (linkint & LINK_INT_GRF_OVERFLOW) {
			/* flush FIFO if overflow happens during reset */
			if (host->in_bus_reset)
				reg_write(lynx, FIFO_CONTROL,
					  FIFO_CONTROL_GRF_FLUSH);
			PRINT(KERN_INFO, lynx->id, "GRF overflow");
		}
		if (linkint & LINK_INT_ITF_UNDERFLOW) {
			PRINT(KERN_INFO, lynx->id, "ITF underflow");
		}
		if (linkint & LINK_INT_ATF_UNDERFLOW) {
			PRINT(KERN_INFO, lynx->id, "ATF underflow");
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
		PRINTD(KERN_DEBUG, lynx->id, "iso receive");

		spin_lock(&lynx->iso_rcv.lock);

		/* record channel status for the buffer just filled and
		 * advance the producer index of the PCL ring */
		lynx->iso_rcv.stat[lynx->iso_rcv.next] =
			reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));

		lynx->iso_rcv.used++;
		lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;

		if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
		    || !lynx->iso_rcv.chan_count) {
			/* ring full, or no listeners left: stop receiving
			 * until iso_rcv_bh() drains / re-enables */
			PRINTD(KERN_DEBUG, lynx->id, "stopped");
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
		}

		run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
			    CHANNEL_ISO_RCV);

		spin_unlock(&lynx->iso_rcv.lock);

		/* defer packet delivery to the bottom half */
		tasklet_schedule(&lynx->iso_rcv.tq);
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
		PRINTD(KERN_DEBUG, lynx->id, "async sent");
		spin_lock(&lynx->async.queue_lock);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock(&lynx->async.queue_lock);
			PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			/* snapshot PCL status before reusing the PCL */
			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			/* kick the next queued packet while still locked */
			if (!list_empty(&lynx->async.queue)) {
				send_next(lynx, hpsb_async);
			}

			spin_unlock(&lynx->async.queue_lock);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					/* map hardware special acks onto
					 * stack ack codes */
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_SEND_ERROR;
			}
			hpsb_packet_sent(host, packet, ack);
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
		PRINTD(KERN_DEBUG, lynx->id, "iso sent");
		spin_lock(&lynx->iso_send.queue_lock);

		if (list_empty(&lynx->iso_send.pcl_queue)) {
			spin_unlock(&lynx->iso_send.queue_lock);
			PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			get_pcl(lynx, lynx->iso_send.pcl, &pcl);

			packet = driver_packet(lynx->iso_send.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			if (!list_empty(&lynx->iso_send.queue)) {
				send_next(lynx, hpsb_iso);
			}

			spin_unlock(&lynx->iso_send.queue_lock);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
				ack = ACKX_SEND_ERROR;
			}
			hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
		/* general receive DMA completed */
		int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));

		PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
		       stat & 0x1fff);

		if (stat & DMA_CHAN_STAT_SELFID) {
			lynx->selfid_size = stat & 0x1fff;
			handle_selfid(lynx, host);
		} else {
			quadlet_t *q_data = lynx->rcv_page;
			/* quadlet read responses / writes carry their
			 * payload quadlet in bus byte order; fix it up */
			if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
			    || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
				cpu_to_be32s(q_data + 3);
			}
			hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
		}

		/* re-arm the receive PCL for the next packet */
		run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
	}

	return IRQ_HANDLED;
}
/* Tasklet bottom half for isochronous receive: drain completed buffers
 * from the PCL ring, hand them to the stack, and re-enable the channel
 * if there are still listeners. */
static void iso_rcv_bh(struct ti_lynx *lynx)
{
	unsigned int idx;
	quadlet_t *data;
	unsigned long flags;

	spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

	while (lynx->iso_rcv.used) {
		idx = lynx->iso_rcv.last;
		/* drop the lock while delivering; the irq handler only
		 * advances .next, so .last remains ours to consume —
		 * NOTE(review): assumes a single consumer of this ring */
		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);

		data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
			+ (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;

		/* high halfword of the first quadlet is the iso data
		 * length; +4 accounts for the header quadlet */
		if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
			PRINT(KERN_ERR, lynx->id,
			      "iso length mismatch 0x%08x/0x%08x", *data,
			      lynx->iso_rcv.stat[idx]);
		}

		if (lynx->iso_rcv.stat[idx]
		    & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
			PRINT(KERN_INFO, lynx->id,
			      "iso receive error on %d to 0x%p", idx, data);
		} else {
			hpsb_packet_received(lynx->host, data,
					     lynx->iso_rcv.stat[idx] & 0x1fff,
					     0);
		}

		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
		lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
		lynx->iso_rcv.used--;
	}

	/* the irq handler may have disabled the channel when the ring
	 * filled; re-enable it while listeners remain */
	if (lynx->iso_rcv.chan_count) {
		reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
			  DMA_WORD1_CMP_ENABLE_MASTER);
	}
	spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
}
/* Tear down one card.  lynx->state records how far add_card() got; the
 * switch deliberately falls through so each state undoes its own step
 * and then continues with the cleanup for every earlier state. */
static void remove_card(struct pci_dev *dev)
{
	struct ti_lynx *lynx;
	struct device *lynx_dev;
	int i;

	lynx = pci_get_drvdata(dev);
	if (!lynx) return;
	pci_set_drvdata(dev, NULL);

	/* keep the host device alive until cleanup is finished */
	lynx_dev = get_device(&lynx->host->device);

	switch (lynx->state) {
	case is_host:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		hpsb_remove_host(lynx->host);
		/* fall through */
	case have_intr:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		free_irq(lynx->dev->irq, lynx);

		/* Disable IRM Contender and LCtrl */
		if (lynx->phyic.reg_1394a)
			set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));

		/* Let all other nodes know to ignore us */
		lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
		/* fall through */
	case have_iomappings:
		reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
		/* Fix buggy cards with autoboot pin not tied low: */
		reg_write(lynx, DMA0_CHAN_CTRL, 0);
		iounmap(lynx->registers);
		iounmap(lynx->local_rom);
		iounmap(lynx->local_ram);
		iounmap(lynx->aux_port);
		/* fall through */
	case have_1394_buffers:
		for (i = 0; i < ISORCV_PAGES; i++) {
			if (lynx->iso_rcv.page[i]) {
				pci_free_consistent(lynx->dev, PAGE_SIZE,
						    lynx->iso_rcv.page[i],
						    lynx->iso_rcv.page_dma[i]);
			}
		}
		pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
				    lynx->rcv_page_dma);
		/* fall through */
	case have_aux_buf:
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
		pci_free_consistent(lynx->dev, 65536, lynx->mem_dma_buffer,
				    lynx->mem_dma_buffer_dma);
#endif
		/* fall through */
	case have_pcl_mem:
#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
		pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
				    lynx->pcl_mem_dma);
#endif
		/* fall through */
	case clear:
		/* do nothing - already freed */
		;
	}

	tasklet_kill(&lynx->iso_rcv.tq);

	if (lynx_dev)
		put_device(lynx_dev);
}
  1245. static int __devinit add_card(struct pci_dev *dev,
  1246. const struct pci_device_id *devid_is_unused)
  1247. {
  1248. #define FAIL(fmt, args...) do { \
  1249. PRINT_G(KERN_ERR, fmt , ## args); \
  1250. remove_card(dev); \
  1251. return error; \
  1252. } while (0)
  1253. char irq_buf[16];
  1254. struct hpsb_host *host;
  1255. struct ti_lynx *lynx; /* shortcut to currently handled device */
  1256. struct ti_pcl pcl;
  1257. u32 *pcli;
  1258. int i;
  1259. int error;
  1260. error = -ENXIO;
  1261. if (pci_set_dma_mask(dev, 0xffffffff))
  1262. FAIL("DMA address limits not supported for PCILynx hardware");
  1263. if (pci_enable_device(dev))
  1264. FAIL("failed to enable PCILynx hardware");
  1265. pci_set_master(dev);
  1266. error = -ENOMEM;
  1267. host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
  1268. if (!host) FAIL("failed to allocate control structure memory");
  1269. lynx = host->hostdata;
  1270. lynx->id = card_id++;
  1271. lynx->dev = dev;
  1272. lynx->state = clear;
  1273. lynx->host = host;
  1274. host->pdev = dev;
  1275. pci_set_drvdata(dev, lynx);
  1276. spin_lock_init(&lynx->lock);
  1277. spin_lock_init(&lynx->phy_reg_lock);
  1278. #ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
  1279. lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
  1280. &lynx->pcl_mem_dma);
  1281. if (lynx->pcl_mem != NULL) {
  1282. lynx->state = have_pcl_mem;
  1283. PRINT(KERN_INFO, lynx->id,
  1284. "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
  1285. lynx->pcl_mem);
  1286. } else {
  1287. FAIL("failed to allocate PCL memory area");
  1288. }
  1289. #endif
  1290. #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
  1291. lynx->mem_dma_buffer = pci_alloc_consistent(dev, 65536,
  1292. &lynx->mem_dma_buffer_dma);
  1293. if (lynx->mem_dma_buffer == NULL) {
  1294. FAIL("failed to allocate DMA buffer for aux");
  1295. }
  1296. lynx->state = have_aux_buf;
  1297. #endif
  1298. lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
  1299. &lynx->rcv_page_dma);
  1300. if (lynx->rcv_page == NULL) {
  1301. FAIL("failed to allocate receive buffer");
  1302. }
  1303. lynx->state = have_1394_buffers;
  1304. for (i = 0; i < ISORCV_PAGES; i++) {
  1305. lynx->iso_rcv.page[i] =
  1306. pci_alloc_consistent(dev, PAGE_SIZE,
  1307. &lynx->iso_rcv.page_dma[i]);
  1308. if (lynx->iso_rcv.page[i] == NULL) {
  1309. FAIL("failed to allocate iso receive buffers");
  1310. }
  1311. }
  1312. lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
  1313. PCILYNX_MAX_REGISTER);
  1314. lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
  1315. lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
  1316. lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
  1317. PCILYNX_MAX_MEMORY);
  1318. lynx->state = have_iomappings;
  1319. if (lynx->registers == NULL) {
  1320. FAIL("failed to remap registers - card not accessible");
  1321. }
  1322. #ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
  1323. if (lynx->local_ram == NULL) {
  1324. FAIL("failed to remap local RAM which is required for "
  1325. "operation");
  1326. }
  1327. #endif
  1328. reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
  1329. /* Fix buggy cards with autoboot pin not tied low: */
  1330. reg_write(lynx, DMA0_CHAN_CTRL, 0);
  1331. #ifndef __sparc__
  1332. sprintf (irq_buf, "%d", dev->irq);
  1333. #else
  1334. sprintf (irq_buf, "%s", __irq_itoa(dev->irq));
  1335. #endif
  1336. if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
  1337. PCILYNX_DRIVER_NAME, lynx)) {
  1338. PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
  1339. lynx->state = have_intr;
  1340. } else {
  1341. FAIL("failed to allocate shared interrupt %s", irq_buf);
  1342. }
  1343. /* alloc_pcl return values are not checked, it is expected that the
  1344. * provided PCL space is sufficient for the initial allocations */
  1345. #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
  1346. if (lynx->aux_port != NULL) {
  1347. lynx->dmem_pcl = alloc_pcl(lynx);
  1348. aux_setup_pcls(lynx);
  1349. sema_init(&lynx->mem_dma_mutex, 1);
  1350. }
  1351. #endif
  1352. lynx->rcv_pcl = alloc_pcl(lynx);
  1353. lynx->rcv_pcl_start = alloc_pcl(lynx);
  1354. lynx->async.pcl = alloc_pcl(lynx);
  1355. lynx->async.pcl_start = alloc_pcl(lynx);
  1356. lynx->iso_send.pcl = alloc_pcl(lynx);
  1357. lynx->iso_send.pcl_start = alloc_pcl(lynx);
  1358. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1359. lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
  1360. }
  1361. lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
  1362. /* all allocations successful - simple init stuff follows */
  1363. reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
  1364. #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
  1365. reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_AUX_INT);
  1366. init_waitqueue_head(&lynx->mem_dma_intr_wait);
  1367. init_waitqueue_head(&lynx->aux_intr_wait);
  1368. #endif
  1369. tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
  1370. (unsigned long)lynx);
  1371. spin_lock_init(&lynx->iso_rcv.lock);
  1372. spin_lock_init(&lynx->async.queue_lock);
  1373. lynx->async.channel = CHANNEL_ASYNC_SEND;
  1374. spin_lock_init(&lynx->iso_send.queue_lock);
  1375. lynx->iso_send.channel = CHANNEL_ISO_SEND;
  1376. PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
  1377. "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
  1378. lynx->local_ram, lynx->aux_port);
  1379. /* now, looking for PHY register set */
  1380. if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
  1381. lynx->phyic.reg_1394a = 1;
  1382. PRINT(KERN_INFO, lynx->id,
  1383. "found 1394a conform PHY (using extended register set)");
  1384. lynx->phyic.vendor = get_phy_vendorid(lynx);
  1385. lynx->phyic.product = get_phy_productid(lynx);
  1386. } else {
  1387. lynx->phyic.reg_1394a = 0;
  1388. PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
  1389. }
  1390. lynx->selfid_size = -1;
  1391. lynx->phy_reg0 = -1;
  1392. INIT_LIST_HEAD(&lynx->async.queue);
  1393. INIT_LIST_HEAD(&lynx->async.pcl_queue);
  1394. INIT_LIST_HEAD(&lynx->iso_send.queue);
  1395. INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
  1396. pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
  1397. put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
  1398. pcl.next = PCL_NEXT_INVALID;
  1399. pcl.async_error_next = PCL_NEXT_INVALID;
  1400. pcl.buffer[0].control = PCL_CMD_RCV | 16;
  1401. #ifndef __BIG_ENDIAN
  1402. pcl.buffer[0].control |= PCL_BIGENDIAN;
  1403. #endif
  1404. pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
  1405. pcl.buffer[0].pointer = lynx->rcv_page_dma;
  1406. pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
  1407. put_pcl(lynx, lynx->rcv_pcl, &pcl);
  1408. pcl.next = pcl_bus(lynx, lynx->async.pcl);
  1409. pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
  1410. put_pcl(lynx, lynx->async.pcl_start, &pcl);
  1411. pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
  1412. pcl.async_error_next = PCL_NEXT_INVALID;
  1413. put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
  1414. pcl.next = PCL_NEXT_INVALID;
  1415. pcl.async_error_next = PCL_NEXT_INVALID;
  1416. pcl.buffer[0].control = PCL_CMD_RCV | 4;
  1417. #ifndef __BIG_ENDIAN
  1418. pcl.buffer[0].control |= PCL_BIGENDIAN;
  1419. #endif
  1420. pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
  1421. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1422. int page = i / ISORCV_PER_PAGE;
  1423. int sec = i % ISORCV_PER_PAGE;
  1424. pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
  1425. + sec * MAX_ISORCV_SIZE;
  1426. pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
  1427. put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
  1428. }
  1429. pcli = (u32 *)&pcl;
  1430. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1431. pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
  1432. }
  1433. put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
  1434. /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
  1435. reg_write(lynx, FIFO_SIZES, 0x003030a0);
  1436. /* 20 byte threshold before triggering PCI transfer */
  1437. reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
  1438. /* threshold on both send FIFOs before transmitting:
  1439. FIFO size - cache line size - 1 */
  1440. i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
  1441. i = 0x30 - i - 1;
  1442. reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
  1443. reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
  1444. reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
  1445. | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
  1446. | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
  1447. | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
  1448. | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
  1449. | LINK_INT_ATF_UNDERFLOW);
  1450. reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
  1451. reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
  1452. reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
  1453. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
  1454. DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
  1455. | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
  1456. | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
  1457. run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
  1458. reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
  1459. reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
  1460. reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
  1461. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
  1462. run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
  1463. reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
  1464. | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
  1465. | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
  1466. | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
  1467. if (!lynx->phyic.reg_1394a) {
  1468. if (!hpsb_disable_irm) {
  1469. /* attempt to enable contender bit -FIXME- would this
  1470. * work elsewhere? */
  1471. reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
  1472. reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
  1473. }
  1474. } else {
  1475. /* set the contender (if appropriate) and LCtrl bit in the
  1476. * extended PHY register set. (Should check that PHY_02_EXTENDED
  1477. * is set in register 2?)
  1478. */
  1479. i = get_phy_reg(lynx, 4);
  1480. i |= PHY_04_LCTRL;
  1481. if (hpsb_disable_irm)
  1482. i &= !PHY_04_CONTENDER;
  1483. else
  1484. i |= PHY_04_CONTENDER;
  1485. if (i != -1) set_phy_reg(lynx, 4, i);
  1486. }
  1487. if (!skip_eeprom)
  1488. {
  1489. /* needed for i2c communication with serial eeprom */
  1490. struct i2c_adapter *i2c_ad;
  1491. struct i2c_algo_bit_data i2c_adapter_data;
  1492. error = -ENOMEM;
  1493. i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL);
  1494. if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
  1495. memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
  1496. i2c_adapter_data = bit_data;
  1497. i2c_ad->algo_data = &i2c_adapter_data;
  1498. i2c_adapter_data.data = lynx;
  1499. PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
  1500. reg_read(lynx, SERIAL_EEPROM_CONTROL));
  1501. /* reset hardware to sane state */
  1502. lynx->i2c_driven_state = 0x00000070;
  1503. reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
  1504. if (i2c_bit_add_bus(i2c_ad) < 0)
  1505. {
  1506. kfree(i2c_ad);
  1507. error = -ENXIO;
  1508. FAIL("unable to register i2c");
  1509. }
  1510. else
  1511. {
  1512. /* do i2c stuff */
  1513. unsigned char i2c_cmd = 0x10;
  1514. struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
  1515. { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
  1516. };
  1517. #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
  1518. union i2c_smbus_data data;
  1519. if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE,NULL))
  1520. PRINT(KERN_ERR, lynx->id,"eeprom read start has failed");
  1521. else
  1522. {
  1523. u16 addr;
  1524. for (addr=0x00; addr < 0x100; addr++) {
  1525. if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE,& data)) {
  1526. PRINT(KERN_ERR, lynx->id, "unable to read i2c %x", addr);
  1527. break;
  1528. }
  1529. else
  1530. PRINT(KERN_DEBUG, lynx->id,"got serial eeprom data at %x: %x",addr, data.byte);
  1531. }
  1532. }
  1533. #endif
  1534. /* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we
  1535. do it more efficiently in one transaction rather then using several reads */
  1536. if (i2c_transfer(i2c_ad, msg, 2) < 0) {
  1537. PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
  1538. } else {
  1539. int i;
  1540. PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
  1541. /* FIXME: probably we shoud rewrite the max_rec, max_ROM(1394a),
  1542. * generation(1394a) and link_spd(1394a) field and recalculate
  1543. * the CRC */
  1544. for (i = 0; i < 5 ; i++)
  1545. PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
  1546. i, be32_to_cpu(lynx->bus_info_block[i]));
  1547. /* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
  1548. if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
  1549. (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
  1550. {
  1551. PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
  1552. } else {
  1553. kfree(i2c_ad);
  1554. error = -ENXIO;
  1555. FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
  1556. }
  1557. }
  1558. i2c_bit_del_bus(i2c_ad);
  1559. kfree(i2c_ad);
  1560. }
  1561. }
  1562. host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
  1563. host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
  1564. host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
  1565. host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
  1566. if (!lynx->phyic.reg_1394a)
  1567. host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
  1568. else
  1569. host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
  1570. if (hpsb_add_host(host)) {
  1571. error = -ENOMEM;
  1572. FAIL("Failed to register host with highlevel");
  1573. }
  1574. lynx->state = is_host;
  1575. return 0;
  1576. #undef FAIL
  1577. }
/*
 * PCI IDs this driver binds to: any TI PCILynx controller, regardless of
 * subsystem vendor/device.
 */
static struct pci_device_id pci_table[] = {
	{
		.vendor =    PCI_VENDOR_ID_TI,
		.device =    PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }			/* Terminating entry */
};
/*
 * PCI driver glue: add_card()/remove_card() are invoked by the PCI core
 * for each device matching pci_table above.
 */
static struct pci_driver lynx_pci_driver = {
	.name =     PCILYNX_DRIVER_NAME,
	.id_table = pci_table,
	.probe =    add_card,
	.remove =   remove_card,
};
/*
 * ieee1394 core host driver operations.  Config-ROM updating and the
 * isochronous control hook are not implemented for this hardware
 * (both left NULL); packet transmit and device control are provided.
 */
static struct hpsb_host_driver lynx_driver = {
	.owner =	   THIS_MODULE,
	.name =		   PCILYNX_DRIVER_NAME,
	.set_hw_config_rom = NULL,
	.transmit_packet = lynx_transmit,
	.devctl =	   lynx_devctl,
	.isoctl =          NULL,
};
/* Module metadata; MODULE_DEVICE_TABLE enables hotplug autoloading by PCI ID. */
MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("pcilynx");
MODULE_DEVICE_TABLE(pci, pci_table);
  1606. static int __init pcilynx_init(void)
  1607. {
  1608. int ret;
  1609. #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
  1610. if (register_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME, &aux_ops)) {
  1611. PRINT_G(KERN_ERR, "allocation of char major number %d failed",
  1612. PCILYNX_MAJOR);
  1613. return -EBUSY;
  1614. }
  1615. #endif
  1616. ret = pci_register_driver(&lynx_pci_driver);
  1617. if (ret < 0) {
  1618. PRINT_G(KERN_ERR, "PCI module init failed");
  1619. goto free_char_dev;
  1620. }
  1621. return 0;
  1622. free_char_dev:
  1623. #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
  1624. unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
  1625. #endif
  1626. return ret;
  1627. }
/*
 * Module exit: unregister the PCI driver (which tears down all bound
 * cards via remove_card), then release the aux port char device major
 * if that interface was built in.
 */
static void __exit pcilynx_cleanup(void)
{
	pci_unregister_driver(&lynx_pci_driver);
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
	unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
#endif
}
/* Hook the init/exit functions into the module load/unload sequence. */
module_init(pcilynx_init);
module_exit(pcilynx_cleanup);