pcilynx.c

  1. /*
  2. * pcilynx.c - Texas Instruments PCILynx driver
  3. * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
  4. * Stephan Linz <linz@mazet.de>
  5. * Manfred Weihs <weihs@ict.tuwien.ac.at>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software Foundation,
  19. * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  20. */
  21. /*
  22. * Contributions:
  23. *
  24. * Manfred Weihs <weihs@ict.tuwien.ac.at>
  25. * reading bus info block (containing GUID) from serial
  26. * eeprom via i2c and storing it in config ROM
  27. * Reworked code for initiating bus resets
  28. * (long, short, with or without hold-off)
  29. * Enhancements in async and iso send code
  30. */
  31. #include <linux/kernel.h>
  32. #include <linux/slab.h>
  33. #include <linux/interrupt.h>
  34. #include <linux/wait.h>
  35. #include <linux/errno.h>
  36. #include <linux/module.h>
  37. #include <linux/moduleparam.h>
  38. #include <linux/init.h>
  39. #include <linux/pci.h>
  40. #include <linux/fs.h>
  41. #include <linux/poll.h>
  42. #include <linux/kdev_t.h>
  43. #include <linux/dma-mapping.h>
  44. #include <asm/byteorder.h>
  45. #include <asm/atomic.h>
  46. #include <asm/io.h>
  47. #include <asm/uaccess.h>
  48. #include <asm/irq.h>
  49. #include "csr1212.h"
  50. #include "ieee1394.h"
  51. #include "ieee1394_types.h"
  52. #include "hosts.h"
  53. #include "ieee1394_core.h"
  54. #include "highlevel.h"
  55. #include "pcilynx.h"
  56. #include <linux/i2c.h>
  57. #include <linux/i2c-algo-bit.h>
  58. /* print general (card independent) information */
  59. #define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
  60. /* print card specific information */
  61. #define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
  62. #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
  63. #define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
  64. #define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
  65. #else
  66. #define PRINT_GD(level, fmt, args...) do {} while (0)
  67. #define PRINTD(level, card, fmt, args...) do {} while (0)
  68. #endif
  69. /* Module Parameters */
  70. static int skip_eeprom;
  71. module_param(skip_eeprom, int, 0444);
  72. MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
  73. static struct hpsb_host_driver lynx_driver;
  74. static unsigned int card_id;
  75. /*
  76. * I2C stuff
  77. */
  78. /* the i2c stuff was inspired by i2c-philips-par.c */
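/* Bit-banged I2C over the SERIAL_EEPROM_CONTROL register: bit 6 (0x40) drives
 * SCL and bit 4 (0x10) drives SDA.  The driven output state is cached in
 * lynx->i2c_driven_state so each line can be toggled without re-reading the
 * register first. */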
  79. static void bit_setscl(void *data, int state)
  80. {
  81. if (state) {
  82. ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
  83. } else {
  84. ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
  85. }
  86. reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
  87. }
  88. static void bit_setsda(void *data, int state)
  89. {
  90. if (state) {
  91. ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
  92. } else {
  93. ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
  94. }
  95. reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
  96. }
  97. static int bit_getscl(void *data)
  98. {
  99. return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
  100. }
  101. static int bit_getsda(void *data)
  102. {
  103. return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
  104. }
  105. static int bit_reg(struct i2c_client *client)
  106. {
  107. return 0;
  108. }
  109. static int bit_unreg(struct i2c_client *client)
  110. {
  111. return 0;
  112. }
  113. static struct i2c_algo_bit_data bit_data = {
  114. .setsda = bit_setsda,
  115. .setscl = bit_setscl,
  116. .getsda = bit_getsda,
  117. .getscl = bit_getscl,
  118. .udelay = 5,
  119. .timeout = 100,
  120. };
  121. static struct i2c_adapter bit_ops = {
  122. .id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
  123. .client_register = bit_reg,
  124. .client_unregister = bit_unreg,
  125. .name = "PCILynx I2C",
  126. };
  127. /*
  128. * PCL handling functions.
  129. */
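/* A PCL (Packet Control List) is the Lynx DMA program descriptor kept in the
 * chip's local RAM.  alloc_pcl() hands out PCL slots by scanning the per-card
 * bitmap pcl_bmap under lynx->lock (one bitmap byte covers eight slots) and
 * returns the slot index, or -1 if every slot is already in use. */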
  130. static pcl_t alloc_pcl(struct ti_lynx *lynx)
  131. {
  132. u8 m;
  133. int i, j;
  134. spin_lock(&lynx->lock);
  135. /* FIXME - use ffz() to make this readable */
  136. for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
  137. m = lynx->pcl_bmap[i];
  138. for (j = 0; j < 8; j++) {
  139. if (m & 1<<j) {
  140. continue;
  141. }
  142. m |= 1<<j;
  143. lynx->pcl_bmap[i] = m;
  144. spin_unlock(&lynx->lock);
  145. return 8 * i + j;
  146. }
  147. }
  148. spin_unlock(&lynx->lock);
  149. return -1;
  150. }
  151. #if 0
  152. static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
  153. {
  154. int off, bit;
  155. off = pclid / 8;
  156. bit = pclid % 8;
  157. if (pclid < 0) {
  158. return;
  159. }
  160. spin_lock(&lynx->lock);
  161. if (lynx->pcl_bmap[off] & 1<<bit) {
  162. lynx->pcl_bmap[off] &= ~(1<<bit);
  163. } else {
  164. PRINT(KERN_ERR, lynx->id,
  165. "attempted to free unallocated PCL %d", pclid);
  166. }
  167. spin_unlock(&lynx->lock);
  168. }
  169. /* functions useful for debugging */
  170. static void pretty_print_pcl(const struct ti_pcl *pcl)
  171. {
  172. int i;
  173. printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
  174. pcl->next, pcl->user_data, pcl->pcl_status,
  175. pcl->remaining_transfer_count, pcl->next_data_buffer);
  176. printk("PCL");
  177. for (i=0; i<13; i++) {
  178. printk(" c%x:%08x d%x:%08x",
  179. i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
  180. if (!(i & 0x3) && (i != 12)) printk("\nPCL");
  181. }
  182. printk("\n");
  183. }
  184. static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
  185. {
  186. struct ti_pcl pcl;
  187. get_pcl(lynx, pclid, &pcl);
  188. pretty_print_pcl(&pcl);
  189. }
  190. #endif
  191. /***********************************
  192. * IEEE-1394 functionality section *
  193. ***********************************/
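/* PHY registers are reached indirectly through the LINK_PHY register: a read
 * request is posted with LINK_PHY_READ and the result is polled until the
 * returned register address matches (get_phy_reg), while writes are posted
 * and not waited for (set_phy_reg).  Both paths are serialized by
 * lynx->phy_reg_lock. */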
  194. static int get_phy_reg(struct ti_lynx *lynx, int addr)
  195. {
  196. int retval;
  197. int i = 0;
  198. unsigned long flags;
  199. if (addr > 15) {
  200. PRINT(KERN_ERR, lynx->id,
  201. "%s: PHY register address %d out of range",
  202. __FUNCTION__, addr);
  203. return -1;
  204. }
  205. spin_lock_irqsave(&lynx->phy_reg_lock, flags);
  206. reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
  207. do {
  208. retval = reg_read(lynx, LINK_PHY);
  209. if (i > 10000) {
  210. PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
  211. __FUNCTION__);
  212. retval = -1;
  213. break;
  214. }
  215. i++;
  216. } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
  217. reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
  218. spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
  219. if (retval != -1) {
  220. return retval & 0xff;
  221. } else {
  222. return -1;
  223. }
  224. }
  225. static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
  226. {
  227. unsigned long flags;
  228. if (addr > 15) {
  229. PRINT(KERN_ERR, lynx->id,
  230. "%s: PHY register address %d out of range", __FUNCTION__, addr);
  231. return -1;
  232. }
  233. if (val > 0xff) {
  234. PRINT(KERN_ERR, lynx->id,
  235. "%s: PHY register value %d out of range", __FUNCTION__, val);
  236. return -1;
  237. }
  238. spin_lock_irqsave(&lynx->phy_reg_lock, flags);
  239. reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
  240. | LINK_PHY_WDATA(val));
  241. spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
  242. return 0;
  243. }
  244. static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
  245. {
  246. int reg;
  247. if (page > 7) {
  248. PRINT(KERN_ERR, lynx->id,
  249. "%s: PHY page %d out of range", __FUNCTION__, page);
  250. return -1;
  251. }
  252. reg = get_phy_reg(lynx, 7);
  253. if (reg != -1) {
  254. reg &= 0x1f;
  255. reg |= (page << 5);
  256. set_phy_reg(lynx, 7, reg);
  257. return 0;
  258. } else {
  259. return -1;
  260. }
  261. }
  262. #if 0 /* not needed at this time */
  263. static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
  264. {
  265. int reg;
  266. if (port > 15) {
  267. PRINT(KERN_ERR, lynx->id,
  268. "%s: PHY port %d out of range", __FUNCTION__, port);
  269. return -1;
  270. }
  271. reg = get_phy_reg(lynx, 7);
  272. if (reg != -1) {
  273. reg &= 0xf0;
  274. reg |= port;
  275. set_phy_reg(lynx, 7, reg);
  276. return 0;
  277. } else {
  278. return -1;
  279. }
  280. }
  281. #endif
  282. static u32 get_phy_vendorid(struct ti_lynx *lynx)
  283. {
  284. u32 pvid = 0;
  285. sel_phy_reg_page(lynx, 1);
  286. pvid |= (get_phy_reg(lynx, 10) << 16);
  287. pvid |= (get_phy_reg(lynx, 11) << 8);
  288. pvid |= get_phy_reg(lynx, 12);
  289. PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
  290. return pvid;
  291. }
  292. static u32 get_phy_productid(struct ti_lynx *lynx)
  293. {
  294. u32 id = 0;
  295. sel_phy_reg_page(lynx, 1);
  296. id |= (get_phy_reg(lynx, 13) << 16);
  297. id |= (get_phy_reg(lynx, 14) << 8);
  298. id |= get_phy_reg(lynx, 15);
  299. PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
  300. return id;
  301. }
  302. static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
  303. struct hpsb_host *host)
  304. {
  305. quadlet_t lsid;
  306. char phyreg[7];
  307. int i;
  308. phyreg[0] = lynx->phy_reg0;
  309. for (i = 1; i < 7; i++) {
  310. phyreg[i] = get_phy_reg(lynx, i);
  311. }
  312. /* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
  313. more than 3 ports on the PHY anyway. */
  314. lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
  315. lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
  316. lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
  317. if (!hpsb_disable_irm)
  318. lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
  319. /* lsid |= 1 << 11; *//* set contender (hack) */
  320. lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
  321. for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
  322. if (phyreg[3 + i] & 0x4) {
  323. lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
  324. << (6 - i*2);
  325. } else {
  326. lsid |= 1 << (6 - i*2);
  327. }
  328. }
  329. cpu_to_be32s(&lsid);
  330. PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
  331. return lsid;
  332. }
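/* handle_selfid() walks the self-ID quadlets captured in rcv_page, forwards
 * each consistent quadlet (and, for pre-1394a PHYs, the locally generated one
 * from generate_own_selfid()) to the core via hpsb_selfid_received(), and
 * finally completes the bus reset with hpsb_selfid_complete(). */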
  333. static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
  334. {
  335. quadlet_t *q = lynx->rcv_page;
  336. int phyid, isroot, size;
  337. quadlet_t lsid = 0;
  338. int i;
  339. if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
  340. size = lynx->selfid_size;
  341. phyid = lynx->phy_reg0;
  342. i = (size > 16 ? 16 : size) / 4 - 1;
  343. while (i >= 0) {
  344. cpu_to_be32s(&q[i]);
  345. i--;
  346. }
  347. if (!lynx->phyic.reg_1394a) {
  348. lsid = generate_own_selfid(lynx, host);
  349. }
  350. isroot = (phyid & 2) != 0;
  351. phyid >>= 2;
  352. PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
  353. phyid, (isroot ? "root" : "not root"));
  354. reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
  355. if (!lynx->phyic.reg_1394a && !size) {
  356. hpsb_selfid_received(host, lsid);
  357. }
  358. while (size > 0) {
  359. struct selfid *sid = (struct selfid *)q;
  360. if (!lynx->phyic.reg_1394a && !sid->extended
  361. && (sid->phy_id == (phyid + 1))) {
  362. hpsb_selfid_received(host, lsid);
  363. }
  364. if (q[0] == ~q[1]) {
  365. PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
  366. q[0]);
  367. hpsb_selfid_received(host, q[0]);
  368. } else {
  369. PRINT(KERN_INFO, lynx->id,
  370. "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
  371. }
  372. q += 2;
  373. size -= 8;
  374. }
  375. if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
  376. hpsb_selfid_received(host, lsid);
  377. }
  378. hpsb_selfid_complete(host, phyid, isroot);
  379. if (host->in_bus_reset) return; /* in bus reset again */
  380. if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think, we need this here
  381. reg_set_bits(lynx, LINK_CONTROL,
  382. LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
  383. | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
  384. }
  385. /* This must be called with the respective queue_lock held. */
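/* send_next() pulls the first queued hpsb_packet, maps its header and (if
 * present) data for DMA, and builds a two-buffer transmit PCL: buffer[0]
 * carries the header together with the transmit command and speed code,
 * buffer[1] carries the payload and is marked PCL_LAST_BUFF.  run_pcl()
 * then kicks the corresponding send channel. */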
  386. static void send_next(struct ti_lynx *lynx, int what)
  387. {
  388. struct ti_pcl pcl;
  389. struct lynx_send_data *d;
  390. struct hpsb_packet *packet;
  391. d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
  392. if (!list_empty(&d->pcl_queue)) {
  393. PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
  394. BUG();
  395. }
  396. packet = driver_packet(d->queue.next);
  397. list_move_tail(&packet->driver_list, &d->pcl_queue);
  398. d->header_dma = pci_map_single(lynx->dev, packet->header,
  399. packet->header_size, PCI_DMA_TODEVICE);
  400. if (packet->data_size) {
  401. d->data_dma = pci_map_single(lynx->dev, packet->data,
  402. packet->data_size,
  403. PCI_DMA_TODEVICE);
  404. } else {
  405. d->data_dma = 0;
  406. }
  407. pcl.next = PCL_NEXT_INVALID;
  408. pcl.async_error_next = PCL_NEXT_INVALID;
  409. pcl.pcl_status = 0;
  410. pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
  411. #ifndef __BIG_ENDIAN
  412. pcl.buffer[0].control |= PCL_BIGENDIAN;
  413. #endif
  414. pcl.buffer[0].pointer = d->header_dma;
  415. pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
  416. pcl.buffer[1].pointer = d->data_dma;
  417. switch (packet->type) {
  418. case hpsb_async:
  419. pcl.buffer[0].control |= PCL_CMD_XMT;
  420. break;
  421. case hpsb_iso:
  422. pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
  423. break;
  424. case hpsb_raw:
  425. pcl.buffer[0].control |= PCL_CMD_UNFXMT;
  426. break;
  427. }
  428. put_pcl(lynx, d->pcl, &pcl);
  429. run_pcl(lynx, d->pcl_start, d->channel);
  430. }
  431. /* called from subsystem core */
  432. static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
  433. {
  434. struct ti_lynx *lynx = host->hostdata;
  435. struct lynx_send_data *d;
  436. unsigned long flags;
  437. if (packet->data_size >= 4096) {
  438. PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
  439. packet->data_size);
  440. return -EOVERFLOW;
  441. }
  442. switch (packet->type) {
  443. case hpsb_async:
  444. case hpsb_raw:
  445. d = &lynx->async;
  446. break;
  447. case hpsb_iso:
  448. d = &lynx->iso_send;
  449. break;
  450. default:
  451. PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
  452. packet->type);
  453. return -EINVAL;
  454. }
  455. if (packet->tcode == TCODE_WRITEQ
  456. || packet->tcode == TCODE_READQ_RESPONSE) {
  457. cpu_to_be32s(&packet->header[3]);
  458. }
  459. spin_lock_irqsave(&d->queue_lock, flags);
  460. list_add_tail(&packet->driver_list, &d->queue);
  461. if (list_empty(&d->pcl_queue))
  462. send_next(lynx, packet->type);
  463. spin_unlock_irqrestore(&d->queue_lock, flags);
  464. return 0;
  465. }
  466. /* called from subsystem core */
  467. static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
  468. {
  469. struct ti_lynx *lynx = host->hostdata;
  470. int retval = 0;
  471. struct hpsb_packet *packet;
  472. LIST_HEAD(packet_list);
  473. unsigned long flags;
  474. int phy_reg;
  475. switch (cmd) {
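	/* RESET_BUS: the variants below differ only in which PHY bits are
	 * touched -- ISBR in register 5 for short (1394a-style) resets, IBR
	 * in register 1 for long resets, with RHB (register 1, bit 0x80)
	 * cleared or set for the no_force_root/force_root flavours.  Old
	 * non-1394a PHYs fall through from the short to the long reset path. */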
  476. case RESET_BUS:
  477. if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
  478. retval = 0;
  479. break;
  480. }
  481. switch (arg) {
  482. case SHORT_RESET:
  483. if (lynx->phyic.reg_1394a) {
  484. phy_reg = get_phy_reg(lynx, 5);
  485. if (phy_reg == -1) {
  486. PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
  487. retval = -1;
  488. break;
  489. }
  490. phy_reg |= 0x40;
  491. PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
  492. lynx->selfid_size = -1;
  493. lynx->phy_reg0 = -1;
  494. set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
  495. break;
  496. } else {
  497. PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
  498. /* fall through to long bus reset */
  499. }
  500. case LONG_RESET:
  501. phy_reg = get_phy_reg(lynx, 1);
  502. if (phy_reg == -1) {
  503. PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
  504. retval = -1;
  505. break;
  506. }
  507. phy_reg |= 0x40;
  508. PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
  509. lynx->selfid_size = -1;
  510. lynx->phy_reg0 = -1;
  511. set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
  512. break;
  513. case SHORT_RESET_NO_FORCE_ROOT:
  514. if (lynx->phyic.reg_1394a) {
  515. phy_reg = get_phy_reg(lynx, 1);
  516. if (phy_reg == -1) {
  517. PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
  518. retval = -1;
  519. break;
  520. }
  521. if (phy_reg & 0x80) {
  522. phy_reg &= ~0x80;
  523. set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
  524. }
  525. phy_reg = get_phy_reg(lynx, 5);
  526. if (phy_reg == -1) {
  527. PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
  528. retval = -1;
  529. break;
  530. }
  531. phy_reg |= 0x40;
  532. PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
  533. lynx->selfid_size = -1;
  534. lynx->phy_reg0 = -1;
  535. set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
  536. break;
  537. } else {
  538. PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
  539. /* fall through to long bus reset */
  540. }
  541. case LONG_RESET_NO_FORCE_ROOT:
  542. phy_reg = get_phy_reg(lynx, 1);
  543. if (phy_reg == -1) {
  544. PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
  545. retval = -1;
  546. break;
  547. }
  548. phy_reg &= ~0x80;
  549. phy_reg |= 0x40;
  550. PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
  551. lynx->selfid_size = -1;
  552. lynx->phy_reg0 = -1;
  553. set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
  554. break;
  555. case SHORT_RESET_FORCE_ROOT:
  556. if (lynx->phyic.reg_1394a) {
  557. phy_reg = get_phy_reg(lynx, 1);
  558. if (phy_reg == -1) {
  559. PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
  560. retval = -1;
  561. break;
  562. }
  563. if (!(phy_reg & 0x80)) {
  564. phy_reg |= 0x80;
  565. set_phy_reg(lynx, 1, phy_reg); /* set RHB */
  566. }
  567. phy_reg = get_phy_reg(lynx, 5);
  568. if (phy_reg == -1) {
  569. PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
  570. retval = -1;
  571. break;
  572. }
  573. phy_reg |= 0x40;
  574. PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
  575. lynx->selfid_size = -1;
  576. lynx->phy_reg0 = -1;
  577. set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
  578. break;
  579. } else {
  580. PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
  581. /* fall through to long bus reset */
  582. }
  583. case LONG_RESET_FORCE_ROOT:
  584. phy_reg = get_phy_reg(lynx, 1);
  585. if (phy_reg == -1) {
  586. PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
  587. retval = -1;
  588. break;
  589. }
  590. phy_reg |= 0xc0;
  591. PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
  592. lynx->selfid_size = -1;
  593. lynx->phy_reg0 = -1;
  594. set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
  595. break;
  596. default:
  597. PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
  598. retval = -1;
  599. }
  600. break;
  601. case GET_CYCLE_COUNTER:
  602. retval = reg_read(lynx, CYCLE_TIMER);
  603. break;
  604. case SET_CYCLE_COUNTER:
  605. reg_write(lynx, CYCLE_TIMER, arg);
  606. break;
  607. case SET_BUS_ID:
  608. reg_write(lynx, LINK_ID,
  609. (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
  610. break;
  611. case ACT_CYCLE_MASTER:
  612. if (arg) {
  613. reg_set_bits(lynx, LINK_CONTROL,
  614. LINK_CONTROL_CYCMASTER);
  615. } else {
  616. reg_clear_bits(lynx, LINK_CONTROL,
  617. LINK_CONTROL_CYCMASTER);
  618. }
  619. break;
  620. case CANCEL_REQUESTS:
  621. spin_lock_irqsave(&lynx->async.queue_lock, flags);
  622. reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
  623. list_splice(&lynx->async.queue, &packet_list);
  624. INIT_LIST_HEAD(&lynx->async.queue);
  625. if (list_empty(&lynx->async.pcl_queue)) {
  626. spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
  627. PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
  628. } else {
  629. struct ti_pcl pcl;
  630. u32 ack;
  631. struct hpsb_packet *packet;
  632. PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");
  633. get_pcl(lynx, lynx->async.pcl, &pcl);
  634. packet = driver_packet(lynx->async.pcl_queue.next);
  635. list_del_init(&packet->driver_list);
  636. pci_unmap_single(lynx->dev, lynx->async.header_dma,
  637. packet->header_size, PCI_DMA_TODEVICE);
  638. if (packet->data_size) {
  639. pci_unmap_single(lynx->dev, lynx->async.data_dma,
  640. packet->data_size, PCI_DMA_TODEVICE);
  641. }
  642. spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
  643. if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
  644. if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
  645. ack = (pcl.pcl_status >> 15) & 0xf;
  646. PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
  647. ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
  648. } else {
  649. ack = (pcl.pcl_status >> 15) & 0xf;
  650. }
  651. } else {
  652. PRINT(KERN_INFO, lynx->id, "async packet was not completed");
  653. ack = ACKX_ABORTED;
  654. }
  655. hpsb_packet_sent(host, packet, ack);
  656. }
  657. while (!list_empty(&packet_list)) {
  658. packet = driver_packet(packet_list.next);
  659. list_del_init(&packet->driver_list);
  660. hpsb_packet_sent(host, packet, ACKX_ABORTED);
  661. }
  662. break;
  663. case ISO_LISTEN_CHANNEL:
  664. spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
  665. if (lynx->iso_rcv.chan_count++ == 0) {
  666. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
  667. DMA_WORD1_CMP_ENABLE_MASTER);
  668. }
  669. spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
  670. break;
  671. case ISO_UNLISTEN_CHANNEL:
  672. spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
  673. if (--lynx->iso_rcv.chan_count == 0) {
  674. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
  675. 0);
  676. }
  677. spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
  678. break;
  679. default:
  680. PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
  681. retval = -1;
  682. }
  683. return retval;
  684. }
  685. /***************************************
  686. * IEEE-1394 functionality section END *
  687. ***************************************/
  688. /********************************************************
  689. * Global stuff (interrupt handler, init/shutdown code) *
  690. ********************************************************/
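/* The interrupt handler acknowledges both the PCI and link-layer status
 * registers up front and then dispatches: link events (bus reset, PHY
 * register received, FIFO over/underflow), iso receive DMA halts, async and
 * iso send DMA halts (completing the packet at the head of the pcl_queue),
 * and the general receive channel (self-ID data or asynchronous packets). */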
  691. static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
  692. struct pt_regs *regs_are_unused)
  693. {
  694. struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
  695. struct hpsb_host *host = lynx->host;
  696. u32 intmask;
  697. u32 linkint;
  698. linkint = reg_read(lynx, LINK_INT_STATUS);
  699. intmask = reg_read(lynx, PCI_INT_STATUS);
  700. if (!(intmask & PCI_INT_INT_PEND))
  701. return IRQ_NONE;
  702. PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
  703. linkint);
  704. reg_write(lynx, LINK_INT_STATUS, linkint);
  705. reg_write(lynx, PCI_INT_STATUS, intmask);
  706. if (intmask & PCI_INT_1394) {
  707. if (linkint & LINK_INT_PHY_TIMEOUT) {
  708. PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
  709. }
  710. if (linkint & LINK_INT_PHY_BUSRESET) {
  711. PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
  712. lynx->selfid_size = -1;
  713. lynx->phy_reg0 = -1;
  714. if (!host->in_bus_reset)
  715. hpsb_bus_reset(host);
  716. }
  717. if (linkint & LINK_INT_PHY_REG_RCVD) {
  718. u32 reg;
  719. spin_lock(&lynx->phy_reg_lock);
  720. reg = reg_read(lynx, LINK_PHY);
  721. spin_unlock(&lynx->phy_reg_lock);
  722. if (!host->in_bus_reset) {
  723. PRINT(KERN_INFO, lynx->id,
  724. "phy reg received without reset");
  725. } else if (reg & 0xf00) {
  726. PRINT(KERN_INFO, lynx->id,
  727. "unsolicited phy reg %d received",
  728. (reg >> 8) & 0xf);
  729. } else {
  730. lynx->phy_reg0 = reg & 0xff;
  731. handle_selfid(lynx, host);
  732. }
  733. }
  734. if (linkint & LINK_INT_ISO_STUCK) {
  735. PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
  736. }
  737. if (linkint & LINK_INT_ASYNC_STUCK) {
  738. PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
  739. }
  740. if (linkint & LINK_INT_SENT_REJECT) {
  741. PRINT(KERN_INFO, lynx->id, "sent reject");
  742. }
  743. if (linkint & LINK_INT_TX_INVALID_TC) {
  744. PRINT(KERN_INFO, lynx->id, "invalid transaction code");
  745. }
  746. if (linkint & LINK_INT_GRF_OVERFLOW) {
  747. /* flush FIFO if overflow happens during reset */
  748. if (host->in_bus_reset)
  749. reg_write(lynx, FIFO_CONTROL,
  750. FIFO_CONTROL_GRF_FLUSH);
  751. PRINT(KERN_INFO, lynx->id, "GRF overflow");
  752. }
  753. if (linkint & LINK_INT_ITF_UNDERFLOW) {
  754. PRINT(KERN_INFO, lynx->id, "ITF underflow");
  755. }
  756. if (linkint & LINK_INT_ATF_UNDERFLOW) {
  757. PRINT(KERN_INFO, lynx->id, "ATF underflow");
  758. }
  759. }
  760. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
  761. PRINTD(KERN_DEBUG, lynx->id, "iso receive");
  762. spin_lock(&lynx->iso_rcv.lock);
  763. lynx->iso_rcv.stat[lynx->iso_rcv.next] =
  764. reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
  765. lynx->iso_rcv.used++;
  766. lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
  767. if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
  768. || !lynx->iso_rcv.chan_count) {
  769. PRINTD(KERN_DEBUG, lynx->id, "stopped");
  770. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
  771. }
  772. run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
  773. CHANNEL_ISO_RCV);
  774. spin_unlock(&lynx->iso_rcv.lock);
  775. tasklet_schedule(&lynx->iso_rcv.tq);
  776. }
  777. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
  778. PRINTD(KERN_DEBUG, lynx->id, "async sent");
  779. spin_lock(&lynx->async.queue_lock);
  780. if (list_empty(&lynx->async.pcl_queue)) {
  781. spin_unlock(&lynx->async.queue_lock);
  782. PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
  783. } else {
  784. struct ti_pcl pcl;
  785. u32 ack;
  786. struct hpsb_packet *packet;
  787. get_pcl(lynx, lynx->async.pcl, &pcl);
  788. packet = driver_packet(lynx->async.pcl_queue.next);
  789. list_del_init(&packet->driver_list);
  790. pci_unmap_single(lynx->dev, lynx->async.header_dma,
  791. packet->header_size, PCI_DMA_TODEVICE);
  792. if (packet->data_size) {
  793. pci_unmap_single(lynx->dev, lynx->async.data_dma,
  794. packet->data_size, PCI_DMA_TODEVICE);
  795. }
  796. if (!list_empty(&lynx->async.queue)) {
  797. send_next(lynx, hpsb_async);
  798. }
  799. spin_unlock(&lynx->async.queue_lock);
  800. if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
  801. if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
  802. ack = (pcl.pcl_status >> 15) & 0xf;
  803. PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
  804. ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
  805. } else {
  806. ack = (pcl.pcl_status >> 15) & 0xf;
  807. }
  808. } else {
  809. PRINT(KERN_INFO, lynx->id, "async packet was not completed");
  810. ack = ACKX_SEND_ERROR;
  811. }
  812. hpsb_packet_sent(host, packet, ack);
  813. }
  814. }
  815. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
  816. PRINTD(KERN_DEBUG, lynx->id, "iso sent");
  817. spin_lock(&lynx->iso_send.queue_lock);
  818. if (list_empty(&lynx->iso_send.pcl_queue)) {
  819. spin_unlock(&lynx->iso_send.queue_lock);
  820. PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
  821. } else {
  822. struct ti_pcl pcl;
  823. u32 ack;
  824. struct hpsb_packet *packet;
  825. get_pcl(lynx, lynx->iso_send.pcl, &pcl);
  826. packet = driver_packet(lynx->iso_send.pcl_queue.next);
  827. list_del_init(&packet->driver_list);
  828. pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
  829. packet->header_size, PCI_DMA_TODEVICE);
  830. if (packet->data_size) {
  831. pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
  832. packet->data_size, PCI_DMA_TODEVICE);
  833. }
  834. if (!list_empty(&lynx->iso_send.queue)) {
  835. send_next(lynx, hpsb_iso);
  836. }
  837. spin_unlock(&lynx->iso_send.queue_lock);
  838. if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
  839. if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
  840. ack = (pcl.pcl_status >> 15) & 0xf;
  841. PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
  842. ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
  843. } else {
  844. ack = (pcl.pcl_status >> 15) & 0xf;
  845. }
  846. } else {
  847. PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
  848. ack = ACKX_SEND_ERROR;
  849. }
  850. hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
  851. }
  852. }
  853. if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
  854. /* general receive DMA completed */
  855. int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
  856. PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
  857. stat & 0x1fff);
  858. if (stat & DMA_CHAN_STAT_SELFID) {
  859. lynx->selfid_size = stat & 0x1fff;
  860. handle_selfid(lynx, host);
  861. } else {
  862. quadlet_t *q_data = lynx->rcv_page;
  863. if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
  864. || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
  865. cpu_to_be32s(q_data + 3);
  866. }
  867. hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
  868. }
  869. run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
  870. }
  871. return IRQ_HANDLED;
  872. }
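/* Bottom half for isochronous receive, run as a tasklet: it drains the PCLs
 * completed by the interrupt handler, hands each packet to
 * hpsb_packet_received(), and re-enables the receive channel if any channel
 * is still being listened to. */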
  873. static void iso_rcv_bh(struct ti_lynx *lynx)
  874. {
  875. unsigned int idx;
  876. quadlet_t *data;
  877. unsigned long flags;
  878. spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
  879. while (lynx->iso_rcv.used) {
  880. idx = lynx->iso_rcv.last;
  881. spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
  882. data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
  883. + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
  884. if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
  885. PRINT(KERN_ERR, lynx->id,
  886. "iso length mismatch 0x%08x/0x%08x", *data,
  887. lynx->iso_rcv.stat[idx]);
  888. }
  889. if (lynx->iso_rcv.stat[idx]
  890. & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
  891. PRINT(KERN_INFO, lynx->id,
  892. "iso receive error on %d to 0x%p", idx, data);
  893. } else {
  894. hpsb_packet_received(lynx->host, data,
  895. lynx->iso_rcv.stat[idx] & 0x1fff,
  896. 0);
  897. }
  898. spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
  899. lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
  900. lynx->iso_rcv.used--;
  901. }
  902. if (lynx->iso_rcv.chan_count) {
  903. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
  904. DMA_WORD1_CMP_ENABLE_MASTER);
  905. }
  906. spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
  907. }
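/* Teardown mirrors the setup order in add_card(): lynx->state records how far
 * initialization got, and the switch below intentionally falls through so
 * each stage only releases what was actually acquired. */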
  908. static void remove_card(struct pci_dev *dev)
  909. {
  910. struct ti_lynx *lynx;
  911. struct device *lynx_dev;
  912. int i;
  913. lynx = pci_get_drvdata(dev);
  914. if (!lynx) return;
  915. pci_set_drvdata(dev, NULL);
  916. lynx_dev = get_device(&lynx->host->device);
  917. switch (lynx->state) {
  918. case is_host:
  919. reg_write(lynx, PCI_INT_ENABLE, 0);
  920. hpsb_remove_host(lynx->host);
  921. case have_intr:
  922. reg_write(lynx, PCI_INT_ENABLE, 0);
  923. free_irq(lynx->dev->irq, lynx);
  924. /* Disable IRM Contender and LCtrl */
  925. if (lynx->phyic.reg_1394a)
  926. set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
  927. /* Let all other nodes know to ignore us */
  928. lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
  929. case have_iomappings:
  930. reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
  931. /* Fix buggy cards with autoboot pin not tied low: */
  932. reg_write(lynx, DMA0_CHAN_CTRL, 0);
  933. iounmap(lynx->registers);
  934. iounmap(lynx->local_rom);
  935. iounmap(lynx->local_ram);
  936. iounmap(lynx->aux_port);
  937. case have_1394_buffers:
  938. for (i = 0; i < ISORCV_PAGES; i++) {
  939. if (lynx->iso_rcv.page[i]) {
  940. pci_free_consistent(lynx->dev, PAGE_SIZE,
  941. lynx->iso_rcv.page[i],
  942. lynx->iso_rcv.page_dma[i]);
  943. }
  944. }
  945. pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
  946. lynx->rcv_page_dma);
  947. case have_aux_buf:
  948. case have_pcl_mem:
  949. pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
  950. lynx->pcl_mem_dma);
  951. case clear:
  952. /* do nothing - already freed */
  953. ;
  954. }
  955. tasklet_kill(&lynx->iso_rcv.tq);
  956. if (lynx_dev)
  957. put_device(lynx_dev);
  958. }
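/* Probe path: enable the PCI device, allocate the host structure and DMA
 * buffers, map the BARs (registers, local RAM, aux port, ROM), hook the
 * shared interrupt, carve out the PCLs, program the FIFO/DMA/link registers,
 * optionally read the bus info block from the serial EEPROM, and finally
 * register the host with the ieee1394 core. */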
  959. static int __devinit add_card(struct pci_dev *dev,
  960. const struct pci_device_id *devid_is_unused)
  961. {
  962. #define FAIL(fmt, args...) do { \
  963. PRINT_G(KERN_ERR, fmt , ## args); \
  964. remove_card(dev); \
  965. return error; \
  966. } while (0)
  967. char irq_buf[16];
  968. struct hpsb_host *host;
  969. struct ti_lynx *lynx; /* shortcut to currently handled device */
  970. struct ti_pcl pcl;
  971. u32 *pcli;
  972. int i;
  973. int error;
  974. error = -ENXIO;
  975. if (pci_set_dma_mask(dev, DMA_32BIT_MASK))
  976. FAIL("DMA address limits not supported for PCILynx hardware");
  977. if (pci_enable_device(dev))
  978. FAIL("failed to enable PCILynx hardware");
  979. pci_set_master(dev);
  980. error = -ENOMEM;
  981. host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
  982. if (!host) FAIL("failed to allocate control structure memory");
  983. lynx = host->hostdata;
  984. lynx->id = card_id++;
  985. lynx->dev = dev;
  986. lynx->state = clear;
  987. lynx->host = host;
  988. host->pdev = dev;
  989. pci_set_drvdata(dev, lynx);
  990. spin_lock_init(&lynx->lock);
  991. spin_lock_init(&lynx->phy_reg_lock);
  992. lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
  993. &lynx->pcl_mem_dma);
  994. if (lynx->pcl_mem != NULL) {
  995. lynx->state = have_pcl_mem;
  996. PRINT(KERN_INFO, lynx->id,
  997. "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
  998. lynx->pcl_mem);
  999. } else {
  1000. FAIL("failed to allocate PCL memory area");
  1001. }
  1002. lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
  1003. &lynx->rcv_page_dma);
  1004. if (lynx->rcv_page == NULL) {
  1005. FAIL("failed to allocate receive buffer");
  1006. }
  1007. lynx->state = have_1394_buffers;
  1008. for (i = 0; i < ISORCV_PAGES; i++) {
  1009. lynx->iso_rcv.page[i] =
  1010. pci_alloc_consistent(dev, PAGE_SIZE,
  1011. &lynx->iso_rcv.page_dma[i]);
  1012. if (lynx->iso_rcv.page[i] == NULL) {
  1013. FAIL("failed to allocate iso receive buffers");
  1014. }
  1015. }
  1016. lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
  1017. PCILYNX_MAX_REGISTER);
  1018. lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
  1019. lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
  1020. lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
  1021. PCILYNX_MAX_MEMORY);
  1022. lynx->state = have_iomappings;
  1023. if (lynx->registers == NULL) {
  1024. FAIL("failed to remap registers - card not accessible");
  1025. }
  1026. reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
  1027. /* Fix buggy cards with autoboot pin not tied low: */
  1028. reg_write(lynx, DMA0_CHAN_CTRL, 0);
  1029. sprintf (irq_buf, "%d", dev->irq);
  1030. if (!request_irq(dev->irq, lynx_irq_handler, IRQF_SHARED,
  1031. PCILYNX_DRIVER_NAME, lynx)) {
  1032. PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
  1033. lynx->state = have_intr;
  1034. } else {
  1035. FAIL("failed to allocate shared interrupt %s", irq_buf);
  1036. }
  1037. /* alloc_pcl return values are not checked; it is expected that the
  1038. * provided PCL space is sufficient for the initial allocations */
  1039. lynx->rcv_pcl = alloc_pcl(lynx);
  1040. lynx->rcv_pcl_start = alloc_pcl(lynx);
  1041. lynx->async.pcl = alloc_pcl(lynx);
  1042. lynx->async.pcl_start = alloc_pcl(lynx);
  1043. lynx->iso_send.pcl = alloc_pcl(lynx);
  1044. lynx->iso_send.pcl_start = alloc_pcl(lynx);
  1045. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1046. lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
  1047. }
  1048. lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
  1049. /* all allocations successful - simple init stuff follows */
  1050. reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
  1051. tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
  1052. (unsigned long)lynx);
  1053. spin_lock_init(&lynx->iso_rcv.lock);
  1054. spin_lock_init(&lynx->async.queue_lock);
  1055. lynx->async.channel = CHANNEL_ASYNC_SEND;
  1056. spin_lock_init(&lynx->iso_send.queue_lock);
  1057. lynx->iso_send.channel = CHANNEL_ISO_SEND;
  1058. PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
  1059. "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
  1060. lynx->local_ram, lynx->aux_port);
  1061. /* now, looking for PHY register set */
  1062. if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
  1063. lynx->phyic.reg_1394a = 1;
  1064. PRINT(KERN_INFO, lynx->id,
  1065. "found 1394a conform PHY (using extended register set)");
  1066. lynx->phyic.vendor = get_phy_vendorid(lynx);
  1067. lynx->phyic.product = get_phy_productid(lynx);
  1068. } else {
  1069. lynx->phyic.reg_1394a = 0;
  1070. PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
  1071. }
  1072. lynx->selfid_size = -1;
  1073. lynx->phy_reg0 = -1;
  1074. INIT_LIST_HEAD(&lynx->async.queue);
  1075. INIT_LIST_HEAD(&lynx->async.pcl_queue);
  1076. INIT_LIST_HEAD(&lynx->iso_send.queue);
  1077. INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
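/* Receive-side PCL programs: rcv_pcl_start simply chains to rcv_pcl, which
 * splits each incoming packet into a 16 byte header buffer and the rest of
 * the receive page.  The async and iso send "start" PCLs just point at their
 * respective transmit PCLs, which send_next() fills in per packet. */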
  1078. pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
  1079. put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
  1080. pcl.next = PCL_NEXT_INVALID;
  1081. pcl.async_error_next = PCL_NEXT_INVALID;
  1082. pcl.buffer[0].control = PCL_CMD_RCV | 16;
  1083. #ifndef __BIG_ENDIAN
  1084. pcl.buffer[0].control |= PCL_BIGENDIAN;
  1085. #endif
  1086. pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
  1087. pcl.buffer[0].pointer = lynx->rcv_page_dma;
  1088. pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
  1089. put_pcl(lynx, lynx->rcv_pcl, &pcl);
  1090. pcl.next = pcl_bus(lynx, lynx->async.pcl);
  1091. pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
  1092. put_pcl(lynx, lynx->async.pcl_start, &pcl);
  1093. pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
  1094. pcl.async_error_next = PCL_NEXT_INVALID;
  1095. put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
  1096. pcl.next = PCL_NEXT_INVALID;
  1097. pcl.async_error_next = PCL_NEXT_INVALID;
  1098. pcl.buffer[0].control = PCL_CMD_RCV | 4;
  1099. #ifndef __BIG_ENDIAN
  1100. pcl.buffer[0].control |= PCL_BIGENDIAN;
  1101. #endif
  1102. pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
  1103. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1104. int page = i / ISORCV_PER_PAGE;
  1105. int sec = i % ISORCV_PER_PAGE;
  1106. pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
  1107. + sec * MAX_ISORCV_SIZE;
  1108. pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
  1109. put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
  1110. }
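/* The iso receive "start" PCL is reused as a table holding the bus addresses
 * of the individual receive PCLs, apparently so that run_sub_pcl() can
 * (re)start the channel at any entry by index. */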
  1111. pcli = (u32 *)&pcl;
  1112. for (i = 0; i < NUM_ISORCV_PCL; i++) {
  1113. pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
  1114. }
  1115. put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
  1116. /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
  1117. reg_write(lynx, FIFO_SIZES, 0x003030a0);
  1118. /* 20 byte threshold before triggering PCI transfer */
  1119. reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
  1120. /* threshold on both send FIFOs before transmitting:
  1121. FIFO size - cache line size - 1 */
  1122. i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
  1123. i = 0x30 - i - 1;
  1124. reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
  1125. reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
  1126. reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
  1127. | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
  1128. | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
  1129. | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
  1130. | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
  1131. | LINK_INT_ATF_UNDERFLOW);
  1132. reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
  1133. reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
  1134. reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
  1135. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
  1136. DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
  1137. | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
  1138. | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
  1139. run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
  1140. reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
  1141. reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
  1142. reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
  1143. reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
  1144. run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
  1145. reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
  1146. | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
  1147. | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
  1148. | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
  1149. if (!lynx->phyic.reg_1394a) {
  1150. if (!hpsb_disable_irm) {
  1151. /* attempt to enable contender bit -FIXME- would this
  1152. * work elsewhere? */
  1153. reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
  1154. reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
  1155. }
  1156. } else {
  1157. /* set the contender (if appropriate) and LCtrl bit in the
  1158. * extended PHY register set. (Should check that PHY_02_EXTENDED
  1159. * is set in register 2?)
  1160. */
  1161. i = get_phy_reg(lynx, 4);
  1162. i |= PHY_04_LCTRL;
  1163. if (hpsb_disable_irm)
  1164. i &= ~PHY_04_CONTENDER;
  1165. else
  1166. i |= PHY_04_CONTENDER;
  1167. if (i != -1) set_phy_reg(lynx, 4, i);
  1168. }
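/* Unless skip_eeprom is set, read the 1394 bus info block (including the
 * GUID) from the serial EEPROM at I2C address 0x50 through the bit-banged
 * adapter defined at the top of the file: a single i2c_transfer() with a
 * one-byte address write followed by a 20 byte read fetches all five
 * quadlets. */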
  1169. if (!skip_eeprom)
  1170. {
  1171. /* needed for i2c communication with serial eeprom */
  1172. struct i2c_adapter *i2c_ad;
  1173. struct i2c_algo_bit_data i2c_adapter_data;
  1174. error = -ENOMEM;
  1175. i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
  1176. if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
  1177. memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
  1178. i2c_adapter_data = bit_data;
  1179. i2c_ad->algo_data = &i2c_adapter_data;
  1180. i2c_adapter_data.data = lynx;
  1181. PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
  1182. reg_read(lynx, SERIAL_EEPROM_CONTROL));
  1183. /* reset hardware to sane state */
  1184. lynx->i2c_driven_state = 0x00000070;
  1185. reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
  1186. if (i2c_bit_add_bus(i2c_ad) < 0)
  1187. {
  1188. kfree(i2c_ad);
  1189. error = -ENXIO;
  1190. FAIL("unable to register i2c");
  1191. }
  1192. else
  1193. {
  1194. /* do i2c stuff */
  1195. unsigned char i2c_cmd = 0x10;
  1196. struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
  1197. { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
  1198. };
  1199. /* we use i2c_transfer because i2c_smbus_read_block_data does not work properly, and we
  1200. do it more efficiently in one transaction rather than using several reads */
  1201. if (i2c_transfer(i2c_ad, msg, 2) < 0) {
  1202. PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
  1203. } else {
  1204. int i;
  1205. PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
  1206. * FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
  1207. * generation(1394a) and link_spd(1394a) field and recalculate
  1208. * the CRC */
  1209. for (i = 0; i < 5 ; i++)
  1210. PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
  1211. i, be32_to_cpu(lynx->bus_info_block[i]));
  1212. /* check info_length, crc_length and the 1394 magic number to make sure it is really a bus info block */
  1213. if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
  1214. (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
  1215. {
  1216. PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
  1217. } else {
  1218. kfree(i2c_ad);
  1219. error = -ENXIO;
  1220. FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
  1221. }
  1222. }
  1223. i2c_bit_del_bus(i2c_ad);
  1224. kfree(i2c_ad);
  1225. }
  1226. }
  1227. host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
  1228. host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
  1229. host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
  1230. host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
  1231. if (!lynx->phyic.reg_1394a)
  1232. host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
  1233. else
  1234. host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
  1235. if (hpsb_add_host(host)) {
  1236. error = -ENOMEM;
  1237. FAIL("Failed to register host with highlevel");
  1238. }
  1239. lynx->state = is_host;
  1240. return 0;
  1241. #undef FAIL
  1242. }
  1243. static struct pci_device_id pci_table[] = {
  1244. {
  1245. .vendor = PCI_VENDOR_ID_TI,
  1246. .device = PCI_DEVICE_ID_TI_PCILYNX,
  1247. .subvendor = PCI_ANY_ID,
  1248. .subdevice = PCI_ANY_ID,
  1249. },
  1250. { } /* Terminating entry */
  1251. };
  1252. static struct pci_driver lynx_pci_driver = {
  1253. .name = PCILYNX_DRIVER_NAME,
  1254. .id_table = pci_table,
  1255. .probe = add_card,
  1256. .remove = remove_card,
  1257. };
  1258. static struct hpsb_host_driver lynx_driver = {
  1259. .owner = THIS_MODULE,
  1260. .name = PCILYNX_DRIVER_NAME,
  1261. .set_hw_config_rom = NULL,
  1262. .transmit_packet = lynx_transmit,
  1263. .devctl = lynx_devctl,
  1264. .isoctl = NULL,
  1265. };
  1266. MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
  1267. MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
  1268. MODULE_LICENSE("GPL");
  1269. MODULE_SUPPORTED_DEVICE("pcilynx");
  1270. MODULE_DEVICE_TABLE(pci, pci_table);
  1271. static int __init pcilynx_init(void)
  1272. {
  1273. int ret;
  1274. ret = pci_register_driver(&lynx_pci_driver);
  1275. if (ret < 0) {
  1276. PRINT_G(KERN_ERR, "PCI module init failed");
  1277. return ret;
  1278. }
  1279. return 0;
  1280. }
  1281. static void __exit pcilynx_cleanup(void)
  1282. {
  1283. pci_unregister_driver(&lynx_pci_driver);
  1284. }
  1285. module_init(pcilynx_init);
  1286. module_exit(pcilynx_cleanup);