/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "fw-transaction.h"
#include "fw-ohci.h"

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

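/*
 * An asynchronous receive (AR) context: a linked list of page-sized
 * ar_buffers forming one DMA program.  current_buffer and pointer track
 * how far the tasklet has consumed received packets; new pages are
 * appended at last_buffer.
 */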
struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

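/*
 * A generic DMA context: a ring of descriptors in one DMA-mapped
 * buffer.  head/tail/prev pointers implement the ring, and callback is
 * run from the tasklet for each completed descriptor block.
 */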
struct context {
	struct fw_ohci *ohci;
	u32 regs;
	struct descriptor *buffer;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	struct descriptor *head_descriptor;
	struct descriptor *tail_descriptor;
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;
	descriptor_callback_t callback;
	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	u32 version;
	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;
	u32 bus_seconds;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
#define ISO_BUFFER_SIZE			(64 * 1024)
#define AT_BUFFER_SIZE			4096

static char ohci_driver_name[] = KBUILD_MODNAME;

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

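/*
 * Read-modify-write of a PHY register through the PhyControl register:
 * start a read, give it a couple of milliseconds, check ReadDone, then
 * write back the value with clear_bits cleared and set_bits set.
 */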
static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

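/*
 * Allocate one more page-sized ar_buffer, map it for DMA, set up its
 * INPUT_MORE descriptor and link it to the end of the AR buffer list,
 * then wake the context so the controller picks it up.
 */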
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t ab_bus;
	size_t offset;

	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab_bus)) {
		free_page((unsigned long) ab);
		return -ENOMEM;
	}

	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					     DESCRIPTOR_STATUS |
					     DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

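/*
 * Parse one packet out of the AR buffer: rebuild the fw_packet header
 * according to the tcode, pick up ack, speed and timestamp from the
 * trailing status word, and hand the packet to the core request or
 * response handler (or update request_generation for the synthesized
 * bus reset packet).  Returns a pointer just past the packet.
 */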
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = le32_to_cpu(buffer[0]);
	p.header[1] = le32_to_cpu(buffer[1]);
	p.header[2] = le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = le32_to_cpu(buffer[length]);

	p.ack = ((status >> 16) & 0x1f) - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 */
	if (p.ack + 16 == 0x09)
		ohci->request_generation = (buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}

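/*
 * AR context bottom half: walk the packets received since last time.
 * When the current buffer has been completely filled (res_count == 0),
 * a packet may be split across this buffer and the next one; the old
 * page is reused to reassemble the split packet before it is freed and
 * a fresh page is appended to the context.
 */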
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet.
		 */

		offset = offsetof(struct ar_buffer, data);
		dma_unmap_single(ohci->card.device,
			le32_to_cpu(ab->descriptor.data_address) - offset,
			PAGE_SIZE, DMA_BIDIRECTIONAL);

		buffer = ab;
		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		free_page((unsigned long)buffer);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}

static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}

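/*
 * Generic context bottom half: follow the branch addresses from the
 * last completed descriptor block and call ctx->callback for each new
 * block until the callback reports that a block is not finished yet.
 */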
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct fw_ohci *ohci = ctx->ohci;
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ctx->buffer_size, DMA_TO_DEVICE);

	d = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0) {
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
		last = (z == 2) ? d : d + z - 1;

		if (!ctx->callback(ctx, d, last))
			break;

		ctx->tail_descriptor = d;
		ctx->tail_descriptor_last = last;
	}
}

static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     size_t buffer_size, u32 regs,
	     descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->buffer_size = buffer_size;
	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (ctx->buffer == NULL)
		return -ENOMEM;

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	ctx->buffer_bus =
		dma_map_single(ohci->card.device, ctx->buffer,
			       buffer_size, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->buffer_bus)) {
		kfree(ctx->buffer);
		return -ENOMEM;
	}

	ctx->head_descriptor = ctx->buffer;
	ctx->prev_descriptor = ctx->buffer;
	ctx->tail_descriptor = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.  Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full.
	 */

	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return 0;
}

static void
context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;

	dma_unmap_single(card->device, ctx->buffer_bus,
			 ctx->buffer_size, DMA_TO_DEVICE);
	kfree(ctx->buffer);
}

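/*
 * Reserve z contiguous descriptors from the ring buffer, wrapping to
 * the start of the buffer if there is not enough room before the end.
 * Returns NULL if the ring is full; *d_bus receives the bus address of
 * the first descriptor.
 */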
static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d, *tail, *end;

	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ctx->buffer_size / sizeof(*d);

	if (d + z <= tail) {
		goto has_space;
	} else if (d > tail && d + z <= end) {
		goto has_space;
	} else if (d > tail && ctx->buffer + z <= tail) {
		d = ctx->buffer;
		goto has_space;
	}

	return NULL;

 has_space:
	memset(d, 0, z * sizeof(*d));
	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}

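/*
 * Link a freshly built block of z descriptors into the DMA program by
 * patching the previous block's branch address (the low bits encode
 * z), then sync the buffer and wake the context so the controller
 * refetches the branch.
 */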
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;

	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

	ctx->head_descriptor = d + z + extra;
	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev_descriptor = z == 2 ? d : d + z - 1;

	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
				   ctx->buffer_size, DMA_TO_DEVICE);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			break;

		fw_notify("context_stop: still active (0x%08x)\n", reg);
		mdelay(1);
	}
}

struct driver_data {
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, payload_bus;
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */

	header = (__le32 *) &d[1];
	if (packet->header_length > 8) {
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
	} else {
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}

		d[2].req_count = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}

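/*
 * Completion handler for one AT descriptor block: unmap the payload,
 * translate the OHCI event code in transfer_status into an ack or
 * rcode, and run the packet's completion callback.
 */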
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	dma_addr_t payload_bus;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	payload_bus = le32_to_cpu(last->data_address);
	if (payload_bus != 0)
		dma_unmap_single(ohci->card.device, payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same
		 * error as when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

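/*
 * Serve a read request aimed at our own config ROM directly from the
 * in-memory copy instead of sending it out on the bus; non-read tcodes
 * and out-of-range offsets get an error response.
 */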
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
	fw_core_handle_response(&ohci->card, &response);
}

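/*
 * Handle an AT packet addressed to the local node without putting it
 * on the bus: config ROM reads and the serial bus lock registers are
 * handled here, everything else falls through to the core.
 */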
static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	retval = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (retval < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}

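/*
 * Bottom half for the selfIDComplete interrupt: pick up the new node
 * ID and self IDs, verify they belong to a single, consistent
 * generation, stop the AT contexts, swap in a pending config ROM if
 * one was staged, and finally report the bus reset to the core.
 */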
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_error("node ID not valid, new bus reset in progress\n");
		return;
	}
	ohci->node_id = reg & 0xffff;

	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
	}

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */
	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */
	if (ohci->next_config_rom != NULL) {
		free_rom = ohci->config_rom;
		free_rom_bus = ohci->config_rom_bus;
		ohci->config_rom = ohci->next_config_rom;
		ohci->config_rom_bus = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}

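/*
 * Top-half interrupt handler: acknowledge the event bits and schedule
 * the matching tasklets (bus reset, AR/AT contexts, isochronous
 * contexts); the cycle64Seconds event updates the bus_seconds counter
 * directly.
 */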
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			ohci->bus_seconds++;
	}

	return IRQ_HANDLED;
}

static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}

static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return -EBUSY;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);
	msleep(50);

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_masterIntEnable |
		  OHCI1394_cycle64Seconds);

	/* Activate link_on bit and contender bit in our self ID packets. */
	if (ohci_update_phy_reg(card, 4, 0,
				PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
		return -EIO;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	ohci->next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &ohci->next_config_rom_bus, GFP_KERNEL);
	if (ohci->next_config_rom == NULL)
		return -ENOMEM;

	memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
	fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);

	ohci->next_header = config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			IRQF_SHARED, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/*
	 * We are ready to go, initiate bus reset to finish the
	 * initialization.
	 */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}

static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = -EBUSY;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);

		retval = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);
	else
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	return retval;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int retval = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	retval = 0;

 out:
	tasklet_enable(&ctx->tasklet);

	return retval;
}

static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/*
	 * FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */
	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);
	return retval;
}

static u64
ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 cycle_time;
	u64 bus_time;

	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

	return bus_time;
}

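/*
 * Completion handler for an isochronous receive dual-buffer descriptor:
 * copy the accumulated iso headers out of the first buffer into
 * ctx->header (byteswapping the first quadlet) and invoke the user
 * callback when the descriptor requested an interrupt.
 */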
static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;
	int i;

	if (db->first_res_count > 0 && db->second_res_count > 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	header_length = le16_to_cpu(db->first_req_count) -
		le16_to_cpu(db->first_res_count);

	i = ctx->header_length;
	p = db + 1;
	end = p + header_length;
	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
		/*
		 * The iso header is byteswapped to little endian by
		 * the controller, but the remaining header quadlets
		 * are big endian.  We want to present all the headers
		 * as big endian, so we have to swap the first
		 * quadlet.
		 */
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
		i += ctx->base.header_size;
		p += ctx->base.header_size + 4;
	}
	ctx->header_length = i;

	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) (db + 1);
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}

static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u32 *mask, regs;
	unsigned long flags;
	int index, retval = -ENOMEM;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		callback = handle_ir_dualbuffer_packet;
	}

	/* FIXME: We need a fallback for pre 1.1 OHCI. */
	if (callback == handle_ir_dualbuffer_packet &&
	    ohci->version < OHCI_VERSION_1_1)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	if (type == FW_ISO_CONTEXT_TRANSMIT)
		regs = OHCI1394_IsoXmitContextBase(index);
	else
		regs = OHCI1394_IsoRcvContextBase(index);

	ctx = &list[index];
	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL)
		goto out;

	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
			      regs, callback);
	if (retval < 0)
		goto out_with_header;

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(retval);
}

  1225. static int ohci_start_iso(struct fw_iso_context *base,
  1226. s32 cycle, u32 sync, u32 tags)
  1227. {
  1228. struct iso_context *ctx = container_of(base, struct iso_context, base);
  1229. struct fw_ohci *ohci = ctx->context.ohci;
  1230. u32 control, match;
  1231. int index;
  1232. if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
  1233. index = ctx - ohci->it_context_list;
  1234. match = 0;
  1235. if (cycle >= 0)
  1236. match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
  1237. (cycle & 0x7fff) << 16;
  1238. reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
  1239. reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
  1240. context_run(&ctx->context, match);
  1241. } else {
  1242. index = ctx - ohci->ir_context_list;
  1243. control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
  1244. match = (tags << 28) | (sync << 8) | ctx->base.channel;
  1245. if (cycle >= 0) {
  1246. match |= (cycle & 0x07fff) << 12;
  1247. control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
  1248. }
  1249. reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
  1250. reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
  1251. reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
  1252. context_run(&ctx->context, control);
  1253. }
  1254. return 0;
  1255. }
  1256. static int ohci_stop_iso(struct fw_iso_context *base)
  1257. {
  1258. struct fw_ohci *ohci = fw_ohci(base->card);
  1259. struct iso_context *ctx = container_of(base, struct iso_context, base);
  1260. int index;
  1261. if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
  1262. index = ctx - ohci->it_context_list;
  1263. reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
  1264. } else {
  1265. index = ctx - ohci->ir_context_list;
  1266. reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
  1267. }
  1268. flush_writes(ohci);
  1269. context_stop(&ctx->context);
  1270. return 0;
  1271. }
static void ohci_free_iso_context(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        unsigned long flags;
        int index;

        ohci_stop_iso(base);
        context_release(&ctx->context);
        free_page((unsigned long)ctx->header);

        spin_lock_irqsave(&ohci->lock, flags);

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                ohci->it_context_mask |= 1 << index;
        } else {
                index = ctx - ohci->ir_context_list;
                ohci->ir_context_mask |= 1 << index;
        }

        spin_unlock_irqrestore(&ohci->lock, flags);
}
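
/*
 * Build the descriptor program for one transmit packet: an immediate
 * descriptor carrying the isochronous packet header (unless the packet
 * is a skip), an optional descriptor for extra header data, and one
 * descriptor per payload page the packet touches.  The last descriptor
 * branches on and, if requested, raises an interrupt on completion.
 */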
static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
                        struct fw_iso_packet *packet,
                        struct fw_iso_buffer *buffer,
                        unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct descriptor *d, *last, *pd;
        struct fw_iso_packet *p;
        __le32 *header;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, payload_z, irq;
        u32 payload_index, payload_end_index, next_page_index;
        int page, end_page, i, length, offset;

        /*
         * FIXME: Cycle lost behavior should be configurable: lose
         * packet, retransmit or terminate..
         */

        p = packet;
        payload_index = payload;

        if (p->skip)
                z = 1;
        else
                z = 2;
        if (p->header_length > 0)
                z++;

        /* Determine the first page the payload isn't contained in. */
        end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
        if (p->payload_length > 0)
                payload_z = end_page - (payload_index >> PAGE_SHIFT);
        else
                payload_z = 0;

        z += payload_z;

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

        d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
        if (d == NULL)
                return -ENOMEM;

        if (!p->skip) {
                d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
                d[0].req_count = cpu_to_le16(8);

                header = (__le32 *) &d[1];
                header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
                                        IT_HEADER_TAG(p->tag) |
                                        IT_HEADER_TCODE(TCODE_STREAM_DATA) |
                                        IT_HEADER_CHANNEL(ctx->base.channel) |
                                        IT_HEADER_SPEED(ctx->base.speed));
                header[1] =
                        cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
                                                          p->payload_length));
        }

        if (p->header_length > 0) {
                d[2].req_count    = cpu_to_le16(p->header_length);
                d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
                memcpy(&d[z], p->header, p->header_length);
        }

        pd = d + z - payload_z;
        payload_end_index = payload_index + p->payload_length;
        for (i = 0; i < payload_z; i++) {
                page            = payload_index >> PAGE_SHIFT;
                offset          = payload_index & ~PAGE_MASK;
                next_page_index = (page + 1) << PAGE_SHIFT;
                length          =
                        min(next_page_index, payload_end_index) - payload_index;
                pd[i].req_count = cpu_to_le16(length);

                page_bus = page_private(buffer->pages[page]);
                pd[i].data_address = cpu_to_le32(page_bus + offset);

                payload_index += length;
        }

        if (p->interrupt)
                irq = DESCRIPTOR_IRQ_ALWAYS;
        else
                irq = DESCRIPTOR_NO_IRQ;

        last = z == 2 ? d : d + z - 1;
        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
                                     DESCRIPTOR_STATUS |
                                     DESCRIPTOR_BRANCH_ALWAYS |
                                     irq);

        context_append(&ctx->context, d, z, header_z);

        return 0;
}
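
/*
 * Queue one receive packet using the controller's dual-buffer mode:
 * the first buffer of each descriptor collects the isochronous packet
 * headers (plus the 4-byte status word the controller appends), while
 * the second buffer points into the caller's payload pages.  A skip
 * request is queued as a separate wait descriptor ahead of the data.
 */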
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
                                  struct fw_iso_packet *packet,
                                  struct fw_iso_buffer *buffer,
                                  unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct db_descriptor *db = NULL;
        struct descriptor *d;
        struct fw_iso_packet *p;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, length, rest;
        int page, offset, packet_count, header_size;

        /*
         * FIXME: Cycle lost behavior should be configurable: lose
         * packet, retransmit or terminate..
         */

        if (packet->skip) {
                d = context_get_descriptors(&ctx->context, 2, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                db = (struct db_descriptor *) d;
                db->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                          DESCRIPTOR_BRANCH_ALWAYS |
                                          DESCRIPTOR_WAIT);
                db->first_size = cpu_to_le16(ctx->base.header_size + 4);
                context_append(&ctx->context, d, 2, 0);
        }

        p = packet;
        z = 2;

        /*
         * The OHCI controller puts the status word in the header
         * buffer too, so we need 4 extra bytes per packet.
         */
        packet_count = p->header_length / ctx->base.header_size;
        header_size = packet_count * (ctx->base.header_size + 4);

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(header_size, sizeof(*d));
        page     = payload >> PAGE_SHIFT;
        offset   = payload & ~PAGE_MASK;
        rest     = p->payload_length;

        /* FIXME: OHCI 1.0 doesn't support dual buffer receive */
        /* FIXME: make packet-per-buffer/dual-buffer a context option */
        while (rest > 0) {
                d = context_get_descriptors(&ctx->context,
                                            z + header_z, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                db = (struct db_descriptor *) d;
                db->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                          DESCRIPTOR_BRANCH_ALWAYS);
                db->first_size = cpu_to_le16(ctx->base.header_size + 4);
                db->first_req_count = cpu_to_le16(header_size);
                db->first_res_count = db->first_req_count;
                db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

                if (offset + rest < PAGE_SIZE)
                        length = rest;
                else
                        length = PAGE_SIZE - offset;

                db->second_req_count = cpu_to_le16(length);
                db->second_res_count = db->second_req_count;
                page_bus = page_private(buffer->pages[page]);
                db->second_buffer = cpu_to_le32(page_bus + offset);

                if (p->interrupt && length == rest)
                        db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

                context_append(&ctx->context, d, z, header_z);
                offset = (offset + length) & ~PAGE_MASK;
                rest -= length;
                page++;
        }

        return 0;
}
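
/*
 * Dispatch queueing to the transmit path or, for receive contexts, to
 * the dual-buffer implementation available on OHCI 1.1 controllers.
 * A packet-per-buffer fallback for OHCI 1.0 receive is still missing.
 */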
static int
ohci_queue_iso(struct fw_iso_context *base,
               struct fw_iso_packet *packet,
               struct fw_iso_buffer *buffer,
               unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);

        if (base->type == FW_ISO_CONTEXT_TRANSMIT)
                return ohci_queue_iso_transmit(base, packet, buffer, payload);
        else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
                return ohci_queue_iso_receive_dualbuffer(base, packet,
                                                         buffer, payload);
        else
                /* FIXME: Implement fallback for OHCI 1.0 controllers. */
                return -EINVAL;
}

static const struct fw_card_driver ohci_driver = {
        .name                   = ohci_driver_name,
        .enable                 = ohci_enable,
        .update_phy_reg         = ohci_update_phy_reg,
        .set_config_rom         = ohci_set_config_rom,
        .send_request           = ohci_send_request,
        .send_response          = ohci_send_response,
        .cancel_packet          = ohci_cancel_packet,
        .enable_phys_dma        = ohci_enable_phys_dma,
        .get_bus_time           = ohci_get_bus_time,
        .allocate_iso_context   = ohci_allocate_iso_context,
        .free_iso_context       = ohci_free_iso_context,
        .queue_iso              = ohci_queue_iso,
        .start_iso              = ohci_start_iso,
        .stop_iso               = ohci_stop_iso,
};
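
/*
 * Bring up one controller: enable and map the PCI device, initialize
 * the asynchronous request/response DMA contexts, discover how many
 * isochronous contexts the chip implements, allocate the self-ID
 * buffer, and finally register the card with the firewire core.
 */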
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
        struct fw_ohci *ohci;
        u32 bus_options, max_receive, link_speed;
        u64 guid;
        int err;
        size_t size;

        ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
        if (ohci == NULL) {
                fw_error("Could not malloc fw_ohci data.\n");
                return -ENOMEM;
        }

        fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

        err = pci_enable_device(dev);
        if (err) {
                fw_error("Failed to enable OHCI hardware.\n");
                goto fail_put_card;
        }

        pci_set_master(dev);
        pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
        pci_set_drvdata(dev, ohci);

        spin_lock_init(&ohci->lock);

        tasklet_init(&ohci->bus_reset_tasklet,
                     bus_reset_tasklet, (unsigned long)ohci);

        err = pci_request_region(dev, 0, ohci_driver_name);
        if (err) {
                fw_error("MMIO resource unavailable\n");
                goto fail_disable;
        }

        ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
        if (ohci->registers == NULL) {
                fw_error("Failed to remap registers\n");
                err = -ENXIO;
                goto fail_iomem;
        }

        ar_context_init(&ohci->ar_request_ctx, ohci,
                        OHCI1394_AsReqRcvContextControlSet);

        ar_context_init(&ohci->ar_response_ctx, ohci,
                        OHCI1394_AsRspRcvContextControlSet);

        context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
                     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

        context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
                     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

        /*
         * Probe the number of implemented contexts: setting all bits in
         * an interrupt mask and reading it back leaves only the bits for
         * contexts the chip actually supports.  The IsoXmit mask covers
         * the transmit (IT) contexts, the IsoRecv mask the receive (IR)
         * contexts.
         */
        reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
        ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
        size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
        ohci->it_context_list = kzalloc(size, GFP_KERNEL);

        reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
        ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
        reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
        size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
        ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

        if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
                fw_error("Out of memory for it/ir contexts.\n");
                err = -ENOMEM;
                goto fail_registers;
        }

        /* self-id dma buffer allocation */
        ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
                                               SELF_ID_BUF_SIZE,
                                               &ohci->self_id_bus,
                                               GFP_KERNEL);
        if (ohci->self_id_cpu == NULL) {
                fw_error("Out of memory for self ID buffer.\n");
                err = -ENOMEM;
                goto fail_registers;
        }

        bus_options = reg_read(ohci, OHCI1394_BusOptions);
        max_receive = (bus_options >> 12) & 0xf;
        link_speed = bus_options & 0x7;
        guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
                reg_read(ohci, OHCI1394_GUIDLo);

        err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
        if (err < 0)
                goto fail_self_id;

        ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
        fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
                  dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);

        return 0;

 fail_self_id:
        dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                          ohci->self_id_cpu, ohci->self_id_bus);
 fail_registers:
        kfree(ohci->it_context_list);
        kfree(ohci->ir_context_list);
        pci_iounmap(dev, ohci->registers);
 fail_iomem:
        pci_release_region(dev, 0);
 fail_disable:
        pci_disable_device(dev);
 fail_put_card:
        fw_card_put(&ohci->card);

        return err;
}
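
/*
 * Undo pci_probe(): mask all interrupts, unregister the card from the
 * firewire core, reset the controller and release every resource the
 * probe routine acquired.
 */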
static void pci_remove(struct pci_dev *dev)
{
        struct fw_ohci *ohci;

        ohci = pci_get_drvdata(dev);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        flush_writes(ohci);
        fw_core_remove_card(&ohci->card);

        /*
         * FIXME: Fail all pending packets here, now that the upper
         * layers can't queue any more.
         */

        software_reset(ohci);
        free_irq(dev->irq, ohci);
        dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                          ohci->self_id_cpu, ohci->self_id_bus);
        kfree(ohci->it_context_list);
        kfree(ohci->ir_context_list);
        pci_iounmap(dev, ohci->registers);
        pci_release_region(dev, 0);
        pci_disable_device(dev);
        fw_card_put(&ohci->card);

        fw_notify("Removed fw-ohci device.\n");
}
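
/*
 * Power management: suspend quiesces the controller and saves PCI
 * state; resume restores PCI state and reinitializes the controller
 * through ohci_enable() with the cached config ROM.
 */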
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct fw_ohci *ohci = pci_get_drvdata(pdev);
        int err;

        software_reset(ohci);
        free_irq(pdev->irq, ohci);
        err = pci_save_state(pdev);
        if (err) {
                fw_error("pci_save_state failed\n");
                return err;
        }
        err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
        if (err)
                fw_error("pci_set_power_state failed with %d\n", err);

        return 0;
}

static int pci_resume(struct pci_dev *pdev)
{
        struct fw_ohci *ohci = pci_get_drvdata(pdev);
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        err = pci_enable_device(pdev);
        if (err) {
                fw_error("pci_enable_device failed\n");
                return err;
        }

        return ohci_enable(&ohci->card, ohci->config_rom, CONFIG_ROM_SIZE);
}
#endif
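
/* Bind to any PCI device advertising the standard OHCI 1394 class code. */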
static struct pci_device_id pci_table[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
        { }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
        .name           = ohci_driver_name,
        .id_table       = pci_table,
        .probe          = pci_probe,
        .remove         = pci_remove,
#ifdef CONFIG_PM
        .resume         = pci_resume,
        .suspend        = pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
        return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
        pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);