fw-ohci.c

/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "fw-transaction.h"
#include "fw-ohci.h"

#define DESCRIPTOR_OUTPUT_MORE          0
#define DESCRIPTOR_OUTPUT_LAST          (1 << 12)
#define DESCRIPTOR_INPUT_MORE           (2 << 12)
#define DESCRIPTOR_INPUT_LAST           (3 << 12)
#define DESCRIPTOR_STATUS               (1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE        (2 << 8)
#define DESCRIPTOR_PING                 (1 << 7)
#define DESCRIPTOR_YY                   (1 << 6)
#define DESCRIPTOR_NO_IRQ               (0 << 4)
#define DESCRIPTOR_IRQ_ERROR            (1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS           (3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS        (3 << 2)
#define DESCRIPTOR_WAIT                 (3 << 0)

struct descriptor {
        __le16 req_count;
        __le16 control;
        __le32 data_address;
        __le32 branch_address;
        __le16 res_count;
        __le16 transfer_status;
} __attribute__((aligned(16)));
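
/*
 * This mirrors the 16-byte DMA program descriptor format defined by the
 * OHCI 1394 specification; every field is little endian because the
 * controller fetches descriptors straight from host memory.  As a
 * sketch, the AR receive path below composes its control word like
 * this (see ar_context_add_page()):
 *
 *      d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
 *                               DESCRIPTOR_STATUS |
 *                               DESCRIPTOR_BRANCH_ALWAYS);
 */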

struct db_descriptor {
        __le16 first_size;
        __le16 control;
        __le16 second_req_count;
        __le16 first_req_count;
        __le32 branch_address;
        __le16 second_res_count;
        __le16 first_res_count;

        __le32 reserved0;
        __le32 first_buffer;
        __le32 second_buffer;
        __le32 reserved1;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)       (regs)
#define CONTROL_CLEAR(regs)     ((regs) + 4)
#define COMMAND_PTR(regs)       ((regs) + 12)
#define CONTEXT_MATCH(regs)     ((regs) + 16)

struct ar_buffer {
        struct descriptor descriptor;
        struct ar_buffer *next;
        __le32 data[0];
};

struct ar_context {
        struct fw_ohci *ohci;
        struct ar_buffer *current_buffer;
        struct ar_buffer *last_buffer;
        void *pointer;
        u32 regs;
        struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
                                     struct descriptor *d,
                                     struct descriptor *last);
struct context {
        struct fw_ohci *ohci;
        u32 regs;

        struct descriptor *buffer;
        dma_addr_t buffer_bus;
        size_t buffer_size;
        struct descriptor *head_descriptor;
        struct descriptor *tail_descriptor;
        struct descriptor *tail_descriptor_last;
        struct descriptor *prev_descriptor;

        descriptor_callback_t callback;

        struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

struct iso_context {
        struct fw_iso_context base;
        struct context context;
        void *header;
        size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
        struct fw_card card;

        u32 version;
        __iomem char *registers;
        dma_addr_t self_id_bus;
        __le32 *self_id_cpu;
        struct tasklet_struct bus_reset_tasklet;
        int node_id;
        int generation;
        int request_generation;
        u32 bus_seconds;

        /*
         * Spinlock for accessing fw_ohci data.  Never call out of
         * this driver with this lock held.
         */
        spinlock_t lock;
        u32 self_id_buffer[512];

        /* Config rom buffers */
        __be32 *config_rom;
        dma_addr_t config_rom_bus;
        __be32 *next_config_rom;
        dma_addr_t next_config_rom_bus;
        u32 next_header;

        struct ar_context ar_request_ctx;
        struct ar_context ar_response_ctx;
        struct context at_request_ctx;
        struct context at_response_ctx;

        u32 it_context_mask;
        struct iso_context *it_context_list;
        u32 ir_context_mask;
        struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
        return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE   0x80000000
#define IR_CONTEXT_BUFFER_FILL          0x80000000
#define IR_CONTEXT_ISOCH_HEADER         0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE   0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE   0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE     0x08000000

#define CONTEXT_RUN     0x8000
#define CONTEXT_WAKE    0x1000
#define CONTEXT_DEAD    0x0800
#define CONTEXT_ACTIVE  0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES     0x2
#define OHCI1394_MAX_AT_RESP_RETRIES    0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES  0x8

#define FW_OHCI_MAJOR                   240
#define OHCI1394_REGISTER_SIZE          0x800
#define OHCI_LOOP_COUNT                 500
#define OHCI1394_PCI_HCI_Control        0x40
#define SELF_ID_BUF_SIZE                0x800
#define OHCI_TCODE_PHY_PACKET           0x0e
#define OHCI_VERSION_1_1                0x010010
#define ISO_BUFFER_SIZE                 (64 * 1024)
#define AT_BUFFER_SIZE                  4096

static char ohci_driver_name[] = KBUILD_MODNAME;

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
        writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
        return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
        /* Do a dummy read to flush writes. */
        reg_read(ohci, OHCI1394_Version);
}

static int
ohci_update_phy_reg(struct fw_card *card, int addr,
                    int clear_bits, int set_bits)
{
        struct fw_ohci *ohci = fw_ohci(card);
        u32 val, old;

        reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
        msleep(2);
        val = reg_read(ohci, OHCI1394_PhyControl);
        if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
                fw_error("failed to set phy reg bits.\n");
                return -EBUSY;
        }

        old = OHCI1394_PhyControl_ReadData(val);
        old = (old & ~clear_bits) | set_bits;
        reg_write(ohci, OHCI1394_PhyControl,
                  OHCI1394_PhyControl_Write(addr, old));

        return 0;
}

static int ar_context_add_page(struct ar_context *ctx)
{
        struct device *dev = ctx->ohci->card.device;
        struct ar_buffer *ab;
        dma_addr_t ab_bus;
        size_t offset;

        ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
        if (ab == NULL)
                return -ENOMEM;

        ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(ab_bus)) {
                free_page((unsigned long) ab);
                return -ENOMEM;
        }

        memset(&ab->descriptor, 0, sizeof(ab->descriptor));
        ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
                                                    DESCRIPTOR_STATUS |
                                                    DESCRIPTOR_BRANCH_ALWAYS);
        offset = offsetof(struct ar_buffer, data);
        ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
        ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
        ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
        ab->descriptor.branch_address = 0;

        dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

        ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
        ctx->last_buffer->next = ab;
        ctx->last_buffer = ab;

        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
        flush_writes(ctx->ohci);

        return 0;
}
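
/*
 * The AR buffers form a singly linked list of pages, chained both in
 * the CPU's view (ab->next) and in the controller's view via the
 * descriptor branch_address words.  The low four bits of a branch
 * address carry the Z value (here 1, meaning one descriptor at the
 * target), so the program looks roughly like this:
 *
 *      page 0: [ descriptor | next | data ... ] --branch (bus | 1)-->
 *      page 1: [ descriptor | next | data ... ] --branch 0 (end)
 *
 * This is a sketch of the invariant ar_context_add_page() maintains,
 * not extra state kept anywhere in the driver.
 */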

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
        struct fw_ohci *ohci = ctx->ohci;
        struct fw_packet p;
        u32 status, length, tcode;

        p.header[0] = le32_to_cpu(buffer[0]);
        p.header[1] = le32_to_cpu(buffer[1]);
        p.header[2] = le32_to_cpu(buffer[2]);

        tcode = (p.header[0] >> 4) & 0x0f;
        switch (tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_READ_QUADLET_RESPONSE:
                p.header[3] = (__force __u32) buffer[3];
                p.header_length = 16;
                p.payload_length = 0;
                break;

        case TCODE_READ_BLOCK_REQUEST:
                p.header[3] = le32_to_cpu(buffer[3]);
                p.header_length = 16;
                p.payload_length = 0;
                break;

        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_BLOCK_RESPONSE:
        case TCODE_LOCK_REQUEST:
        case TCODE_LOCK_RESPONSE:
                p.header[3] = le32_to_cpu(buffer[3]);
                p.header_length = 16;
                p.payload_length = p.header[3] >> 16;
                break;

        case TCODE_WRITE_RESPONSE:
        case TCODE_READ_QUADLET_REQUEST:
        case OHCI_TCODE_PHY_PACKET:
                p.header_length = 12;
                p.payload_length = 0;
                break;
        }

        p.payload = (void *) buffer + p.header_length;

        /* FIXME: What to do about evt_* errors? */
        length = (p.header_length + p.payload_length + 3) / 4;
        status = le32_to_cpu(buffer[length]);

        p.ack        = ((status >> 16) & 0x1f) - 16;
        p.speed      = (status >> 21) & 0x7;
        p.timestamp  = status & 0xffff;
        p.generation = ohci->request_generation;

        /*
         * The OHCI bus reset handler synthesizes a phy packet with
         * the new generation number when a bus reset happens (see
         * section 8.4.2.3).  This helps us determine when a request
         * was received and make sure we send the response in the same
         * generation.  We only need this for requests; for responses
         * we use the unique tlabel for finding the matching
         * request.
         */
        if (p.ack + 16 == 0x09)
                ohci->request_generation = (buffer[2] >> 16) & 0xff;
        else if (ctx == &ohci->ar_request_ctx)
                fw_core_handle_request(&ohci->card, &p);
        else
                fw_core_handle_response(&ohci->card, &p);

        return buffer + length + 1;
}
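
/*
 * For reference when reading handle_ar_packet() above: the controller
 * appends one status quadlet after each received packet.  The code
 * decodes it as bits 0-15 = cycle timestamp, bits 16-20 = ack/event
 * code (biased by 16, hence the "- 16" above and the "+ 16" in the
 * synthesized-phy-packet check), and bits 21-23 = receive speed.
 */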

static void ar_context_tasklet(unsigned long data)
{
        struct ar_context *ctx = (struct ar_context *)data;
        struct fw_ohci *ohci = ctx->ohci;
        struct ar_buffer *ab;
        struct descriptor *d;
        void *buffer, *end;

        ab = ctx->current_buffer;
        d = &ab->descriptor;

        if (d->res_count == 0) {
                size_t size, rest, offset;

                /*
                 * This descriptor is finished and we may have a
                 * packet split across this and the next buffer. We
                 * reuse the page for reassembling the split packet.
                 */

                offset = offsetof(struct ar_buffer, data);
                dma_unmap_single(ohci->card.device,
                        le32_to_cpu(ab->descriptor.data_address) - offset,
                        PAGE_SIZE, DMA_BIDIRECTIONAL);

                buffer = ab;
                ab = ab->next;
                d = &ab->descriptor;
                size = buffer + PAGE_SIZE - ctx->pointer;
                rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
                memmove(buffer, ctx->pointer, size);
                memcpy(buffer + size, ab->data, rest);
                ctx->current_buffer = ab;
                ctx->pointer = (void *) ab->data + rest;
                end = buffer + size + rest;

                while (buffer < end)
                        buffer = handle_ar_packet(ctx, buffer);

                free_page((unsigned long)buffer);
                ar_context_add_page(ctx);
        } else {
                buffer = ctx->pointer;
                ctx->pointer = end =
                        (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

                while (buffer < end)
                        buffer = handle_ar_packet(ctx, buffer);
        }
}

static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
        struct ar_buffer ab;

        ctx->regs = regs;
        ctx->ohci = ohci;
        ctx->last_buffer = &ab;
        tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

        ar_context_add_page(ctx);
        ar_context_add_page(ctx);
        ctx->current_buffer = ab.next;
        ctx->pointer = ctx->current_buffer->data;

        return 0;
}

static void ar_context_run(struct ar_context *ctx)
{
        struct ar_buffer *ab = ctx->current_buffer;
        dma_addr_t ab_bus;
        size_t offset;

        offset = offsetof(struct ar_buffer, data);
        ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

        reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
        flush_writes(ctx->ohci);
}

static void context_tasklet(unsigned long data)
{
        struct context *ctx = (struct context *) data;
        struct fw_ohci *ohci = ctx->ohci;
        struct descriptor *d, *last;
        u32 address;
        int z;

        dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
                                ctx->buffer_size, DMA_TO_DEVICE);

        d = ctx->tail_descriptor;
        last = ctx->tail_descriptor_last;

        while (last->branch_address != 0) {
                address = le32_to_cpu(last->branch_address);
                z = address & 0xf;
                d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
                last = (z == 2) ? d : d + z - 1;

                if (!ctx->callback(ctx, d, last))
                        break;

                ctx->tail_descriptor = d;
                ctx->tail_descriptor_last = last;
        }
}

static int
context_init(struct context *ctx, struct fw_ohci *ohci,
             size_t buffer_size, u32 regs,
             descriptor_callback_t callback)
{
        ctx->ohci = ohci;
        ctx->regs = regs;
        ctx->buffer_size = buffer_size;
        ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
        if (ctx->buffer == NULL)
                return -ENOMEM;

        tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
        ctx->callback = callback;

        ctx->buffer_bus =
                dma_map_single(ohci->card.device, ctx->buffer,
                               buffer_size, DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->buffer_bus)) {
                kfree(ctx->buffer);
                return -ENOMEM;
        }

        ctx->head_descriptor      = ctx->buffer;
        ctx->prev_descriptor      = ctx->buffer;
        ctx->tail_descriptor      = ctx->buffer;
        ctx->tail_descriptor_last = ctx->buffer;

        /*
         * We put a dummy descriptor in the buffer that has a NULL
         * branch address and looks like it's been sent.  That way we
         * have a descriptor to append DMA programs to.  Also, the
         * ring buffer invariant is that it always has at least one
         * element so that head == tail means buffer full.
         */

        memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
        ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
        ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
        ctx->head_descriptor++;

        return 0;
}
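
/*
 * A minimal sketch of how the context API above is used by the AT and
 * isochronous paths below (all names are from this file):
 *
 *      struct descriptor *d;
 *      dma_addr_t d_bus;
 *
 *      d = context_get_descriptors(ctx, z, &d_bus);
 *      if (d == NULL)
 *              return -ENOMEM;         (ring full)
 *      ... fill in d[0..z-1], point data_address at the payload ...
 *      context_append(ctx, d, z, 0);
 *
 * context_tasklet() later walks the branch_address chain from the
 * tail and invokes ctx->callback once per completed program.
 */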

static void
context_release(struct context *ctx)
{
        struct fw_card *card = &ctx->ohci->card;

        dma_unmap_single(card->device, ctx->buffer_bus,
                         ctx->buffer_size, DMA_TO_DEVICE);
        kfree(ctx->buffer);
}

static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
        struct descriptor *d, *tail, *end;

        d = ctx->head_descriptor;
        tail = ctx->tail_descriptor;
        end = ctx->buffer + ctx->buffer_size / sizeof(*d);

        if (d + z <= tail) {
                goto has_space;
        } else if (d > tail && d + z <= end) {
                goto has_space;
        } else if (d > tail && ctx->buffer + z <= tail) {
                d = ctx->buffer;
                goto has_space;
        }

        return NULL;

 has_space:
        memset(d, 0, z * sizeof(*d));
        *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

        return d;
}

static void context_run(struct context *ctx, u32 extra)
{
        struct fw_ohci *ohci = ctx->ohci;

        reg_write(ohci, COMMAND_PTR(ctx->regs),
                  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
        reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
        reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
        flush_writes(ohci);
}

static void context_append(struct context *ctx,
                           struct descriptor *d, int z, int extra)
{
        dma_addr_t d_bus;

        d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

        ctx->head_descriptor = d + z + extra;
        ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
        ctx->prev_descriptor = z == 2 ? d : d + z - 1;

        dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
                                   ctx->buffer_size, DMA_TO_DEVICE);

        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
        flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
        u32 reg;
        int i;

        reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
        flush_writes(ctx->ohci);

        for (i = 0; i < 10; i++) {
                reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
                if ((reg & CONTEXT_ACTIVE) == 0)
                        break;

                fw_notify("context_stop: still active (0x%08x)\n", reg);
                msleep(1);
        }
}

struct driver_data {
        struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
{
        struct fw_ohci *ohci = ctx->ohci;
        dma_addr_t d_bus, payload_bus;
        struct driver_data *driver_data;
        struct descriptor *d, *last;
        __le32 *header;
        int z, tcode;
        u32 reg;

        d = context_get_descriptors(ctx, 4, &d_bus);
        if (d == NULL) {
                packet->ack = RCODE_SEND_ERROR;
                return -1;
        }

        d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
        d[0].res_count = cpu_to_le16(packet->timestamp);

        /*
         * The DMA format for asynchronous link packets is different
         * from the IEEE1394 layout, so shift the fields around
         * accordingly.  If header_length is 8, it's a PHY packet, to
         * which we need to prepend an extra quadlet.
         */
        header = (__le32 *) &d[1];
        if (packet->header_length > 8) {
                header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
                                        (packet->speed << 16));
                header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
                                        (packet->header[0] & 0xffff0000));
                header[2] = cpu_to_le32(packet->header[2]);

                tcode = (packet->header[0] >> 4) & 0x0f;
                if (TCODE_IS_BLOCK_PACKET(tcode))
                        header[3] = cpu_to_le32(packet->header[3]);
                else
                        header[3] = (__force __le32) packet->header[3];

                d[0].req_count = cpu_to_le16(packet->header_length);
        } else {
                header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
                                        (packet->speed << 16));
                header[1] = cpu_to_le32(packet->header[0]);
                header[2] = cpu_to_le32(packet->header[1]);
                d[0].req_count = cpu_to_le16(12);
        }

        driver_data = (struct driver_data *) &d[3];
        driver_data->packet = packet;
        packet->driver_data = driver_data;

        if (packet->payload_length > 0) {
                payload_bus =
                        dma_map_single(ohci->card.device, packet->payload,
                                       packet->payload_length, DMA_TO_DEVICE);
                if (dma_mapping_error(payload_bus)) {
                        packet->ack = RCODE_SEND_ERROR;
                        return -1;
                }

                d[2].req_count    = cpu_to_le16(packet->payload_length);
                d[2].data_address = cpu_to_le32(payload_bus);
                last = &d[2];
                z = 3;
        } else {
                last = &d[0];
                z = 2;
        }

        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
                                     DESCRIPTOR_IRQ_ALWAYS |
                                     DESCRIPTOR_BRANCH_ALWAYS);

        /* FIXME: Document how the locking works. */
        if (ohci->generation != packet->generation) {
                packet->ack = RCODE_GENERATION;
                return -1;
        }

        context_append(ctx, d, z, 4 - z);

        /* If the context isn't already running, start it up. */
        reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
        if ((reg & CONTEXT_RUN) == 0)
                context_run(ctx, 0);

        return 0;
}
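
/*
 * Layout of the four-descriptor block built above (a summary of the
 * code, not a separate structure):
 *
 *      d[0]  immediate-key OUTPUT descriptor, req_count = header size
 *      d[1]  the immediate data: up to 16 bytes of packet header
 *      d[2]  OUTPUT_LAST descriptor pointing at the mapped payload
 *            (only present when payload_length > 0, giving z = 3)
 *      d[3]  never fetched by the controller; reused as struct
 *            driver_data so handle_at_packet() can recover the
 *            fw_packet
 *
 * context_append() is called with extra = 4 - z so that the ring head
 * always advances past the whole block, keeping d[3] reserved.
 */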

static int handle_at_packet(struct context *context,
                            struct descriptor *d,
                            struct descriptor *last)
{
        struct driver_data *driver_data;
        struct fw_packet *packet;
        struct fw_ohci *ohci = context->ohci;
        dma_addr_t payload_bus;
        int evt;

        if (last->transfer_status == 0)
                /* This descriptor isn't done yet, stop iteration. */
                return 0;

        driver_data = (struct driver_data *) &d[3];
        packet = driver_data->packet;
        if (packet == NULL)
                /* This packet was cancelled, just continue. */
                return 1;

        payload_bus = le32_to_cpu(last->data_address);
        if (payload_bus != 0)
                dma_unmap_single(ohci->card.device, payload_bus,
                                 packet->payload_length, DMA_TO_DEVICE);

        evt = le16_to_cpu(last->transfer_status) & 0x1f;
        packet->timestamp = le16_to_cpu(last->res_count);

        switch (evt) {
        case OHCI1394_evt_timeout:
                /* Async response transmit timed out. */
                packet->ack = RCODE_CANCELLED;
                break;

        case OHCI1394_evt_flushed:
                /*
                 * A packet that was flushed should give the same
                 * error as one sent with a stale generation count.
                 */
                packet->ack = RCODE_GENERATION;
                break;

        case OHCI1394_evt_missing_ack:
                /*
                 * Using a valid (current) generation count, but the
                 * node is not on the bus or not sending acks.
                 */
                packet->ack = RCODE_NO_ACK;
                break;

        case ACK_COMPLETE + 0x10:
        case ACK_PENDING + 0x10:
        case ACK_BUSY_X + 0x10:
        case ACK_BUSY_A + 0x10:
        case ACK_BUSY_B + 0x10:
        case ACK_DATA_ERROR + 0x10:
        case ACK_TYPE_ERROR + 0x10:
                packet->ack = evt - 0x10;
                break;

        default:
                packet->ack = RCODE_SEND_ERROR;
                break;
        }

        packet->callback(packet, &ohci->card, packet->ack);

        return 1;
}

#define HEADER_GET_DESTINATION(q)       (((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)             (((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)       (((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)       (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)    (((q) >> 0) & 0xffff)

static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
        struct fw_packet response;
        int tcode, length, i;

        tcode = HEADER_GET_TCODE(packet->header[0]);
        if (TCODE_IS_BLOCK_PACKET(tcode))
                length = HEADER_GET_DATA_LENGTH(packet->header[3]);
        else
                length = 4;

        i = csr - CSR_CONFIG_ROM;
        if (i + length > CONFIG_ROM_SIZE) {
                fw_fill_response(&response, packet->header,
                                 RCODE_ADDRESS_ERROR, NULL, 0);
        } else if (!TCODE_IS_READ_REQUEST(tcode)) {
                fw_fill_response(&response, packet->header,
                                 RCODE_TYPE_ERROR, NULL, 0);
        } else {
                fw_fill_response(&response, packet->header, RCODE_COMPLETE,
                                 (void *) ohci->config_rom + i, length);
        }

        fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
        struct fw_packet response;
        int tcode, length, ext_tcode, sel;
        __be32 *payload, lock_old;
        u32 lock_arg, lock_data;

        tcode = HEADER_GET_TCODE(packet->header[0]);
        length = HEADER_GET_DATA_LENGTH(packet->header[3]);
        payload = packet->payload;
        ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

        if (tcode == TCODE_LOCK_REQUEST &&
            ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
                lock_arg = be32_to_cpu(payload[0]);
                lock_data = be32_to_cpu(payload[1]);
        } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
                lock_arg = 0;
                lock_data = 0;
        } else {
                fw_fill_response(&response, packet->header,
                                 RCODE_TYPE_ERROR, NULL, 0);
                goto out;
        }

        sel = (csr - CSR_BUS_MANAGER_ID) / 4;
        reg_write(ohci, OHCI1394_CSRData, lock_data);
        reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
        reg_write(ohci, OHCI1394_CSRControl, sel);

        if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
                lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
        else
                fw_notify("swap not done yet\n");

        fw_fill_response(&response, packet->header,
                         RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
        fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
{
        u64 offset;
        u32 csr;

        if (ctx == &ctx->ohci->at_request_ctx) {
                packet->ack = ACK_PENDING;
                packet->callback(packet, &ctx->ohci->card, packet->ack);
        }

        offset =
                ((unsigned long long)
                 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
                packet->header[2];
        csr = offset - CSR_REGISTER_BASE;

        /* Handle config rom reads. */
        if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
                handle_local_rom(ctx->ohci, packet, csr);
        else switch (csr) {
        case CSR_BUS_MANAGER_ID:
        case CSR_BANDWIDTH_AVAILABLE:
        case CSR_CHANNELS_AVAILABLE_HI:
        case CSR_CHANNELS_AVAILABLE_LO:
                handle_local_lock(ctx->ohci, packet, csr);
                break;
        default:
                if (ctx == &ctx->ohci->at_request_ctx)
                        fw_core_handle_request(&ctx->ohci->card, packet);
                else
                        fw_core_handle_response(&ctx->ohci->card, packet);
                break;
        }

        if (ctx == &ctx->ohci->at_response_ctx) {
                packet->ack = ACK_COMPLETE;
                packet->callback(packet, &ctx->ohci->card, packet->ack);
        }
}

static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&ctx->ohci->lock, flags);

        if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
            ctx->ohci->generation == packet->generation) {
                spin_unlock_irqrestore(&ctx->ohci->lock, flags);
                handle_local_request(ctx, packet);
                return;
        }

        retval = at_context_queue_packet(ctx, packet);
        spin_unlock_irqrestore(&ctx->ohci->lock, flags);

        if (retval < 0)
                packet->callback(packet, &ctx->ohci->card, packet->ack);
}

static void bus_reset_tasklet(unsigned long data)
{
        struct fw_ohci *ohci = (struct fw_ohci *)data;
        int self_id_count, i, j, reg;
        int generation, new_generation;
        unsigned long flags;

        reg = reg_read(ohci, OHCI1394_NodeID);
        if (!(reg & OHCI1394_NodeID_idValid)) {
                fw_error("node ID not valid, new bus reset in progress\n");
                return;
        }
        ohci->node_id = reg & 0xffff;

        /*
         * The count in the SelfIDCount register is the number of
         * bytes in the self ID receive buffer.  Since we also receive
         * the inverted quadlets and a header quadlet, we shift one
         * bit extra to get the actual number of self IDs.
         */
        self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
        generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

        for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
                if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
                        fw_error("inconsistent self IDs\n");
                ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
        }
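
        /*
         * Layout of the self ID DMA buffer consumed by the loop
         * above, for reference: quadlet 0 is a header carrying the
         * generation in bits 16-23, then each self ID arrives as a
         * pair of quadlets, the packet followed by its bit-inverted
         * copy:
         *
         *      self_id_cpu[0]  header (generation)
         *      self_id_cpu[1]  self ID 0
         *      self_id_cpu[2]  ~self ID 0
         *      self_id_cpu[3]  self ID 1
         *      ...
         */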

        /*
         * Check the consistency of the self IDs we just read.  The
         * problem we face is that a new bus reset can start while we
         * read out the self IDs from the DMA buffer.  If this happens,
         * the DMA buffer will be overwritten with new self IDs and we
         * will read out inconsistent data.  The OHCI specification
         * (section 11.2) recommends a technique similar to
         * linux/seqlock.h, where we remember the generation of the
         * self IDs in the buffer before reading them out and compare
         * it to the current generation after reading them out.  If
         * the two generations match we know we have a consistent set
         * of self IDs.
         */

        new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
        if (new_generation != generation) {
                fw_notify("recursive bus reset detected, "
                          "discarding self ids\n");
                return;
        }

        /* FIXME: Document how the locking works. */
        spin_lock_irqsave(&ohci->lock, flags);

        ohci->generation = generation;
        context_stop(&ohci->at_request_ctx);
        context_stop(&ohci->at_response_ctx);
        reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

        /*
         * This next bit is unrelated to the AT context stuff but we
         * have to do it under the spinlock also.  If a new config rom
         * was set up before this reset, the old one is now no longer
         * in use and we can free it.  Update the config rom pointers
         * to point to the current config rom and clear the
         * next_config_rom pointer so a new update can take place.
         */
        if (ohci->next_config_rom != NULL) {
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->config_rom, ohci->config_rom_bus);
                ohci->config_rom      = ohci->next_config_rom;
                ohci->config_rom_bus  = ohci->next_config_rom_bus;
                ohci->next_config_rom = NULL;

                /*
                 * Restore config_rom image and manually update
                 * config_rom registers.  Writing the header quadlet
                 * will indicate that the config rom is ready, so we
                 * do that last.
                 */
                reg_write(ohci, OHCI1394_BusOptions,
                          be32_to_cpu(ohci->config_rom[2]));
                ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
                reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
        }

        spin_unlock_irqrestore(&ohci->lock, flags);

        fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
                                 self_id_count, ohci->self_id_buffer);
}

static irqreturn_t irq_handler(int irq, void *data)
{
        struct fw_ohci *ohci = data;
        u32 event, iso_event, cycle_time;
        int i;

        event = reg_read(ohci, OHCI1394_IntEventClear);

        if (!event || !~event)
                return IRQ_NONE;

        reg_write(ohci, OHCI1394_IntEventClear, event);

        if (event & OHCI1394_selfIDComplete)
                tasklet_schedule(&ohci->bus_reset_tasklet);

        if (event & OHCI1394_RQPkt)
                tasklet_schedule(&ohci->ar_request_ctx.tasklet);

        if (event & OHCI1394_RSPkt)
                tasklet_schedule(&ohci->ar_response_ctx.tasklet);

        if (event & OHCI1394_reqTxComplete)
                tasklet_schedule(&ohci->at_request_ctx.tasklet);

        if (event & OHCI1394_respTxComplete)
                tasklet_schedule(&ohci->at_response_ctx.tasklet);

        iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
        reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

        while (iso_event) {
                i = ffs(iso_event) - 1;
                tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
                iso_event &= ~(1 << i);
        }

        iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
        reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

        while (iso_event) {
                i = ffs(iso_event) - 1;
                tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
                iso_event &= ~(1 << i);
        }

        if (event & OHCI1394_cycle64Seconds) {
                cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                if ((cycle_time & 0x80000000) == 0)
                        ohci->bus_seconds++;
        }

        return IRQ_HANDLED;
}

static int software_reset(struct fw_ohci *ohci)
{
        int i;

        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                if ((reg_read(ohci, OHCI1394_HCControlSet) &
                     OHCI1394_HCControl_softReset) == 0)
                        return 0;
                msleep(1);
        }

        return -EBUSY;
}

static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct pci_dev *dev = to_pci_dev(card->device);

        if (software_reset(ohci)) {
                fw_error("Failed to reset ohci card.\n");
                return -EBUSY;
        }

        /*
         * Now enable LPS, which we need in order to start accessing
         * most of the registers.  In fact, on some cards (ALI M5251),
         * accessing registers in the SClk domain without LPS enabled
         * will lock up the machine.  Wait 50msec to make sure we have
         * full link enabled.
         */
        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_LPS |
                  OHCI1394_HCControl_postedWriteEnable);
        flush_writes(ohci);
        msleep(50);

        reg_write(ohci, OHCI1394_HCControlClear,
                  OHCI1394_HCControl_noByteSwapData);

        reg_write(ohci, OHCI1394_LinkControlSet,
                  OHCI1394_LinkControl_rcvSelfID |
                  OHCI1394_LinkControl_cycleTimerEnable |
                  OHCI1394_LinkControl_cycleMaster);

        reg_write(ohci, OHCI1394_ATRetries,
                  OHCI1394_MAX_AT_REQ_RETRIES |
                  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
                  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

        ar_context_run(&ohci->ar_request_ctx);
        ar_context_run(&ohci->ar_response_ctx);

        reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
        reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
        reg_write(ohci, OHCI1394_IntEventClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskSet,
                  OHCI1394_selfIDComplete |
                  OHCI1394_RQPkt | OHCI1394_RSPkt |
                  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
                  OHCI1394_isochRx | OHCI1394_isochTx |
                  OHCI1394_masterIntEnable |
                  OHCI1394_cycle64Seconds);

        /* Activate link_on bit and contender bit in our self ID packets. */
        if (ohci_update_phy_reg(card, 4, 0,
                                PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
                return -EIO;

        /*
         * When the link is not yet enabled, the atomic config rom
         * update mechanism described below in ohci_set_config_rom()
         * is not active.  We have to update ConfigRomHeader and
         * BusOptions manually, and the write to ConfigROMmap takes
         * effect immediately.  We tie this to the enabling of the
         * link, so we have a valid config rom before enabling - the
         * OHCI requires that ConfigROMhdr and BusOptions have valid
         * values before enabling.
         *
         * However, when the ConfigROMmap is written, some controllers
         * always read back quadlets 0 and 2 from the config rom to
         * the ConfigRomHeader and BusOptions registers on bus reset.
         * They shouldn't do that in this initial case where the link
         * isn't enabled.  This means we have to use the same
         * workaround here, setting the bus header to 0 and then write
         * the right values in the bus reset tasklet.
         */
        ohci->next_config_rom =
                dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   &ohci->next_config_rom_bus, GFP_KERNEL);
        if (ohci->next_config_rom == NULL)
                return -ENOMEM;

        memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
        fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);

        ohci->next_header = config_rom[0];
        ohci->next_config_rom[0] = 0;
        reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
        reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
        reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

        reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

        if (request_irq(dev->irq, irq_handler,
                        IRQF_SHARED, ohci_driver_name, ohci)) {
                fw_error("Failed to allocate shared interrupt %d.\n",
                         dev->irq);
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->config_rom, ohci->config_rom_bus);
                return -EIO;
        }

        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_linkEnable |
                  OHCI1394_HCControl_BIBimageValid);
        flush_writes(ohci);

        /*
         * We are ready to go, initiate bus reset to finish the
         * initialization.
         */
        fw_core_initiate_bus_reset(&ohci->card, 1);

        return 0;
}

static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
        struct fw_ohci *ohci;
        unsigned long flags;
        int retval = 0;
        __be32 *next_config_rom;
        dma_addr_t next_config_rom_bus;

        ohci = fw_ohci(card);

        /*
         * When the OHCI controller is enabled, the config rom update
         * mechanism is a bit tricky, but easy enough to use.  See
         * section 5.5.6 in the OHCI specification.
         *
         * The OHCI controller caches the new config rom address in a
         * shadow register (ConfigROMmapNext) and needs a bus reset
         * for the changes to take place.  When the bus reset is
         * detected, the controller loads the new values for the
         * ConfigRomHeader and BusOptions registers from the specified
         * config rom and loads ConfigROMmap from the ConfigROMmapNext
         * shadow register.  All automatically and atomically.
         *
         * Now, there's a twist to this story.  The automatic load of
         * ConfigRomHeader and BusOptions doesn't honor the
         * noByteSwapData bit, so with a be32 config rom, the
         * controller will load be32 values in to these registers
         * during the atomic update, even on little endian
         * architectures.  The workaround we use is to put a 0 in the
         * header quadlet; 0 is endian agnostic and means that the
         * config rom isn't ready yet.  In the bus reset tasklet we
         * then set up the real values for the two registers.
         *
         * We use ohci->lock to avoid racing with the code that sets
         * ohci->next_config_rom to NULL (see bus_reset_tasklet).
         */

        next_config_rom =
                dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   &next_config_rom_bus, GFP_KERNEL);
        if (next_config_rom == NULL)
                return -ENOMEM;

        spin_lock_irqsave(&ohci->lock, flags);

        if (ohci->next_config_rom == NULL) {
                ohci->next_config_rom = next_config_rom;
                ohci->next_config_rom_bus = next_config_rom_bus;

                memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
                fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
                                  length * 4);

                ohci->next_header = config_rom[0];
                ohci->next_config_rom[0] = 0;

                reg_write(ohci, OHCI1394_ConfigROMmap,
                          ohci->next_config_rom_bus);
        } else {
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  next_config_rom, next_config_rom_bus);
                retval = -EBUSY;
        }

        spin_unlock_irqrestore(&ohci->lock, flags);

        /*
         * Now initiate a bus reset to have the changes take
         * effect.  We clean up the old config rom memory and DMA
         * mappings in the bus reset tasklet, since the OHCI
         * controller could need to access it before the bus reset
         * takes effect.
         */
        if (retval == 0)
                fw_core_initiate_bus_reset(&ohci->card, 1);

        return retval;
}
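
/*
 * Timeline of the config rom update implemented above, as a sketch:
 *
 *      ohci_set_config_rom()     stage the new image in
 *                                next_config_rom, write ConfigROMmap,
 *                                initiate a bus reset
 *      (controller, on reset)    atomically loads ConfigROMmap from
 *                                the shadow register
 *      bus_reset_tasklet()       frees the old image, promotes
 *                                next_config_rom to config_rom and
 *                                writes the real ConfigROMhdr and
 *                                BusOptions values
 *
 * A second update attempted while next_config_rom is still pending
 * fails with -EBUSY rather than queueing.
 */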

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);

        at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);

        at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct context *ctx = &ohci->at_request_ctx;
        struct driver_data *driver_data = packet->driver_data;
        int retval = -ENOENT;

        tasklet_disable(&ctx->tasklet);

        if (packet->ack != 0)
                goto out;

        driver_data->packet = NULL;
        packet->ack = RCODE_CANCELLED;
        packet->callback(packet, &ohci->card, packet->ack);
        retval = 0;

 out:
        tasklet_enable(&ctx->tasklet);

        return retval;
}

static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
        struct fw_ohci *ohci = fw_ohci(card);
        unsigned long flags;
        int n, retval = 0;

        /*
         * FIXME:  Make sure this bitmask is cleared when we clear the busReset
         * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
         */

        spin_lock_irqsave(&ohci->lock, flags);

        if (ohci->generation != generation) {
                retval = -ESTALE;
                goto out;
        }

        /*
         * Note, if the node ID contains a non-local bus ID, physical DMA is
         * enabled for _all_ nodes on remote buses.
         */
        n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
        if (n < 32)
                reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
        else
                reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

        flush_writes(ohci);
 out:
        spin_unlock_irqrestore(&ohci->lock, flags);

        return retval;
}
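
/*
 * Worked example for the filter index above: a local node ID such as
 * 0xffc5 (bus 0x3ff, physical ID 5) selects n = 5 and sets bit 5 of
 * PhyReqFilterLo, while any node ID on a remote bus falls through to
 * n = 63, covering requests from all remote-bus nodes per the note
 * above.
 */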

static u64
ohci_get_bus_time(struct fw_card *card)
{
        struct fw_ohci *ohci = fw_ohci(card);
        u32 cycle_time;
        u64 bus_time;

        cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
        bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

        return bus_time;
}
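
/*
 * Per the OHCI specification, the 32-bit IsochronousCycleTimer
 * register packs three fields: cycleSeconds in bits 31-25, cycleCount
 * in bits 24-12 and cycleOffset in bits 11-0.  ohci_get_bus_time()
 * extends it to 64 bits with the software seconds counter maintained
 * by the cycle64Seconds interrupt above:
 *
 *      bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
 *
 * so the result stays monotonic as long as interrupts are serviced at
 * least once per 64-second period.
 */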

static int handle_ir_dualbuffer_packet(struct context *context,
                                       struct descriptor *d,
                                       struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);
        struct db_descriptor *db = (struct db_descriptor *) d;
        __le32 *ir_header;
        size_t header_length;
        void *p, *end;
        int i;

        if (db->first_res_count > 0 && db->second_res_count > 0)
                /* This descriptor isn't done yet, stop iteration. */
                return 0;

        header_length = le16_to_cpu(db->first_req_count) -
                le16_to_cpu(db->first_res_count);

        i = ctx->header_length;
        p = db + 1;
        end = p + header_length;
        while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
                /*
                 * The iso header is byteswapped to little endian by
                 * the controller, but the remaining header quadlets
                 * are big endian.  We want to present all the headers
                 * as big endian, so we have to swap the first
                 * quadlet.
                 */
                *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
                memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
                i += ctx->base.header_size;
                p += ctx->base.header_size + 4;
        }

        ctx->header_length = i;

        if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
                ir_header = (__le32 *) (db + 1);
                ctx->base.callback(&ctx->base,
                                   le32_to_cpu(ir_header[0]) & 0xffff,
                                   ctx->header_length, ctx->header,
                                   ctx->base.callback_data);
                ctx->header_length = 0;
        }

        return 1;
}
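
/*
 * In dual-buffer receive mode the controller splits each packet: the
 * leading header bytes land in the first buffer, directly after the
 * db_descriptor itself, and the payload goes to the second buffer.
 * The loop above therefore consumes the stored headers in strides of
 * header_size + 4 bytes per packet, skipping one quadlet and
 * byte-swapping the next, so the client sees every header quadlet in
 * big-endian order.
 */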

static int handle_it_packet(struct context *context,
                            struct descriptor *d,
                            struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);

        if (last->transfer_status == 0)
                /* This descriptor isn't done yet, stop iteration. */
                return 0;

        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
                ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
                                   0, NULL, ctx->base.callback_data);

        return 1;
}

static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct iso_context *ctx, *list;
        descriptor_callback_t callback;
        u32 *mask, regs;
        unsigned long flags;
        int index, retval = -ENOMEM;

        if (type == FW_ISO_CONTEXT_TRANSMIT) {
                mask = &ohci->it_context_mask;
                list = ohci->it_context_list;
                callback = handle_it_packet;
        } else {
                mask = &ohci->ir_context_mask;
                list = ohci->ir_context_list;
                callback = handle_ir_dualbuffer_packet;
        }

        /* FIXME: We need a fallback for pre 1.1 OHCI. */
        if (callback == handle_ir_dualbuffer_packet &&
            ohci->version < OHCI_VERSION_1_1)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&ohci->lock, flags);
        index = ffs(*mask) - 1;
        if (index >= 0)
                *mask &= ~(1 << index);
        spin_unlock_irqrestore(&ohci->lock, flags);

        if (index < 0)
                return ERR_PTR(-EBUSY);

        if (type == FW_ISO_CONTEXT_TRANSMIT)
                regs = OHCI1394_IsoXmitContextBase(index);
        else
                regs = OHCI1394_IsoRcvContextBase(index);

        ctx = &list[index];
        memset(ctx, 0, sizeof(*ctx));
        ctx->header_length = 0;
        ctx->header = (void *) __get_free_page(GFP_KERNEL);
        if (ctx->header == NULL)
                goto out;

        retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
                              regs, callback);
        if (retval < 0)
                goto out_with_header;

        return &ctx->base;

 out_with_header:
        free_page((unsigned long)ctx->header);
 out:
        spin_lock_irqsave(&ohci->lock, flags);
        *mask |= 1 << index;
        spin_unlock_irqrestore(&ohci->lock, flags);

        return ERR_PTR(retval);
}

static int ohci_start_iso(struct fw_iso_context *base,
                          s32 cycle, u32 sync, u32 tags)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct fw_ohci *ohci = ctx->context.ohci;
        u32 control, match;
        int index;

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                match = 0;
                if (cycle >= 0)
                        match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
                                (cycle & 0x7fff) << 16;

                reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
                reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
                context_run(&ctx->context, match);
        } else {
                index = ctx - ohci->ir_context_list;
                control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
                match = (tags << 28) | (sync << 8) | ctx->base.channel;
                if (cycle >= 0) {
                        match |= (cycle & 0x07fff) << 12;
                        control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
                }

                reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
                reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
                reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
                context_run(&ctx->context, control);
        }

        return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        int index;

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
        } else {
                index = ctx - ohci->ir_context_list;
                reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
        }
        flush_writes(ohci);
        context_stop(&ctx->context);

        return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        unsigned long flags;
        int index;

        ohci_stop_iso(base);
        context_release(&ctx->context);
        free_page((unsigned long)ctx->header);

        spin_lock_irqsave(&ohci->lock, flags);

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                ohci->it_context_mask |= 1 << index;
        } else {
                index = ctx - ohci->ir_context_list;
                ohci->ir_context_mask |= 1 << index;
        }

        spin_unlock_irqrestore(&ohci->lock, flags);
}
static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
                        struct fw_iso_packet *packet,
                        struct fw_iso_buffer *buffer,
                        unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct descriptor *d, *last, *pd;
        struct fw_iso_packet *p;
        __le32 *header;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, payload_z, irq;
        u32 payload_index, payload_end_index, next_page_index;
        int page, end_page, i, length, offset;

        /*
         * FIXME: Cycle lost behavior should be configurable: lose
         * packet, retransmit or terminate..
         */

        p = packet;
        payload_index = payload;

        if (p->skip)
                z = 1;
        else
                z = 2;
        if (p->header_length > 0)
                z++;

        /* Determine the first page the payload isn't contained in. */
        end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
        if (p->payload_length > 0)
                payload_z = end_page - (payload_index >> PAGE_SHIFT);
        else
                payload_z = 0;

        z += payload_z;

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

        d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
        if (d == NULL)
                return -ENOMEM;

        if (!p->skip) {
                d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
                d[0].req_count = cpu_to_le16(8);

                header = (__le32 *) &d[1];
                header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
                                        IT_HEADER_TAG(p->tag) |
                                        IT_HEADER_TCODE(TCODE_STREAM_DATA) |
                                        IT_HEADER_CHANNEL(ctx->base.channel) |
                                        IT_HEADER_SPEED(ctx->base.speed));
                header[1] =
                        cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
                                                          p->payload_length));
        }

        if (p->header_length > 0) {
                d[2].req_count = cpu_to_le16(p->header_length);
                d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
                memcpy(&d[z], p->header, p->header_length);
        }

        pd = d + z - payload_z;
        payload_end_index = payload_index + p->payload_length;
        for (i = 0; i < payload_z; i++) {
                page = payload_index >> PAGE_SHIFT;
                offset = payload_index & ~PAGE_MASK;
                next_page_index = (page + 1) << PAGE_SHIFT;
                length = min(next_page_index, payload_end_index) - payload_index;
                pd[i].req_count = cpu_to_le16(length);

                page_bus = page_private(buffer->pages[page]);
                pd[i].data_address = cpu_to_le32(page_bus + offset);

                payload_index += length;
        }

        if (p->interrupt)
                irq = DESCRIPTOR_IRQ_ALWAYS;
        else
                irq = DESCRIPTOR_NO_IRQ;

        last = z == 2 ? d : d + z - 1;
        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
                                     DESCRIPTOR_STATUS |
                                     DESCRIPTOR_BRANCH_ALWAYS |
                                     irq);

        context_append(&ctx->context, d, z, header_z);

        return 0;
}
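
/*
 * Queue a receive buffer chunk using dual-buffer descriptors (OHCI
 * 1.1).  Each descriptor directs the per-packet headers (plus the 4
 * byte status word the controller appends per packet) into the first
 * buffer and the payload into the second buffer; the payload is split
 * at page boundaries of the client's buffer.
 */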
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
                                  struct fw_iso_packet *packet,
                                  struct fw_iso_buffer *buffer,
                                  unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct db_descriptor *db = NULL;
        struct descriptor *d;
        struct fw_iso_packet *p;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, length, rest;
        int page, offset, packet_count, header_size;

        /*
         * FIXME: Cycle lost behavior should be configurable: lose
         * packet, retransmit or terminate..
         */

        if (packet->skip) {
                d = context_get_descriptors(&ctx->context, 2, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                db = (struct db_descriptor *) d;
                db->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                          DESCRIPTOR_BRANCH_ALWAYS |
                                          DESCRIPTOR_WAIT);
                db->first_size = cpu_to_le16(ctx->base.header_size + 4);
                context_append(&ctx->context, d, 2, 0);
        }

        p = packet;
        z = 2;

        /*
         * The OHCI controller puts the status word in the header
         * buffer too, so we need 4 extra bytes per packet.
         */
        packet_count = p->header_length / ctx->base.header_size;
        header_size = packet_count * (ctx->base.header_size + 4);

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(header_size, sizeof(*d));
        page     = payload >> PAGE_SHIFT;
        offset   = payload & ~PAGE_MASK;
        rest     = p->payload_length;

        /* FIXME: OHCI 1.0 doesn't support dual buffer receive */
        /* FIXME: make packet-per-buffer/dual-buffer a context option */
        while (rest > 0) {
                d = context_get_descriptors(&ctx->context,
                                            z + header_z, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                db = (struct db_descriptor *) d;
                db->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                          DESCRIPTOR_BRANCH_ALWAYS);
                db->first_size = cpu_to_le16(ctx->base.header_size + 4);
                db->first_req_count = cpu_to_le16(header_size);
                db->first_res_count = db->first_req_count;
                db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

                if (offset + rest < PAGE_SIZE)
                        length = rest;
                else
                        length = PAGE_SIZE - offset;

                db->second_req_count = cpu_to_le16(length);
                db->second_res_count = db->second_req_count;
                page_bus = page_private(buffer->pages[page]);
                db->second_buffer = cpu_to_le32(page_bus + offset);

                if (p->interrupt && length == rest)
                        db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

                context_append(&ctx->context, d, z, header_z);
                offset = (offset + length) & ~PAGE_MASK;
                rest -= length;
                page++;
        }

        return 0;
}
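
/*
 * Dispatch to the transmit or receive queueing routine.  Receive is
 * currently implemented only with dual-buffer descriptors, which
 * requires an OHCI 1.1 controller.
 */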
static int
ohci_queue_iso(struct fw_iso_context *base,
               struct fw_iso_packet *packet,
               struct fw_iso_buffer *buffer,
               unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);

        if (base->type == FW_ISO_CONTEXT_TRANSMIT)
                return ohci_queue_iso_transmit(base, packet, buffer, payload);
        else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
                return ohci_queue_iso_receive_dualbuffer(base, packet,
                                                         buffer, payload);
        else
                /* FIXME: Implement fallback for OHCI 1.0 controllers. */
                return -EINVAL;
}
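
/* The fw_card_driver hooks through which the core drives this controller. */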
static const struct fw_card_driver ohci_driver = {
        .name                   = ohci_driver_name,
        .enable                 = ohci_enable,
        .update_phy_reg         = ohci_update_phy_reg,
        .set_config_rom         = ohci_set_config_rom,
        .send_request           = ohci_send_request,
        .send_response          = ohci_send_response,
        .cancel_packet          = ohci_cancel_packet,
        .enable_phys_dma        = ohci_enable_phys_dma,
        .get_bus_time           = ohci_get_bus_time,
        .allocate_iso_context   = ohci_allocate_iso_context,
        .free_iso_context       = ohci_free_iso_context,
        .queue_iso              = ohci_queue_iso,
        .start_iso              = ohci_start_iso,
        .stop_iso               = ohci_stop_iso,
};
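
/*
 * Set up a newly probed controller: map its registers, initialize the
 * AR/AT DMA contexts, probe how many isochronous contexts it
 * implements, allocate the self ID buffer and register the card with
 * the core.
 */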
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
        struct fw_ohci *ohci;
        u32 bus_options, max_receive, link_speed;
        u64 guid;
        int err;
        size_t size;

        ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
        if (ohci == NULL) {
                fw_error("Could not malloc fw_ohci data.\n");
                return -ENOMEM;
        }

        fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

        err = pci_enable_device(dev);
        if (err) {
                fw_error("Failed to enable OHCI hardware.\n");
                goto fail_put_card;
        }

        pci_set_master(dev);
        pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
        pci_set_drvdata(dev, ohci);

        spin_lock_init(&ohci->lock);

        tasklet_init(&ohci->bus_reset_tasklet,
                     bus_reset_tasklet, (unsigned long)ohci);

        err = pci_request_region(dev, 0, ohci_driver_name);
        if (err) {
                fw_error("MMIO resource unavailable\n");
                goto fail_disable;
        }

        ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
        if (ohci->registers == NULL) {
                fw_error("Failed to remap registers\n");
                err = -ENXIO;
                goto fail_iomem;
        }

        ar_context_init(&ohci->ar_request_ctx, ohci,
                        OHCI1394_AsReqRcvContextControlSet);

        ar_context_init(&ohci->ar_response_ctx, ohci,
                        OHCI1394_AsRspRcvContextControlSet);

        context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
                     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

        context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
                     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

        /*
         * Probe how many isochronous contexts the controller implements
         * by setting all mask bits and reading back which ones stick.
         * IT contexts are advertised through the IsoXmit interrupt mask,
         * IR contexts through the IsoRecv interrupt mask.
         */
        reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
        ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
        size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
        ohci->it_context_list = kzalloc(size, GFP_KERNEL);

        reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
        ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
        reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
        size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
        ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

        if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
                fw_error("Out of memory for it/ir contexts.\n");
                err = -ENOMEM;
                goto fail_registers;
        }

        /* self-id dma buffer allocation */
        ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
                                               SELF_ID_BUF_SIZE,
                                               &ohci->self_id_bus,
                                               GFP_KERNEL);
        if (ohci->self_id_cpu == NULL) {
                fw_error("Out of memory for self ID buffer.\n");
                err = -ENOMEM;
                goto fail_registers;
        }

        bus_options = reg_read(ohci, OHCI1394_BusOptions);
        max_receive = (bus_options >> 12) & 0xf;
        link_speed = bus_options & 0x7;
        guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
                reg_read(ohci, OHCI1394_GUIDLo);

        err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
        if (err < 0)
                goto fail_self_id;

        ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
        fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
                  dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);

        return 0;

 fail_self_id:
        dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                          ohci->self_id_cpu, ohci->self_id_bus);
 fail_registers:
        kfree(ohci->it_context_list);
        kfree(ohci->ir_context_list);
        pci_iounmap(dev, ohci->registers);
 fail_iomem:
        pci_release_region(dev, 0);
 fail_disable:
        pci_disable_device(dev);
 fail_put_card:
        fw_card_put(&ohci->card);

        return err;
}
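
/*
 * Undo pci_probe(): unregister the card, reset the controller and
 * release all resources.  The interrupt mask is cleared first so no
 * further interrupts are delivered while the card is being removed.
 */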
static void pci_remove(struct pci_dev *dev)
{
        struct fw_ohci *ohci;

        ohci = pci_get_drvdata(dev);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        flush_writes(ohci);
        fw_core_remove_card(&ohci->card);

        /*
         * FIXME: Fail all pending packets here, now that the upper
         * layers can't queue any more.
         */

        software_reset(ohci);
        free_irq(dev->irq, ohci);
        dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                          ohci->self_id_cpu, ohci->self_id_bus);
        kfree(ohci->it_context_list);
        kfree(ohci->ir_context_list);
        pci_iounmap(dev, ohci->registers);
        pci_release_region(dev, 0);
        pci_disable_device(dev);
        fw_card_put(&ohci->card);

        fw_notify("Removed fw-ohci device.\n");
}
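
/*
 * Power management: suspend resets the controller and releases its
 * IRQ before saving PCI state; resume restores PCI state, re-enables
 * the device and brings the controller back up via ohci_enable() with
 * the saved config ROM.
 */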
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct fw_ohci *ohci = pci_get_drvdata(pdev);
        int err;

        software_reset(ohci);
        free_irq(pdev->irq, ohci);
        err = pci_save_state(pdev);
        if (err) {
                fw_error("pci_save_state failed\n");
                return err;
        }
        err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
        if (err) {
                fw_error("pci_set_power_state failed\n");
                return err;
        }

        return 0;
}

static int pci_resume(struct pci_dev *pdev)
{
        struct fw_ohci *ohci = pci_get_drvdata(pdev);
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        err = pci_enable_device(pdev);
        if (err) {
                fw_error("pci_enable_device failed\n");
                return err;
        }

        return ohci_enable(&ohci->card, ohci->config_rom, CONFIG_ROM_SIZE);
}
#endif

static struct pci_device_id pci_table[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
        { }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
        .name           = ohci_driver_name,
        .id_table       = pci_table,
        .probe          = pci_probe,
        .remove         = pci_remove,
#ifdef CONFIG_PM
        .resume         = pci_resume,
        .suspend        = pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
        return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
        pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);