ohci.c

/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
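
/*
 * Per-context register offsets, relative to a DMA context's register
 * base (per OHCI 1.1: ContextControlSet at +0, ContextControlClear at
 * +4, CommandPtr at +12, and ContextMatch at +16 for isochronous
 * receive contexts).
 */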
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program. It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;

	/*
	 * Spinlock for accessing fw_ohci data. Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u64 ir_context_channels;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id_cpu;
	dma_addr_t self_id_bus;
	struct tasklet_struct bus_reset_tasklet;

	u32 self_id_buffer[512];
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009

#define QUIRK_CYCLE_TIMER	1
#define QUIRK_RESET_PACKET	2
#define QUIRK_BE_HEADERS	4
#define QUIRK_NO_1394A		8

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
							QUIRK_RESET_PACKET |
							QUIRK_NO_1394A},
	{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
  221. ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
  222. ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
  223. ")");
  224. #define OHCI_PARAM_DEBUG_AT_AR 1
  225. #define OHCI_PARAM_DEBUG_SELFIDS 2
  226. #define OHCI_PARAM_DEBUG_IRQS 4
  227. #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
  228. #ifdef CONFIG_FIREWIRE_OHCI_DEBUG
  229. static int param_debug;
  230. module_param_named(debug, param_debug, int, 0644);
  231. MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
  232. ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
  233. ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
  234. ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
  235. ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
  236. ", or a combination, or all = -1)");
  237. static void log_irqs(u32 evt)
  238. {
  239. if (likely(!(param_debug &
  240. (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
  241. return;
  242. if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
  243. !(evt & OHCI1394_busReset))
  244. return;
  245. fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
  246. evt & OHCI1394_selfIDComplete ? " selfID" : "",
  247. evt & OHCI1394_RQPkt ? " AR_req" : "",
  248. evt & OHCI1394_RSPkt ? " AR_resp" : "",
  249. evt & OHCI1394_reqTxComplete ? " AT_req" : "",
  250. evt & OHCI1394_respTxComplete ? " AT_resp" : "",
  251. evt & OHCI1394_isochRx ? " IR" : "",
  252. evt & OHCI1394_isochTx ? " IT" : "",
  253. evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
  254. evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
  255. evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
  256. evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
  257. evt & OHCI1394_busReset ? " busReset" : "",
  258. evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
  259. OHCI1394_RSPkt | OHCI1394_reqTxComplete |
  260. OHCI1394_respTxComplete | OHCI1394_isochRx |
  261. OHCI1394_isochTx | OHCI1394_postedWriteErr |
  262. OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
  263. OHCI1394_regAccessFail | OHCI1394_busReset)
  264. ? " ?" : "");
  265. }
  266. static const char *speed[] = {
  267. [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
  268. };
  269. static const char *power[] = {
  270. [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
  271. [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
  272. };
  273. static const char port[] = { '.', '-', 'p', 'c', };
  274. static char _p(u32 *s, int shift)
  275. {
  276. return port[*s >> shift & 3];
  277. }
  278. static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
  279. {
  280. if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
  281. return;
  282. fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
  283. self_id_count, generation, node_id);
  284. for (; self_id_count--; ++s)
  285. if ((*s & 1 << 23) == 0)
  286. fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
  287. "%s gc=%d %s %s%s%s\n",
  288. *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
  289. speed[*s >> 14 & 3], *s >> 16 & 63,
  290. power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
  291. *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
  292. else
  293. fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
  294. *s, *s >> 24 & 63,
  295. _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
  296. _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
  297. }
  298. static const char *evts[] = {
  299. [0x00] = "evt_no_status", [0x01] = "-reserved-",
  300. [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
  301. [0x04] = "evt_underrun", [0x05] = "evt_overrun",
  302. [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
  303. [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
  304. [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
  305. [0x0c] = "-reserved-", [0x0d] = "-reserved-",
  306. [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
  307. [0x10] = "-reserved-", [0x11] = "ack_complete",
  308. [0x12] = "ack_pending ", [0x13] = "-reserved-",
  309. [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
  310. [0x16] = "ack_busy_B", [0x17] = "-reserved-",
  311. [0x18] = "-reserved-", [0x19] = "-reserved-",
  312. [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
  313. [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
  314. [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
  315. [0x20] = "pending/cancelled",
  316. };
  317. static const char *tcodes[] = {
  318. [0x0] = "QW req", [0x1] = "BW req",
  319. [0x2] = "W resp", [0x3] = "-reserved-",
  320. [0x4] = "QR req", [0x5] = "BR req",
  321. [0x6] = "QR resp", [0x7] = "BR resp",
  322. [0x8] = "cycle start", [0x9] = "Lk req",
  323. [0xa] = "async stream packet", [0xb] = "Lk resp",
  324. [0xc] = "-reserved-", [0xd] = "-reserved-",
  325. [0xe] = "link internal", [0xf] = "-reserved-",
  326. };
  327. static const char *phys[] = {
  328. [0x0] = "phy config packet", [0x1] = "link-on packet",
  329. [0x2] = "self-id packet", [0x3] = "-reserved-",
  330. };
  331. static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
  332. {
  333. int tcode = header[0] >> 4 & 0xf;
  334. char specific[12];
  335. if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
  336. return;
  337. if (unlikely(evt >= ARRAY_SIZE(evts)))
  338. evt = 0x1f;
  339. if (evt == OHCI1394_evt_bus_reset) {
  340. fw_notify("A%c evt_bus_reset, generation %d\n",
  341. dir, (header[2] >> 16) & 0xff);
  342. return;
  343. }
  344. if (header[0] == ~header[1]) {
  345. fw_notify("A%c %s, %s, %08x\n",
  346. dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
  347. return;
  348. }
  349. switch (tcode) {
  350. case 0x0: case 0x6: case 0x8:
  351. snprintf(specific, sizeof(specific), " = %08x",
  352. be32_to_cpu((__force __be32)header[3]));
  353. break;
  354. case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
  355. snprintf(specific, sizeof(specific), " %x,%x",
  356. header[3] >> 16, header[3] & 0xffff);
  357. break;
  358. default:
  359. specific[0] = '\0';
  360. }
  361. switch (tcode) {
  362. case 0xe: case 0xa:
  363. fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
  364. break;
  365. case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
  366. fw_notify("A%c spd %x tl %02x, "
  367. "%04x -> %04x, %s, "
  368. "%s, %04x%08x%s\n",
  369. dir, speed, header[0] >> 10 & 0x3f,
  370. header[1] >> 16, header[0] >> 16, evts[evt],
  371. tcodes[tcode], header[1] & 0xffff, header[2], specific);
  372. break;
  373. default:
  374. fw_notify("A%c spd %x tl %02x, "
  375. "%04x -> %04x, %s, "
  376. "%s%s\n",
  377. dir, speed, header[0] >> 10 & 0x3f,
  378. header[1] >> 16, header[0] >> 16, evts[evt],
  379. tcodes[tcode], specific);
  380. }
  381. }
  382. #else
  383. #define param_debug 0
  384. static inline void log_irqs(u32 evt) {}
  385. static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
  386. static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
  387. #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
  388. static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
  389. {
  390. writel(data, ohci->registers + offset);
  391. }
  392. static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
  393. {
  394. return readl(ohci->registers + offset);
  395. }
  396. static inline void flush_writes(const struct fw_ohci *ohci)
  397. {
  398. /* Do a dummy read to flush writes. */
  399. reg_read(ohci, OHCI1394_Version);
  400. }
  401. static int read_phy_reg(struct fw_ohci *ohci, int addr)
  402. {
  403. u32 val;
  404. int i;
  405. reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
  406. for (i = 0; i < 10; i++) {
  407. val = reg_read(ohci, OHCI1394_PhyControl);
  408. if (val & OHCI1394_PhyControl_ReadDone)
  409. return OHCI1394_PhyControl_ReadData(val);
  410. msleep(1);
  411. }
  412. fw_error("failed to read phy reg\n");
  413. return -EBUSY;
  414. }
  415. static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
  416. {
  417. int i;
  418. reg_write(ohci, OHCI1394_PhyControl,
  419. OHCI1394_PhyControl_Write(addr, val));
  420. for (i = 0; i < 100; i++) {
  421. val = reg_read(ohci, OHCI1394_PhyControl);
  422. if (!(val & OHCI1394_PhyControl_WritePending))
  423. return 0;
  424. msleep(1);
  425. }
  426. fw_error("failed to write phy reg\n");
  427. return -EBUSY;
  428. }
  429. static int ohci_update_phy_reg(struct fw_card *card, int addr,
  430. int clear_bits, int set_bits)
  431. {
  432. struct fw_ohci *ohci = fw_ohci(card);
  433. int ret;
  434. ret = read_phy_reg(ohci, addr);
  435. if (ret < 0)
  436. return ret;
  437. /*
  438. * The interrupt status bits are cleared by writing a one bit.
  439. * Avoid clearing them unless explicitly requested in set_bits.
  440. */
  441. if (addr == 5)
  442. clear_bits |= PHY_INT_STATUS_BITS;
  443. return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
  444. }
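
/*
 * Paged PHY registers (an IEEE 1394a feature): the page is selected
 * through PHY register 7 before the paged register can be read. The
 * PHY_PAGE_SELECT field evidently occupies bits 5-7 of register 7,
 * hence the "page << 5" below.
 */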
static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t uninitialized_var(ab_bus);
	size_t offset;

	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab->next = NULL;
	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					     DESCRIPTOR_STATUS |
					     DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;
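
	/*
	 * Link the new buffer into the DMA chain. The low four bits of
	 * a branch address hold the Z value, i.e. the number of
	 * descriptors at the branch target (one here, hence "| 1");
	 * context_tasklet() decodes it with "address & 0xf".
	 */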
	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

static void ar_context_release(struct ar_context *ctx)
{
	struct ar_buffer *ab, *ab_next;
	size_t offset;
	dma_addr_t ab_bus;

	for (ab = ctx->current_buffer; ab; ab = ab_next) {
		ab_next = ab->next;
		offset = offsetof(struct ar_buffer, data);
		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  ab, ab_bus);
	}
}
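
/*
 * Controllers with QUIRK_BE_HEADERS (the Apple UniNorth FireWire cell,
 * see ohci_quirks[]) deliver received headers and self IDs in
 * big-endian byte order, so the usual little-endian conversion must be
 * skipped on those machines.
 */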
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		/* FIXME: Stop context, discard everything, and restart? */
		p.header_length = 0;
		p.payload_length = 0;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3). This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation. We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation. We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;
		dma_addr_t start_bus;
		void *start;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet.
		 */
		offset = offsetof(struct ar_buffer, data);
		start = buffer = ab;
		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		dma_free_coherent(ohci->card.device, PAGE_SIZE,
				  start, start_bus);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}
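
/*
 * Note: ar_context_init() hangs its first pages off a dummy ar_buffer
 * that lives on the stack. The first ar_context_add_page() call writes
 * a branch address into the dummy (which is simply discarded); only the
 * dummy's ->next pointer is read back afterwards, so it is safe for the
 * dummy to go out of scope when the function returns.
 */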
static int ar_context_init(struct ar_context *ctx,
			   struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}

static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}
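
/*
 * The branch address of a z-descriptor block normally lives in the
 * block's last descriptor (d + z - 1). The exception is a
 * two-descriptor block whose first descriptor either branches
 * unconditionally (b == 3) or carries an immediate key (key == 2);
 * in those cases the controller takes the branch from the first
 * descriptor.
 */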
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;

		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context. Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program. This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent. That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
				  desc->buffer_bus -
				  ((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}
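
/*
 * Typical use of the two helpers above (a sketch of what, e.g.,
 * at_context_queue_packet() below does while holding ohci->lock):
 *
 *	d = context_get_descriptors(ctx, z, &d_bus);
 *	if (d == NULL)
 *		return -ENOMEM;
 *	... fill in d[0] .. d[z - 1] ...
 *	context_append(ctx, d, z, 0);
 *
 * context_append() links the new block into the running DMA program
 * via the previous descriptor's branch address and pokes CONTEXT_WAKE
 * so the controller picks it up.
 */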

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}

struct driver_data {
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly. If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */
	header = (__le32 *) &d[1];
	switch (packet->header_length) {
	case 16:
	case 12:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case 8:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
		break;

	case 4:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(ohci->card.device, payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}
		packet->payload_bus = payload_bus;
		packet->payload_mapped = true;

		d[2].req_count = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/*
	 * If the controller and packet generations don't match, we need to
	 * bail out and try again. If IntEvent.busReset is set, the AT context
	 * is halted, so appending to the context and trying to run it is
	 * futile. Most controllers do the right thing and just flush the AT
	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
	 * up stalling out. So we just bail out in software and try again
	 * later, and everyone is happy.
	 * FIXME: Document how the locking works.
	 */
	if (ohci->generation != packet->generation ||
	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}
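
	/*
	 * A full block of 4 descriptors was reserved above; passing
	 * 4 - z as "extra" lets context_append() mark the unused tail
	 * of the block as consumed as well.
	 */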
	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A packet that was flushed should give the same error
		 * as one sent with a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;
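
	/*
	 * OHCI maps IEEE 1394 ack codes to event codes by adding 0x10
	 * (compare the 0x11 "ack_complete" .. 0x1e "ack_type_error"
	 * entries in evts[] above), hence the + 0x10 in these cases.
	 */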
	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
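
	/*
	 * Hand the compare-swap to the controller's CSR unit: write the
	 * new data and the compare value, then select the CSR register
	 * via OHCI1394_CSRControl. The loop below polls bit 31 of that
	 * register, which is set once the atomic operation has
	 * completed and CSRData holds the old value.
	 */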
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	fw_error("swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	fw_core_handle_response(&ohci->card, &response);
}
  1052. static void handle_local_request(struct context *ctx, struct fw_packet *packet)
  1053. {
  1054. u64 offset, csr;
  1055. if (ctx == &ctx->ohci->at_request_ctx) {
  1056. packet->ack = ACK_PENDING;
  1057. packet->callback(packet, &ctx->ohci->card, packet->ack);
  1058. }
  1059. offset =
  1060. ((unsigned long long)
  1061. HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
  1062. packet->header[2];
  1063. csr = offset - CSR_REGISTER_BASE;
  1064. /* Handle config rom reads. */
  1065. if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
  1066. handle_local_rom(ctx->ohci, packet, csr);
  1067. else switch (csr) {
  1068. case CSR_BUS_MANAGER_ID:
  1069. case CSR_BANDWIDTH_AVAILABLE:
  1070. case CSR_CHANNELS_AVAILABLE_HI:
  1071. case CSR_CHANNELS_AVAILABLE_LO:
  1072. handle_local_lock(ctx->ohci, packet, csr);
  1073. break;
  1074. default:
  1075. if (ctx == &ctx->ohci->at_request_ctx)
  1076. fw_core_handle_request(&ctx->ohci->card, packet);
  1077. else
  1078. fw_core_handle_response(&ctx->ohci->card, packet);
  1079. break;
  1080. }
  1081. if (ctx == &ctx->ohci->at_response_ctx) {
  1082. packet->ack = ACK_COMPLETE;
  1083. packet->callback(packet, &ctx->ohci->card, packet->ack);
  1084. }
  1085. }
  1086. static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
  1087. {
  1088. unsigned long flags;
  1089. int ret;
  1090. spin_lock_irqsave(&ctx->ohci->lock, flags);
  1091. if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
  1092. ctx->ohci->generation == packet->generation) {
  1093. spin_unlock_irqrestore(&ctx->ohci->lock, flags);
  1094. handle_local_request(ctx, packet);
  1095. return;
  1096. }
  1097. ret = at_context_queue_packet(ctx, packet);
  1098. spin_unlock_irqrestore(&ctx->ohci->lock, flags);
  1099. if (ret < 0)
  1100. packet->callback(packet, &ctx->ohci->card, packet->ack);
  1101. }
  1102. static void bus_reset_tasklet(unsigned long data)
  1103. {
  1104. struct fw_ohci *ohci = (struct fw_ohci *)data;
  1105. int self_id_count, i, j, reg;
  1106. int generation, new_generation;
  1107. unsigned long flags;
  1108. void *free_rom = NULL;
  1109. dma_addr_t free_rom_bus = 0;
  1110. reg = reg_read(ohci, OHCI1394_NodeID);
  1111. if (!(reg & OHCI1394_NodeID_idValid)) {
  1112. fw_notify("node ID not valid, new bus reset in progress\n");
  1113. return;
  1114. }
  1115. if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
  1116. fw_notify("malconfigured bus\n");
  1117. return;
  1118. }
  1119. ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
  1120. OHCI1394_NodeID_nodeNumber);
  1121. reg = reg_read(ohci, OHCI1394_SelfIDCount);
  1122. if (reg & OHCI1394_SelfIDCount_selfIDError) {
  1123. fw_notify("inconsistent self IDs\n");
  1124. return;
  1125. }
  1126. /*
  1127. * The count in the SelfIDCount register is the number of
  1128. * bytes in the self ID receive buffer. Since we also receive
  1129. * the inverted quadlets and a header quadlet, we shift one
  1130. * bit extra to get the actual number of self IDs.
  1131. */
  1132. self_id_count = (reg >> 3) & 0xff;
  1133. if (self_id_count == 0 || self_id_count > 252) {
  1134. fw_notify("inconsistent self IDs\n");
  1135. return;
  1136. }
  1137. generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
  1138. rmb();
  1139. for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
  1140. if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
  1141. fw_notify("inconsistent self IDs\n");
  1142. return;
  1143. }
  1144. ohci->self_id_buffer[j] =
  1145. cond_le32_to_cpu(ohci->self_id_cpu[i]);
  1146. }
  1147. rmb();
        /*
         * Check the consistency of the self IDs we just read.  The
         * problem we face is that a new bus reset can start while we
         * read out the self IDs from the DMA buffer.  If this happens,
         * the DMA buffer will be overwritten with new self IDs and we
         * will read out inconsistent data.  The OHCI specification
         * (section 11.2) recommends a technique similar to
         * linux/seqlock.h, where we remember the generation of the
         * self IDs in the buffer before reading them out and compare
         * it to the current generation after reading them out.  If
         * the two generations match we know we have a consistent set
         * of self IDs.
         */
        new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
        if (new_generation != generation) {
                fw_notify("recursive bus reset detected, "
                          "discarding self ids\n");
                return;
        }

        /* FIXME: Document how the locking works. */
        spin_lock_irqsave(&ohci->lock, flags);

        ohci->generation = generation;
        context_stop(&ohci->at_request_ctx);
        context_stop(&ohci->at_response_ctx);
        reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

        if (ohci->quirks & QUIRK_RESET_PACKET)
                ohci->request_generation = generation;

        /*
         * This next bit is unrelated to the AT context stuff but we
         * have to do it under the spinlock also.  If a new config rom
         * was set up before this reset, the old one is now no longer
         * in use and we can free it.  Update the config rom pointers
         * to point to the current config rom and clear the
         * next_config_rom pointer so a new update can take place.
         */
        if (ohci->next_config_rom != NULL) {
                if (ohci->next_config_rom != ohci->config_rom) {
                        free_rom     = ohci->config_rom;
                        free_rom_bus = ohci->config_rom_bus;
                }
                ohci->config_rom      = ohci->next_config_rom;
                ohci->config_rom_bus  = ohci->next_config_rom_bus;
                ohci->next_config_rom = NULL;

                /*
                 * Restore config_rom image and manually update
                 * config_rom registers.  Writing the header quadlet
                 * will indicate that the config rom is ready, so we
                 * do that last.
                 */
                reg_write(ohci, OHCI1394_BusOptions,
                          be32_to_cpu(ohci->config_rom[2]));
                ohci->config_rom[0] = ohci->next_header;
                reg_write(ohci, OHCI1394_ConfigROMhdr,
                          be32_to_cpu(ohci->next_header));
        }

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
        reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
        reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

        spin_unlock_irqrestore(&ohci->lock, flags);

        if (free_rom)
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  free_rom, free_rom_bus);

        log_selfids(ohci->node_id, generation,
                    self_id_count, ohci->self_id_buffer);

        fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
                                 self_id_count, ohci->self_id_buffer);
}

static irqreturn_t irq_handler(int irq, void *data)
{
        struct fw_ohci *ohci = data;
        u32 event, iso_event;
        int i;

        event = reg_read(ohci, OHCI1394_IntEventClear);
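        /*
         * !event: the interrupt was not ours (shared line);
         * !~event: a read of all ones, i.e. the card is gone.
         */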
        if (!event || !~event)
                return IRQ_NONE;

        /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
        reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
        log_irqs(event);

        if (event & OHCI1394_selfIDComplete)
                tasklet_schedule(&ohci->bus_reset_tasklet);

        if (event & OHCI1394_RQPkt)
                tasklet_schedule(&ohci->ar_request_ctx.tasklet);

        if (event & OHCI1394_RSPkt)
                tasklet_schedule(&ohci->ar_response_ctx.tasklet);

        if (event & OHCI1394_reqTxComplete)
                tasklet_schedule(&ohci->at_request_ctx.tasklet);

        if (event & OHCI1394_respTxComplete)
                tasklet_schedule(&ohci->at_response_ctx.tasklet);

        iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
        reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

        while (iso_event) {
                i = ffs(iso_event) - 1;
                tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
                iso_event &= ~(1 << i);
        }

        iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
        reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

        while (iso_event) {
                i = ffs(iso_event) - 1;
                tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
                iso_event &= ~(1 << i);
        }

        if (unlikely(event & OHCI1394_regAccessFail))
                fw_error("Register access failure - "
                         "please notify linux1394-devel@lists.sf.net\n");

        if (unlikely(event & OHCI1394_postedWriteErr))
                fw_error("PCI posted write error\n");

        if (unlikely(event & OHCI1394_cycleTooLong)) {
                if (printk_ratelimit())
                        fw_notify("isochronous cycle too long\n");
                reg_write(ohci, OHCI1394_LinkControlSet,
                          OHCI1394_LinkControl_cycleMaster);
        }

        if (unlikely(event & OHCI1394_cycleInconsistent)) {
                /*
                 * We need to clear this event bit in order to make
                 * cycleMatch isochronous I/O work.  In theory we should
                 * stop active cycleMatch iso contexts now and restart
                 * them at least two cycles later.  (FIXME?)
                 */
                if (printk_ratelimit())
                        fw_notify("isochronous cycle inconsistent\n");
        }

        return IRQ_HANDLED;
}

static int software_reset(struct fw_ohci *ohci)
{
        int i;

        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
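        /* The softReset bit self-clears once the reset has completed. */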
        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                if ((reg_read(ohci, OHCI1394_HCControlSet) &
                     OHCI1394_HCControl_softReset) == 0)
                        return 0;
                msleep(1);
        }

        return -EBUSY;
}

static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
        size_t size = length * 4;

        memcpy(dest, src, size);
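        /* Zero-pad the remainder of the buffer up to CONFIG_ROM_SIZE. */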
        if (size < CONFIG_ROM_SIZE)
                memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}

static int configure_1394a_enhancements(struct fw_ohci *ohci)
{
        bool enable_1394a;
        int ret, clear, set, offset;

        /* Check if the driver should configure link and PHY. */
        if (!(reg_read(ohci, OHCI1394_HCControlSet) &
              OHCI1394_HCControl_programPhyEnable))
                return 0;

        /* Paranoia: check whether the PHY supports 1394a, too. */
        enable_1394a = false;
        ret = read_phy_reg(ohci, 2);
        if (ret < 0)
                return ret;
        if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
                ret = read_paged_phy_reg(ohci, 1, 8);
                if (ret < 0)
                        return ret;
                if (ret >= 1)
                        enable_1394a = true;
        }

        if (ohci->quirks & QUIRK_NO_1394A)
                enable_1394a = false;

        /* Configure PHY and link consistently. */
        if (enable_1394a) {
                clear = 0;
                set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
        } else {
                clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
                set = 0;
        }
        ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
        if (ret < 0)
                return ret;

        if (enable_1394a)
                offset = OHCI1394_HCControlSet;
        else
                offset = OHCI1394_HCControlClear;
        reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);

        /* Clean up: configuration has been taken care of. */
        reg_write(ohci, OHCI1394_HCControlClear,
                  OHCI1394_HCControl_programPhyEnable);

        return 0;
}

static int ohci_enable(struct fw_card *card,
                       const __be32 *config_rom, size_t length)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct pci_dev *dev = to_pci_dev(card->device);
        u32 lps;
        int i, ret;

        if (software_reset(ohci)) {
                fw_error("Failed to reset ohci card.\n");
                return -EBUSY;
        }

        /*
         * Now enable LPS, which we need in order to start accessing
         * most of the registers.  In fact, on some cards (ALI M5251),
         * accessing registers in the SClk domain without LPS enabled
         * will lock up the machine.  Wait 50msec to make sure we have
         * full link enabled.  However, with some cards (well, at least
         * a JMicron PCIe card), we have to try again sometimes.
         */
        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_LPS |
                  OHCI1394_HCControl_postedWriteEnable);
        flush_writes(ohci);

        for (lps = 0, i = 0; !lps && i < 3; i++) {
                msleep(50);
                lps = reg_read(ohci, OHCI1394_HCControlSet) &
                      OHCI1394_HCControl_LPS;
        }

        if (!lps) {
                fw_error("Failed to set Link Power Status\n");
                return -EIO;
        }

        reg_write(ohci, OHCI1394_HCControlClear,
                  OHCI1394_HCControl_noByteSwapData);

        reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
        reg_write(ohci, OHCI1394_LinkControlClear,
                  OHCI1394_LinkControl_rcvPhyPkt);
        reg_write(ohci, OHCI1394_LinkControlSet,
                  OHCI1394_LinkControl_rcvSelfID |
                  OHCI1394_LinkControl_cycleTimerEnable |
                  OHCI1394_LinkControl_cycleMaster);

        reg_write(ohci, OHCI1394_ATRetries,
                  OHCI1394_MAX_AT_REQ_RETRIES |
                  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
                  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

        ar_context_run(&ohci->ar_request_ctx);
        ar_context_run(&ohci->ar_response_ctx);

        reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
        reg_write(ohci, OHCI1394_IntEventClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskSet,
                  OHCI1394_selfIDComplete |
                  OHCI1394_RQPkt | OHCI1394_RSPkt |
                  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
                  OHCI1394_isochRx | OHCI1394_isochTx |
                  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
                  OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
                  OHCI1394_masterIntEnable);
        if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
                reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

        ret = configure_1394a_enhancements(ohci);
        if (ret < 0)
                return ret;

        /* Activate link_on bit and contender bit in our self ID packets. */
        ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
        if (ret < 0)
                return ret;

        /*
         * When the link is not yet enabled, the atomic config rom
         * update mechanism described below in ohci_set_config_rom()
         * is not active.  We have to update ConfigRomHeader and
         * BusOptions manually, and the write to ConfigROMmap takes
         * effect immediately.  We tie this to the enabling of the
         * link, so we have a valid config rom before enabling - the
         * OHCI requires that ConfigROMhdr and BusOptions have valid
         * values before enabling.
         *
         * However, when the ConfigROMmap is written, some controllers
         * always read back quadlets 0 and 2 from the config rom to
         * the ConfigRomHeader and BusOptions registers on bus reset.
         * They shouldn't do that in this initial case where the link
         * isn't enabled.  This means we have to use the same
         * workaround here, setting the bus header to 0 and then write
         * the right values in the bus reset tasklet.
         */
        if (config_rom) {
                ohci->next_config_rom =
                        dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                           &ohci->next_config_rom_bus,
                                           GFP_KERNEL);
                if (ohci->next_config_rom == NULL)
                        return -ENOMEM;

                copy_config_rom(ohci->next_config_rom, config_rom, length);
        } else {
                /*
                 * In the suspend case, config_rom is NULL, which
                 * means that we just reuse the old config rom.
                 */
                ohci->next_config_rom = ohci->config_rom;
                ohci->next_config_rom_bus = ohci->config_rom_bus;
        }

        ohci->next_header = ohci->next_config_rom[0];
        ohci->next_config_rom[0] = 0;
        reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
        reg_write(ohci, OHCI1394_BusOptions,
                  be32_to_cpu(ohci->next_config_rom[2]));
        reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

        reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

        if (request_irq(dev->irq, irq_handler,
                        IRQF_SHARED, ohci_driver_name, ohci)) {
                fw_error("Failed to allocate shared interrupt %d.\n",
                         dev->irq);
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->config_rom, ohci->config_rom_bus);
                return -EIO;
        }

        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_linkEnable |
                  OHCI1394_HCControl_BIBimageValid);
        flush_writes(ohci);

        /*
         * We are ready to go, initiate bus reset to finish the
         * initialization.
         */
        fw_core_initiate_bus_reset(&ohci->card, 1);

        return 0;
}

static int ohci_set_config_rom(struct fw_card *card,
                               const __be32 *config_rom, size_t length)
{
        struct fw_ohci *ohci;
        unsigned long flags;
        int ret = -EBUSY;
        __be32 *next_config_rom;
        dma_addr_t uninitialized_var(next_config_rom_bus);

        ohci = fw_ohci(card);
        /*
         * When the OHCI controller is enabled, the config rom update
         * mechanism is a bit tricky, but easy enough to use.  See
         * section 5.5.6 in the OHCI specification.
         *
         * The OHCI controller caches the new config rom address in a
         * shadow register (ConfigROMmapNext) and needs a bus reset
         * for the changes to take place.  When the bus reset is
         * detected, the controller loads the new values for the
         * ConfigRomHeader and BusOptions registers from the specified
         * config rom and loads ConfigROMmap from the ConfigROMmapNext
         * shadow register.  All automatically and atomically.
         *
         * Now, there's a twist to this story.  The automatic load of
         * ConfigRomHeader and BusOptions doesn't honor the
         * noByteSwapData bit, so with a be32 config rom, the
         * controller will load be32 values in to these registers
         * during the atomic update, even on little endian
         * architectures.  The workaround we use is to put a 0 in the
         * header quadlet; 0 is endian agnostic and means that the
         * config rom isn't ready yet.  In the bus reset tasklet we
         * then set up the real values for the two registers.
         *
         * We use ohci->lock to avoid racing with the code that sets
         * ohci->next_config_rom to NULL (see bus_reset_tasklet).
         */
        next_config_rom =
                dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   &next_config_rom_bus, GFP_KERNEL);
        if (next_config_rom == NULL)
                return -ENOMEM;

        spin_lock_irqsave(&ohci->lock, flags);
        if (ohci->next_config_rom == NULL) {
                ohci->next_config_rom = next_config_rom;
                ohci->next_config_rom_bus = next_config_rom_bus;

                copy_config_rom(ohci->next_config_rom, config_rom, length);

                ohci->next_header = config_rom[0];
                ohci->next_config_rom[0] = 0;

                reg_write(ohci, OHCI1394_ConfigROMmap,
                          ohci->next_config_rom_bus);
                ret = 0;
        }
        spin_unlock_irqrestore(&ohci->lock, flags);

        /*
         * Now initiate a bus reset to have the changes take
         * effect.  We clean up the old config rom memory and DMA
         * mappings in the bus reset tasklet, since the OHCI
         * controller could need to access it before the bus reset
         * takes effect.
         */
        if (ret == 0)
                fw_core_initiate_bus_reset(&ohci->card, 1);
        else
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  next_config_rom, next_config_rom_bus);

        return ret;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);

        at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);

        at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct context *ctx = &ohci->at_request_ctx;
        struct driver_data *driver_data = packet->driver_data;
        int ret = -ENOENT;

        tasklet_disable(&ctx->tasklet);

        if (packet->ack != 0)
                goto out;

        if (packet->payload_mapped)
                dma_unmap_single(ohci->card.device, packet->payload_bus,
                                 packet->payload_length, DMA_TO_DEVICE);

        log_ar_at_event('T', packet->speed, packet->header, 0x20);
        driver_data->packet = NULL;
        packet->ack = RCODE_CANCELLED;
        packet->callback(packet, &ohci->card, packet->ack);
        ret = 0;
 out:
        tasklet_enable(&ctx->tasklet);

        return ret;
}

static int ohci_enable_phys_dma(struct fw_card *card,
                                int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
        return 0;
#else
        struct fw_ohci *ohci = fw_ohci(card);
        unsigned long flags;
        int n, ret = 0;

        /*
         * FIXME:  Make sure this bitmask is cleared when we clear the busReset
         * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
         */
        spin_lock_irqsave(&ohci->lock, flags);

        if (ohci->generation != generation) {
                ret = -ESTALE;
                goto out;
        }

        /*
         * Note, if the node ID contains a non-local bus ID, physical DMA is
         * enabled for _all_ nodes on remote buses.
         */
        n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
        if (n < 32)
                reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
        else
                reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

        flush_writes(ohci);
 out:
        spin_unlock_irqrestore(&ohci->lock, flags);

        return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
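
/*
 * IsochronousCycleTimer layout: cycleSeconds in bits 31-25, cycleCount
 * (0..7999) in bits 24-12, cycleOffset (0..3071, in 24.576 MHz ticks)
 * in bits 11-0.  cycle_timer_ticks() flattens this into a plain tick
 * count: 3072 ticks per cycle, 8000 cycles per second.
 */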
static u32 cycle_timer_ticks(u32 cycle_timer)
{
        u32 ticks;

        ticks = cycle_timer & 0xfff;
        ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
        ticks += (3072 * 8000) * (cycle_timer >> 25);

        return ticks;
}

/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other.  Furthermore, any negative difference indicates an
 * error.  (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u32 ohci_get_cycle_time(struct fw_card *card)
{
        struct fw_ohci *ohci = fw_ohci(card);
        u32 c0, c1, c2;
        u32 t0, t1, t2;
        s32 diff01, diff12;
        int i;

        c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

        if (ohci->quirks & QUIRK_CYCLE_TIMER) {
                i = 0;
                c1 = c2;
                c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                do {
                        c0 = c1;
                        c1 = c2;
                        c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                        t0 = cycle_timer_ticks(c0);
                        t1 = cycle_timer_ticks(c1);
                        t2 = cycle_timer_ticks(c2);
                        diff01 = t1 - t0;
                        diff12 = t2 - t1;
                } while ((diff01 <= 0 || diff12 <= 0 ||
                          diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
                         && i++ < 20);
        }

        return c2;
}

static void copy_iso_headers(struct iso_context *ctx, void *p)
{
        int i = ctx->header_length;

        if (i + ctx->base.header_size > PAGE_SIZE)
                return;

        /*
         * The iso header is byteswapped to little endian by
         * the controller, but the remaining header quadlets
         * are big endian.  We want to present all the headers
         * as big endian, so we have to swap the first quadlet.
         */
        if (ctx->base.header_size > 0)
                *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
        if (ctx->base.header_size > 4)
                *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
        if (ctx->base.header_size > 8)
                memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
        ctx->header_length += ctx->base.header_size;
}

static int handle_ir_packet_per_buffer(struct context *context,
                                       struct descriptor *d,
                                       struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);
        struct descriptor *pd;
        __le32 *ir_header;
        void *p;

        for (pd = d; pd <= last; pd++) {
                if (pd->transfer_status)
                        break;
        }
        if (pd > last)
                /* Descriptor(s) not done yet, stop iteration */
                return 0;
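        /*
         * The controller DMA'ed the iso header into the memory right
         * behind the block's last descriptor; that is where the header
         * descriptor's data_address was pointed at queue time.
         */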
        p = last + 1;
        copy_iso_headers(ctx, p);

        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
                ir_header = (__le32 *) p;
                ctx->base.callback(&ctx->base,
                                   le32_to_cpu(ir_header[0]) & 0xffff,
                                   ctx->header_length, ctx->header,
                                   ctx->base.callback_data);
                ctx->header_length = 0;
        }

        return 1;
}

static int handle_it_packet(struct context *context,
                            struct descriptor *d,
                            struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);
        int i;
        struct descriptor *pd;

        for (pd = d; pd <= last; pd++)
                if (pd->transfer_status)
                        break;
        if (pd > last)
                /* Descriptor(s) not done yet, stop iteration */
                return 0;

        i = ctx->header_length;
        if (i + 4 < PAGE_SIZE) {
                /* Present this value as big-endian to match the receive code */
                *(__be32 *)(ctx->header + i) = cpu_to_be32(
                                ((u32)le16_to_cpu(pd->transfer_status) << 16) |
                                le16_to_cpu(pd->res_count));
                ctx->header_length += 4;
        }

        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
                ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
                                   ctx->header_length, ctx->header,
                                   ctx->base.callback_data);
                ctx->header_length = 0;
        }

        return 1;
}

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
                                int type, int channel, size_t header_size)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct iso_context *ctx, *list;
        descriptor_callback_t callback;
        u64 *channels, dont_care = ~0ULL;
        u32 *mask, regs;
        unsigned long flags;
        int index, ret = -ENOMEM;

        if (type == FW_ISO_CONTEXT_TRANSMIT) {
                channels = &dont_care;
                mask = &ohci->it_context_mask;
                list = ohci->it_context_list;
                callback = handle_it_packet;
        } else {
                channels = &ohci->ir_context_channels;
                mask = &ohci->ir_context_mask;
                list = ohci->ir_context_list;
                callback = handle_ir_packet_per_buffer;
        }

        spin_lock_irqsave(&ohci->lock, flags);
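        /*
         * The channel must still be unclaimed (*channels & (1ULL << channel))
         * and ffs(*mask) - 1 picks the lowest-numbered free hardware context.
         */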
        index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
        if (index >= 0) {
                *channels &= ~(1ULL << channel);
                *mask &= ~(1 << index);
        }
        spin_unlock_irqrestore(&ohci->lock, flags);

        if (index < 0)
                return ERR_PTR(-EBUSY);

        if (type == FW_ISO_CONTEXT_TRANSMIT)
                regs = OHCI1394_IsoXmitContextBase(index);
        else
                regs = OHCI1394_IsoRcvContextBase(index);

        ctx = &list[index];
        memset(ctx, 0, sizeof(*ctx));
        ctx->header_length = 0;
        ctx->header = (void *) __get_free_page(GFP_KERNEL);
        if (ctx->header == NULL)
                goto out;

        ret = context_init(&ctx->context, ohci, regs, callback);
        if (ret < 0)
                goto out_with_header;

        return &ctx->base;

 out_with_header:
        free_page((unsigned long)ctx->header);
 out:
        spin_lock_irqsave(&ohci->lock, flags);
        *mask |= 1 << index;
        spin_unlock_irqrestore(&ohci->lock, flags);

        return ERR_PTR(ret);
}

static int ohci_start_iso(struct fw_iso_context *base,
                          s32 cycle, u32 sync, u32 tags)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct fw_ohci *ohci = ctx->context.ohci;
        u32 control, match;
        int index;

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                match = 0;
                if (cycle >= 0)
                        match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
                                (cycle & 0x7fff) << 16;

                reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
                reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
                context_run(&ctx->context, match);
        } else {
                index = ctx - ohci->ir_context_list;
                control = IR_CONTEXT_ISOCH_HEADER;
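                /*
                 * The IR ContextMatch register packs, as the shifts
                 * below mirror, the tag bits at bit 28, the cycle to
                 * match at bit 12, the sync field at bit 8 and the
                 * channel number in the low bits.
                 */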
                match = (tags << 28) | (sync << 8) | ctx->base.channel;
                if (cycle >= 0) {
                        match |= (cycle & 0x07fff) << 12;
                        control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
                }

                reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
                reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
                reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
                context_run(&ctx->context, control);
        }

        return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        int index;

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
        } else {
                index = ctx - ohci->ir_context_list;
                reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
        }
        flush_writes(ohci);
        context_stop(&ctx->context);

        return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        unsigned long flags;
        int index;

        ohci_stop_iso(base);
        context_release(&ctx->context);
        free_page((unsigned long)ctx->header);

        spin_lock_irqsave(&ohci->lock, flags);

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                ohci->it_context_mask |= 1 << index;
        } else {
                index = ctx - ohci->ir_context_list;
                ohci->ir_context_mask |= 1 << index;
                ohci->ir_context_channels |= 1ULL << base->channel;
        }

        spin_unlock_irqrestore(&ohci->lock, flags);
}

static int ohci_queue_iso_transmit(struct fw_iso_context *base,
                                   struct fw_iso_packet *packet,
                                   struct fw_iso_buffer *buffer,
                                   unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct descriptor *d, *last, *pd;
        struct fw_iso_packet *p;
        __le32 *header;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, payload_z, irq;
        u32 payload_index, payload_end_index, next_page_index;
        int page, end_page, i, length, offset;

        p = packet;
        payload_index = payload;
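        /*
         * z counts the descriptor slots needed for this packet: two for
         * the immediate-key descriptor and the 8-byte iso packet header
         * it carries (just one when skipping), one for the optional
         * user-supplied header, and one per payload page crossed.
         */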
        if (p->skip)
                z = 1;
        else
                z = 2;
        if (p->header_length > 0)
                z++;

        /* Determine the first page the payload isn't contained in. */
        end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
        if (p->payload_length > 0)
                payload_z = end_page - (payload_index >> PAGE_SHIFT);
        else
                payload_z = 0;

        z += payload_z;

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

        d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
        if (d == NULL)
                return -ENOMEM;

        if (!p->skip) {
                d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
                d[0].req_count = cpu_to_le16(8);
                /*
                 * Link the skip address to this descriptor itself.  This causes
                 * a context to skip a cycle whenever lost cycles or FIFO
                 * overruns occur, without dropping the data.  The application
                 * should then decide whether this is an error condition or not.
                 * FIXME: Make the context's cycle-lost behaviour configurable?
                 */
                d[0].branch_address = cpu_to_le32(d_bus | z);

                header = (__le32 *) &d[1];
                header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
                                        IT_HEADER_TAG(p->tag) |
                                        IT_HEADER_TCODE(TCODE_STREAM_DATA) |
                                        IT_HEADER_CHANNEL(ctx->base.channel) |
                                        IT_HEADER_SPEED(ctx->base.speed));
                header[1] =
                        cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
                                                          p->payload_length));
        }

        if (p->header_length > 0) {
                d[2].req_count    = cpu_to_le16(p->header_length);
                d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
                memcpy(&d[z], p->header, p->header_length);
        }

        pd = d + z - payload_z;
        payload_end_index = payload_index + p->payload_length;
        for (i = 0; i < payload_z; i++) {
                page            = payload_index >> PAGE_SHIFT;
                offset          = payload_index & ~PAGE_MASK;
                next_page_index = (page + 1) << PAGE_SHIFT;
                length          =
                        min(next_page_index, payload_end_index) - payload_index;
                pd[i].req_count = cpu_to_le16(length);

                page_bus = page_private(buffer->pages[page]);
                pd[i].data_address = cpu_to_le32(page_bus + offset);

                payload_index += length;
        }

        if (p->interrupt)
                irq = DESCRIPTOR_IRQ_ALWAYS;
        else
                irq = DESCRIPTOR_NO_IRQ;

        last = z == 2 ? d : d + z - 1;
        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
                                     DESCRIPTOR_STATUS |
                                     DESCRIPTOR_BRANCH_ALWAYS |
                                     irq);

        context_append(&ctx->context, d, z, header_z);

        return 0;
}

static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
                                        struct fw_iso_packet *packet,
                                        struct fw_iso_buffer *buffer,
                                        unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct descriptor *d, *pd;
        struct fw_iso_packet *p = packet;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, rest;
        int i, j, length;
        int page, offset, packet_count, header_size, payload_per_buffer;

        /*
         * The OHCI controller puts the isochronous header and trailer in the
         * buffer, so we need at least 8 bytes.
         */
        packet_count = p->header_length / ctx->base.header_size;
        header_size  = max(ctx->base.header_size, (size_t)8);

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(header_size, sizeof(*d));
        page     = payload >> PAGE_SHIFT;
        offset   = payload & ~PAGE_MASK;
        payload_per_buffer = p->payload_length / packet_count;

        for (i = 0; i < packet_count; i++) {
                /* d points to the header descriptor */
                z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
                d = context_get_descriptors(&ctx->context,
                                            z + header_z, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                d->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                         DESCRIPTOR_INPUT_MORE);
                if (p->skip && i == 0)
                        d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
                d->req_count = cpu_to_le16(header_size);
                d->res_count = d->req_count;
                d->transfer_status = 0;
                d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

                rest = payload_per_buffer;
                pd = d;
                for (j = 1; j < z; j++) {
                        pd++;
                        pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                                  DESCRIPTOR_INPUT_MORE);

                        if (offset + rest < PAGE_SIZE)
                                length = rest;
                        else
                                length = PAGE_SIZE - offset;
                        pd->req_count = cpu_to_le16(length);
                        pd->res_count = pd->req_count;
                        pd->transfer_status = 0;

                        page_bus = page_private(buffer->pages[page]);
                        pd->data_address = cpu_to_le32(page_bus + offset);

                        offset = (offset + length) & ~PAGE_MASK;
                        rest -= length;
                        if (offset == 0)
                                page++;
                }
                pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                          DESCRIPTOR_INPUT_LAST |
                                          DESCRIPTOR_BRANCH_ALWAYS);
                if (p->interrupt && i == packet_count - 1)
                        pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

                context_append(&ctx->context, d, z, header_z);
        }

        return 0;
}

static int ohci_queue_iso(struct fw_iso_context *base,
                          struct fw_iso_packet *packet,
                          struct fw_iso_buffer *buffer,
                          unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctx->context.ohci->lock, flags);
        if (base->type == FW_ISO_CONTEXT_TRANSMIT)
                ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
        else
                ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
                                                               buffer, payload);
        spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

        return ret;
}

static const struct fw_card_driver ohci_driver = {
        .enable                 = ohci_enable,
        .update_phy_reg         = ohci_update_phy_reg,
        .set_config_rom         = ohci_set_config_rom,
        .send_request           = ohci_send_request,
        .send_response          = ohci_send_response,
        .cancel_packet          = ohci_cancel_packet,
        .enable_phys_dma        = ohci_enable_phys_dma,
        .get_cycle_time         = ohci_get_cycle_time,
        .allocate_iso_context   = ohci_allocate_iso_context,
        .free_iso_context       = ohci_free_iso_context,
        .queue_iso              = ohci_queue_iso,
        .start_iso              = ohci_start_iso,
        .stop_iso               = ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
        if (machine_is(powermac)) {
                struct device_node *ofn = pci_device_to_OF_node(dev);

                if (ofn) {
                        pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
                        pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
                }
        }
}

static void pmac_ohci_off(struct pci_dev *dev)
{
        if (machine_is(powermac)) {
                struct device_node *ofn = pci_device_to_OF_node(dev);

                if (ofn) {
                        pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
                        pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
                }
        }
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */

static int __devinit pci_probe(struct pci_dev *dev,
                               const struct pci_device_id *ent)
{
        struct fw_ohci *ohci;
        u32 bus_options, max_receive, link_speed, version, link_enh;
        u64 guid;
        int i, err, n_ir, n_it;
        size_t size;

        ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
        if (ohci == NULL) {
                err = -ENOMEM;
                goto fail;
        }

        fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

        pmac_ohci_on(dev);

        err = pci_enable_device(dev);
        if (err) {
                fw_error("Failed to enable OHCI hardware\n");
                goto fail_free;
        }

        pci_set_master(dev);
        pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
        pci_set_drvdata(dev, ohci);

        spin_lock_init(&ohci->lock);

        tasklet_init(&ohci->bus_reset_tasklet,
                     bus_reset_tasklet, (unsigned long)ohci);

        err = pci_request_region(dev, 0, ohci_driver_name);
        if (err) {
                fw_error("MMIO resource unavailable\n");
                goto fail_disable;
        }

        ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
        if (ohci->registers == NULL) {
                fw_error("Failed to remap registers\n");
                err = -ENXIO;
                goto fail_iomem;
        }

        for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
                if (ohci_quirks[i].vendor == dev->vendor &&
                    (ohci_quirks[i].device == dev->device ||
                     ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
                        ohci->quirks = ohci_quirks[i].flags;
                        break;
                }
        if (param_quirks)
                ohci->quirks = param_quirks;

        /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
        if (dev->vendor == PCI_VENDOR_ID_TI) {
                pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);

                /* adjust latency of ATx FIFO: use 1.7 KB threshold */
                link_enh &= ~TI_LinkEnh_atx_thresh_mask;
                link_enh |= TI_LinkEnh_atx_thresh_1_7K;

                /* use priority arbitration for asynchronous responses */
                link_enh |= TI_LinkEnh_enab_unfair;

                /* required for aPhyEnhanceEnable to work */
                link_enh |= TI_LinkEnh_enab_accel;

                pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
        }

        ar_context_init(&ohci->ar_request_ctx, ohci,
                        OHCI1394_AsReqRcvContextControlSet);

        ar_context_init(&ohci->ar_response_ctx, ohci,
                        OHCI1394_AsRspRcvContextControlSet);

        context_init(&ohci->at_request_ctx, ohci,
                     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

        context_init(&ohci->at_response_ctx, ohci,
                     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

        reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
        ohci->ir_context_channels = ~0ULL;
        ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
        reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
        n_ir = hweight32(ohci->ir_context_mask);
        size = sizeof(struct iso_context) * n_ir;
        ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

        reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
        ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
        n_it = hweight32(ohci->it_context_mask);
        size = sizeof(struct iso_context) * n_it;
        ohci->it_context_list = kzalloc(size, GFP_KERNEL);

        if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
                err = -ENOMEM;
                goto fail_contexts;
        }

        /* self-id dma buffer allocation */
        ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
                                               SELF_ID_BUF_SIZE,
                                               &ohci->self_id_bus,
                                               GFP_KERNEL);
        if (ohci->self_id_cpu == NULL) {
                err = -ENOMEM;
                goto fail_contexts;
        }

        bus_options = reg_read(ohci, OHCI1394_BusOptions);
        max_receive = (bus_options >> 12) & 0xf;
        link_speed = bus_options & 0x7;
        guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
                reg_read(ohci, OHCI1394_GUIDLo);

        err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
        if (err)
                goto fail_self_id;

        version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
        fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
                  "%d IR + %d IT contexts, quirks 0x%x\n",
                  dev_name(&dev->dev), version >> 16, version & 0xff,
                  n_ir, n_it, ohci->quirks);

        return 0;

 fail_self_id:
        dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                          ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
        kfree(ohci->ir_context_list);
        kfree(ohci->it_context_list);
        context_release(&ohci->at_response_ctx);
        context_release(&ohci->at_request_ctx);
        ar_context_release(&ohci->ar_response_ctx);
        ar_context_release(&ohci->ar_request_ctx);
        pci_iounmap(dev, ohci->registers);
 fail_iomem:
        pci_release_region(dev, 0);
 fail_disable:
        pci_disable_device(dev);
 fail_free:
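        /* struct fw_card is the first member of struct fw_ohci, so this
         * kfree() releases the whole fw_ohci allocation. */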
        kfree(&ohci->card);
        pmac_ohci_off(dev);
 fail:
        if (err == -ENOMEM)
                fw_error("Out of memory\n");

        return err;
}

static void pci_remove(struct pci_dev *dev)
{
        struct fw_ohci *ohci;

        ohci = pci_get_drvdata(dev);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        flush_writes(ohci);
        fw_core_remove_card(&ohci->card);

        /*
         * FIXME: Fail all pending packets here, now that the upper
         * layers can't queue any more.
         */
        software_reset(ohci);
        free_irq(dev->irq, ohci);

        if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->next_config_rom,
                                  ohci->next_config_rom_bus);
        if (ohci->config_rom)
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->config_rom, ohci->config_rom_bus);
        dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                          ohci->self_id_cpu, ohci->self_id_bus);
        ar_context_release(&ohci->ar_request_ctx);
        ar_context_release(&ohci->ar_response_ctx);
        context_release(&ohci->at_request_ctx);
        context_release(&ohci->at_response_ctx);
        kfree(ohci->it_context_list);
        kfree(ohci->ir_context_list);
        pci_iounmap(dev, ohci->registers);
        pci_release_region(dev, 0);
        pci_disable_device(dev);
        kfree(&ohci->card);
        pmac_ohci_off(dev);

        fw_notify("Removed fw-ohci device.\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
        struct fw_ohci *ohci = pci_get_drvdata(dev);
        int err;

        software_reset(ohci);
        free_irq(dev->irq, ohci);
        err = pci_save_state(dev);
        if (err) {
                fw_error("pci_save_state failed\n");
                return err;
        }
        err = pci_set_power_state(dev, pci_choose_state(dev, state));
        if (err)
                fw_error("pci_set_power_state failed with %d\n", err);
        pmac_ohci_off(dev);

        return 0;
}

static int pci_resume(struct pci_dev *dev)
{
        struct fw_ohci *ohci = pci_get_drvdata(dev);
        int err;

        pmac_ohci_on(dev);
        pci_set_power_state(dev, PCI_D0);
        pci_restore_state(dev);
        err = pci_enable_device(dev);
        if (err) {
                fw_error("pci_enable_device failed\n");
                return err;
        }
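        /* A NULL config_rom makes ohci_enable() reuse the config rom
         * that was in effect before suspend. */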
        return ohci_enable(&ohci->card, NULL, 0);
}
#endif

static const struct pci_device_id pci_table[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
        { }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
        .name           = ohci_driver_name,
        .id_table       = pci_table,
        .probe          = pci_probe,
        .remove         = pci_remove,
#ifdef CONFIG_PM
        .resume         = pci_resume,
        .suspend        = pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
        return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
        pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);