ohci.c

/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

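/*
 * Generic OHCI DMA descriptor as the controller expects it in memory:
 * control carries the DESCRIPTOR_* flags above, data_address points at
 * the data buffer, branch_address links to the next descriptor block,
 * and res_count/transfer_status are written back by the controller.
 */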
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));

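/*
 * Offsets of the per-context registers relative to a DMA context's
 * register base: ContextControlSet, ContextControlClear, CommandPtr
 * and ContextMatch.
 */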
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

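/*
 * Asynchronous receive DMA uses a singly linked chain of page-sized
 * buffers; each page begins with the descriptor that the controller
 * fills in, followed by the received packet data.
 */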
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	atomic_t bus_seconds;

	bool use_dualbuffer;
	bool old_uninorth;
	bool bus_reset_packet_quirk;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u64 ir_context_channels;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000

#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
		  evt & OHCI1394_selfIDComplete ? " selfID" : "",
		  evt & OHCI1394_RQPkt ? " AR_req" : "",
		  evt & OHCI1394_RSPkt ? " AR_resp" : "",
		  evt & OHCI1394_reqTxComplete ? " AT_req" : "",
		  evt & OHCI1394_respTxComplete ? " AT_resp" : "",
		  evt & OHCI1394_isochRx ? " IR" : "",
		  evt & OHCI1394_isochTx ? " IT" : "",
		  evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
		  evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
		  evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
		  evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
		  evt & OHCI1394_busReset ? " busReset" : "",
		  evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
			  OHCI1394_RSPkt | OHCI1394_reqTxComplete |
			  OHCI1394_respTxComplete | OHCI1394_isochRx |
			  OHCI1394_isochTx | OHCI1394_postedWriteErr |
			  OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
			  OHCI1394_regAccessFail | OHCI1394_busReset)
			  ? " ?" : "");
}

static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
	[4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}

static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
}

static const char *evts[] = {
	[0x00] = "evt_no_status", [0x01] = "-reserved-",
	[0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun", [0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
	[0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-", [0x0d] = "-reserved-",
	[0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
	[0x10] = "-reserved-", [0x11] = "ack_complete",
	[0x12] = "ack_pending ", [0x13] = "-reserved-",
	[0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B", [0x17] = "-reserved-",
	[0x18] = "-reserved-", [0x19] = "-reserved-",
	[0x1a] = "-reserved-", [0x1b] = "ack_tardy",
	[0x1c] = "-reserved-", [0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error", [0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req", [0x1] = "BW req",
	[0x2] = "W resp", [0x3] = "-reserved-",
	[0x4] = "QR req", [0x5] = "BR req",
	[0x6] = "QR resp", [0x7] = "BR resp",
	[0x8] = "cycle start", [0x9] = "Lk req",
	[0xa] = "async stream packet", [0xb] = "Lk resp",
	[0xc] = "-reserved-", [0xd] = "-reserved-",
	[0xe] = "link internal", [0xf] = "-reserved-",
};
static const char *phys[] = {
	[0x0] = "phy config packet", [0x1] = "link-on packet",
	[0x2] = "self-id packet", [0x3] = "-reserved-",
};

static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
			  dir, (header[2] >> 16) & 0xff);
		return;
	}

	if (header[0] == ~header[1]) {
		fw_notify("A%c %s, %s, %08x\n",
			  dir, evts[evt], phys[header[0] >> 30 & 0x3],
			  header[0]);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xe: case 0xa:
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		fw_notify("A%c spd %x tl %02x, "
			  "%04x -> %04x, %s, "
			  "%s, %04x%08x%s\n",
			  dir, speed, header[0] >> 10 & 0x3f,
			  header[1] >> 16, header[0] >> 16, evts[evt],
			  tcodes[tcode], header[1] & 0xffff, header[2],
			  specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
			  "%04x -> %04x, %s, "
			  "%s%s\n",
			  dir, speed, header[0] >> 10 & 0x3f,
			  header[1] >> 16, header[0] >> 16, evts[evt],
			  tcodes[tcode], specific);
	}
}

#else

#define log_irqs(evt)
#define log_selfids(node_id, generation, self_id_count, sid)
#define log_ar_at_event(dir, speed, header, evt)

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

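/*
 * Read-modify-write of a PHY register through the PhyControl register:
 * issue a read, give the PHY time to respond, check ReadDone, then
 * apply clear_bits/set_bits to the value and write the result back.
 */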
static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t uninitialized_var(ab_bus);
	size_t offset;

	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab->next = NULL;
	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					     DESCRIPTOR_STATUS |
					     DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

static void ar_context_release(struct ar_context *ctx)
{
	struct ar_buffer *ab, *ab_next;
	size_t offset;
	dma_addr_t ab_bus;

	for (ab = ctx->current_buffer; ab; ab = ab_next) {
		ab_next = ab->next;
		offset = offsetof(struct ar_buffer, data);
		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  ab, ab_bus);
	}
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

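/*
 * Parse one packet out of the AR buffer: reconstruct the IEEE 1394
 * header from the controller's receive format, pick up the trailing
 * status word (ack/evt code, speed, timestamp), and hand the packet
 * to the core as a request or a response.  Returns a pointer just
 * past the packet's trailer so the caller can continue with the next
 * packet in the buffer.
 */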
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		/* FIXME: Stop context, discard everything, and restart? */
		p.header_length = 0;
		p.payload_length = 0;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!ohci->bus_reset_packet_quirk)
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

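/*
 * Bottom half for AR contexts: called when the controller has filled
 * (part of) the current receive buffer.  When a buffer has been
 * consumed completely, a packet may straddle this page and the next
 * one; both halves are copied back into the finished page, parsed
 * from there, and the page is then freed and replaced by a new one.
 */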
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;
		dma_addr_t start_bus;
		void *start;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet.
		 */
		offset = offsetof(struct ar_buffer, data);
		start = buffer = ab;
		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		dma_free_coherent(ohci->card.device, PAGE_SIZE,
				  start, start_bus);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

static int ar_context_init(struct ar_context *ctx,
			   struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}

static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}

static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}

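/*
 * Bottom half for AT and isochronous contexts: walk the DMA program
 * from the last known position, invoke the context's callback for
 * each completed descriptor block, and return used descriptor
 * buffers to the free list.
 */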
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;

		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
				  desc->buffer_bus -
				  ((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}

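/*
 * Per-packet private data; stored in the otherwise unused fourth
 * descriptor of an AT descriptor block so that handle_at_packet()
 * can find the fw_packet again on completion.
 */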
struct driver_data {
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE 1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */
	header = (__le32 *) &d[1];
	switch (packet->header_length) {
	case 16:
	case 12:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case 8:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
		break;

	case 4:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(ohci->card.device, payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}
		packet->payload_bus = payload_bus;
		packet->payload_mapped = true;

		d[2].req_count = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/*
	 * If the controller and packet generations don't match, we need to
	 * bail out and try again.  If IntEvent.busReset is set, the AT context
	 * is halted, so appending to the context and trying to run it is
	 * futile.  Most controllers do the right thing and just flush the AT
	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
	 * up stalling out.  So we just bail out in software and try again
	 * later, and everyone is happy.
	 * FIXME: Document how the locking works.
	 */
	if (ohci->generation != packet->generation ||
	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length,
					 DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}

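/*
 * Completion callback for the AT contexts, invoked via
 * context_tasklet(): unmap the payload, translate the descriptor's
 * transfer status event into an ack or rcode, and run the packet's
 * callback.
 */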
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same
		 * error as when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

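/*
 * Answer read requests to the local node's config ROM from the
 * in-memory copy instead of going out on the bus; out-of-range
 * offsets and non-read tcodes get error responses.
 */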
static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

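/*
 * Serve compare-swap lock requests to the serialized bus-management
 * CSRs (BUS_MANAGER_ID, BANDWIDTH_AVAILABLE, CHANNELS_AVAILABLE)
 * through the controller's CSR compare-swap unit instead of emulating
 * the lock in software.
 */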
static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
	fw_core_handle_response(&ohci->card, &response);
}

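/*
 * A packet whose destination is the local node ID is looped back here
 * instead of being queued to an AT context: fake the ack callback,
 * decode the 48-bit CSR offset, and dispatch to the local config-ROM
 * or lock handlers, or hand the packet to the core.
 */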
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}

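/*
 * Bottom half for the selfIDComplete interrupt: validate the node ID
 * and the self-ID buffer the controller just filled, copy the self
 * IDs out, stop the AT contexts, complete any pending config-ROM
 * update, and tell the core about the new bus generation and
 * topology.
 */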
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_notify("node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0xff;
	if (self_id_count == 0 || self_id_count > 252) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
			fw_notify("inconsistent self IDs\n");
			return;
		}
		ohci->self_id_buffer[j] =
				cond_le32_to_cpu(ohci->self_id_cpu[i]);
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */
	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	if (ohci->bus_reset_packet_quirk)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */
	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom = ohci->config_rom;
			free_rom_bus = ohci->config_rom_bus;
		}
		ohci->config_rom = ohci->next_config_rom;
		ohci->config_rom_bus = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = ohci->next_header;
		reg_write(ohci, OHCI1394_ConfigROMhdr,
			  be32_to_cpu(ohci->next_header));
	}

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	log_selfids(ohci->node_id, generation,
		    self_id_count, ohci->self_id_buffer);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}

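/*
 * Top-half interrupt handler: acknowledge the pending events (all
 * except busReset, which bus_reset_tasklet clears later per OHCI 1.1
 * clause 7.2.3.2), kick the per-context tasklets, and handle the
 * housekeeping interrupts inline.
 */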
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	log_irqs(event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		fw_error("Register access failure - "
			 "please notify linux1394-devel@lists.sf.net\n");

	if (unlikely(event & OHCI1394_postedWriteErr))
		fw_error("PCI posted write error\n");

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			fw_notify("isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			atomic_inc(&ohci->bus_seconds);
	}

	return IRQ_HANDLED;
}
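
/* Request a soft reset and poll until the controller clears the bit. */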
static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}
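
/*
 * Copy 'length' quadlets of config rom data and zero-fill the rest of
 * the CONFIG_ROM_SIZE buffer.
 */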
static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
	size_t size = length * 4;

	memcpy(dest, src, size);
	if (size < CONFIG_ROM_SIZE)
		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
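
/*
 * Bring the controller out of reset, enable the link and interrupts,
 * install the config rom and kick off a bus reset.
 */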
static int ohci_enable(struct fw_card *card,
		       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);
	u32 lps;
	int i;

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return -EBUSY;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers. In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine. Wait 50msec to make sure we have
	 * full link enabled. However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	for (lps = 0, i = 0; !lps && i < 3; i++) {
		msleep(50);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		fw_error("Failed to set Link Power Status\n");
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlClear,
		  OHCI1394_LinkControl_rcvPhyPkt);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
		  OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
		  OHCI1394_masterIntEnable);
	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

	/* Activate link_on bit and contender bit in our self ID packets. */
	if (ohci_update_phy_reg(card, 4, 0,
				PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
		return -EIO;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active. We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately. We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled. This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	if (config_rom) {
		ohci->next_config_rom =
			dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					   &ohci->next_config_rom_bus,
					   GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		copy_config_rom(ohci->next_config_rom, config_rom, length);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = ohci->next_config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			IRQF_SHARED, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/*
	 * We are ready to go, initiate bus reset to finish the
	 * initialization.
	 */
	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}
static int ohci_set_config_rom(struct fw_card *card,
			       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int ret = -EBUSY;
	__be32 *next_config_rom;
	dma_addr_t uninitialized_var(next_config_rom_bus);

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use. See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place. When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register. All automatically and atomically.
	 *
	 * Now, there's a twist to this story. The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers
	 * during the atomic update, even on little endian
	 * architectures. The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet. In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		copy_config_rom(ohci->next_config_rom, config_rom, length);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;
		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
		ret = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect. We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (ret == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);
	else
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	return ret;
}
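
/* Queue an outgoing packet on the AT request or AT response context. */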
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}
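
/*
 * Cancel a pending AT request packet: if it has not been acked yet, unmap
 * its payload and complete it with RCODE_CANCELLED.
 */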
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int ret = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	log_ar_at_event('T', packet->speed, packet->header, 0x20);
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	ret = 0;
 out:
	tasklet_enable(&ctx->tasklet);

	return ret;
}
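
/*
 * Open the physical request filter for the given node so that it may
 * perform physical DMA to this host.
 */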
static int ohci_enable_phys_dma(struct fw_card *card,
				int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	return 0;
#else
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, ret = 0;

	/*
	 * FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		ret = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */
	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
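
/*
 * Combine the seconds counter kept in software with the hardware cycle
 * timer into a 64-bit bus time value.
 */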
static u64 ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 cycle_time;
	u64 bus_time;

	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;

	return bus_time;
}
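
/* Append the per-packet isochronous headers to the context's header page. */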
static void copy_iso_headers(struct iso_context *ctx, void *p)
{
	int i = ctx->header_length;

	if (i + ctx->base.header_size > PAGE_SIZE)
		return;

	/*
	 * The iso header is byteswapped to little endian by
	 * the controller, but the remaining header quadlets
	 * are big endian. We want to present all the headers
	 * as big endian, so we have to swap the first quadlet.
	 */
	if (ctx->base.header_size > 0)
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
	if (ctx->base.header_size > 4)
		*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
	if (ctx->base.header_size > 8)
		memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}
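
/*
 * Completion handler for dual-buffer IR descriptors: collect the received
 * headers and invoke the user callback when an interrupt descriptor is hit.
 */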
static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;

	if (db->first_res_count != 0 && db->second_res_count != 0) {
		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
			/* This descriptor isn't done yet, stop iteration. */
			return 0;
		}
		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
	}

	header_length = le16_to_cpu(db->first_req_count) -
			le16_to_cpu(db->first_res_count);

	p = db + 1;
	end = p + header_length;
	while (p < end) {
		copy_iso_headers(ctx, p);
		ctx->excess_bytes +=
			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
		p += max(ctx->base.header_size, (size_t)8);
	}

	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
			     le16_to_cpu(db->second_res_count);

	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) (db + 1);
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}
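
/*
 * Completion handler for packet-per-buffer IR descriptors: copy the header
 * of the completed packet and invoke the user callback on interrupt.
 */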
static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__le32 *ir_header;
	void *p;

	for (pd = d; pd <= last; pd++) {
		if (pd->transfer_status)
			break;
	}
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	p = last + 1;
	copy_iso_headers(ctx, p);

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) p;
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}
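
/*
 * Completion handler for IT descriptors: report completion of a transmitted
 * packet to the user callback when an interrupt was requested.
 */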
static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}
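
/*
 * Claim a free isochronous transmit or receive context (and, for receive,
 * the requested channel), then initialize its DMA program context.
 */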
static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u64 *channels, dont_care = ~0ULL;
	u32 *mask, regs;
	unsigned long flags;
	int index, ret = -ENOMEM;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		channels = &dont_care;
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		channels = &ohci->ir_context_channels;
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		if (ohci->use_dualbuffer)
			callback = handle_ir_dualbuffer_packet;
		else
			callback = handle_ir_packet_per_buffer;
	}

	spin_lock_irqsave(&ohci->lock, flags);
	index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
	if (index >= 0) {
		*channels &= ~(1ULL << channel);
		*mask &= ~(1 << index);
	}
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	if (type == FW_ISO_CONTEXT_TRANSMIT)
		regs = OHCI1394_IsoXmitContextBase(index);
	else
		regs = OHCI1394_IsoRcvContextBase(index);

	ctx = &list[index];
	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL)
		goto out;

	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(ret);
}
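
/*
 * Program the cycle match and interrupt mask for the context and start its
 * DMA program.
 */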
static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control, match;
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
	} else {
		index = ctx - ohci->ir_context_list;
		control = IR_CONTEXT_ISOCH_HEADER;
		if (ohci->use_dualbuffer)
			control |= IR_CONTEXT_DUAL_BUFFER_MODE;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}
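
/* Stop the context, release its resources and return it to the free mask. */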
static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}
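
/*
 * Build the descriptor list for one isochronous transmit packet: an
 * immediate descriptor for the IT header, optional header data, and one
 * descriptor per payload page.
 */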
static int ohci_queue_iso_transmit(struct fw_iso_context *base,
				   struct fw_iso_packet *packet,
				   struct fw_iso_buffer *buffer,
				   unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
					     struct fw_iso_packet *packet,
					     struct fw_iso_buffer *buffer,
					     unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	z = 2;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = packet_count * max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = p->payload_length;
	/* FIXME: make packet-per-buffer/dual-buffer a context option */
	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS);
		db->first_size =
			cpu_to_le16(max(ctx->base.header_size, (size_t)8));
		if (p->skip && rest == p->payload_length) {
			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
			db->first_req_count = db->first_size;
		} else {
			db->first_req_count = cpu_to_le16(header_size);
		}
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

		if (p->skip && rest == p->payload_length)
			length = 4;
		else if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		if (offset == 0)
			page++;
	}

	return 0;
}
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
						    struct fw_iso_packet *packet,
						    struct fw_iso_buffer *buffer,
						    unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d = NULL, *pd = NULL;
	struct fw_iso_packet *p = packet;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	payload_per_buffer = p->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (p->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		for (j = 1; j < z; j++) {
			pd = d + j;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (p->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}
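
/*
 * Dispatch to the transmit, dual-buffer receive or packet-per-buffer
 * receive queueing routine, under the controller lock.
 */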
static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->use_dualbuffer)
		ret = ohci_queue_iso_receive_dualbuffer(base, packet,
							buffer, payload);
	else
		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
							       buffer, payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}
static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_bus_time		= ohci_get_bus_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void ohci_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci_pmac_on(dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */

#define PCI_VENDOR_ID_AGERE		PCI_VENDOR_ID_ATT
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
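
/*
 * Allocate and initialize the fw_ohci structure, map the registers, apply
 * chip-specific quirks, set up the DMA contexts and register the card with
 * the core.
 */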
static int __devinit pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int err;
	size_t size;

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	ohci_pmac_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;

	/* dual-buffer mode is broken if more than one IR context is active */
	if (dev->vendor == PCI_VENDOR_ID_AGERE &&
	    dev->device == PCI_DEVICE_ID_AGERE_FW643)
		ohci->use_dualbuffer = false;

	/* dual-buffer mode is broken */
	if (dev->vendor == PCI_VENDOR_ID_RICOH &&
	    dev->device == PCI_DEVICE_ID_RICOH_R5C832)
		ohci->use_dualbuffer = false;

/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
#if !defined(CONFIG_X86_32)
	/* dual-buffer mode is broken with descriptor addresses above 2G */
	if (dev->vendor == PCI_VENDOR_ID_TI &&
	    dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
		ohci->use_dualbuffer = false;
#endif

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
	ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_self_id;

	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff);

	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
	context_release(&ohci->at_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(&ohci->card);
	ohci_pmac_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}
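
/* Tear down the card and release everything set up in pci_probe. */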
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(&ohci->card);
	ohci_pmac_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	ohci_pmac_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	ohci_pmac_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif

static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);