/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"
#include "targaddrs.h"
#include "bmi.h"
#include "hif.h"
#include "htc.h"
#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);
static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}
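
/*
 * Diagnostic write: mirror image of the diagnostic read above. Caller data
 * is staged in a DMA-coherent bounce buffer and pushed through the
 * diagnostic CE into Target memory, DIAG_TRANSFER_LIMIT bytes at a time.
 */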
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}
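
/* Check the RTC state register to see whether the target SoC is fully awake. */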
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;
	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
	int n = 100;

	while (n-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	if (n < 0)
		ath10k_warn("Unable to wakeup target\n");
}
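
/*
 * Force the SoC awake and keep a reference count of wake requests. The first
 * caller asserts PCIE_SOC_WAKE_V_MASK and the RTC state is then polled until
 * the target reports awake or PCIE_WAKE_TIMEOUT expires; the matching sleep
 * call below releases the reference and allows the SoC to sleep again.
 */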
void ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			break;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target takes too long to wake up (awake count %d)\n",
				    atomic_read(&ar_pci->keep_awake_count));
			break;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
				    void *transfer_context,
				    u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	bool process = false;

	do {
		/*
		 * For the send completion of an item in sendlist, just
		 * increment num_sends_allowed. The upper layer callback will
		 * be triggered when last fragment is done with send.
		 */
		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
			spin_lock_bh(&pipe_info->pipe_lock);
			pipe_info->num_sends_allowed++;
			spin_unlock_bh(&pipe_info->pipe_lock);
			continue;
		}

		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

		process = true;
	} while (ath10k_ce_completed_send_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id) == 0);

	/*
	 * If only some of the items within a sendlist have completed,
	 * don't invoke completion processing until the entire sendlist
	 * has been sent.
	 */
	if (!process)
		return;

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
				    void *transfer_context, u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id,
				    unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	do {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	} while (ath10k_ce_completed_recv_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id,
					       &flags) == 0);

	ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	struct ce_sendlist sendlist;
	unsigned int len;
	u32 flags = 0;
	int ret;

	memset(&sendlist, 0, sizeof(struct ce_sendlist));

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

	/* Make sure we have resources to handle this request */
	spin_lock_bh(&pipe_info->pipe_lock);
	if (!pipe_info->num_sends_allowed) {
		ath10k_warn("Pipe: %d is full\n", pipe_id);
		spin_unlock_bh(&pipe_info->pipe_lock);
		return -ENOSR;
	}
	pipe_info->num_sends_allowed--;
	spin_unlock_bh(&pipe_info->pipe_lock);

	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}
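
/* Number of send slots currently available on the given pipe. */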
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
	int ret;

	spin_lock_bh(&pipe_info->pipe_lock);
	ret = pipe_info->num_sends_allowed;
	spin_unlock_bh(&pipe_info->pipe_lock);

	return ret;
}
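
/*
 * Dump firmware crash state: read hi_failure_state to locate the register
 * dump area in Target memory, print its contents and schedule a restart.
 */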
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	if (ath10k_pci_diag_read_mem(ar, host_addr,
				     &reg_dump_area, sizeof(u32)) != 0) {
		ath10k_warn("could not read hi_failure_state\n");
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("could not dump FW Dump Area\n");
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}
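
/*
 * Register CE send/recv callbacks and pre-allocate completion structures for
 * every pipe that has send or receive entries. The diagnostic CE is handled
 * separately and skipped here.
 */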
static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
			pipe_info->num_sends_allowed = attr->src_nentries - 1;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}
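
/*
 * Disable CE interrupts, kill the interrupt tasklets and mark any pending
 * completions as aborted so upper layers can release their resources.
 */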
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Cancel the pending tasklet */
	tasklet_kill(&ar_pci->intr_tq);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = compl->skb;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = compl->skb;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}
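
/*
 * Drain the completion queue and invoke the HIF tx/rx callbacks. Upper
 * layers cannot handle tx/rx completions in parallel, so processing is
 * serialized via the compl_processing flag.
 */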
static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->skb,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = compl->skb;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		compl->pipe_info->num_sends_allowed += send_done;
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

	/* pipe 5 unused   */
	/* pipe 6 reserved */
	/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}
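
/* Allocate, DMA-map and enqueue 'num' receive buffers on the given pipe. */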
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
				    pipe_num);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}
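
/* Start CE completion processing and post an initial set of receive buffers. */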
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("could not start CE (%d)\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("could not post rx pipes (%d)\n", ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT)
			/*
			 * Indicate the completion to higher layer to free
			 * the buffer
			 */
			ATH10K_SKB_CB(netbuf)->is_aborted = true;
			ar_pci->msg_callbacks_current.tx_completion(ar,
								    netbuf,
								    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}
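
/* Stop the HIF layer: disable IRQs, stop the CEs and release leftover buffers. */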
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	ar_pci->started = 0;
}
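
/*
 * Exchange a BMI request/response pair with the target over the BMI copy
 * engines. The request and the optional response are bounced through
 * DMA-mapped copies of the caller's buffers, and the call waits up to
 * BMI_COMMUNICATION_TIMEOUT_HZ for the transfer to complete.
 */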
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id)
{
	struct bmi_xfer *xfer = transfer_context;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct bmi_xfer *xfer = transfer_context;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,		/* could be moved to 3 (share with WMI) */
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 4,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},

	/* (Additions here) */

	{				/* Must be last */
		 0,
		 0,
		 0,
	},
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("Unable to read core ctrl\n");
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret)
		ath10k_warn("Unable to set interrupt mask\n");

	return ret;
}
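
/*
 * Download the target-side CE configuration and the service-to-CE map via
 * diagnostic writes, then set the host interest flags (early allocation,
 * HI_OPTION_EARLY_CFG_DONE) that let target initialization proceed.
 */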
static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state, config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
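
/*
 * Set up one Copy Engine per entry in host_ce_config_wlan.  The last
 * CE is reserved for diagnostic window accesses; the BMI pipes get
 * temporary send/recv completion handlers until the BMI phase is over.
 */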
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("Unable to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == ar_pci->ce_count - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag =
				ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
	}

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	ath10k_ce_send_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_send_done, 0);

	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_recv_data);

	return 0;
}
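
/*
 * Bottom-half helper for firmware events: wake the Target, read the
 * firmware indicator register, acknowledge a pending event and dump
 * the firmware crash area once the HIF has been started.
 */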
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it.  Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}
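
/*
 * HIF power-up sequence: start interrupt handling, reset the device in
 * case the Target was left running by a previous driver instance, wait
 * for the Target to (re)initialize, then set up the Copy Engines and
 * download the CE/service configuration before kicking the Target CPU.
 */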
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_intr(ar);
	if (ret) {
		ath10k_err("could not start interrupt handling (%d)\n", ret);
		goto err;
	}

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ath10k_pci_device_reset(ar);

	ret = ath10k_pci_reset_target(ar);
	if (ret)
		goto err_irq;

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret)
		goto err_ps;

	ret = ath10k_pci_init_config(ar);
	if (ret)
		goto err_ce;

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU (%d)\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err_irq:
	ath10k_pci_stop_intr(ar);
err:
	return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_stop_intr(ar);
	ath10k_pci_ce_deinit(ar);

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}
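
/*
 * The suspend/resume helpers below program the device's power state
 * directly through ATH10K_PCI_PM_CONTROL (config offset 0x44): suspend
 * saves PCI state and writes 0x03 (D3hot) into the low byte, resume
 * restores state, writes 0x00 (D0) and re-disables the PCIe retry
 * timeout so it cannot interfere with deep CPU sleep states.
 */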
#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
#endif

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head = ath10k_pci_hif_send_head,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.set_callbacks = ath10k_pci_hif_set_callbacks,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
#endif
};
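
/*
 * Interrupt handling is split between the irq handlers below and
 * tasklet bottom halves: a per-CE tasklet services a single copy
 * engine, the MSI error tasklet handles firmware events, and intr_tq
 * services everything when a single (MSI or legacy) interrupt is
 * shared.
 */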
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/*
		 * IMPORTANT: INTR_CLR register has to be set after
		 * INTR_ENABLE is set to 0, otherwise interrupt can not be
		 * really cleared.
		 */
		iowrite32(0, ar_pci->mem +
			  (SOC_CORE_BASE_ADDRESS |
			   PCIE_INTR_ENABLE_ADDRESS));
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_CLR_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer.
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
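
/*
 * Shared bottom half for MSI/legacy interrupts: check for firmware
 * events, service every copy engine that has work, and, when running
 * on the legacy line, re-enable the interrupt sources that the top
 * half masked off.
 */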
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/* Enable Legacy PCI line interrupts */
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_ENABLE_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}
}
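
/*
 * Multi-MSI setup: vector MSI_ASSIGN_FW carries firmware events and
 * vectors MSI_ASSIGN_CE_INITIAL..MSI_ASSIGN_CE_MAX carry one interrupt
 * per copy engine.  Any request_irq() failure unwinds the vectors that
 * were already claimed.
 */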
static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;
	int i;

	ret = pci_enable_msi_block(ar_pci->pdev, num);
	if (ret)
		return ret;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("request_irq(%d) failed %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);

		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("request_irq(%d) failed %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			pci_disable_msi(ar_pci->pdev);
			return ret;
		}
	}

	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
	return 0;
}

static int ath10k_pci_start_intr_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = pci_enable_msi(ar_pci->pdev);
	if (ret < 0)
		return ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0) {
		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	ath10k_info("MSI interrupt handling\n");
	return 0;
}

static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0)
		return ret;

	/*
	 * Make sure to wake the Target before enabling Legacy
	 * Interrupt.
	 */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	/*
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	iowrite32(PCIE_INTR_FIRMWARE_MASK |
		  PCIE_INTR_CE_MASK_ALL,
		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_info("legacy interrupt handling\n");
	return 0;
}
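
/*
 * Interrupt setup tries a block of MSI vectors first (when the MSI-X
 * feature bit is set), falls back to a single MSI and finally to the
 * legacy INTx line; the resulting vector count is recorded in
 * num_msi_intrs.
 */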
static int ath10k_pci_start_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int num = MSI_NUM_REQUEST;
	int ret;
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long) ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr,
			     ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}

	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
		num = 1;

	if (num > 1) {
		ret = ath10k_pci_start_intr_msix(ar, num);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
		num = 1;
	}

	if (num == 1) {
		ret = ath10k_pci_start_intr_msi(ar);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
			    ret);
		num = 0;
	}

	ret = ath10k_pci_start_intr_legacy(ar);

exit:
	ar_pci->num_msi_intrs = num;
	ar_pci->ce_count = CE_COUNT;
	return ret;
}

static void ath10k_pci_stop_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);

	if (ar_pci->num_msi_intrs > 0)
		pci_disable_msi(ar_pci->pdev);
}
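
/*
 * Poll the firmware indicator register for FW_IND_INITIALIZED for up
 * to ~3 seconds, re-arming the legacy interrupt enables on every
 * iteration to cover the CORE_BASE write race described in
 * ath10k_pci_start_intr_legacy().
 */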
static int ath10k_pci_reset_target(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */

	/* Wait for Target to finish initialization before we proceed. */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("Target stalled\n");
		iowrite32(PCIE_SOC_WAKE_RESET,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
		return -EIO;
	}

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	return 0;
}
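
/*
 * Cold-reset the Target: wake it, assert bit 0 of
 * SOC_GLOBAL_RESET_ADDRESS, wait for RTC_STATE to report the reset
 * state, then deassert and wait for the reset state to clear again.
 */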
static void ath10k_pci_device_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *mem = ar_pci->mem;
	int i;
	u32 val;

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_target_is_awake(ar))
			break;
		msleep(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}

static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}
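
/*
 * Probe: allocate the per-device state, create the ath10k core
 * instance, claim and map the device BAR (BAR_NUM), restrict DMA to
 * 32 bits, temporarily disable ASPM and register with the core; any
 * failure unwinds in reverse order through the err_* labels.
 */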
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("ath10k_core_create failed!\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("cannot assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("cannot enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("PCI MMIO reservation error: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("32-bit DMA not available: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("cannot enable 32-bit consistent DMA\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("PCI iomap error\n");
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ret = ath10k_core_register(ar);
	if (ret) {
		ath10k_err("could not register driver core (%d)\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	pci_set_drvdata(pdev, NULL);
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);
	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("pci_register_driver failed [%d]\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);