/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * File contents: support functions for PCI/PCIe
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/pci.h>

#include <defs.h>
#include <chipcommon.h>
#include <brcmu_utils.h>
#include <brcm_hw_ids.h>
#include <soc.h>
#include "types.h"
#include "pub.h"
#include "pmu.h"
#include "srom.h"
#include "nicpci.h"
#include "aiutils.h"

/* slow_clk_ctl */
/* slow clock source mask */
#define SCC_SS_MASK 0x00000007
/* source of slow clock is LPO */
#define SCC_SS_LPO 0x00000000
/* source of slow clock is crystal */
#define SCC_SS_XTAL 0x00000001
/* source of slow clock is PCI */
#define SCC_SS_PCI 0x00000002
/* LPOFreqSel, 1: 160Khz, 0: 32KHz */
#define SCC_LF 0x00000200
/* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */
#define SCC_LP 0x00000400
/* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */
#define SCC_FS 0x00000800
/* IgnorePllOffReq, 1/0:
 * power logic ignores/honors PLL clock disable requests from core
 */
#define SCC_IP 0x00001000
/* XtalControlEn, 1/0:
 * power logic does/doesn't disable crystal when appropriate
 */
#define SCC_XC 0x00002000
/* XtalPU (RO), 1/0: crystal running/disabled */
#define SCC_XP 0x00004000
/* ClockDivider (SlowClk = 1/(4+divisor)) */
#define SCC_CD_MASK 0xffff0000
#define SCC_CD_SHIFT 16

/* system_clk_ctl */
/* ILPen: Enable Idle Low Power */
#define SYCC_IE 0x00000001
/* ALPen: Enable Active Low Power */
#define SYCC_AE 0x00000002
/* ForcePLLOn */
#define SYCC_FP 0x00000004
/* Force ALP (or HT if ALPen is not set) */
#define SYCC_AR 0x00000008
/* Force HT */
#define SYCC_HR 0x00000010
/* ClkDiv (ILP = 1/(4 * (divisor + 1))) */
#define SYCC_CD_MASK 0xffff0000
#define SYCC_CD_SHIFT 16

#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
/* OTP is powered up, use def. CIS, no SPROM */
#define CST4329_DEFCIS_SEL 0
/* OTP is powered up, SPROM is present */
#define CST4329_SPROM_SEL 1
/* OTP is powered up, no SPROM */
#define CST4329_OTP_SEL 2
/* OTP is powered down, SPROM is present */
#define CST4329_OTP_PWRDN 3

#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
#define CST4329_SPI_SDIO_MODE_SHIFT 2

/* 43224 chip-specific ChipControl register bits */
#define CCTRL43224_GPIO_TOGGLE 0x8000
/* 12 mA drive strength */
#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0
/* 12 mA drive strength for later 43224s */
#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0

/* 43236 Chip specific ChipStatus register bits */
#define CST43236_SFLASH_MASK 0x00000040
#define CST43236_OTP_MASK 0x00000080
#define CST43236_HSIC_MASK 0x00000100	/* USB/HSIC */
#define CST43236_BP_CLK 0x00000200	/* 120/96Mbps */
#define CST43236_BOOT_MASK 0x00001800
#define CST43236_BOOT_SHIFT 11
#define CST43236_BOOT_FROM_SRAM 0	/* boot from SRAM, ARM in reset */
#define CST43236_BOOT_FROM_ROM 1	/* boot from ROM */
#define CST43236_BOOT_FROM_FLASH 2	/* boot from FLASH */
#define CST43236_BOOT_FROM_INVALID 3

/* 4331 chip-specific ChipControl register bits */
/* 0 disable */
#define CCTRL4331_BT_COEXIST (1<<0)
/* 0 SECI is disabled (JTAG functional) */
#define CCTRL4331_SECI (1<<1)
/* 0 disable */
#define CCTRL4331_EXT_LNA (1<<2)
/* sprom/gpio13-15 mux */
#define CCTRL4331_SPROM_GPIO13_15 (1<<3)
/* 0 ext pa disable, 1 ext pa enabled */
#define CCTRL4331_EXTPA_EN (1<<4)
/* set drive out GPIO_CLK on sprom_cs pin */
#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5)
/* use sprom_cs pin as PCIE mdio interface */
#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6)
/* aband extpa will be at gpio2/5 and sprom_dout */
#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7)
/* override core control on pipe_AuxClkEnable */
#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8)
/* override core control on pipe_AuxPowerDown */
#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9)
/* pcie_auxclkenable */
#define CCTRL4331_PCIE_AUXCLKEN (1<<10)
/* pcie_pipe_pllpowerdown */
#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11)
/* enable bt_shd0 at gpio4 */
#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16)
/* enable bt_shd1 at gpio5 */
#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17)

/* 4331 Chip specific ChipStatus register bits */
/* crystal frequency 20/40Mhz */
#define CST4331_XTAL_FREQ 0x00000001
#define CST4331_SPROM_PRESENT 0x00000002
#define CST4331_OTP_PRESENT 0x00000004
#define CST4331_LDO_RF 0x00000008
#define CST4331_LDO_PAR 0x00000010

/* 4319 chip-specific ChipStatus register bits */
#define CST4319_SPI_CPULESSUSB 0x00000001
#define CST4319_SPI_CLK_POL 0x00000002
#define CST4319_SPI_CLK_PH 0x00000008
/* gpio [7:6], SDIO CIS selection */
#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0
#define CST4319_SPROM_OTP_SEL_SHIFT 6
/* use default CIS, OTP is powered up */
#define CST4319_DEFCIS_SEL 0x00000000
/* use SPROM, OTP is powered up */
#define CST4319_SPROM_SEL 0x00000040
/* use OTP, OTP is powered up */
#define CST4319_OTP_SEL 0x00000080
/* use SPROM, OTP is powered down */
#define CST4319_OTP_PWRDN 0x000000c0
/* gpio [8], sdio/usb mode */
#define CST4319_SDIO_USB_MODE 0x00000100
#define CST4319_REMAP_SEL_MASK 0x00000600
#define CST4319_ILPDIV_EN 0x00000800
#define CST4319_XTAL_PD_POL 0x00001000
#define CST4319_LPO_SEL 0x00002000
#define CST4319_RES_INIT_MODE 0x0000c000
/* PALDO is configured with external PNP */
#define CST4319_PALDO_EXTPNP 0x00010000
#define CST4319_CBUCK_MODE_MASK 0x00060000
#define CST4319_CBUCK_MODE_BURST 0x00020000
#define CST4319_CBUCK_MODE_LPBURST 0x00060000
#define CST4319_RCAL_VALID 0x01000000
#define CST4319_RCAL_VALUE_MASK 0x3e000000
#define CST4319_RCAL_VALUE_SHIFT 25

/* 4336 chip-specific ChipStatus register bits */
#define CST4336_SPI_MODE_MASK 0x00000001
#define CST4336_SPROM_PRESENT 0x00000002
#define CST4336_OTP_PRESENT 0x00000004
#define CST4336_ARMREMAP_0 0x00000008
#define CST4336_ILPDIV_EN_MASK 0x00000010
#define CST4336_ILPDIV_EN_SHIFT 4
#define CST4336_XTAL_PD_POL_MASK 0x00000020
#define CST4336_XTAL_PD_POL_SHIFT 5
#define CST4336_LPO_SEL_MASK 0x00000040
#define CST4336_LPO_SEL_SHIFT 6
#define CST4336_RES_INIT_MODE_MASK 0x00000180
#define CST4336_RES_INIT_MODE_SHIFT 7
#define CST4336_CBUCK_MODE_MASK 0x00000600
#define CST4336_CBUCK_MODE_SHIFT 9

/* 4313 chip-specific ChipStatus register bits */
#define CST4313_SPROM_PRESENT 1
#define CST4313_OTP_PRESENT 2
#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
#define CST4313_SPROM_OTP_SEL_SHIFT 0

/* 4313 Chip specific ChipControl register bits */
/* 12 mA drive strength for later 4313 */
#define CCTRL_4313_12MA_LED_DRIVE 0x00000007

/* Manufacturer Ids */
#define MFGID_ARM 0x43b
#define MFGID_BRCM 0x4bf
#define MFGID_MIPS 0x4a7

/* Enumeration ROM registers */
#define ER_EROMENTRY 0x000
#define ER_REMAPCONTROL 0xe00
#define ER_REMAPSELECT 0xe04
#define ER_MASTERSELECT 0xe10
#define ER_ITCR 0xf00
#define ER_ITIP 0xf04

/* Erom entries */
#define ER_TAG 0xe
#define ER_TAG1 0x6
#define ER_VALID 1
#define ER_CI 0
#define ER_MP 2
#define ER_ADD 4
#define ER_END 0xe
#define ER_BAD 0xffffffff

/* EROM CompIdentA */
#define CIA_MFG_MASK 0xfff00000
#define CIA_MFG_SHIFT 20
#define CIA_CID_MASK 0x000fff00
#define CIA_CID_SHIFT 8
#define CIA_CCL_MASK 0x000000f0
#define CIA_CCL_SHIFT 4

/* EROM CompIdentB */
#define CIB_REV_MASK 0xff000000
#define CIB_REV_SHIFT 24
#define CIB_NSW_MASK 0x00f80000
#define CIB_NSW_SHIFT 19
#define CIB_NMW_MASK 0x0007c000
#define CIB_NMW_SHIFT 14
#define CIB_NSP_MASK 0x00003e00
#define CIB_NSP_SHIFT 9
#define CIB_NMP_MASK 0x000001f0
#define CIB_NMP_SHIFT 4

/* EROM AddrDesc */
#define AD_ADDR_MASK 0xfffff000
#define AD_SP_MASK 0x00000f00
#define AD_SP_SHIFT 8
#define AD_ST_MASK 0x000000c0
#define AD_ST_SHIFT 6
#define AD_ST_SLAVE 0x00000000
#define AD_ST_BRIDGE 0x00000040
#define AD_ST_SWRAP 0x00000080
#define AD_ST_MWRAP 0x000000c0
#define AD_SZ_MASK 0x00000030
#define AD_SZ_SHIFT 4
#define AD_SZ_4K 0x00000000
#define AD_SZ_8K 0x00000010
#define AD_SZ_16K 0x00000020
#define AD_SZ_SZD 0x00000030
#define AD_AG32 0x00000008
#define AD_ADDR_ALIGN 0x00000fff
#define AD_SZ_BASE 0x00001000	/* 4KB */

/* EROM SizeDesc */
#define SD_SZ_MASK 0xfffff000
#define SD_SG32 0x00000008
#define SD_SZ_ALIGN 0x00000fff

/* PCI config space bit 4 for 4306c0 slow clock source */
#define PCI_CFG_GPIO_SCS 0x10
/* PCI config space GPIO 14 for Xtal power-up */
#define PCI_CFG_GPIO_XTAL 0x40
/* PCI config space GPIO 15 for PLL power-down */
#define PCI_CFG_GPIO_PLL 0x80

/* power control defines */
#define PLL_DELAY 150		/* us pll on delay */
#define FREF_DELAY 200		/* us fref change delay */
#define XTAL_ON_DELAY 1000	/* us crystal power-on delay */

/* resetctrl */
#define AIRC_RESET 1

#define NOREV -1		/* Invalid rev */

/* GPIO Based LED powersave defines */
#define DEFAULT_GPIO_ONTIME 10	/* Default: 10% on */
#define DEFAULT_GPIO_OFFTIME 90	/* Default: 90% off */

/* When Srom support present, fields in sromcontrol */
#define SRC_START 0x80000000
#define SRC_BUSY 0x80000000
#define SRC_OPCODE 0x60000000
#define SRC_OP_READ 0x00000000
#define SRC_OP_WRITE 0x20000000
#define SRC_OP_WRDIS 0x40000000
#define SRC_OP_WREN 0x60000000
#define SRC_OTPSEL 0x00000010
#define SRC_LOCK 0x00000008
#define SRC_SIZE_MASK 0x00000006
#define SRC_SIZE_1K 0x00000000
#define SRC_SIZE_4K 0x00000002
#define SRC_SIZE_16K 0x00000004
#define SRC_SIZE_SHIFT 1
#define SRC_PRESENT 0x00000001

/* External PA enable mask */
#define GPIO_CTRL_EPA_EN_MASK 0x40

#define DEFAULT_GPIOTIMERVAL \
	((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
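/*
 * Illustrative note (not part of the original source): gpiotimerval packs
 * the LED on-time in the field selected by GPIO_ONTIME_SHIFT and the
 * off-time in the low bits.  With the defaults above the value written is
 *
 *	(10 << GPIO_ONTIME_SHIFT) | 90
 *
 * i.e. roughly a 10%/90% on/off duty cycle for the GPIO-driven LED.
 */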
#define BADIDX (SI_MAXCORES + 1)

/* Newer chips can access PCI/PCIE and CC core without having to change
 * PCI BAR0 WIN
 */
#define SI_FAST(sih) ((ai_get_buscoretype(sih) == PCIE_CORE_ID) ||	\
		      ((ai_get_buscoretype(sih) == PCI_CORE_ID) &&	\
		       ai_get_buscorerev(sih) >= 13))

#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) +	\
			  PCI_16KB0_CCREGS_OFFSET))

#define	IS_SIM(chippkg)	\
	((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))

/*
 * Macros to disable/restore function core (D11, ENET, ILINE20, etc.)
 * interrupts before and after core switching, to avoid invalid register
 * accesses inside the ISR.
 */
#define INTR_OFF(si, intr_val) \
	if ((si)->intrsoff_fn && \
	    (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)

#define INTR_RESTORE(si, intr_val) \
	if ((si)->intrsrestore_fn && \
	    (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
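/*
 * Typical (illustrative) use of the pair above around a core switch; the
 * variable names are hypothetical, not copied from a real caller:
 *
 *	uint intr_val = 0;
 *
 *	INTR_OFF(sii, intr_val);
 *	... switch cores and access registers ...
 *	INTR_RESTORE(sii, intr_val);
 */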
#define PCI(sih)	(ai_get_buscoretype(sih) == PCI_CORE_ID)
#define PCIE(sih)	(ai_get_buscoretype(sih) == PCIE_CORE_ID)

#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))

#ifdef BCMDBG
#define	SI_MSG(fmt, ...)	pr_debug(fmt, ##__VA_ARGS__)
#else
#define	SI_MSG(fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif				/* BCMDBG */

#define	GOODCOREADDR(x, b) \
	(((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
	IS_ALIGNED((x), SI_CORE_SIZE))

#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
			PCI_16KB0_PCIREGS_OFFSET)

struct aidmp {
	u32 oobselina30;	/* 0x000 */
	u32 oobselina74;	/* 0x004 */
	u32 PAD[6];
	u32 oobselinb30;	/* 0x020 */
	u32 oobselinb74;	/* 0x024 */
	u32 PAD[6];
	u32 oobselinc30;	/* 0x040 */
	u32 oobselinc74;	/* 0x044 */
	u32 PAD[6];
	u32 oobselind30;	/* 0x060 */
	u32 oobselind74;	/* 0x064 */
	u32 PAD[38];
	u32 oobselouta30;	/* 0x100 */
	u32 oobselouta74;	/* 0x104 */
	u32 PAD[6];
	u32 oobseloutb30;	/* 0x120 */
	u32 oobseloutb74;	/* 0x124 */
	u32 PAD[6];
	u32 oobseloutc30;	/* 0x140 */
	u32 oobseloutc74;	/* 0x144 */
	u32 PAD[6];
	u32 oobseloutd30;	/* 0x160 */
	u32 oobseloutd74;	/* 0x164 */
	u32 PAD[38];
	u32 oobsynca;		/* 0x200 */
	u32 oobseloutaen;	/* 0x204 */
	u32 PAD[6];
	u32 oobsyncb;		/* 0x220 */
	u32 oobseloutben;	/* 0x224 */
	u32 PAD[6];
	u32 oobsyncc;		/* 0x240 */
	u32 oobseloutcen;	/* 0x244 */
	u32 PAD[6];
	u32 oobsyncd;		/* 0x260 */
	u32 oobseloutden;	/* 0x264 */
	u32 PAD[38];
	u32 oobaextwidth;	/* 0x300 */
	u32 oobainwidth;	/* 0x304 */
	u32 oobaoutwidth;	/* 0x308 */
	u32 PAD[5];
	u32 oobbextwidth;	/* 0x320 */
	u32 oobbinwidth;	/* 0x324 */
	u32 oobboutwidth;	/* 0x328 */
	u32 PAD[5];
	u32 oobcextwidth;	/* 0x340 */
	u32 oobcinwidth;	/* 0x344 */
	u32 oobcoutwidth;	/* 0x348 */
	u32 PAD[5];
	u32 oobdextwidth;	/* 0x360 */
	u32 oobdinwidth;	/* 0x364 */
	u32 oobdoutwidth;	/* 0x368 */
	u32 PAD[37];
	u32 ioctrlset;		/* 0x400 */
	u32 ioctrlclear;	/* 0x404 */
	u32 ioctrl;		/* 0x408 */
	u32 PAD[61];
	u32 iostatus;		/* 0x500 */
	u32 PAD[127];
	u32 ioctrlwidth;	/* 0x700 */
	u32 iostatuswidth;	/* 0x704 */
	u32 PAD[62];
	u32 resetctrl;		/* 0x800 */
	u32 resetstatus;	/* 0x804 */
	u32 resetreadid;	/* 0x808 */
	u32 resetwriteid;	/* 0x80c */
	u32 PAD[60];
	u32 errlogctrl;		/* 0x900 */
	u32 errlogdone;		/* 0x904 */
	u32 errlogstatus;	/* 0x908 */
	u32 errlogaddrlo;	/* 0x90c */
	u32 errlogaddrhi;	/* 0x910 */
	u32 errlogid;		/* 0x914 */
	u32 errloguser;		/* 0x918 */
	u32 errlogflags;	/* 0x91c */
	u32 PAD[56];
	u32 intstatus;		/* 0xa00 */
	u32 PAD[127];
	u32 config;		/* 0xe00 */
	u32 PAD[63];
	u32 itcr;		/* 0xf00 */
	u32 PAD[3];
	u32 itipooba;		/* 0xf10 */
	u32 itipoobb;		/* 0xf14 */
	u32 itipoobc;		/* 0xf18 */
	u32 itipoobd;		/* 0xf1c */
	u32 PAD[4];
	u32 itipoobaout;	/* 0xf30 */
	u32 itipoobbout;	/* 0xf34 */
	u32 itipoobcout;	/* 0xf38 */
	u32 itipoobdout;	/* 0xf3c */
	u32 PAD[4];
	u32 itopooba;		/* 0xf50 */
	u32 itopoobb;		/* 0xf54 */
	u32 itopoobc;		/* 0xf58 */
	u32 itopoobd;		/* 0xf5c */
	u32 PAD[4];
	u32 itopoobain;		/* 0xf70 */
	u32 itopoobbin;		/* 0xf74 */
	u32 itopoobcin;		/* 0xf78 */
	u32 itopoobdin;		/* 0xf7c */
	u32 PAD[4];
	u32 itopreset;		/* 0xf90 */
	u32 PAD[15];
	u32 peripherialid4;	/* 0xfd0 */
	u32 peripherialid5;	/* 0xfd4 */
	u32 peripherialid6;	/* 0xfd8 */
	u32 peripherialid7;	/* 0xfdc */
	u32 peripherialid0;	/* 0xfe0 */
	u32 peripherialid1;	/* 0xfe4 */
	u32 peripherialid2;	/* 0xfe8 */
	u32 peripherialid3;	/* 0xfec */
	u32 componentid0;	/* 0xff0 */
	u32 componentid1;	/* 0xff4 */
	u32 componentid2;	/* 0xff8 */
	u32 componentid3;	/* 0xffc */
};

/* EROM parsing */

static u32
get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
{
	u32 ent;
	uint inv = 0, nom = 0;

	while (true) {
		ent = R_REG(*eromptr);
		(*eromptr)++;

		if (mask == 0)
			break;

		if ((ent & ER_VALID) == 0) {
			inv++;
			continue;
		}

		if (ent == (ER_END | ER_VALID))
			break;

		if ((ent & mask) == match)
			break;

		nom++;
	}

	return ent;
}

static u32
get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
	u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
{
	u32 asd, sz, szd;

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	if (asd & AD_AG32)
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	return asd;
}
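/*
 * Illustrative summary derived from the CIA_*/CIB_* masks above (not from
 * vendor documentation): each core in the enumeration ROM is described by a
 * CompIdentA word (manufacturer, core id, class), a CompIdentB word
 * (revision plus the number of master/slave ports and wrappers), followed by
 * master port descriptors and one or more address descriptors.  As a
 * hypothetical example, a CIA value of 0x4bf80801 would decode as
 * mfg = 0x4bf (MFGID_BRCM) and core id = 0x808.
 */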
static void ai_hwfixup(struct si_info *sii)
{
}

/* parse the enumeration rom to identify all cores */
static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
{
	struct si_info *sii = (struct si_info *)sih;

	u32 erombase;
	u32 __iomem *eromptr, *eromlim;
	void __iomem *regs = cc;

	erombase = R_REG(&cc->eromptr);

	/* Set wrappers address */
	sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);

	/* Now point the window at the erom */
	pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
	eromptr = regs;
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));

	while (eromptr < eromlim) {
		u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		u32 mpd, asd, addrl, addrh, sizel, sizeh;
		u32 __iomem *base;
		uint i, j, idx;
		bool br;

		br = false;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			/*  Found END of erom */
			ai_hwfixup(sii);
			return;
		}
		base = eromptr - 1;
		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			/* CIA not followed by CIB */
			goto error;
		}

		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
			continue;
		if ((nmw + nsw == 0)) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					      &addrl, &addrh, &sizel, &sizeh);
				if (asd != 0)
					sii->oob_router = addrl;
			}
			continue;
		}

		idx = sii->numcores;
		/* sii->eromptr[idx] = base; */
		sii->cia[idx] = cia;
		sii->cib[idx] = cib;
		sii->coreid[idx] = cid;

		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				/* Not enough MP entries for component */
				goto error;
			}
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
			      &sizel, &sizeh);
		if (asd == 0) {
			/* Try again to see if it is a bridge */
			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE,
				      &addrl, &addrh, &sizel, &sizeh);
			if (asd != 0)
				br = true;
			else if ((addrh != 0) || (sizeh != 0)
				 || (sizel != SI_CORE_SIZE)) {
				/* First Slave ASD for core malformed */
				goto error;
			}
		}
		sii->coresba[idx] = addrl;
		sii->coresba_size[idx] = sizel;

		/* Get any more ASDs in port 0 */
		j = 1;
		do {
			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
				      &addrh, &sizel, &sizeh);
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				sii->coresba2[idx] = addrl;
				sii->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd = get_asd(sih, &eromptr, i, j++,
					      AD_ST_SLAVE, &addrl, &addrh,
					      &sizel, &sizeh);
			} while (asd != 0);
			if (j == 0) {
				/* SP has no address descriptors */
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
				      &addrh, &sizel, &sizeh);
			if (asd == 0) {
				/* Missing descriptor for MW */
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				/* Master wrapper %d is not 4KB */
				goto error;
			}
			if (i == 0)
				sii->wrapba[idx] = addrl;
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			uint fwp = (nsp == 1) ? 0 : 1;
			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
				      &addrl, &addrh, &sizel, &sizeh);
			if (asd == 0) {
				/* Missing descriptor for SW */
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				/* Slave wrapper is not 4KB */
				goto error;
			}
			if ((nmw == 0) && (i == 0))
				sii->wrapba[idx] = addrl;
		}

		/* Don't record bridges */
		if (br)
			continue;

		/* Done with core */
		sii->numcores++;
	}

 error:
	/* Reached end of erom without finding END */
	sii->numcores = 0;
	return;
}

/*
 * This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address. Since each core starts with the
 * same set of registers (BIST, clock control, etc), the returned address
 * contains the first register of this 'common' register block (not to be
 * confused with 'common core').
 */
void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
{
	struct si_info *sii = (struct si_info *)sih;
	u32 addr = sii->coresba[coreidx];
	u32 wrap = sii->wrapba[coreidx];

	if (coreidx >= sii->numcores)
		return NULL;

	/* point bar0 window */
	pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
	/* point bar0 2nd 4KB window */
	pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
	sii->curidx = coreidx;

	return sii->curmap;
}
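/*
 * Illustrative (hypothetical) caller sketch, not taken from the driver:
 * switch focus to core index 0, read the first register of the returned
 * window, then switch back to a previously saved index:
 *
 *	void __iomem *regs = ai_setcoreidx(sih, 0);
 *
 *	if (regs)
 *		val = R_REG((u32 __iomem *)regs);
 *	ai_setcoreidx(sih, origidx);
 */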
/* Return the number of address spaces in current core */
int ai_numaddrspaces(struct si_pub *sih)
{
	return 2;
}

/* Return the address of the nth address space in the current core */
u32 ai_addrspace(struct si_pub *sih, uint asidx)
{
	struct si_info *sii;
	uint cidx;

	sii = (struct si_info *)sih;
	cidx = sii->curidx;

	if (asidx == 0)
		return sii->coresba[cidx];
	else if (asidx == 1)
		return sii->coresba2[cidx];
	else {
		/* Need to parse the erom again to find addr space */
		return 0;
	}
}

/* Return the size of the nth address space in the current core */
u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
{
	struct si_info *sii;
	uint cidx;

	sii = (struct si_info *)sih;
	cidx = sii->curidx;

	if (asidx == 0)
		return sii->coresba_size[cidx];
	else if (asidx == 1)
		return sii->coresba2_size[cidx];
	else {
		/* Need to parse the erom again to find addr */
		return 0;
	}
}

uint ai_flag(struct si_pub *sih)
{
	struct si_info *sii;
	struct aidmp *ai;

	sii = (struct si_info *)sih;
	ai = sii->curwrap;

	return R_REG(&ai->oobselouta30) & 0x1f;
}

void ai_setint(struct si_pub *sih, int siflag)
{
}

uint ai_corevendor(struct si_pub *sih)
{
	struct si_info *sii;
	u32 cia;

	sii = (struct si_info *)sih;
	cia = sii->cia[sii->curidx];
	return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
}

uint ai_corerev(struct si_pub *sih)
{
	struct si_info *sii;
	u32 cib;

	sii = (struct si_info *)sih;
	cib = sii->cib[sii->curidx];
	return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
}

bool ai_iscoreup(struct si_pub *sih)
{
	struct si_info *sii;
	struct aidmp *ai;

	sii = (struct si_info *)sih;
	ai = sii->curwrap;

	return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
		 SICF_CLOCK_EN)
		&& ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
}

void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
{
	struct si_info *sii;
	struct aidmp *ai;
	u32 w;

	sii = (struct si_info *)sih;
	ai = sii->curwrap;

	if (mask || val) {
		w = ((R_REG(&ai->ioctrl) & ~mask) | val);
		W_REG(&ai->ioctrl, w);
	}
}

u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
{
	struct si_info *sii;
	struct aidmp *ai;
	u32 w;

	sii = (struct si_info *)sih;
	ai = sii->curwrap;

	if (mask || val) {
		w = ((R_REG(&ai->ioctrl) & ~mask) | val);
		W_REG(&ai->ioctrl, w);
	}

	return R_REG(&ai->ioctrl);
}

/* return true if PCIE capability exists in the pci config space */
static bool ai_ispcie(struct si_info *sii)
{
	u8 cap_ptr;

	cap_ptr = pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
					      NULL);
	if (!cap_ptr)
		return false;

	return true;
}

static bool ai_buscore_prep(struct si_info *sii)
{
	/* kludge to enable the clock on the 4306 which lacks a slowclock */
	if (!ai_ispcie(sii))
		ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
	return true;
}

u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
{
	struct si_info *sii;
	struct aidmp *ai;
	u32 w;

	sii = (struct si_info *)sih;
	ai = sii->curwrap;

	if (mask || val) {
		w = ((R_REG(&ai->iostatus) & ~mask) | val);
		W_REG(&ai->iostatus, w);
	}

	return R_REG(&ai->iostatus);
}

static bool
ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
{
	bool pci, pcie;
	uint i;
	uint pciidx, pcieidx, pcirev, pcierev;
	struct chipcregs __iomem *cc;

	cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);

	/* get chipcommon rev */
	sii->pub.ccrev = (int)ai_corerev(&sii->pub);

	/* get chipcommon chipstatus */
	if (ai_get_ccrev(&sii->pub) >= 11)
		sii->chipst = R_REG(&cc->chipstatus);

	/* get chipcommon capabilities */
	sii->pub.cccaps = R_REG(&cc->capabilities);

	/* get pmu rev and caps */
	if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
		sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
	}

	/* figure out bus/original core idx */
	sii->pub.buscoretype = NODEV_CORE_ID;
	sii->pub.buscorerev = NOREV;
	sii->buscoreidx = BADIDX;

	pci = pcie = false;
	pcirev = pcierev = NOREV;
	pciidx = pcieidx = BADIDX;
	for (i = 0; i < sii->numcores; i++) {
		uint cid, crev;

		ai_setcoreidx(&sii->pub, i);
		cid = ai_coreid(&sii->pub);
		crev = ai_corerev(&sii->pub);

		if (cid == PCI_CORE_ID) {
			pciidx = i;
			pcirev = crev;
			pci = true;
		} else if (cid == PCIE_CORE_ID) {
			pcieidx = i;
			pcierev = crev;
			pcie = true;
		}

		/* find the core idx before entering this func. */
		if ((savewin && (savewin == sii->coresba[i])) ||
		    (cc == sii->regs[i]))
			*origidx = i;
	}

	if (pci && pcie) {
		if (ai_ispcie(sii))
			pci = false;
		else
			pcie = false;
	}
	if (pci) {
		sii->pub.buscoretype = PCI_CORE_ID;
		sii->pub.buscorerev = pcirev;
		sii->buscoreidx = pciidx;
	} else if (pcie) {
		sii->pub.buscoretype = PCIE_CORE_ID;
		sii->pub.buscorerev = pcierev;
		sii->buscoreidx = pcieidx;
	}

	/* fixup necessary chip/core configurations */
	if (SI_FAST(&sii->pub)) {
		if (!sii->pch) {
			sii->pch = pcicore_init(&sii->pub, sii->pbus,
						(__iomem void *)PCIEREGS(sii));
			if (sii->pch == NULL)
				return false;
		}
	}
	if (ai_pci_fixcfg(&sii->pub)) {
		/* si_doattach: si_pci_fixcfg failed */
		return false;
	}

	/* return to the original core */
	ai_setcoreidx(&sii->pub, *origidx);

	return true;
}

/*
 * get boardtype and boardrev
 */
static __used void ai_nvram_process(struct si_info *sii)
{
	uint w = 0;

	/* do a pci config read to get subsystem id and subvendor id */
	pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);

	sii->pub.boardvendor = w & 0xffff;
	sii->pub.boardtype = (w >> 16) & 0xffff;
}

static struct si_info *ai_doattach(struct si_info *sii,
				   void __iomem *regs, struct pci_dev *pbus)
{
	struct si_pub *sih = &sii->pub;
	u32 w, savewin;
	struct chipcregs __iomem *cc;
	uint socitype;
	uint origidx;

	memset((unsigned char *) sii, 0, sizeof(struct si_info));

	savewin = 0;

	sii->buscoreidx = BADIDX;

	sii->curmap = regs;
	sii->pbus = pbus;

	/* find Chipcommon address */
	pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
	if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
		savewin = SI_ENUM_BASE;

	pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
			       SI_ENUM_BASE);
	cc = (struct chipcregs __iomem *) regs;

	/* bus/core/clk setup for register access */
	if (!ai_buscore_prep(sii))
		return NULL;

	/*
	 * ChipID recognition.
	 *   We assume we can read chipid at offset 0 from the regs arg.
	 *   If we add other chiptypes (or if we need to support old sdio
	 *   hosts w/o chipcommon), some way of recognizing them needs to
	 *   be added here.
	 */
	w = R_REG(&cc->chipid);
	socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
	/* Might as well fill in chip id rev & pkg */
	sih->chip = w & CID_ID_MASK;
	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;

	/* scan for cores */
	if (socitype == SOCI_AI) {
		SI_MSG("Found chip type AI (0x%08x)\n", w);
		/* pass chipc address instead of original core base */
		ai_scan(&sii->pub, cc);
	} else {
		/* Found chip of unknown type */
		return NULL;
	}

	/* no cores found, bail out */
	if (sii->numcores == 0)
		return NULL;

	/* bus/core/clk setup */
	origidx = SI_CC_IDX;
	if (!ai_buscore_setup(sii, savewin, &origidx))
		goto exit;

	/* Init nvram from sprom/otp if they exist */
	if (srom_var_init(&sii->pub, cc))
		goto exit;

	ai_nvram_process(sii);

	/* === NVRAM, clock is ready === */
	cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
	W_REG(&cc->gpiopullup, 0);
	W_REG(&cc->gpiopulldown, 0);
	ai_setcoreidx(sih, origidx);

	/* PMU specific initializations */
	if (ai_get_cccaps(sih) & CC_CAP_PMU) {
		u32 xtalfreq;
		si_pmu_init(sih);
		si_pmu_chip_init(sih);
		xtalfreq = si_pmu_measure_alpclk(sih);
		si_pmu_pll_init(sih, xtalfreq);
		si_pmu_res_init(sih);
		si_pmu_swreg_init(sih);
	}

	/* setup the GPIO based LED powersave register */
	w = getintvar(sih, BRCMS_SROM_LEDDC);
	if (w == 0)
		w = DEFAULT_GPIOTIMERVAL;
	ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
		   ~0, w);

	if (PCIE(sih))
		pcicore_attach(sii->pch, SI_DOATTACH);

	if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
		/*
		 * enable 12 mA drive strength for 43224 and
		 * set chipControl register bit 15
		 */
		if (ai_get_chiprev(sih) == 0) {
			SI_MSG("Applying 43224A0 WARs\n");
			ai_corereg(sih, SI_CC_IDX,
				   offsetof(struct chipcregs, chipcontrol),
				   CCTRL43224_GPIO_TOGGLE,
				   CCTRL43224_GPIO_TOGGLE);
			si_pmu_chipcontrol(sih, 0,
					   CCTRL_43224A0_12MA_LED_DRIVE,
					   CCTRL_43224A0_12MA_LED_DRIVE);
		}
		if (ai_get_chiprev(sih) >= 1) {
			SI_MSG("Applying 43224B0+ WARs\n");
			si_pmu_chipcontrol(sih, 0,
					   CCTRL_43224B0_12MA_LED_DRIVE,
					   CCTRL_43224B0_12MA_LED_DRIVE);
		}
	}

	if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
		/*
		 * enable 12 mA drive strength for 4313 and
		 * set chipControl register bit 1
		 */
		SI_MSG("Applying 4313 WARs\n");
		si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
				   CCTRL_4313_12MA_LED_DRIVE);
	}

	return sii;

 exit:
	if (sii->pch)
		pcicore_deinit(sii->pch);
	sii->pch = NULL;

	return NULL;
}

/*
 * Allocate a si handle.
 * devid - pci device id (used to determine chip#)
 * osh - opaque OS handle
 * regs - virtual address of initial core registers
 */
struct si_pub *
ai_attach(void __iomem *regs, struct pci_dev *sdh)
{
	struct si_info *sii;

	/* alloc struct si_info */
	sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
	if (sii == NULL)
		return NULL;

	if (ai_doattach(sii, regs, sdh) == NULL) {
		kfree(sii);
		return NULL;
	}

	return (struct si_pub *) sii;
}
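/*
 * Illustrative (hypothetical) attach/detach pairing from a probe path; the
 * surrounding names (regs, pdev) are invented for the example only:
 *
 *	struct si_pub *sih = ai_attach(regs, pdev);
 *
 *	if (!sih)
 *		return -ENODEV;
 *	... use the handle ...
 *	ai_detach(sih);
 */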
/* may be called with core in reset */
void ai_detach(struct si_pub *sih)
{
	struct si_info *sii;
	struct si_pub *si_local = NULL;
	memcpy(&si_local, &sih, sizeof(struct si_pub **));

	sii = (struct si_info *)sih;

	if (sii == NULL)
		return;

	if (sii->pch)
		pcicore_deinit(sii->pch);
	sii->pch = NULL;

	srom_free_vars(sih);
	kfree(sii);
}

/* register driver interrupt disabling and restoring callback functions */
void
ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
			  void *intrsrestore_fn,
			  void *intrsenabled_fn, void *intr_arg)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;
	sii->intr_arg = intr_arg;
	sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
	sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
	sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
	/* save current core id.  when this function is called, the current
	 * core must be the core which provides driver functions (il, et,
	 * wl, etc.)
	 */
	sii->dev_coreid = sii->coreid[sii->curidx];
}

void ai_deregister_intr_callback(struct si_pub *sih)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;
	sii->intrsoff_fn = NULL;
}

uint ai_coreid(struct si_pub *sih)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;
	return sii->coreid[sii->curidx];
}

uint ai_coreidx(struct si_pub *sih)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;
	return sii->curidx;
}

bool ai_backplane64(struct si_pub *sih)
{
	return (ai_get_cccaps(sih) & CC_CAP_BKPLN64) != 0;
}

/* return index of coreid or BADIDX if not found */
uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
{
	struct si_info *sii;
	uint found;
	uint i;

	sii = (struct si_info *)sih;

	found = 0;

	for (i = 0; i < sii->numcores; i++)
		if (sii->coreid[i] == coreid) {
			if (found == coreunit)
				return i;
			found++;
		}

	return BADIDX;
}

/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off during switching
 * out of and back to d11 core.
 */
void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
{
	uint idx;

	idx = ai_findcoreidx(sih, coreid, coreunit);
	if (idx >= SI_MAXCORES)
		return NULL;

	return ai_setcoreidx(sih, idx);
}

/* Turn off interrupt as required by ai_setcore, before switch core */
void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
			     uint *intr_val)
{
	void __iomem *cc;
	struct si_info *sii;

	sii = (struct si_info *)sih;

	if (SI_FAST(sih)) {
		/* Overloading the origidx variable to remember the coreid,
		 * this works because the core ids cannot be confused with
		 * core indices.
		 */
		*origidx = coreid;
		if (coreid == CC_CORE_ID)
			return CCREGS_FAST(sii);
		else if (coreid == ai_get_buscoretype(sih))
			return PCIEREGS(sii);
	}
	INTR_OFF(sii, *intr_val);
	*origidx = sii->curidx;
	cc = ai_setcore(sih, coreid, 0);
	return cc;
}

/* restore coreidx and restore interrupt */
void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;
	if (SI_FAST(sih)
	    && ((coreid == CC_CORE_ID) || (coreid == ai_get_buscoretype(sih))))
		return;

	ai_setcoreidx(sih, coreid);
	INTR_RESTORE(sii, intr_val);
}

void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
{
	struct si_info *sii = (struct si_info *)sih;
	u32 *w = (u32 *) sii->curwrap;
	W_REG(w + (offset / 4), val);
	return;
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
 * operation, switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core
 * switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci
 * registers and (on newer pci cores) chipcommon registers.
 */
uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
		uint val)
{
	uint origidx = 0;
	u32 __iomem *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = false;
	struct si_info *sii;

	sii = (struct si_info *)sih;

	if (coreidx >= SI_MAXCORES)
		return 0;

	/*
	 * If pci/pcie, we can get at pci/pcie regs
	 * and on newer cores to chipc
	 */
	if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sih)) {
		/* Chipc registers are mapped at 12KB */
		fast = true;
		r = (u32 __iomem *)((__iomem char *)sii->curmap +
				    PCI_16KB0_CCREGS_OFFSET + regoff);
	} else if (sii->buscoreidx == coreidx) {
		/*
		 * pci registers are either in the last 2KB of an 8KB window
		 * or, in pcie and pci rev 13, at 8KB
		 */
		fast = true;
		if (SI_FAST(sih))
			r = (u32 __iomem *)((__iomem char *)sii->curmap +
					    PCI_16KB0_PCIREGS_OFFSET + regoff);
		else
			r = (u32 __iomem *)((__iomem char *)sii->curmap +
					    ((regoff >= SBCONFIGOFF) ?
					     PCI_BAR0_PCISBR_OFFSET :
					     PCI_BAR0_PCIREGS_OFFSET) + regoff);
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = ai_coreidx(&sii->pub);

		/* switch core */
		r = (u32 __iomem *) ((unsigned char __iomem *)
				     ai_setcoreidx(&sii->pub, coreidx) +
				     regoff);
	}

	/* mask and set */
	if (mask || val) {
		w = (R_REG(r) & ~mask) | val;
		W_REG(r, w);
	}

	/* readback */
	w = R_REG(r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return w;
}
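/*
 * Illustrative uses of ai_corereg(): a pure read passes mask == 0 and
 * val == 0, a full overwrite passes ~0 as the mask.  The register offset
 * below is the same one already used in ai_doattach() above:
 *
 *	// read gpiotimerval without modifying it
 *	w = ai_corereg(sih, SI_CC_IDX,
 *		       offsetof(struct chipcregs, gpiotimerval), 0, 0);
 *	// overwrite it completely
 *	ai_corereg(sih, SI_CC_IDX,
 *		   offsetof(struct chipcregs, gpiotimerval), ~0, w);
 */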
void ai_core_disable(struct si_pub *sih, u32 bits)
{
	struct si_info *sii;
	u32 dummy;
	struct aidmp *ai;

	sii = (struct si_info *)sih;

	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(&ai->resetctrl) & AIRC_RESET)
		return;

	W_REG(&ai->ioctrl, bits);
	dummy = R_REG(&ai->ioctrl);
	udelay(10);

	W_REG(&ai->resetctrl, AIRC_RESET);
	udelay(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
{
	struct si_info *sii;
	struct aidmp *ai;
	u32 dummy;

	sii = (struct si_info *)sih;
	ai = sii->curwrap;

	/*
	 * Must do the disable sequence first to work
	 * for arbitrary current core state.
	 */
	ai_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */
	W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(&ai->ioctrl);
	W_REG(&ai->resetctrl, 0);
	udelay(1);

	W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(&ai->ioctrl);
	udelay(1);
}

/* return the slow clock source - LPO, XTAL, or PCI */
static uint ai_slowclk_src(struct si_info *sii)
{
	struct chipcregs __iomem *cc;
	u32 val;

	if (ai_get_ccrev(&sii->pub) < 6) {
		pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
				      &val);
		if (val & PCI_CFG_GPIO_SCS)
			return SCC_SS_PCI;
		return SCC_SS_XTAL;
	} else if (ai_get_ccrev(&sii->pub) < 10) {
		cc = (struct chipcregs __iomem *)
			ai_setcoreidx(&sii->pub, sii->curidx);
		return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
	} else			/* Insta-clock */
		return SCC_SS_XTAL;
}

/*
 * return the ILP (slowclock) min or max frequency
 * precondition: we've established the chip has dynamic clk control
 */
static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
			    struct chipcregs __iomem *cc)
{
	u32 slowclk;
	uint div;

	slowclk = ai_slowclk_src(sii);

	if (ai_get_ccrev(&sii->pub) < 6) {
		if (slowclk == SCC_SS_PCI)
			return max_freq ? (PCIMAXFREQ / 64)
				: (PCIMINFREQ / 64);
		else
			return max_freq ? (XTALMAXFREQ / 32)
				: (XTALMINFREQ / 32);
	} else if (ai_get_ccrev(&sii->pub) < 10) {
		div = 4 *
		    (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
		      SCC_CD_SHIFT) + 1);
		if (slowclk == SCC_SS_LPO)
			return max_freq ? LPOMAXFREQ : LPOMINFREQ;
		else if (slowclk == SCC_SS_XTAL)
			return max_freq ? (XTALMAXFREQ / div)
				: (XTALMINFREQ / div);
		else if (slowclk == SCC_SS_PCI)
			return max_freq ? (PCIMAXFREQ / div)
				: (PCIMINFREQ / div);
	} else {
		/* Chipc rev 10 is InstaClock */
		div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
		div = 4 * (div + 1);
		return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
	}
	return 0;
}

static void
ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
{
	uint slowmaxfreq, pll_delay, slowclk;
	uint pll_on_delay, fref_sel_delay;

	pll_delay = PLL_DELAY;

	/*
	 * If the slow clock is not sourced by the xtal then
	 * add the xtal_on_delay since the xtal will also be
	 * powered down by dynamic clk control logic.
	 */

	slowclk = ai_slowclk_src(sii);
	if (slowclk != SCC_SS_XTAL)
		pll_delay += XTAL_ON_DELAY;

	/* Starting with 4318 it is ILP that is used for the delays */
	slowmaxfreq =
	    ai_slowclk_freq(sii,
			    (ai_get_ccrev(&sii->pub) >= 10) ? false : true, cc);

	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;

	W_REG(&cc->pll_on_delay, pll_on_delay);
	W_REG(&cc->fref_sel_delay, fref_sel_delay);
}
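/*
 * Illustrative arithmetic (example figures, not a measurement): the delay
 * values written above convert microseconds into slow-clock ticks, rounding
 * up.  Assuming a 32768 Hz slow clock and the default PLL_DELAY of 150 us:
 *
 *	pll_on_delay = (32768 * 150 + 999999) / 1000000 = 5 ticks
 */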
  1342. /* initialize power control delay registers */
  1343. void ai_clkctl_init(struct si_pub *sih)
  1344. {
  1345. struct si_info *sii;
  1346. uint origidx = 0;
  1347. struct chipcregs __iomem *cc;
  1348. bool fast;
  1349. if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
  1350. return;
  1351. sii = (struct si_info *)sih;
  1352. fast = SI_FAST(sih);
  1353. if (!fast) {
  1354. origidx = sii->curidx;
  1355. cc = (struct chipcregs __iomem *)
  1356. ai_setcore(sih, CC_CORE_ID, 0);
  1357. if (cc == NULL)
  1358. return;
  1359. } else {
  1360. cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
  1361. if (cc == NULL)
  1362. return;
  1363. }
  1364. /* set all Instaclk chip ILP to 1 MHz */
  1365. if (ai_get_ccrev(sih) >= 10)
  1366. SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
  1367. (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
  1368. ai_clkctl_setdelay(sii, cc);
  1369. if (!fast)
  1370. ai_setcoreidx(sih, origidx);
  1371. }
  1372. /*
  1373. * return the value suitable for writing to the
  1374. * dot11 core FAST_PWRUP_DELAY register
  1375. */
  1376. u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
  1377. {
  1378. struct si_info *sii;
  1379. uint origidx = 0;
  1380. struct chipcregs __iomem *cc;
  1381. uint slowminfreq;
  1382. u16 fpdelay;
  1383. uint intr_val = 0;
  1384. bool fast;
  1385. sii = (struct si_info *)sih;
  1386. if (ai_get_cccaps(sih) & CC_CAP_PMU) {
  1387. INTR_OFF(sii, intr_val);
  1388. fpdelay = si_pmu_fast_pwrup_delay(sih);
  1389. INTR_RESTORE(sii, intr_val);
  1390. return fpdelay;
  1391. }
  1392. if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
  1393. return 0;
  1394. fast = SI_FAST(sih);
  1395. fpdelay = 0;
  1396. if (!fast) {
  1397. origidx = sii->curidx;
  1398. INTR_OFF(sii, intr_val);
  1399. cc = (struct chipcregs __iomem *)
  1400. ai_setcore(sih, CC_CORE_ID, 0);
  1401. if (cc == NULL)
  1402. goto done;
  1403. } else {
  1404. cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
  1405. if (cc == NULL)
  1406. goto done;
  1407. }
  1408. slowminfreq = ai_slowclk_freq(sii, false, cc);
  1409. fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
  1410. (slowminfreq - 1)) / slowminfreq;
  1411. done:
  1412. if (!fast) {
  1413. ai_setcoreidx(sih, origidx);
  1414. INTR_RESTORE(sii, intr_val);
  1415. }
  1416. return fpdelay;
  1417. }

/* turn primary xtal and/or pll off/on */
int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
{
	struct si_info *sii;
	u32 in, out, outen;

	sii = (struct si_info *)sih;

	/* pcie core doesn't have any mapping to control the xtal pu */
	if (PCIE(sih))
		return -1;

	pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
	pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
	pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);

	/*
	 * Avoid glitching the clock if GPRS is already using it.
	 * We can't actually read the state of the PLLPD so we infer it
	 * by the value of XTAL_PU which *is* readable via gpioin.
	 */
	if (on && (in & PCI_CFG_GPIO_XTAL))
		return 0;

	if (what & XTAL)
		outen |= PCI_CFG_GPIO_XTAL;
	if (what & PLL)
		outen |= PCI_CFG_GPIO_PLL;

	if (on) {
		/* turn primary xtal on */
		if (what & XTAL) {
			out |= PCI_CFG_GPIO_XTAL;
			if (what & PLL)
				out |= PCI_CFG_GPIO_PLL;
			pci_write_config_dword(sii->pbus,
					       PCI_GPIO_OUT, out);
			pci_write_config_dword(sii->pbus,
					       PCI_GPIO_OUTEN, outen);
			udelay(XTAL_ON_DELAY);
		}

		/* turn pll on */
		if (what & PLL) {
			out &= ~PCI_CFG_GPIO_PLL;
			pci_write_config_dword(sii->pbus,
					       PCI_GPIO_OUT, out);
			mdelay(2);
		}
	} else {
		if (what & XTAL)
			out &= ~PCI_CFG_GPIO_XTAL;
		if (what & PLL)
			out |= PCI_CFG_GPIO_PLL;
		pci_write_config_dword(sii->pbus,
				       PCI_GPIO_OUT, out);
		pci_write_config_dword(sii->pbus,
				       PCI_GPIO_OUTEN, outen);
	}

	return 0;
}
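
/*
 * Illustrative usage sketch (not part of the original source): the xtal
 * and PLL are driven through PCI config-space GPIO bits, and the PLL bit
 * is a power-down control, which is why it is cleared above to power the
 * PLL up and set to power it down.  A caller bringing both up and later
 * releasing them might do:
 *
 *	ai_clkctl_xtal(sih, XTAL | PLL, ON);	// power xtal and PLL up
 *	...
 *	ai_clkctl_xtal(sih, XTAL | PLL, OFF);	// power both back down
 *
 * On PCIe devices the call simply returns -1, since there is no GPIO
 * mapping for the xtal power-up control there.
 */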

/* clk control mechanism through chipcommon, no policy checking */
static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
{
	uint origidx = 0;
	struct chipcregs __iomem *cc;
	u32 scc;
	uint intr_val = 0;
	bool fast = SI_FAST(&sii->pub);

	/* chipcommon cores prior to rev6 don't support dynamic clock control */
	if (ai_get_ccrev(&sii->pub) < 6)
		return false;

	if (!fast) {
		INTR_OFF(sii, intr_val);
		origidx = sii->curidx;
		cc = (struct chipcregs __iomem *)
			ai_setcore(&sii->pub, CC_CORE_ID, 0);
	} else {
		cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
		if (cc == NULL)
			goto done;
	}

	if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
	    (ai_get_ccrev(&sii->pub) < 20))
		goto done;

	switch (mode) {
	case CLK_FAST:		/* FORCEHT, fast (pll) clock */
		if (ai_get_ccrev(&sii->pub) < 10) {
			/*
			 * don't forget to force xtal back
			 * on before we clear SCC_DYN_XTAL..
			 */
			ai_clkctl_xtal(&sii->pub, XTAL, ON);
			SET_REG(&cc->slow_clk_ctl,
				(SCC_XC | SCC_FS | SCC_IP), SCC_IP);
		} else if (ai_get_ccrev(&sii->pub) < 20) {
			OR_REG(&cc->system_clk_ctl, SYCC_HR);
		} else {
			OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
		}

		/* wait for the PLL */
		if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
			u32 htavail = CCS_HTAVAIL;
			SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
				  == 0), PMU_MAX_TRANSITION_DLY);
		} else {
			udelay(PLL_DELAY);
		}
		break;

	case CLK_DYNAMIC:	/* enable dynamic clock control */
		if (ai_get_ccrev(&sii->pub) < 10) {
			scc = R_REG(&cc->slow_clk_ctl);
			scc &= ~(SCC_FS | SCC_IP | SCC_XC);
			if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
				scc |= SCC_XC;
			W_REG(&cc->slow_clk_ctl, scc);

			/*
			 * for dynamic control, we have to
			 * release our xtal_pu "force on"
			 */
			if (scc & SCC_XC)
				ai_clkctl_xtal(&sii->pub, XTAL, OFF);
		} else if (ai_get_ccrev(&sii->pub) < 20) {
			/* Instaclock */
			AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
		} else {
			AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
		}
		break;

	default:
		break;
	}

 done:
	if (!fast) {
		ai_setcoreidx(&sii->pub, origidx);
		INTR_RESTORE(sii, intr_val);
	}
	return mode == CLK_FAST;
}

/*
 * clock control policy function through chipcommon
 *
 * set dynamic clk control mode (forceslow, forcefast, dynamic)
 * returns true if we are forcing fast clock
 * this is a wrapper over the next internal function
 * to allow flexible policy settings for outside caller
 */
bool ai_clkctl_cc(struct si_pub *sih, uint mode)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;

	/* chipcommon cores prior to rev6 don't support dynamic clock control */
	if (ai_get_ccrev(sih) < 6)
		return false;

	if (PCI_FORCEHT(sih))
		return mode == CLK_FAST;

	return _ai_clkctl_cc(sii, mode);
}
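
/*
 * Illustrative usage sketch (not part of the original source): callers
 * typically force the fast (HT) clock around register-intensive work and
 * then hand control back to dynamic clock gating:
 *
 *	ai_clkctl_cc(sih, CLK_FAST);	// force the fast clock on
 *	// ... work that needs the fast clock ...
 *	ai_clkctl_cc(sih, CLK_DYNAMIC);	// re-enable dynamic clock control
 *
 * The return value is true only when the fast clock is being forced.
 */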

/* Build device path */
int ai_devpath(struct si_pub *sih, char *path, int size)
{
	int slen;

	if (!path || size <= 0)
		return -1;

	slen = snprintf(path, (size_t) size, "pci/%u/%u/",
			((struct si_info *)sih)->pbus->bus->number,
			PCI_SLOT(((struct pci_dev *)
				  (((struct si_info *)(sih))->pbus))->devfn));

	if (slen < 0 || slen >= size) {
		path[0] = '\0';
		return -1;
	}
	return 0;
}
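
/*
 * Illustrative example (not part of the original source): for a device on
 * PCI bus 1, slot 0, ai_devpath() fills the buffer with "pci/1/0/":
 *
 *	char path[16];	// enough for "pci/<bus>/<slot>/"
 *
 *	if (ai_devpath(sih, path, sizeof(path)) == 0)
 *		pr_debug("device path: %s\n", path);	// e.g. "pci/1/0/"
 */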

void ai_pci_up(struct si_pub *sih)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;

	if (PCI_FORCEHT(sih))
		_ai_clkctl_cc(sii, CLK_FAST);

	if (PCIE(sih))
		pcicore_up(sii->pch, SI_PCIUP);
}

/* Unconfigure and/or apply various WARs when the system goes to sleep */
void ai_pci_sleep(struct si_pub *sih)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;

	pcicore_sleep(sii->pch);
}

/* Unconfigure and/or apply various WARs when going down */
void ai_pci_down(struct si_pub *sih)
{
	struct si_info *sii;

	sii = (struct si_info *)sih;

	/* release FORCEHT since chip is going to "down" state */
	if (PCI_FORCEHT(sih))
		_ai_clkctl_cc(sii, CLK_DYNAMIC);

	pcicore_down(sii->pch, SI_PCIDOWN);
}

/*
 * Configure the pci core for pci client (NIC) action.
 * coremask is the bitvec of cores by index to be enabled.
 */
void ai_pci_setup(struct si_pub *sih, uint coremask)
{
	struct si_info *sii;
	struct sbpciregs __iomem *regs = NULL;
	u32 siflag = 0, w;
	uint idx = 0;

	sii = (struct si_info *)sih;

	if (PCI(sih)) {
		/* get current core index */
		idx = sii->curidx;

		/* we interrupt on this backplane flag number */
		siflag = ai_flag(sih);

		/* switch over to pci core */
		regs = ai_setcoreidx(sih, sii->buscoreidx);
	}

	/*
	 * Enable sb->pci interrupts.  Assume
	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
	 */
	if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
		/* pci config write to set this core bit in PCIIntMask */
		pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
		w |= (coremask << PCI_SBIM_SHIFT);
		pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
	} else {
		/* set sbintvec bit for our flag number */
		ai_setint(sih, siflag);
	}

	if (PCI(sih)) {
		pcicore_pci_setup(sii->pch, regs);

		/* switch back to previous core */
		ai_setcoreidx(sih, idx);
	}
}
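
/*
 * Worked example (illustrative, not part of the original source): coremask
 * is a bitvector indexed by core, and PCI_SBIM_SHIFT places it at the
 * backplane-core interrupt-mask field of PCIIntMask.  With only core
 * index 2 enabled:
 *
 *	coremask = 1 << 2;
 *	w |= coremask << PCI_SBIM_SHIFT;	// sets bit (PCI_SBIM_SHIFT + 2)
 *
 * so interrupts from that backplane core are forwarded onto PCI.
 */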

/*
 * Fixup SROMless PCI device's configuration.
 * The current core may be changed upon return.
 */
int ai_pci_fixcfg(struct si_pub *sih)
{
	uint origidx;
	void __iomem *regs = NULL;
	struct si_info *sii = (struct si_info *)sih;

	/* Fixup PI in SROM shadow area to enable the correct PCI core access */
	/* save the current index */
	origidx = ai_coreidx(&sii->pub);

	/* check 'pi' is correct and fix it if not */
	regs = ai_setcore(&sii->pub, ai_get_buscoretype(sih), 0);
	if (ai_get_buscoretype(sih) == PCIE_CORE_ID)
		pcicore_fixcfg_pcie(sii->pch,
				    (struct sbpcieregs __iomem *)regs);
	else if (ai_get_buscoretype(sih) == PCI_CORE_ID)
		pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);

	/* restore the original index */
	ai_setcoreidx(&sii->pub, origidx);

	pcicore_hwup(sii->pch);
	return 0;
}

/* mask&set gpiocontrol bits */
u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
{
	uint regoff;

	regoff = offsetof(struct chipcregs, gpiocontrol);
	return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
}
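
/*
 * Illustrative usage sketch (not part of the original source): the
 * mask/val pair gives read-modify-write semantics on chipcommon's
 * gpiocontrol register, so setting bit 5 while leaving the other bits
 * untouched is:
 *
 *	ai_gpiocontrol(sih, BIT(5), BIT(5), 0);	// set gpiocontrol bit 5
 *
 * The priority argument is accepted but not used by this implementation.
 */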

void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
{
	struct si_info *sii;
	struct chipcregs __iomem *cc;
	uint origidx;
	u32 val;

	sii = (struct si_info *)sih;
	origidx = ai_coreidx(sih);

	cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);

	val = R_REG(&cc->chipcontrol);

	if (on) {
		if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
			/* Ext PA Controls for 4331 12x9 Package */
			W_REG(&cc->chipcontrol, val |
			      CCTRL4331_EXTPA_EN |
			      CCTRL4331_EXTPA_ON_GPIO2_5);
		else
			/* Ext PA Controls for 4331 12x12 Package */
			W_REG(&cc->chipcontrol,
			      val | CCTRL4331_EXTPA_EN);
	} else {
		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
		W_REG(&cc->chipcontrol, val);
	}

	ai_setcoreidx(sih, origidx);
}

/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
	struct si_info *sii;
	struct chipcregs __iomem *cc;
	uint origidx;

	sii = (struct si_info *)sih;
	origidx = ai_coreidx(sih);

	cc = ai_setcore(sih, CC_CORE_ID, 0);

	/* EPA Fix */
	W_REG(&cc->gpiocontrol,
	      R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);

	ai_setcoreidx(sih, origidx);
}

/* check if the device is removed */
bool ai_deviceremoved(struct si_pub *sih)
{
	u32 w;
	struct si_info *sii;

	sii = (struct si_info *)sih;

	pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
	if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
		return true;

	return false;
}

bool ai_is_sprom_available(struct si_pub *sih)
{
	struct si_info *sii = (struct si_info *)sih;

	if (ai_get_ccrev(sih) >= 31) {
		uint origidx;
		struct chipcregs __iomem *cc;
		u32 sromctrl;

		if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
			return false;

		origidx = sii->curidx;
		cc = ai_setcoreidx(sih, SI_CC_IDX);
		sromctrl = R_REG(&cc->sromcontrol);
		ai_setcoreidx(sih, origidx);
		return sromctrl & SRC_PRESENT;
	}

	switch (ai_get_chip_id(sih)) {
	case BCM4313_CHIP_ID:
		return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
	default:
		return true;
	}
}

bool ai_is_otp_disabled(struct si_pub *sih)
{
	struct si_info *sii = (struct si_info *)sih;

	switch (ai_get_chip_id(sih)) {
	case BCM4313_CHIP_ID:
		return (sii->chipst & CST4313_OTP_PRESENT) == 0;
	/* These chips always have their OTP on */
	case BCM43224_CHIP_ID:
	case BCM43225_CHIP_ID:
	default:
		return false;
	}
}