amd64_edac.c

#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
 * later.
 */
static int ddr2_dbam_revCG[] = {
	[0]		= 32,
	[1]		= 64,
	[2]		= 128,
	[3]		= 256,
	[4]		= 512,
	[5]		= 1024,
	[6]		= 2048,
};

static int ddr2_dbam_revD[] = {
	[0]		= 32,
	[1]		= 64,
	[2 ... 3]	= 128,
	[4]		= 256,
	[5]		= 512,
	[6]		= 256,
	[7]		= 512,
	[8 ... 9]	= 1024,
	[10]		= 2048,
};

static int ddr2_dbam[] = { [0]		= 128,
			   [1]		= 256,
			   [2 ... 4]	= 512,
			   [5 ... 6]	= 1024,
			   [7 ... 8]	= 2048,
			   [9 ... 10]	= 4096,
			   [11]		= 8192,
};

static int ddr3_dbam[] = { [0]		= -1,
			   [1]		= 256,
			   [2]		= 512,
			   [3 ... 4]	= -1,
			   [5 ... 6]	= 1024,
			   [7 ... 8]	= 2048,
			   [9 ... 10]	= 4096,
			   [11]		= 8192,
};
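/*
 * Worked example (illustrative): the DBAM nibble for a csrow (cs_mode) indexes
 * these tables directly, so a RevF-or-later DDR2 csrow with cs_mode 5 maps to
 * ddr2_dbam[5] = 1024, i.e. a 1 GB chip select (values are chip select sizes
 * in MB; -1 marks reserved encodings in the DDR3 table).
 */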
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the matching or
 * next-lower bandwidth value (i.e. the matching-or-higher scrubval).
 *
 * FIXME: Produce a better mapping/linearisation.
 */
struct scrubrate {
	u32 scrubval;	/* bit pattern for scrub rate */
	u32 bandwidth;	/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
				      u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
/*
 *
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 *
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
			       const char *func)
{
	if (addr >= 0x100)
		return -EINVAL;

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	u32 reg = 0;
	u8 dct  = 0;

	if (addr >= 0x140 && addr <= 0x1a0) {
		dct   = 1;
		addr -= 0x100;
	}

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= 0xfffffffe;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested value is too large, the maximum value in the table
 * is used.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is less than or equal to the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * Stop at ARRAY_SIZE - 1 so that if every recommended rate is skipped
	 * we fall back to the terminating "scrubbing off" element instead of
	 * indexing past the end of the array.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	/*
	 * if no suitable bandwidth was found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in the scrubrates array.
	 */
	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
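/*
 * Worked example (illustrative): new_bw = 1000000000 with min_rate = 0x02
 * skips the 1.6 GB/s entry (scrubval 0x01 < min_rate) and stops at
 * { 0x02, 800000000UL }, i.e. the closest bandwidth not above the request
 * gets programmed.
 */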
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
		return csrow;
	else
		return csrow >> 1;
}
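/*
 * E.g. on Rev F and later, two csrows share one DCSM register, so csrows 4
 * and 5 are both governed by mask entry 2; Rev E and earlier have one mask
 * register per csrow.
 */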
/* return the 'base' address for the i'th CS entry of the 'dct' DRAM controller */
static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsb0[csrow];
	else
		return pvt->dcsb1[csrow];
}

/*
 * Return the 'mask' address for the i'th CS entry. This function is needed
 * because the number of DCSM registers differs between Rev E and prior and
 * Rev F and later.
 */
static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
	else
		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, int nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
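/*
 * E.g. a sign-extended SysAddr of 0xffffff8000000000 is truncated to the
 * 40-bit address 0x8000000000 before the base/limit comparison.
 */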
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	int node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers.  Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find(node_id);

err_no_match:
	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;
}
/*
 * Extract the DRAM CS base address from selected csrow register.
 */
static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
{
	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
				pvt->dcs_shift;
}

/*
 * Extract the mask from the dcsm0[csrow] entry in a CPU revision-specific way.
 */
static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
{
	u64 dcsm_bits, other_bits;
	u64 mask;

	/* Extract bits from DRAM CS Mask. */
	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;

	other_bits = pvt->dcsm_mask;
	other_bits = ~(other_bits << pvt->dcs_shift);

	/*
	 * The extracted bits from DCSM belong in the spaces represented by
	 * the cleared bits in other_bits.
	 */
	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;

	return mask;
}
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	/*
	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
	 * base/mask register pair, test the condition shown near the start of
	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
	 */
	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		/* This DRAM chip select is disabled on this node */
		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
			continue;

		base = base_from_dct_base(pvt, csrow);
		mask = ~mask_from_dct_mask(pvt, csrow);

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,
				pvt->mc_node_id);

			return csrow;
		}
	}
	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid.  Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		debugf1("  revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (boot_cpu_data.x86 >= 0x10 &&
	    (pvt->dhar & DRAM_MEM_HOIST_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if ((pvt->dhar & DHAR_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	base = dhar_base(pvt);
	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt);
	else
		*hole_offset = k8_dhar_offset(pvt);

	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
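/*
 * Worked example (illustrative): with a hole base of 0xc0000000 (3 GB),
 * *hole_size = 0x100000000 - 0xc0000000 = 0x40000000 (1 GB), and the DRAM
 * hidden behind the hole is accessed at the relocated range
 * [0x100000000, 0x140000000).
 */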
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret = 0;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ull << 32)) &&
		    (sys_addr < ((1ull << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);
	return dram_addr;
}
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
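/*
 * For example, IntlvEn == 0x3 means two interleave bits: with four nodes
 * interleaved, SysAddr bits [13:12] select the node, matching the
 * '(sys_addr >> 12) & intlv_en' node lookup in find_mc_by_sys_addr().
 */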
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
		     (dram_addr & 0xfff);

	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		intlv_shift, (unsigned long)dram_addr,
		(unsigned long)input_addr);

	return input_addr;
}
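/*
 * Example (two-node interleave, intlv_shift == 1): DramAddr 0x3000 yields
 * InputAddr ((0x3000 >> 1) & 0xffffff000) + 0x000 = 0x1000, i.e. the
 * interleave bit at position 12 is squeezed out of the address while the
 * low 12 offset bits are kept.
 */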
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		(unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}
/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int node_id, intlv_shift;
	u64 bits, dram_addr;
	u32 intlv_sel;

	/*
	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
	 * this procedure. When translating from a DramAddr to an InputAddr, the
	 * bits used for node interleaving are discarded. Here we recover these
	 * bits from the IntlvSel field of the DRAM Limit register (section
	 * 3.4.4.2) for the node that input_addr is associated with.
	 */
	pvt = mci->pvt_info;
	node_id = pvt->mc_node_id;
	BUG_ON((node_id < 0) || (node_id > 7));

	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));

	if (intlv_shift == 0) {
		debugf1("    InputAddr 0x%lx translates to DramAddr of "
			"same value\n",	(unsigned long)input_addr);

		return input_addr;
	}

	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
	       (input_addr & 0xfff);

	intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
		"(%d node interleave bits)\n", (unsigned long)input_addr,
		(unsigned long)dram_addr, intlv_shift);

	return dram_addr;
}
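/*
 * Example, reversing the one above: InputAddr 0x1000 with intlv_shift == 1
 * and IntlvSel == 1 gives bits = (0x1000 << 1) = 0x2000 and
 * dram_addr = 0x2000 + (1 << 12) = 0x3000, restoring the interleave bit.
 */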
/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, sys_addr;
	int ret = 0;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			debugf1("using DHAR to translate DramAddr 0x%lx to "
				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
				(unsigned long)sys_addr);

			return sys_addr;
		}
	}

	base = get_dram_base(pvt, pvt->mc_node_id);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values.  However, the value we are
	 * supposed to return is a full 64-bit physical address.  The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s.  Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below.  See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1("    Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);

	return sys_addr;
}
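/*
 * Sign-extension example: a 40-bit sys_addr with bit 39 set, e.g.
 * 0x8000000000, becomes 0xffffff8000000000 after the OR above; an address
 * with bit 39 clear is left unchanged (the subtraction yields all ones,
 * whose complement is 0).
 */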
/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
					 u64 input_addr)
{
	return dram_addr_to_sys_addr(mci,
				     input_addr_to_dram_addr(mci, input_addr));
}

/*
 * Find the minimum and maximum InputAddr values that map to the given @csrow.
 * Pass back these values in *input_addr_min and *input_addr_max.
 */
static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
			      u64 *input_addr_min, u64 *input_addr_max)
{
	struct amd64_pvt *pvt;
	u64 base, mask;

	pvt = mci->pvt_info;
	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));

	base = base_from_dct_base(pvt, csrow);
	mask = mask_from_dct_mask(pvt, csrow);

	*input_addr_min = base & ~mask;
	*input_addr_max = base | mask | pvt->dcs_mask_notused;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
{
	*page = (u32) (error_address >> PAGE_SHIFT);
	*offset = ((u32) error_address) & ~PAGE_MASK;
}
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

static u16 extract_syndrome(struct err_regs *err)
{
	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
}
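/*
 * I.e. the low syndrome byte comes from NB status high register bits [22:15]
 * and the high byte from NB status low register bits [31:24].
 */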
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the
 * DIMMs are ECC capable.
 */
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	int bit;
	enum edac_type edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	debugf1("  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		(dclr & BIT(16)) ?  "un" : "",
		(dclr & BIT(19)) ? "yes" : "no");

	debugf1("  PAR/ERR parity: %s\n",
		(dclr & BIT(8)) ?  "enabled" : "disabled");

	debugf1("  DCT 128bit mode width: %s\n",
		(dclr & BIT(11)) ?  "128b" : "64b");

	debugf1("  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		(dclr & BIT(12)) ?  "yes" : "no",
		(dclr & BIT(13)) ?  "yes" : "no",
		(dclr & BIT(14)) ?  "yes" : "no",
		(dclr & BIT(15)) ?  "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	debugf1("  NB two channel DRAM capable: %s\n",
		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");

	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
		"offset: 0x%08x\n",
		pvt->dhar, dhar_base(pvt),
		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
					   : f10_dhar_offset(pvt));

	debugf1("  DramHoleValid: %s\n",
		(pvt->dhar & DHAR_VALID) ? "yes" : "no");

	amd64_debug_display_dimm_sizes(0, pvt);

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf)
		return;

	amd64_debug_display_dimm_sizes(1, pvt);

	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);
}

static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
{
	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
	amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
}
/*
 * NOTE: CPU Revision Dependent code: Rev E and Rev F
 *
 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
 * set the shift factor for the DCSB and DCSM values.
 *
 * ->dcs_mask_notused, RevE:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of section
 * 3.5.4 (p. 84).
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
 * gaps.
 *
 * ->dcs_mask_notused, RevF and later:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of NPT section
 * 4.5.4 (p. 87).
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [36:27] and [21:13].
 *
 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
 * which are all bits in the above-mentioned gaps.
 */
static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
		pvt->dcs_shift		= REV_E_DCS_SHIFT;
		pvt->cs_count		= 8;
		pvt->num_dcsm		= 8;
	} else {
		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
		pvt->cs_count		= 8;
		pvt->num_dcsm		= 4;
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs, reg;

	amd64_set_dct_base_and_mask(pvt);

	for (cs = 0; cs < pvt->cs_count; cs++) {
		reg = K8_DCSB0 + (cs * 4);

		if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb0[cs]))
			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsb0[cs], reg);

		if (!dct_ganging_enabled(pvt)) {
			reg = F10_DCSB1 + (cs * 4);

			if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb1[cs]))
				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsb1[cs], reg);
		}
	}

	for (cs = 0; cs < pvt->num_dcsm; cs++) {
		reg = K8_DCSM0 + (cs * 4);

		if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm0[cs]))
			debugf0("  DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsm0[cs], reg);

		if (!dct_ganging_enabled(pvt)) {
			reg = F10_DCSM1 + (cs * 4);

			if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm1[cs]))
				debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsm1[cs], reg);
		}
	}
}
static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}
/*
 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
 * and the later RevF memory controllers (DDR vs DDR2)
 *
 * Return:
 *	number of memory channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag, err = 0;

	err = amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
	if (err)
		return err;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & F10_WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* extract the ERROR ADDRESS for the K8 CPUs */
static u64 k8_get_error_address(struct mem_ctl_info *mci,
				struct err_regs *info)
{
	return (((u64) (info->nbeah & 0xff)) << 32) +
			(info->nbeal & ~0x03);
}
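/*
 * I.e. the 40-bit error address is composed as (NBEAH[7:0] << 32) |
 * (NBEAL & ~0x3); the low two bits are always cleared.
 */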
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	u32 off = range << 3;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (boot_cpu_data.x86 == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				    struct err_regs *err_info, u64 sys_addr)
{
	struct mem_ctl_info *src_mci;
	int channel, csrow;
	u32 page, offset;
	u16 syndrome;

	syndrome = extract_syndrome(err_info);

	/* CHIPKILL enabled */
	if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
		channel = get_channel_from_ecc_syndrome(mci, syndrome);
		if (channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
					   "error reporting race\n", syndrome);
			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory.  This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		channel = ((sys_addr & BIT(3)) != 0);
	}

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	/* Now map the sys_addr to a CSROW */
	csrow = sys_addr_to_csrow(src_mci, sys_addr);
	if (csrow < 0) {
		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);

		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
				  channel, EDAC_MOD_STR);
	}
}
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
{
	int *dbam_map;

	if (pvt->ext_model >= K8_REV_F)
		dbam_map = ddr2_dbam;
	else if (pvt->ext_model >= K8_REV_D)
		dbam_map = ddr2_dbam_revD;
	else
		dbam_map = ddr2_dbam_revCG;

	return dbam_map[cs_mode];
}
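/*
 * E.g. a revD K8 with cs_mode 8 reports ddr2_dbam_revD[8] = 1024 MB, while
 * the same cs_mode on a revF-or-later part reports ddr2_dbam[8] = 2048 MB;
 * the revision gates above pick the table matching the DBAM encoding the
 * hardware actually uses.
 */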
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f10_early_channel_count(struct amd64_pvt *pvt)
{
	int dbams[] = { DBAM0, DBAM1 };
	int i, j, channels = 0;
	u32 dbam;

	/* If we are in 128 bit mode, then we are using 2 channels */
	if (pvt->dclr0 & F10_WIDTH_128) {
		channels = 2;
		return channels;
	}

	/*
	 * Need to check if in unganged mode: in that case there are 2
	 * channels, but they are not in 128 bit mode and thus the above
	 * 'dclr0' status bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	debugf0("Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
		if (amd64_read_dct_pci_cfg(pvt, dbams[i], &dbam))
			goto err_reg;

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;

err_reg:
	return -1;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
{
	int *dbam_map;

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		dbam_map = ddr3_dbam;
	else
		dbam_map = ddr2_dbam;

	return dbam_map[cs_mode];
}

static u64 f10_get_error_address(struct mem_ctl_info *mci,
				 struct err_regs *info)
{
	return (((u64) (info->nbeah & 0xffff)) << 32) +
			(info->nbeal & ~0x01);
}
static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (!amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_LOW, &pvt->dct_sel_low)) {
		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, High range addrs at: 0x%x\n",
			pvt->dct_sel_low, dct_sel_baseaddr(pvt));

		debugf0("  DCT mode: %s, All DCTs on: %s\n",
			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
			(dct_dram_enabled(pvt) ? "yes" : "no"));

		if (!dct_ganging_enabled(pvt))
			debugf0("  Address range split per DCT: %s\n",
				(dct_high_range_enabled(pvt) ? "yes" : "no"));

		debugf0("  DCT data interleave for ECC: %s, "
			"DRAM cleared since last warm reset: %s\n",
			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			(dct_memory_cleared(pvt) ? "yes" : "no"));

		debugf0("  DCT channel interleave: %s, "
			"DCT interleave bits selector: 0x%x\n",
			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			dct_sel_interleave_addr(pvt));
	}

	amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_HIGH, &pvt->dct_sel_hi);
}
/*
 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				 int hi_range_sel, u32 intlv_en)
{
	u32 cs, temp, dct_sel_high = (pvt->dct_sel_low >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		cs = 0;
	else if (hi_range_sel)
		cs = dct_sel_high;
	else if (dct_interleave_enabled(pvt)) {
		/*
		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
		 */
		if (dct_sel_interleave_addr(pvt) == 0)
			cs = sys_addr >> 6 & 1;
		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			if (dct_sel_interleave_addr(pvt) & 1)
				cs = (sys_addr >> 9 & 1) ^ temp;
			else
				cs = (sys_addr >> 6 & 1) ^ temp;
		} else if (intlv_en & 4)
			cs = sys_addr >> 15 & 1;
		else if (intlv_en & 2)
			cs = sys_addr >> 14 & 1;
		else if (intlv_en & 1)
			cs = sys_addr >> 13 & 1;
		else
			cs = sys_addr >> 12 & 1;
	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
		cs = ~dct_sel_high & 1;
	else
		cs = 0;

	return cs;
}
static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
{
	if (intlv_en == 1)
		return 1;
	else if (intlv_en == 3)
		return 2;
	else if (intlv_en == 7)
		return 3;

	return 0;
}
/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
					   u32 dct_sel_base_addr,
					   u64 dct_sel_base_off,
					   u32 hole_valid, u64 hole_off,
					   u64 dram_base)
{
	u64 chan_off;

	if (hi_range_sel) {
		if (!(dct_sel_base_addr & 0xFFFF0000) &&
		    hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		if (hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off;
		else
			chan_off = dram_base & 0xFFFFF8000000ULL;
	}

	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
			(chan_off & 0x0000FFFFFF800000ULL);
}
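/*
 * Illustrative case: for the low range with no hole, a DRAM base of
 * 0x200000000 gives chan_off = 0x200000000, so a SysAddr of 0x200001040
 * normalizes to (0x200001040 & 0xFFFFFFFFFFC0) - 0x200000000 = 0x1040,
 * i.e. a channel address relative to the start of the range.
 */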
/* Hack for the time being - Can we get this from BIOS?? */
#define CH0SPARE_RANK	0
#define CH1SPARE_RANK	1

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static inline int f10_process_possible_spare(int csrow,
					     u32 cs, struct amd64_pvt *pvt)
{
	u32 swap_done;
	u32 bad_dram_cs;

	/* Depending on channel, isolate respective SPARING info */
	if (cs) {
		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH1SPARE_RANK;
	} else {
		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH0SPARE_RANK;
	}
	return csrow;
}
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u32 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);

	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		cs_base = amd64_get_dct_base(pvt, cs, csrow);
		if (!(cs_base & K8_DCSB_CS_ENABLE))
			continue;

		/*
		 * We have an ENABLED CSROW, Isolate just the MASK bits of the
		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
		 * of the actual address.
		 */
		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;

		/*
		 * Get the DCT Mask, and ENABLE the reserved bits: [18:14] and
		 * [4:0] to become ON. Then keep only bits [28:0] ([36:8] of
		 * the actual address).
		 */
		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);

		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
			csrow, cs_base, cs_mask);

		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;

		debugf1("    Final CSMask=0x%x\n", cs_mask);
		debugf1("    (InputAddr & ~CSMask)=0x%x "
			"(CSBase & ~CSMask)=0x%x\n",
			(in_addr & ~cs_mask), (cs_base & ~cs_mask));

		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
			cs_found = f10_process_possible_spare(csrow, cs, pvt);

			debugf1(" MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
                                  u64 sys_addr, int *nid, int *chan_sel)
{
        int cs_found = -EINVAL, high_range = 0;
        u32 intlv_shift;
        u64 hole_off;
        u32 hole_valid, tmp, dct_sel_base, channel;
        u64 chan_addr, dct_sel_base_off;

        u8 node_id = dram_dst_node(pvt, range);
        u32 intlv_en = dram_intlv_en(pvt, range);
        u32 intlv_sel = dram_intlv_sel(pvt, range);
        u64 dram_base = get_dram_base(pvt, range);

        debugf1("(range %d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
                range, dram_base, sys_addr, get_dram_limit(pvt, range));

        /*
         * This assumes that one node's DHAR is the same as all the other
         * nodes' DHAR.
         */
        hole_off = f10_dhar_offset(pvt);
        hole_valid = (pvt->dhar & DHAR_VALID);
        dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

        debugf1("   HoleOffset=0x%016llx  HoleValid=%d IntlvSel=0x%x\n",
                hole_off, hole_valid, intlv_sel);

        if (intlv_en &&
            (intlv_sel != ((sys_addr >> 12) & intlv_en)))
                return -EINVAL;

        dct_sel_base = dct_sel_baseaddr(pvt);

        /*
         * Check whether addresses >= DctSelBaseAddr[47:27] are to be used to
         * select between DCT0 and DCT1.
         */
        if (dct_high_range_enabled(pvt) &&
           !dct_ganging_enabled(pvt) &&
           ((sys_addr >> 27) >= (dct_sel_base >> 11)))
                high_range = 1;

        channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);

        chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
                                             dct_sel_base_off, hole_valid,
                                             hole_off, dram_base);

        intlv_shift = f10_map_intlv_en_to_shift(intlv_en);

        /* remove Node ID (in case of memory interleaving) */
        tmp = chan_addr & 0xFC0;

        chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;

        /* remove channel interleave and hash */
        if (dct_interleave_enabled(pvt) &&
           !dct_high_range_enabled(pvt) &&
           !dct_ganging_enabled(pvt)) {
                if (dct_sel_interleave_addr(pvt) != 1)
                        chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
                else {
                        tmp = chan_addr & 0xFC0;
                        chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
                                    | tmp;
                }
        }

        debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
                chan_addr, (u32)(chan_addr >> 8));

        cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);

        if (cs_found >= 0) {
                *nid = node_id;
                *chan_sel = channel;
        }
        return cs_found;
}
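
/*
 * Illustrative run of the node-interleave removal above (assumed two-node
 * interleave, i.e. intlv_shift = 1): chan_addr = 0x12345678 yields
 * tmp = 0x12345678 & 0xFC0 = 0x640, and
 * ((0x12345678 >> 1) & 0xFFFFFFFFF000ULL) | 0x640 = 0x091A2640,
 * i.e. the node-select bit is squeezed out of the address while the
 * untouched bits [11:6] are preserved.
 */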
static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
                                       int *node, int *chan_sel)
{
        int range, cs_found = -EINVAL;

        for (range = 0; range < DRAM_RANGES; range++) {

                if (!dram_rw(pvt, range))
                        continue;

                if ((get_dram_base(pvt, range)  <= sys_addr) &&
                    (get_dram_limit(pvt, range) >= sys_addr)) {

                        cs_found = f10_match_to_this_node(pvt, range,
                                                          sys_addr, node,
                                                          chan_sel);
                        if (cs_found >= 0)
                                break;
                }
        }
        return cs_found;
}
/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
                                     struct err_regs *err_info,
                                     u64 sys_addr)
{
        struct amd64_pvt *pvt = mci->pvt_info;
        u32 page, offset;
        int nid, csrow, chan = 0;
        u16 syndrome;

        csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);

        if (csrow < 0) {
                edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                return;
        }

        error_address_to_page_and_offset(sys_addr, &page, &offset);

        syndrome = extract_syndrome(err_info);

        /*
         * We need the syndromes for channel detection only when we're
         * ganged. Otherwise @chan should already contain the channel at
         * this point.
         */
        if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
                chan = get_channel_from_ecc_syndrome(mci, syndrome);

        if (chan >= 0) {
                edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
                                  EDAC_MOD_STR);
        } else {
                /*
                 * Channel unknown, report all channels on this CSROW as
                 * failed.
                 */
                for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
                        edac_mc_handle_ce(mci, page, offset, syndrome,
                                          csrow, chan, EDAC_MOD_STR);
        }
}
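
/*
 * Summary of the flow above (hypothetical error): a reported sys_addr is
 * matched to a DRAM range (giving the node), narrowed to a DCT channel
 * address, and finally matched against the chip-select base/mask pairs.
 * The resulting (nid, chan, csrow) triple, together with the page/offset
 * split of sys_addr, is what edac_mc_handle_ce() logs.
 */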
/*
 * Debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs as well.
 */
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
{
        int dimm, size0, size1, factor = 0;
        u32 dbam;
        u32 *dcsb;

        if (boot_cpu_data.x86 == 0xf) {
                if (pvt->dclr0 & F10_WIDTH_128)
                        factor = 1;

                /* K8 families < revF not supported yet */
                if (pvt->ext_model < K8_REV_F)
                        return;
                else
                        WARN_ON(ctrl != 0);
        }

        dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
        dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;

        debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);

        edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

        /* Dump memory sizes for DIMM and its CSROWs */
        for (dimm = 0; dimm < 4; dimm++) {

                size0 = 0;
                if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
                        size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

                size1 = 0;
                if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
                        size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

                amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
                           dimm * 2, size0 << factor,
                           dimm * 2 + 1, size1 << factor);
        }
}
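
/*
 * Example output (values assumed, modulo the logging prefix): with
 * DBAM = 0x00003210 the per-DIMM nibbles are 0, 1, 2 and 3, so a run
 * might print something like:
 *
 *	 0:   128MB 1:   128MB
 *	 2:   256MB 3:   256MB
 *
 * The actual sizes depend on the family-specific dbam_to_cs() mapping and
 * on the 128-bit width factor.
 */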
static struct amd64_family_type amd64_family_types[] = {
        [K8_CPUS] = {
                .ctl_name = "K8",
                .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
                .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
                .ops = {
                        .early_channel_count    = k8_early_channel_count,
                        .get_error_address      = k8_get_error_address,
                        .map_sysaddr_to_csrow   = k8_map_sysaddr_to_csrow,
                        .dbam_to_cs             = k8_dbam_to_chip_select,
                        .read_dct_pci_cfg       = k8_read_dct_pci_cfg,
                }
        },
        [F10_CPUS] = {
                .ctl_name = "F10h",
                .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
                .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
                .ops = {
                        .early_channel_count    = f10_early_channel_count,
                        .get_error_address      = f10_get_error_address,
                        .read_dram_ctl_register = f10_read_dram_ctl_register,
                        .map_sysaddr_to_csrow   = f10_map_sysaddr_to_csrow,
                        .dbam_to_cs             = f10_dbam_to_chip_select,
                        .read_dct_pci_cfg       = f10_read_dct_pci_cfg,
                }
        },
        [F15_CPUS] = {
                .ctl_name = "F15h",
                .ops = {
                        .read_dct_pci_cfg       = f15_read_dct_pci_cfg,
                }
        },
};
static struct pci_dev *pci_get_related_function(unsigned int vendor,
                                                unsigned int device,
                                                struct pci_dev *related)
{
        struct pci_dev *dev = NULL;

        dev = pci_get_device(vendor, device, dev);
        while (dev) {
                if ((dev->bus->number == related->bus->number) &&
                    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
                        break;
                dev = pci_get_device(vendor, device, dev);
        }

        return dev;
}
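
/*
 * Illustrative use (two-node topology assumed): on a system with
 * northbridges at 00:18.x and 00:19.x, passing node 1's F2 device
 * (00:19.2) as @related returns the candidate sharing bus 0 and slot 0x19,
 * i.e. 00:19.1 when searching for the F1 device ID, rather than node 0's
 * 00:18.1.
 */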
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static u16 x4_vectors[] = {
        0x2f57, 0x1afe, 0x66cc, 0xdd88,
        0x11eb, 0x3396, 0x7f4c, 0xeac8,
        0x0001, 0x0002, 0x0004, 0x0008,
        0x1013, 0x3032, 0x4044, 0x8088,
        0x106b, 0x30d6, 0x70fc, 0xe0a8,
        0x4857, 0xc4fe, 0x13cc, 0x3288,
        0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
        0x1f39, 0x251e, 0xbd6c, 0x6bd8,
        0x15c1, 0x2a42, 0x89ac, 0x4758,
        0x2b03, 0x1602, 0x4f0c, 0xca08,
        0x1f07, 0x3a0e, 0x6b04, 0xbd08,
        0x8ba7, 0x465e, 0x244c, 0x1cc8,
        0x2b87, 0x164e, 0x642c, 0xdc18,
        0x40b9, 0x80de, 0x1094, 0x20e8,
        0x27db, 0x1eb6, 0x9dac, 0x7b58,
        0x11c1, 0x2242, 0x84ac, 0x4c58,
        0x1be5, 0x2d7a, 0x5e34, 0xa718,
        0x4b39, 0x8d1e, 0x14b4, 0x28d8,
        0x4c97, 0xc87e, 0x11fc, 0x33a8,
        0x8e97, 0x497e, 0x2ffc, 0x1aa8,
        0x16b3, 0x3d62, 0x4f34, 0x8518,
        0x1e2f, 0x391a, 0x5cac, 0xf858,
        0x1d9f, 0x3b7a, 0x572c, 0xfe18,
        0x15f5, 0x2a5a, 0x5264, 0xa3b8,
        0x1dbb, 0x3b66, 0x715c, 0xe3f8,
        0x4397, 0xc27e, 0x17fc, 0x3ea8,
        0x1617, 0x3d3e, 0x6464, 0xb8b8,
        0x23ff, 0x12aa, 0xab6c, 0x56d8,
        0x2dfb, 0x1ba6, 0x913c, 0x7328,
        0x185d, 0x2ca6, 0x7914, 0x9e28,
        0x171b, 0x3e36, 0x7d7c, 0xebe8,
        0x4199, 0x82ee, 0x19f4, 0x2e58,
        0x4807, 0xc40e, 0x130c, 0x3208,
        0x1905, 0x2e0a, 0x5804, 0xac08,
        0x213f, 0x132a, 0xadfc, 0x5ba8,
        0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
static u16 x8_vectors[] = {
        0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
        0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
        0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
        0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
        0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
        0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
        0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
        0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
        0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
        0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
        0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
        0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
        0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
        0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
        0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
        0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
        0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
        0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
        0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
                           int v_dim)
{
        unsigned int i, err_sym;

        for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
                u16 s = syndrome;
                int v_idx =  err_sym * v_dim;
                int v_end = (err_sym + 1) * v_dim;

                /* walk over all 16 bits of the syndrome */
                for (i = 1; i < (1U << 16); i <<= 1) {

                        /* if bit is set in that eigenvector... */
                        if (v_idx < v_end && vectors[v_idx] & i) {
                                u16 ev_comp = vectors[v_idx++];

                                /* ... and bit set in the modified syndrome, */
                                if (s & i) {
                                        /* remove it. */
                                        s ^= ev_comp;

                                        if (!s)
                                                return err_sym;
                                }

                        } else if (s & i)
                                /* can't get to zero, move to next symbol */
                                break;
                }
        }

        debugf0("syndrome(%x) not found\n", syndrome);
        return -1;
}
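
/*
 * Decode sketch (checked by hand against the x4 table above): the symbol
 * whose components are 0x0001, 0x0002, 0x0004, 0x0008 consumes a syndrome
 * of 0x0003 as follows: bit 0 is set in both the first component and the
 * syndrome, so s ^= 0x0001 leaves 0x0002; bit 1 matches the second
 * component, so s ^= 0x0002 leaves 0, and that symbol's index is returned.
 * Whenever a syndrome bit is set where the current symbol has no component
 * covering it, the symbol cannot reduce s to zero and the search moves on.
 */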
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
        if (sym_size == 4)
                switch (err_sym) {
                case 0x20:
                case 0x21:
                        return 0;
                case 0x22:
                case 0x23:
                        return 1;
                default:
                        return err_sym >> 4;
                }
        /* x8 symbols */
        else
                switch (err_sym) {
                /* imaginary bits not in a DIMM */
                case 0x10:
                        WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
                             err_sym);
                        return -1;
                case 0x11:
                        return 0;
                case 0x12:
                        return 1;
                default:
                        return err_sym >> 3;
                }
        return -1;
}
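
/*
 * Mapping examples (hypothetical symbol values): for x4 symbols, 0x21 is a
 * per-channel check symbol and maps to channel 0, while a data symbol such
 * as 0x13 maps to channel 0x13 >> 4 = 1. For x8 symbols the divisor is 8
 * instead, so e.g. err_sym 0x0a maps to channel 0x0a >> 3 = 1.
 */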
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
        struct amd64_pvt *pvt = mci->pvt_info;
        int err_sym = -1;

        if (pvt->syn_type == 8)
                err_sym = decode_syndrome(syndrome, x8_vectors,
                                          ARRAY_SIZE(x8_vectors),
                                          pvt->syn_type);
        else if (pvt->syn_type == 4)
                err_sym = decode_syndrome(syndrome, x4_vectors,
                                          ARRAY_SIZE(x4_vectors),
                                          pvt->syn_type);
        else {
                amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
                return err_sym;
        }

        return map_err_sym_to_channel(err_sym, pvt->syn_type);
}
/*
 * Handle any Correctable Errors (CEs) that have occurred. Check for valid
 * ERROR ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci,
                            struct err_regs *info)
{
        struct amd64_pvt *pvt = mci->pvt_info;
        u64 sys_addr;

        /* Ensure that the Error Address is VALID */
        if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
                edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                return;
        }

        sys_addr = pvt->ops->get_error_address(mci, info);

        amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

        pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
}
/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
                            struct err_regs *info)
{
        struct amd64_pvt *pvt = mci->pvt_info;
        struct mem_ctl_info *log_mci, *src_mci = NULL;
        int csrow;
        u64 sys_addr;
        u32 page, offset;

        log_mci = mci;

        if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
                edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
                return;
        }

        sys_addr = pvt->ops->get_error_address(mci, info);

        /*
         * Find out which node the error address belongs to. This may be
         * different from the node that detected the error.
         */
        src_mci = find_mc_by_sys_addr(mci, sys_addr);
        if (!src_mci) {
                amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
                             (unsigned long)sys_addr);
                edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
                return;
        }

        log_mci = src_mci;

        csrow = sys_addr_to_csrow(log_mci, sys_addr);
        if (csrow < 0) {
                amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
                             (unsigned long)sys_addr);
                edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
        } else {
                error_address_to_page_and_offset(sys_addr, &page, &offset);
                edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
        }
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
                                            struct err_regs *info)
{
        u16 ec = EC(info->nbsl);
        u8 xec = XEC(info->nbsl, 0x1f);
        int ecc_type = (info->nbsh >> 13) & 0x3;

        /* Bail out early if this was an 'observed' error */
        if (PP(ec) == K8_NBSL_PP_OBS)
                return;

        /* Do only ECC errors */
        if (xec && xec != F10_NBSL_EXT_ERR_ECC)
                return;

        if (ecc_type == 2)
                amd64_handle_ce(mci, info);
        else if (ecc_type == 1)
                amd64_handle_ue(mci, info);
}

void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
        struct mem_ctl_info *mci = mcis[node_id];
        struct err_regs regs;

        regs.nbsl  = (u32) m->status;
        regs.nbsh  = (u32)(m->status >> 32);
        regs.nbeal = (u32) m->addr;
        regs.nbeah = (u32)(m->addr >> 32);
        regs.nbcfg = nbcfg;

        __amd64_decode_bus_error(mci, &regs);

        /*
         * Check the UE bit of the NB status high register. If set, generate
         * some logs. If NOT a GART error, then process the event as a
         * NO-INFO event. If it was a GART error, skip that process.
         *
         * FIXME: this should go somewhere else, if at all.
         */
        if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
                edac_mc_handle_ue_no_info(mci, "UE bit is set");
}
/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
{
        /* Reserve the ADDRESS MAP Device */
        pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
        if (!pvt->F1) {
                amd64_err("error address map device not found: "
                          "vendor %x device 0x%x (broken BIOS?)\n",
                          PCI_VENDOR_ID_AMD, f1_id);
                return -ENODEV;
        }

        /* Reserve the MISC Device */
        pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
        if (!pvt->F3) {
                pci_dev_put(pvt->F1);
                pvt->F1 = NULL;

                amd64_err("error F3 device not found: "
                          "vendor %x device 0x%x (broken BIOS?)\n",
                          PCI_VENDOR_ID_AMD, f3_id);

                return -ENODEV;
        }

        debugf1("F1: %s\n", pci_name(pvt->F1));
        debugf1("F2: %s\n", pci_name(pvt->F2));
        debugf1("F3: %s\n", pci_name(pvt->F3));

        return 0;
}
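
/*
 * On a typical single-node box (layout assumed), F1/F2/F3 are PCI
 * functions 1, 2 and 3 of the on-die northbridge at slot 0x18, so the
 * debug output above would read roughly:
 *
 *	F1: 0000:00:18.1
 *	F2: 0000:00:18.2
 *	F3: 0000:00:18.3
 */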
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
        pci_dev_put(pvt->F1);
        pci_dev_put(pvt->F3);
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
        u64 msr_val;
        u32 tmp;
        int range;

        /*
         * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
         * those are Read-As-Zero
         */
        rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
        debugf0("  TOP_MEM:  0x%016llx\n", pvt->top_mem);

        /* check first whether TOP_MEM2 is enabled */
        rdmsrl(MSR_K8_SYSCFG, msr_val);
        if (msr_val & (1U << 21)) {
                rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
                debugf0("  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
        } else {
                debugf0("  TOP_MEM2 disabled.\n");
        }

        amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);

        if (pvt->ops->read_dram_ctl_register)
                pvt->ops->read_dram_ctl_register(pvt);

        for (range = 0; range < DRAM_RANGES; range++) {
                u8 rw;

                /* read settings for this DRAM range */
                read_dram_base_limit_regs(pvt, range);

                rw = dram_rw(pvt, range);
                if (!rw)
                        continue;

                debugf1("  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
                        range,
                        get_dram_base(pvt, range),
                        get_dram_limit(pvt, range));

                debugf1("   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
                        dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
                        (rw & 0x1) ? "R" : "-",
                        (rw & 0x2) ? "W" : "-",
                        dram_intlv_sel(pvt, range),
                        dram_dst_node(pvt, range));
        }

        read_dct_base_mask(pvt);

        amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
        amd64_read_dbam_reg(pvt);

        amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

        amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
        amd64_read_dct_pci_cfg(pvt, F10_DCHR_0, &pvt->dchr0);

        if (!dct_ganging_enabled(pvt)) {
                amd64_read_dct_pci_cfg(pvt, F10_DCLR_1, &pvt->dclr1);
                amd64_read_dct_pci_cfg(pvt, F10_DCHR_1, &pvt->dchr1);
        }

        if (boot_cpu_data.x86 >= 0x10)
                amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);

        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model > 7 &&
            /* F3x180[EccSymbolSize]=1 => x8 symbols */
            tmp & BIT(25))
                pvt->syn_type = 8;
        else
                pvt->syn_type = 4;

        dump_misc_regs(pvt);
}
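
/*
 * Memory-map context for the MSR reads above (illustrative values): on a
 * box with 4 GB of DRAM and a PCI hole starting at 3 GB, TOP_MEM would be
 * 0x00000000c0000000 (DRAM below 4 GB ends at 3 GB) and TOP_MEM2 would be
 * 0x0000000140000000 (the remaining 1 GB remapped above 4 GB), with
 * SYSCFG bit 21 set to mark TOP_MEM2 as valid.
 */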
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr: ChipSelect Row Number (0..pvt->cs_count-1)
 *	k8 private pointer to -->
 *		DRAM Bank Address mapping register
 *		node_id
 *		DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each:
 *
 *	Bits:	CSROWs
 *	0-3	CSROWs 0 and 1
 *	4-7	CSROWs 2 and 3
 *	8-11	CSROWs 4 and 5
 *	12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single-channel mode or two DIMMs in dual-channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on
 * CPU revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages the specified CSROW encompasses.
 */
static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
{
        u32 cs_mode, nr_pages;

        /*
         * The math on this doesn't look right on the surface because x/2*4
         * can be simplified to x*2 but this expression makes use of the fact
         * that it is integral math where 1/2=0. This intermediate value
         * becomes the number of bits to shift the DBAM register to extract
         * the proper CSROW field.
         */
        cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;

        nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);

        /*
         * If dual channel then double the memory size of single channel.
         * Channel count is 1 or 2
         */
        nr_pages <<= (pvt->channel_count - 1);

        debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
        debugf0("  nr_pages= %u  channel-count = %d\n",
                nr_pages, pvt->channel_count);

        return nr_pages;
}
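
/*
 * Worked example of the arithmetic above (inputs assumed): for csrow_nr = 5,
 * (5 / 2) * 4 = 8 in integer math, so the nibble for the CSROW 4/5 pair is
 * (dbam0 >> 8) & 0xF. If dbam_to_cs() maps that nibble to 512 (MB) and
 * PAGE_SHIFT is 12, the shift by (20 - 12) converts MB to 4K pages:
 * 512 << 8 = 131072 pages; dual-channel operation then doubles that once
 * more.
 */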
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
        struct csrow_info *csrow;
        struct amd64_pvt *pvt = mci->pvt_info;
        u64 input_addr_min, input_addr_max, sys_addr;
        u32 val;
        int i, empty = 1;

        amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);

        pvt->nbcfg = val;
        pvt->ctl_error_info.nbcfg = val;

        debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
                pvt->mc_node_id, val,
                !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));

        for (i = 0; i < pvt->cs_count; i++) {
                csrow = &mci->csrows[i];

                if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
                        debugf1("----CSROW %d EMPTY for node %d\n", i,
                                pvt->mc_node_id);
                        continue;
                }

                debugf1("----CSROW %d VALID for MC node %d\n",
                        i, pvt->mc_node_id);

                empty = 0;
                csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
                find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
                sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
                csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
                sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
                csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
                csrow->page_mask = ~mask_from_dct_mask(pvt, i);
                /* 8 bytes of resolution */

                csrow->mtype = amd64_determine_memory_type(pvt, i);

                debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
                debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
                        (unsigned long)input_addr_min,
                        (unsigned long)input_addr_max);
                debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
                        (unsigned long)sys_addr, csrow->page_mask);
                debugf1("    nr_pages: %u  first_page: 0x%lx "
                        "last_page: 0x%lx\n",
                        (unsigned)csrow->nr_pages,
                        csrow->first_page, csrow->last_page);

                /*
                 * determine whether CHIPKILL or JUST ECC or NO ECC is
                 * operating
                 */
                if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
                        csrow->edac_mode =
                            (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
                            EDAC_S4ECD4ED : EDAC_SECDED;
                else
                        csrow->edac_mode = EDAC_NONE;
        }

        return empty;
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
{
        int cpu;

        for_each_online_cpu(cpu)
                if (amd_get_nb_id(cpu) == nid)
                        cpumask_set_cpu(cpu, mask);
}

/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
        cpumask_var_t mask;
        int cpu, nbe;
        bool ret = false;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
                amd64_warn("%s: Error allocating mask\n", __func__);
                return false;
        }

        get_cpus_on_this_dct_cpumask(mask, nid);

        rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

        for_each_cpu(cpu, mask) {
                struct msr *reg = per_cpu_ptr(msrs, cpu);
                nbe = reg->l & K8_MSR_MCGCTL_NBE;

                debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
                        cpu, reg->q,
                        (nbe ? "enabled" : "disabled"));

                if (!nbe)
                        goto out;
        }
        ret = true;

out:
        free_cpumask_var(mask);
        return ret;
}
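
/*
 * Illustrative check (register value assumed): if one core on the node
 * reports MCG_CTL = 0x0000000000000008, the NBE bit (K8_MSR_MCGCTL_NBE,
 * bit 4) is clear, so this returns false and ecc_enabled() will later
 * suggest setting MSR 0x0000017b[4] on that node.
 */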
static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
{
        cpumask_var_t cmask;
        int cpu;

        if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
                amd64_warn("%s: error allocating mask\n", __func__);
                return -ENOMEM;
        }

        get_cpus_on_this_dct_cpumask(cmask, nid);

        rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

        for_each_cpu(cpu, cmask) {

                struct msr *reg = per_cpu_ptr(msrs, cpu);

                if (on) {
                        if (reg->l & K8_MSR_MCGCTL_NBE)
                                s->flags.nb_mce_enable = 1;

                        reg->l |= K8_MSR_MCGCTL_NBE;
                } else {
                        /*
                         * Turn off NB MCE reporting only when it was off
                         * before we turned it on.
                         */
                        if (!s->flags.nb_mce_enable)
                                reg->l &= ~K8_MSR_MCGCTL_NBE;
                }
        }
        wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

        free_cpumask_var(cmask);

        return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
                                       struct pci_dev *F3)
{
        bool ret = true;
        u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

        if (toggle_ecc_err_reporting(s, nid, ON)) {
                amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
                return false;
        }

        amd64_read_pci_cfg(F3, K8_NBCTL, &value);

        /* turn on UECCEn and CECCEn bits */
        s->old_nbctl   = value & mask;
        s->nbctl_valid = true;

        value |= mask;
        amd64_write_pci_cfg(F3, K8_NBCTL, value);

        amd64_read_pci_cfg(F3, K8_NBCFG, &value);

        debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
                nid, value,
                !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));

        if (!(value & K8_NBCFG_ECC_ENABLE)) {
                amd64_warn("DRAM ECC disabled on this node, enabling...\n");

                s->flags.nb_ecc_prev = 0;

                /* Attempt to turn on DRAM ECC Enable */
                value |= K8_NBCFG_ECC_ENABLE;
                amd64_write_pci_cfg(F3, K8_NBCFG, value);

                amd64_read_pci_cfg(F3, K8_NBCFG, &value);

                if (!(value & K8_NBCFG_ECC_ENABLE)) {
                        amd64_warn("Hardware rejected DRAM ECC enable, "
                                   "check memory DIMM configuration.\n");
                        ret = false;
                } else {
                        amd64_info("Hardware accepted DRAM ECC Enable\n");
                }
        } else {
                s->flags.nb_ecc_prev = 1;
        }

        debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
                nid, value,
                !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));

        return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
                                        struct pci_dev *F3)
{
        u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

        if (!s->nbctl_valid)
                return;

        amd64_read_pci_cfg(F3, K8_NBCTL, &value);
        value &= ~mask;
        value |= s->old_nbctl;

        amd64_write_pci_cfg(F3, K8_NBCTL, value);

        /* restore the BIOS' DRAM ECC "off" setting which we force-enabled */
        if (!s->flags.nb_ecc_prev) {
                amd64_read_pci_cfg(F3, K8_NBCFG, &value);
                value &= ~K8_NBCFG_ECC_ENABLE;
                amd64_write_pci_cfg(F3, K8_NBCFG, value);
        }

        /* restore the NB Enable MCGCTL bit */
        if (toggle_ecc_err_reporting(s, nid, OFF))
                amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows one to force-enable hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
        "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
        " Either enable ECC checking or force module loading by setting "
        "'ecc_enable_override'.\n"
        " (Note that use of the override may cause unknown side effects.)\n";

static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
        u32 value;
        u8 ecc_en = 0;
        bool nb_mce_en = false;

        amd64_read_pci_cfg(F3, K8_NBCFG, &value);

        ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
        amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

        nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
        if (!nb_mce_en)
                amd64_notice("NB MCE bank disabled, set MSR "
                             "0x%08x[4] on node %d to enable.\n",
                             MSR_IA32_MCG_CTL, nid);

        if (!ecc_en || !nb_mce_en) {
                amd64_notice("%s", ecc_msg);
                return false;
        }
        return true;
}
static struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
                                                 ARRAY_SIZE(amd64_inj_attrs) +
                                                 1];

static struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
        unsigned int i = 0, j = 0;

        for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
                sysfs_attrs[i] = amd64_dbg_attrs[i];

        if (boot_cpu_data.x86 >= 0x10)
                for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
                        sysfs_attrs[i] = amd64_inj_attrs[j];

        sysfs_attrs[i] = terminator;

        mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
        struct amd64_pvt *pvt = mci->pvt_info;

        mci->mtype_cap          = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
        mci->edac_ctl_cap       = EDAC_FLAG_NONE;

        if (pvt->nbcap & K8_NBCAP_SECDED)
                mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

        if (pvt->nbcap & K8_NBCAP_CHIPKILL)
                mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

        mci->edac_cap           = amd64_determine_edac_cap(pvt);
        mci->mod_name           = EDAC_MOD_STR;
        mci->mod_ver            = EDAC_AMD64_VERSION;
        mci->ctl_name           = pvt->ctl_name;
        mci->dev_name           = pci_name(pvt->F2);
        mci->ctl_page_to_phys   = NULL;

        /* memory scrubber interface */
        mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
        mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
/*
 * Returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
        u8 fam = boot_cpu_data.x86;
        struct amd64_family_type *fam_type = NULL;

        switch (fam) {
        case 0xf:
                fam_type                = &amd64_family_types[K8_CPUS];
                pvt->ops                = &amd64_family_types[K8_CPUS].ops;
                pvt->ctl_name           = fam_type->ctl_name;
                pvt->min_scrubrate      = K8_MIN_SCRUB_RATE_BITS;
                break;
        case 0x10:
                fam_type                = &amd64_family_types[F10_CPUS];
                pvt->ops                = &amd64_family_types[F10_CPUS].ops;
                pvt->ctl_name           = fam_type->ctl_name;
                pvt->min_scrubrate      = F10_MIN_SCRUB_RATE_BITS;
                break;

        default:
                amd64_err("Unsupported family!\n");
                return NULL;
        }

        pvt->ext_model = boot_cpu_data.x86_model >> 4;

        amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
                   (fam == 0xf ?
                        (pvt->ext_model >= K8_REV_F ? "revF or later "
                                                    : "revE or earlier ")
                        : ""), pvt->mc_node_id);
        return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
        struct amd64_pvt *pvt = NULL;
        struct amd64_family_type *fam_type = NULL;
        struct mem_ctl_info *mci = NULL;
        int err = 0, ret;
        u8 nid = get_node_id(F2);

        ret = -ENOMEM;
        pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
        if (!pvt)
                goto err_ret;

        pvt->mc_node_id = nid;
        pvt->F2 = F2;

        ret = -EINVAL;
        fam_type = amd64_per_family_init(pvt);
        if (!fam_type)
                goto err_free;

        ret = -ENODEV;
        err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
        if (err)
                goto err_free;

        read_mc_regs(pvt);

        /*
         * We need to determine how many memory channels there are. Then use
         * that information for calculating the size of the dynamic instance
         * tables in the 'mci' structure.
         */
        ret = -EINVAL;
        pvt->channel_count = pvt->ops->early_channel_count(pvt);
        if (pvt->channel_count < 0)
                goto err_siblings;

        ret = -ENOMEM;
        mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
        if (!mci)
                goto err_siblings;

        mci->pvt_info = pvt;
        mci->dev = &pvt->F2->dev;

        setup_mci_misc_attrs(mci);

        if (init_csrows(mci))
                mci->edac_cap = EDAC_FLAG_NONE;

        set_mc_sysfs_attrs(mci);

        ret = -ENODEV;
        if (edac_mc_add_mc(mci)) {
                debugf1("failed edac_mc_add_mc()\n");
                goto err_add_mc;
        }

        /* register stuff with EDAC MCE */
        if (report_gart_errors)
                amd_report_gart_errors(true);

        amd_register_ecc_decoder(amd64_decode_bus_error);

        mcis[nid] = mci;

        atomic_inc(&drv_instances);

        return 0;

err_add_mc:
        edac_mc_free(mci);

err_siblings:
        free_mc_sibling_devs(pvt);

err_free:
        kfree(pvt);

err_ret:
        return ret;
}
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
                                              const struct pci_device_id *mc_type)
{
        u8 nid = get_node_id(pdev);
        struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
        struct ecc_settings *s;
        int ret = 0;

        ret = pci_enable_device(pdev);
        if (ret < 0) {
                debugf0("ret=%d\n", ret);
                return -EIO;
        }

        ret = -ENOMEM;
        s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
        if (!s)
                goto err_out;

        ecc_stngs[nid] = s;

        if (!ecc_enabled(F3, nid)) {
                ret = -ENODEV;

                if (!ecc_enable_override)
                        goto err_enable;

                amd64_warn("Forcing ECC on!\n");

                if (!enable_ecc_error_reporting(s, nid, F3))
                        goto err_enable;
        }

        ret = amd64_init_one_instance(pdev);
        if (ret < 0) {
                amd64_err("Error probing instance: %d\n", nid);
                restore_ecc_error_reporting(s, nid, F3);
        }

        return ret;

err_enable:
        kfree(s);
        ecc_stngs[nid] = NULL;

err_out:
        return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;
        u8 nid = get_node_id(pdev);
        struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
        struct ecc_settings *s = ecc_stngs[nid];

        /* Remove from EDAC CORE tracking list */
        mci = edac_mc_del_mc(&pdev->dev);
        if (!mci)
                return;

        pvt = mci->pvt_info;

        restore_ecc_error_reporting(s, nid, F3);

        free_mc_sibling_devs(pvt);

        /* unregister from EDAC MCE */
        amd_report_gart_errors(false);
        amd_unregister_ecc_decoder(amd64_decode_bus_error);

        kfree(ecc_stngs[nid]);
        ecc_stngs[nid] = NULL;

        /* Free the EDAC CORE resources */
        mci->pvt_info = NULL;
        mcis[nid] = NULL;

        kfree(pvt);
        edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices.
 * The PCI core identifies what devices are on a system during boot, and then
 * queries this table to see whether this driver handles a given device.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
        {
                .vendor         = PCI_VENDOR_ID_AMD,
                .device         = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
                .class          = 0,
                .class_mask     = 0,
        },
        {
                .vendor         = PCI_VENDOR_ID_AMD,
                .device         = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
                .class          = 0,
                .class_mask     = 0,
        },
        {0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
static struct pci_driver amd64_pci_driver = {
        .name           = EDAC_MOD_STR,
        .probe          = amd64_probe_one_instance,
        .remove         = __devexit_p(amd64_remove_one_instance),
        .id_table       = amd64_pci_table,
};

static void setup_pci_device(void)
{
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;

        if (amd64_ctl_pci)
                return;

        mci = mcis[0];
        if (mci) {

                pvt = mci->pvt_info;
                amd64_ctl_pci =
                        edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

                if (!amd64_ctl_pci) {
                        pr_warning("%s(): Unable to create PCI control\n",
                                   __func__);

                        pr_warning("%s(): PCI error report via EDAC not set\n",
                                   __func__);
                }
        }
}
static int __init amd64_edac_init(void)
{
        int err = -ENODEV;

        edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

        opstate_init();

        if (amd_cache_northbridges() < 0)
                goto err_ret;

        err = -ENOMEM;
        mcis      = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
        ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
        if (!(mcis && ecc_stngs))
                goto err_free;

        msrs = msrs_alloc();
        if (!msrs)
                goto err_free;

        err = pci_register_driver(&amd64_pci_driver);
        if (err)
                goto err_pci;

        err = -ENODEV;
        if (!atomic_read(&drv_instances))
                goto err_no_instances;

        setup_pci_device();
        return 0;

err_no_instances:
        pci_unregister_driver(&amd64_pci_driver);

err_pci:
        msrs_free(msrs);
        msrs = NULL;

err_free:
        kfree(mcis);
        mcis = NULL;

        kfree(ecc_stngs);
        ecc_stngs = NULL;

err_ret:
        return err;
}
static void __exit amd64_edac_exit(void)
{
        if (amd64_ctl_pci)
                edac_pci_release_generic_ctl(amd64_ctl_pci);

        pci_unregister_driver(&amd64_pci_driver);

        kfree(ecc_stngs);
        ecc_stngs = NULL;

        kfree(mcis);
        mcis = NULL;

        msrs_free(msrs);
        msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
              "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
                   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");