/* amd64_edac.h */
  1. /*
  2. * AMD64 class Memory Controller kernel module
  3. *
  4. * Copyright (c) 2009 SoftwareBitMaker.
  5. * Copyright (c) 2009 Advanced Micro Devices, Inc.
  6. *
  7. * This file may be distributed under the terms of the
  8. * GNU General Public License.
  9. *
  10. * Originally Written by Thayne Harbaugh
  11. *
  12. * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
  13. * - K8 CPU Revision D and greater support
  14. *
  15. * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
  16. * - Module largely rewritten, with new (and hopefully correct)
  17. * code for dealing with node and chip select interleaving,
  18. * various code cleanup, and bug fixes
  19. * - Added support for memory hoisting using DRAM hole address
  20. * register
  21. *
  22. * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
  23. * -K8 Rev (1207) revision support added, required Revision
  24. * specific mini-driver code to support Rev F as well as
  25. * prior revisions
  26. *
  27. * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
  28. * -Family 10h revision support added. New PCI Device IDs,
  29. * indicating new changes. Actual registers modified
  30. * were slight, less than the Rev E to Rev F transition
  31. * but changing the PCI Device ID was the proper thing to
 * do, as it provides for almost automatic family
  33. * detection. The mods to Rev F required more family
  34. * information detection.
  35. *
  36. * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
  37. * - misc fixes and code cleanups
  38. *
  39. * This module is based on the following documents
  40. * (available from http://www.amd.com/):
  41. *
  42. * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
  43. * Opteron Processors
  44. * AMD publication #: 26094
 * Revision: 3.26
  46. *
  47. * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
  48. * Processors
  49. * AMD publication #: 32559
  50. * Revision: 3.00
  51. * Issue Date: May 2006
  52. *
  53. * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
  54. * Processors
  55. * AMD publication #: 31116
  56. * Revision: 3.00
  57. * Issue Date: September 07, 2007
  58. *
  59. * Sections in the first 2 documents are no longer in sync with each other.
  60. * The Family 10h BKDG was totally re-written from scratch with a new
  61. * presentation model.
  62. * Therefore, comments that refer to a Document section might be off.
  63. */
  64. #include <linux/module.h>
  65. #include <linux/ctype.h>
  66. #include <linux/init.h>
  67. #include <linux/pci.h>
  68. #include <linux/pci_ids.h>
  69. #include <linux/slab.h>
  70. #include <linux/mmzone.h>
  71. #include <linux/edac.h>
  72. #include <asm/msr.h>
  73. #include "edac_core.h"
  74. #include "edac_mce_amd.h"
/* Wrapper around edac_printk() that tags messages with the "amd64" module name. */
#define amd64_printk(level, fmt, arg...) \
	edac_printk(level, "amd64", fmt, ##arg)

/* Per-MCI variant: also identifies the mem_ctl_info the message refers to. */
#define amd64_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
  79. /*
  80. * Throughout the comments in this code, the following terms are used:
  81. *
  82. * SysAddr, DramAddr, and InputAddr
  83. *
  84. * These terms come directly from the amd64 documentation
  85. * (AMD publication #26094). They are defined as follows:
  86. *
  87. * SysAddr:
  88. * This is a physical address generated by a CPU core or a device
  89. * doing DMA. If generated by a CPU core, a SysAddr is the result of
  90. * a virtual to physical address translation by the CPU core's address
  91. * translation mechanism (MMU).
  92. *
  93. * DramAddr:
  94. * A DramAddr is derived from a SysAddr by subtracting an offset that
  95. * depends on which node the SysAddr maps to and whether the SysAddr
  96. * is within a range affected by memory hoisting. The DRAM Base
  97. * (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
  98. * determine which node a SysAddr maps to.
  99. *
  100. * If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
  101. * is within the range of addresses specified by this register, then
  102. * a value x from the DHAR is subtracted from the SysAddr to produce a
  103. * DramAddr. Here, x represents the base address for the node that
  104. * the SysAddr maps to plus an offset due to memory hoisting. See
  105. * section 3.4.8 and the comments in amd64_get_dram_hole_info() and
  106. * sys_addr_to_dram_addr() below for more information.
  107. *
  108. * If the SysAddr is not affected by the DHAR then a value y is
  109. * subtracted from the SysAddr to produce a DramAddr. Here, y is the
  110. * base address for the node that the SysAddr maps to. See section
  111. * 3.4.4 and the comments in sys_addr_to_dram_addr() below for more
  112. * information.
  113. *
  114. * InputAddr:
  115. * A DramAddr is translated to an InputAddr before being passed to the
  116. * memory controller for the node that the DramAddr is associated
  117. * with. The memory controller then maps the InputAddr to a csrow.
  118. * If node interleaving is not in use, then the InputAddr has the same
  119. * value as the DramAddr. Otherwise, the InputAddr is produced by
  120. * discarding the bits used for node interleaving from the DramAddr.
  121. * See section 3.4.4 for more information.
  122. *
  123. * The memory controller for a given node uses its DRAM CS Base and
  124. * DRAM CS Mask registers to map an InputAddr to a csrow. See
  125. * sections 3.5.4 and 3.5.5 for more information.
  126. */
#define EDAC_AMD64_VERSION	" Ver: 3.3.0 " __DATE__
#define EDAC_MOD_STR		"amd64_edac"

/* Maximum number of NUMA nodes (memory controllers) the driver handles. */
#define EDAC_MAX_NUMNODES	8

/* Extended Model from CPUID, for CPU Revision numbers */
#define K8_REV_D		1
#define K8_REV_E		2
#define K8_REV_F		4

/* Hardware limit on ChipSelect rows per MC and processors per system */
#define MAX_CS_COUNT		8
#define DRAM_REG_COUNT		8

#define ON			true
#define OFF			false
/*
 * PCI-defined configuration space registers
 */

/*
 * Function 1 - Address Map
 */
#define K8_DRAM_BASE_LOW	0x40
#define K8_DRAM_LIMIT_LOW	0x44
#define K8_DHAR			0xf0

#define DHAR_VALID		BIT(0)
#define F10_DRAM_MEM_HOIST_VALID BIT(1)

/* DHAR base: bits [31:24] of the hole base address */
#define DHAR_BASE_MASK		0xff000000
#define dhar_base(dhar)		(dhar & DHAR_BASE_MASK)

#define K8_DHAR_OFFSET_MASK	0x0000ff00
#define k8_dhar_offset(dhar)	((dhar & K8_DHAR_OFFSET_MASK) << 16)

#define F10_DHAR_OFFSET_MASK	0x0000ff80
/* NOTE: Extra mask bit vs K8 */
#define f10_dhar_offset(dhar)	((dhar & F10_DHAR_OFFSET_MASK) << 16)

/* F10 High BASE/LIMIT registers */
#define F10_DRAM_BASE_HIGH	0x140
#define F10_DRAM_LIMIT_HIGH	0x144
/*
 * Function 2 - DRAM controller
 */
/* DRAM CS Base Address register blocks, one per DCT */
#define K8_DCSB0			0x40
#define F10_DCSB1			0x140

#define K8_DCSB_CS_ENABLE		BIT(0)
#define K8_DCSB_NPT_SPARE		BIT(1)
#define K8_DCSB_NPT_TESTFAIL		BIT(2)

/*
 * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
 * the address
 */
#define REV_E_DCSB_BASE_BITS		(0xFFE0FE00ULL)
#define REV_E_DCS_SHIFT			4

#define REV_F_F1Xh_DCSB_BASE_BITS	(0x1FF83FE0ULL)
#define REV_F_F1Xh_DCS_SHIFT		8

/*
 * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
 * to form the address
 */
#define REV_F_DCSB_BASE_BITS		(0x1FF83FE0ULL)
#define REV_F_DCS_SHIFT			8

/* DRAM CS Mask Registers */
#define K8_DCSM0			0x60
#define F10_DCSM1			0x160

/* REV E: select [29:21] and [15:9] from DCSM */
#define REV_E_DCSM_MASK_BITS		0x3FE0FE00

/* unused bits [24:20] and [12:0] */
#define REV_E_DCS_NOTUSED_BITS		0x01F01FFF

/* REV F and later: select [28:19] and [13:5] from DCSM */
#define REV_F_F1Xh_DCSM_MASK_BITS	0x1FF83FE0

/* unused bits [26:22] and [12:0] */
#define REV_F_F1Xh_DCS_NOTUSED_BITS	0x07C01FFF

/* DRAM Base Address Mapping registers, one per DCT */
#define DBAM0				0x80
#define DBAM1				0x180

/* Extract the DIMM 'type' on the i'th DIMM from the DBAM reg value passed */
#define DBAM_DIMM(i, reg)		((((reg) >> (4*i))) & 0xF)

#define DBAM_MAX_VALUE			11

/* DRAM Configuration Low registers, one per DCT */
#define F10_DCLR_0			0x90
#define F10_DCLR_1			0x190
#define REVE_WIDTH_128			BIT(16)
#define F10_WIDTH_128			BIT(11)

/* DRAM Configuration High registers, one per DCT */
#define F10_DCHR_0			0x94
#define F10_DCHR_1			0x194

#define F10_DCHR_FOUR_RANK_DIMM		BIT(18)
#define DDR3_MODE			BIT(8)
#define F10_DCHR_MblMode		BIT(6)

/* DRAM Controller Select Low register */
#define F10_DCTL_SEL_LOW		0x110

/* DCT select base address: bits [31:11] of the cached register value */
#define dct_sel_baseaddr(pvt) \
	((pvt->dram_ctl_select_low) & 0xFFFFF800)

/* Channel-interleave address-select field: bits [7:6] */
#define dct_sel_interleave_addr(pvt) \
	(((pvt->dram_ctl_select_low) >> 6) & 0x3)

/* Bit definitions within the DRAM Controller Select Low register */
enum {
	F10_DCTL_SEL_LOW_DctSelHiRngEn	= BIT(0),
	F10_DCTL_SEL_LOW_DctSelIntLvEn	= BIT(2),
	F10_DCTL_SEL_LOW_DctGangEn	= BIT(4),
	F10_DCTL_SEL_LOW_DctDatIntLv	= BIT(5),
	F10_DCTL_SEL_LOW_DramEnable	= BIT(8),
	F10_DCTL_SEL_LOW_MemCleared	= BIT(10),
};

/* Convenience tests on the cached DRAM Controller Select Low value */
#define dct_high_range_enabled(pvt) \
	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)

#define dct_interleave_enabled(pvt) \
	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)

#define dct_ganging_enabled(pvt) \
	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)

#define dct_data_intlv_enabled(pvt) \
	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)

#define dct_dram_enabled(pvt) \
	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)

#define dct_memory_cleared(pvt) \
	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)

#define F10_DCTL_SEL_HIGH		0x114
/*
 * Function 3 - Misc Control
 */
#define K8_NBCTL			0x40

/* Correctable ECC error reporting enable */
#define K8_NBCTL_CECCEn			BIT(0)

/* UnCorrectable ECC error reporting enable */
#define K8_NBCTL_UECCEn			BIT(1)

#define K8_NBCFG			0x44
#define K8_NBCFG_CHIPKILL		BIT(23)
#define K8_NBCFG_ECC_ENABLE		BIT(22)

#define K8_NBSL				0x48

/* Family F10h: Normalized Extended Error Codes */
#define F10_NBSL_EXT_ERR_RES		0x0
#define F10_NBSL_EXT_ERR_ECC		0x8

/* Next two are overloaded values */
#define F10_NBSL_EXT_ERR_LINK_PROTO	0xB
#define F10_NBSL_EXT_ERR_L3_PROTO	0xB

#define F10_NBSL_EXT_ERR_NB_ARRAY	0xC
#define F10_NBSL_EXT_ERR_DRAM_PARITY	0xD
#define F10_NBSL_EXT_ERR_LINK_RETRY	0xE

/* Next two are overloaded values */
#define F10_NBSL_EXT_ERR_GART_WALK	0xF
#define F10_NBSL_EXT_ERR_DEV_WALK	0xF

/* 0x10 to 0x1B: Reserved */
#define F10_NBSL_EXT_ERR_L3_DATA	0x1C
#define F10_NBSL_EXT_ERR_L3_TAG		0x1D
#define F10_NBSL_EXT_ERR_L3_LRU		0x1E

/* K8: Normalized Extended Error Codes */
#define K8_NBSL_EXT_ERR_ECC		0x0
#define K8_NBSL_EXT_ERR_CRC		0x1
#define K8_NBSL_EXT_ERR_SYNC		0x2
#define K8_NBSL_EXT_ERR_MST		0x3
#define K8_NBSL_EXT_ERR_TGT		0x4
#define K8_NBSL_EXT_ERR_GART		0x5
#define K8_NBSL_EXT_ERR_RMW		0x6
#define K8_NBSL_EXT_ERR_WDT		0x7
#define K8_NBSL_EXT_ERR_CHIPKILL_ECC	0x8
#define K8_NBSL_EXT_ERR_DRAM_PARITY	0xD

/*
 * The following are for BUS type errors AFTER values have been normalized by
 * shifting right
 */
#define K8_NBSL_PP_SRC			0x0
#define K8_NBSL_PP_RES			0x1
#define K8_NBSL_PP_OBS			0x2
#define K8_NBSL_PP_GENERIC		0x3

#define EXTRACT_ERR_CPU_MAP(x)		((x) & 0xF)

#define K8_NBEAL			0x50
#define K8_NBEAH			0x54
#define K8_SCRCTRL			0x58

#define F10_NB_CFG_LOW			0x88
#define F10_NB_CFG_LOW_ENABLE_EXT_CFG	BIT(14)

#define F10_NB_CFG_HIGH			0x8C

/* On-line spare register and its decode helpers */
#define F10_ONLINE_SPARE		0xB0
#define F10_ONLINE_SPARE_SWAPDONE0(x)	((x) & BIT(1))
#define F10_ONLINE_SPARE_SWAPDONE1(x)	((x) & BIT(3))
#define F10_ONLINE_SPARE_BADDRAM_CS0(x)	(((x) >> 4) & 0x00000007)
#define F10_ONLINE_SPARE_BADDRAM_CS1(x)	(((x) >> 8) & 0x00000007)

/* NB Array address/data registers, used for DRAM ECC error injection */
#define F10_NB_ARRAY_ADDR		0xB8
#define F10_NB_ARRAY_DRAM_ECC		0x80000000

/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDRESS(section)	(((section) & 0x3) << 1)

#define F10_NB_ARRAY_DATA		0xBC

/* Build the injection word: one bit in [35:20] picks the word, bit 17 = write */
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
	(BIT(((word) & 0xF) + 20) | \
	BIT(17) | bits)

/* Same as above but with bit 16 set, i.e. inject on read */
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
	(BIT(((word) & 0xF) + 20) | \
	BIT(16) | bits)

/* North Bridge Capabilities register */
#define K8_NBCAP			0xE8
#define K8_NBCAP_CORES			(BIT(12)|BIT(13))
#define K8_NBCAP_CHIPKILL		BIT(4)
#define K8_NBCAP_SECDED			BIT(3)
#define K8_NBCAP_DCT_DUAL		BIT(0)

/* MSRs */
#define K8_MSR_MCGCTL_NBE		BIT(4)

#define K8_MSR_MC4CTL			0x0410
#define K8_MSR_MC4STAT			0x0411
#define K8_MSR_MC4ADDR			0x0412
  313. /* AMD sets the first MC device at device ID 0x18. */
  314. static inline int get_node_id(struct pci_dev *pdev)
  315. {
  316. return PCI_SLOT(pdev->devfn) - 0x18;
  317. }
/* Supported chipset families; used as index into amd64_family_types[]. */
enum amd64_chipset_families {
	K8_CPUS = 0,
	F10_CPUS,
	F11_CPUS,
};
/* Error injection control structure */
struct error_injection {
	u32 section;	/* 16-byte section of the cacheline, see SET_NB_ARRAY_ADDRESS() */
	u32 word;	/* word within the section, see SET_NB_DRAM_INJECTION_*() */
	u32 bit_map;	/* bit pattern to inject */
};
/*
 * Driver-private data for one memory-controller node: cached raw register
 * contents from PCI functions 1/2/3 plus values decoded from them.
 */
struct amd64_pvt {
	/* pci_device handles which we utilize */
	struct pci_dev *addr_f1_ctl;
	struct pci_dev *dram_f2_ctl;
	struct pci_dev *misc_f3_ctl;

	int mc_node_id;		/* MC index of this MC node */
	int ext_model;		/* extended model value of this node */

	struct low_ops *ops;	/* pointer to per PCI Device ID func table */

	int channel_count;

	/* Raw registers */
	u32 dclr0;		/* DRAM Configuration Low DCT0 reg */
	u32 dclr1;		/* DRAM Configuration Low DCT1 reg */
	u32 dchr0;		/* DRAM Configuration High DCT0 reg */
	u32 dchr1;		/* DRAM Configuration High DCT1 reg */
	u32 nbcap;		/* North Bridge Capabilities */
	u32 nbcfg;		/* F10 North Bridge Configuration */
	u32 ext_nbcfg;		/* Extended F10 North Bridge Configuration */
	u32 dhar;		/* DRAM Hoist reg */
	u32 dbam0;		/* DRAM Base Address Mapping reg for DCT0 */
	u32 dbam1;		/* DRAM Base Address Mapping reg for DCT1 */

	/* DRAM CS Base Address Registers F2x[1,0][5C:40] */
	u32 dcsb0[MAX_CS_COUNT];
	u32 dcsb1[MAX_CS_COUNT];

	/* DRAM CS Mask Registers F2x[1,0][6C:60] */
	u32 dcsm0[MAX_CS_COUNT];
	u32 dcsm1[MAX_CS_COUNT];

	/*
	 * Decoded parts of DRAM BASE and LIMIT Registers
	 * F1x[78,70,68,60,58,50,48,40]
	 */
	u64 dram_base[DRAM_REG_COUNT];
	u64 dram_limit[DRAM_REG_COUNT];
	u8 dram_IntlvSel[DRAM_REG_COUNT];
	u8 dram_IntlvEn[DRAM_REG_COUNT];
	u8 dram_DstNode[DRAM_REG_COUNT];
	u8 dram_rw_en[DRAM_REG_COUNT];

	/*
	 * The following fields are set at (load) run time, after CPU revision
	 * has been determined, since the dct_base and dct_mask registers vary
	 * based on revision
	 */
	u32 dcsb_base;		/* DCSB base bits */
	u32 dcsm_mask;		/* DCSM mask bits */
	u32 cs_count;		/* num chip selects (== num DCSB registers) */
	u32 num_dcsm;		/* Number of DCSM registers */
	u32 dcs_mask_notused;	/* DCSM notused mask bits */
	u32 dcs_shift;		/* DCSB and DCSM shift value */

	u64 top_mem;		/* top of memory below 4GB */
	u64 top_mem2;		/* top of memory above 4GB */

	u32 dram_ctl_select_low;	/* DRAM Controller Select Low Reg */
	u32 dram_ctl_select_high;	/* DRAM Controller Select High Reg */

	u32 online_spare;	/* On-Line spare Reg */

	/* temp storage for when input is received from sysfs */
	struct err_regs ctl_error_info;

	/* place to store error injection parameters prior to issue */
	struct error_injection injection;

	/* Save old hw registers' values before we modified them */
	u32 nbctl_mcgctl_saved;	/* When true, following 2 are valid */
	u32 old_nbctl;

	/* MC Type Index value: socket F vs Family 10h */
	u32 mc_type_index;

	/* misc settings */
	struct flags {
		unsigned long cf8_extcfg:1;	/* extended PCI config access via CF8 */
		unsigned long ecc_report:1;
	} flags;
};
/* One entry of the memory scrubber rate table (scrubrates[], see below). */
struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
};
extern struct scrubrate scrubrates[23];

/*
 * Decode-string tables for the MCE fields; definitions live in the .c files.
 * (Presumably tt = transaction type, ll = cache level, etc. per the MCA
 * status-register field names — confirm against edac_mce_amd.c.)
 */
extern const char *tt_msgs[4];
extern const char *ll_msgs[4];
extern const char *rrrr_msgs[16];
extern const char *to_msgs[2];
extern const char *pp_msgs[4];
extern const char *ii_msgs[4];
extern const char *ext_msgs[32];
extern const char *htlink_msgs[8];

/* Number of sysfs debug attributes; 0 when debugging is configured out */
#ifdef CONFIG_EDAC_DEBUG
#define NUM_DBG_ATTRS 9
#else
#define NUM_DBG_ATTRS 0
#endif

/* Number of sysfs error-injection attributes; 0 when injection is off */
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
#define NUM_INJ_ATTRS 5
#else
#define NUM_INJ_ATTRS 0
#endif

extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
				     amd64_inj_attrs[NUM_INJ_ATTRS];
/*
 * Each of the PCI Device IDs types have their own set of hardware accessor
 * functions and per device encoding/decoding logic.
 */
struct low_ops {
	int (*early_channel_count)	(struct amd64_pvt *pvt);
	u64 (*get_error_address)	(struct mem_ctl_info *mci,
					 struct err_regs *info);
	void (*read_dram_base_limit)	(struct amd64_pvt *pvt, int dram);
	void (*read_dram_ctl_register)	(struct amd64_pvt *pvt);
	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci,
					 struct err_regs *info, u64 SystemAddr);
	/* translate a DBAM cs_mode field value into a chip-select size */
	int (*dbam_to_cs)		(struct amd64_pvt *pvt, int cs_mode);
};
/*
 * Per-family descriptor: controller name, the IDs used to locate the F1
 * (address map) and F3 (misc control) PCI functions, and the accessor ops.
 */
struct amd64_family_type {
	const char *ctl_name;
	u16 addr_f1_ctl;
	u16 misc_f3_ctl;
	struct low_ops ops;
};
  441. static struct amd64_family_type amd64_family_types[];
  442. static inline const char *get_amd_family_name(int index)
  443. {
  444. return amd64_family_types[index].ctl_name;
  445. }
  446. static inline struct low_ops *family_ops(int index)
  447. {
  448. return &amd64_family_types[index].ops;
  449. }
  450. static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
  451. u32 *val, const char *func)
  452. {
  453. int err = 0;
  454. err = pci_read_config_dword(pdev, offset, val);
  455. if (err)
  456. amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
  457. func, PCI_FUNC(pdev->devfn), offset);
  458. return err;
  459. }
/* Convenience wrapper: fills in __func__ for the error report. */
#define amd64_read_pci_cfg(pdev, offset, val) \
	amd64_read_pci_cfg_dword(pdev, offset, val, __func__)

/*
 * For future CPU versions, verify the following as new 'slow' rates appear and
 * modify the necessary skip values for the supported CPU.
 */
#define K8_MIN_SCRUB_RATE_BITS	0x0
#define F10_MIN_SCRUB_RATE_BITS	0x5
#define F11_MIN_SCRUB_RATE_BITS	0x6

int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size);