amd_bus.c

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <asm/pci_x86.h>

#ifdef CONFIG_X86_64
#include <asm/pci-direct.h>
#include <asm/mpspec.h>
#include <linux/cpumask.h>
#endif

/*
 * This discovers the pcibus <-> node mapping on AMD K8.
 * It also gets the peer root bus resources for I/O and MMIO.
 */

#ifdef CONFIG_X86_64

/*
 * A transparent sub-bus will use resource entries starting at index 3 to
 * store extra ranges from its root, so make sure there are enough slots
 * here; increase PCI_BUS_NUM_RESOURCES?
 */
#define RES_NUM 16

struct pci_root_info {
        char name[12];
        unsigned int res_num;
        struct resource res[RES_NUM];
        int bus_min;
        int bus_max;
        int node;
        int link;
};

/* 4 at this time; it may grow to 32 later */
#define PCI_ROOT_NR 4
static int pci_root_num;
static struct pci_root_info pci_root_info[PCI_ROOT_NR];

void x86_pci_root_bus_res_quirks(struct pci_bus *b)
{
        int i;
        int j;
        struct pci_root_info *info;

        /* don't go for it if _CRS is used already */
        if (b->resource[0] != &ioport_resource ||
            b->resource[1] != &iomem_resource)
                return;

        /* if there is only one root bus, nothing needs to be done */
        if (pci_root_num < 2)
                return;

        for (i = 0; i < pci_root_num; i++) {
                if (pci_root_info[i].bus_min == b->number)
                        break;
        }

        if (i == pci_root_num)
                return;

        printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
               b->number);

        info = &pci_root_info[i];
        for (j = 0; j < info->res_num; j++) {
                struct resource *res;
                struct resource *root;

                res = &info->res[j];
                b->resource[j] = res;
                if (res->flags & IORESOURCE_IO)
                        root = &ioport_resource;
                else
                        root = &iomem_resource;
                insert_resource(root, res);
        }
}

#define RANGE_NUM 16

struct res_range {
        size_t start;
        size_t end;
};

static void __init update_range(struct res_range *range, size_t start,
                                size_t end)
{
        int i;
        int j;

        for (j = 0; j < RANGE_NUM; j++) {
                if (!range[j].end)
                        continue;

                if (start <= range[j].start && end >= range[j].end) {
                        range[j].start = 0;
                        range[j].end = 0;
                        continue;
                }

                if (start <= range[j].start && end < range[j].end &&
                    range[j].start < end + 1) {
                        range[j].start = end + 1;
                        continue;
                }

                if (start > range[j].start && end >= range[j].end &&
                    range[j].end > start - 1) {
                        range[j].end = start - 1;
                        continue;
                }

                if (start > range[j].start && end < range[j].end) {
                        /* find a spare slot for the split-off remainder */
                        for (i = 0; i < RANGE_NUM; i++) {
                                if (range[i].end == 0)
                                        break;
                        }
                        if (i < RANGE_NUM) {
                                range[i].end = range[j].end;
                                range[i].start = end + 1;
                        } else {
                                printk(KERN_ERR "ran out of slots in ranges\n");
                        }
                        range[j].end = start - 1;
                        continue;
                }
        }
}
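
/*
 * Illustrative example (not part of the original source): update_range()
 * punches [start, end] out of the list of still-free ranges.  Assuming
 * range[0] = [0x0, 0xffff] and a carve-out of [0x1000, 0x1fff], the call
 * update_range(range, 0x1000, 0x1fff) leaves range[0] = [0x0, 0xfff] and
 * places the split-off remainder [0x2000, 0xffff] in the first spare slot.
 */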

static void __init update_res(struct pci_root_info *info, size_t start,
                              size_t end, unsigned long flags, int merge)
{
        int i;
        struct resource *res;

        if (!merge)
                goto addit;

        /* try to merge it with an existing one */
        for (i = 0; i < info->res_num; i++) {
                size_t final_start, final_end;
                size_t common_start, common_end;

                res = &info->res[i];
                if (res->flags != flags)
                        continue;

                common_start = max((size_t)res->start, start);
                common_end = min((size_t)res->end, end);
                if (common_start > common_end + 1)
                        continue;

                final_start = min((size_t)res->start, start);
                final_end = max((size_t)res->end, end);

                res->start = final_start;
                res->end = final_end;
                return;
        }

addit:

        /* need to add it as a new entry */
        if (info->res_num >= RES_NUM)
                return;

        res = &info->res[info->res_num];
        res->name = info->name;
        res->flags = flags;
        res->start = start;
        res->end = end;
        res->child = NULL;
        info->res_num++;
}
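
/*
 * Illustrative note (not part of the original source): the merge test above
 * treats touching ranges as mergeable, not just overlapping ones.  For
 * example, an existing res of [0x0, 0xfff] and a new range [0x1000, 0x1fff]
 * give common_start = 0x1000 and common_end = 0xfff; since 0x1000 is not
 * greater than 0xfff + 1, the two are coalesced into [0x0, 0x1fff].
 */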

struct pci_hostbridge_probe {
        u32 bus;
        u32 slot;
        u32 vendor;
        u32 device;
};

static struct pci_hostbridge_probe pci_probes[] __initdata = {
        { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1100 },
        { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
        { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
        { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
};
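
/*
 * Background note (an assumption, not stated in this file): device IDs
 * 0x1100, 0x1200 and 0x1300 are believed to be the HyperTransport
 * configuration function of the K8, family 10h and family 11h northbridges
 * respectively, normally at bus 0, device 0x18; the 0xff/0 entry appears to
 * cover family 10h systems whose northbridge is reachable at bus 0xff.
 */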

static u64 __initdata fam10h_mmconf_start;
static u64 __initdata fam10h_mmconf_end;

static void __init get_pci_mmcfg_amd_fam10h_range(void)
{
        u32 address;
        u64 base, msr;
        unsigned segn_busn_bits;

        /* assume all cpus from fam10h have mmconf */
        if (boot_cpu_data.x86 < 0x10)
                return;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* mmconf is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        fam10h_mmconf_start = base;
        fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
}
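
/*
 * Illustrative note (not part of the original source): the size of the
 * mmconf window follows from the bus-range field read above.  Each PCI bus
 * needs 1 MiB (2^20 bytes) of ECAM space and the field encodes 2^n buses,
 * so the window spans 2^(n + 20) bytes starting at the decoded base; for
 * example, a bus-range value of 8 (256 buses) gives a 256 MiB window.
 */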

/**
 * early_fill_mp_bus_info() - called before pcibios_scan_root and pci_scan_bus
 *
 * Fills the mp_bus_to_node mapping according to the LDT Bus Number
 * Registers found in the K8 northbridge.
 */
static int __init early_fill_mp_bus_info(void)
{
        int i;
        int j;
        unsigned bus;
        unsigned slot;
        int found;
        int node;
        int link;
        int def_node;
        int def_link;
        struct pci_root_info *info;
        u32 reg;
        struct resource *res;
        size_t start;
        size_t end;
        struct res_range range[RANGE_NUM];
        u64 val;
        u32 address;

        if (!early_pci_allowed())
                return -1;

        found = 0;
        for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
                u32 id;
                u16 device;
                u16 vendor;

                bus = pci_probes[i].bus;
                slot = pci_probes[i].slot;
                id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);

                vendor = id & 0xffff;
                device = (id>>16) & 0xffff;
                if (pci_probes[i].vendor == vendor &&
                    pci_probes[i].device == device) {
                        found = 1;
                        break;
                }
        }

        if (!found)
                return 0;

        pci_root_num = 0;
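
        /*
         * Illustrative note (not part of the original source): the loop below
         * decodes the four Configuration Map registers in northbridge
         * function 1 at offsets 0xE0, 0xE4, 0xE8 and 0xEC.  As used here,
         * the low bits appear to be enable bits and must read as 3 for a
         * valid entry, bits 6:4 select the destination node, bits 9:8 the
         * link, and bits 23:16/31:24 hold the base/limit bus numbers.
         */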
        for (i = 0; i < 4; i++) {
                int min_bus;
                int max_bus;

                reg = read_pci_config(bus, slot, 1, 0xe0 + (i << 2));

                /* Check if that register is enabled for bus range */
                if ((reg & 7) != 3)
                        continue;

                min_bus = (reg >> 16) & 0xff;
                max_bus = (reg >> 24) & 0xff;
                node = (reg >> 4) & 0x07;
#ifdef CONFIG_NUMA
                for (j = min_bus; j <= max_bus; j++)
                        set_mp_bus_to_node(j, node);
#endif
                link = (reg >> 8) & 0x03;

                info = &pci_root_info[pci_root_num];
                info->bus_min = min_bus;
                info->bus_max = max_bus;
                info->node = node;
                info->link = link;
                sprintf(info->name, "PCI Bus #%02x", min_bus);
                pci_root_num++;
        }

        /* get the default node and link for left-over resources */
        reg = read_pci_config(bus, slot, 0, 0x60);
        def_node = (reg >> 8) & 0x07;
        reg = read_pci_config(bus, slot, 0, 0x64);
        def_link = (reg >> 8) & 0x03;

        memset(range, 0, sizeof(range));
        range[0].end = 0xffff;

        /* io port resource */
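        /*
         * Illustrative note (not part of the original source): each of the
         * four I/O range entries in function 1 is a base/limit pair at
         * offsets 0xC0/0xC4 + 8*i.  As decoded below, the low bits of the
         * base register are the read/write enables, bits 23:12 of both
         * registers carry the corresponding address bits (4 KiB granularity,
         * hence the | 0xfff on the limit), and the limit register supplies
         * the destination node in bits 2:0 and the link in bits 5:4.
         */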
        for (i = 0; i < 4; i++) {
                reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3));
                if (!(reg & 3))
                        continue;

                start = reg & 0xfff000;
                reg = read_pci_config(bus, slot, 1, 0xc4 + (i << 3));
                node = reg & 0x07;
                link = (reg >> 4) & 0x03;
                end = (reg & 0xfff000) | 0xfff;

                /* find the position */
                for (j = 0; j < pci_root_num; j++) {
                        info = &pci_root_info[j];
                        if (info->node == node && info->link == link)
                                break;
                }
                if (j == pci_root_num)
                        continue; /* not found */

                info = &pci_root_info[j];
                printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
                       node, link, (u64)start, (u64)end);

                /* the kernel only handles 16-bit io ports */
                if (end > 0xffff)
                        end = 0xffff;
                update_res(info, start, end, IORESOURCE_IO, 1);
                update_range(range, start, end);
        }

        /* add left-over io port range to def node/link, [0, 0xffff] */
        /* find the position */
        for (j = 0; j < pci_root_num; j++) {
                info = &pci_root_info[j];
                if (info->node == def_node && info->link == def_link)
                        break;
        }
        if (j < pci_root_num) {
                info = &pci_root_info[j];
                for (i = 0; i < RANGE_NUM; i++) {
                        if (!range[i].end)
                                continue;

                        update_res(info, range[i].start, range[i].end,
                                   IORESOURCE_IO, 1);
                }
        }

        memset(range, 0, sizeof(range));
        /* 0xfd00000000-0xffffffffff for HT */
        range[0].end = (0xfdULL<<32) - 1;

        /* need to take out [0, TOM) for RAM */
        address = MSR_K8_TOP_MEM1;
        rdmsrl(address, val);
        end = (val & 0xffffff800000ULL);
        printk(KERN_INFO "TOM: %016lx aka %ldM\n", end, end>>20);
        if (end < (1ULL<<32))
                update_range(range, 0, end - 1);

        /* get mmconfig */
        get_pci_mmcfg_amd_fam10h_range();
        /* need to take out mmconf range */
        if (fam10h_mmconf_end) {
                printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n",
                       fam10h_mmconf_start, fam10h_mmconf_end);
                update_range(range, fam10h_mmconf_start, fam10h_mmconf_end);
        }

        /* mmio resource */
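        /*
         * Illustrative note (not part of the original source): the eight MMIO
         * range entries in function 1 are base/limit pairs at offsets
         * 0x80/0x84 + 8*i.  As decoded below, bits 31:8 of the base and limit
         * registers carry address bits 39:16 (64 KiB granularity, hence the
         * << 8 and | 0xffff), and the limit register supplies the destination
         * node in bits 2:0 and the link in bits 5:4.
         */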
        for (i = 0; i < 8; i++) {
                reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
                if (!(reg & 3))
                        continue;

                start = reg & 0xffffff00; /* 39:16 on 31:8 */
                start <<= 8;
                reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
                node = reg & 0x07;
                link = (reg >> 4) & 0x03;
                end = (reg & 0xffffff00);
                end <<= 8;
                end |= 0xffff;

                /* find the position */
                for (j = 0; j < pci_root_num; j++) {
                        info = &pci_root_info[j];
                        if (info->node == node && info->link == link)
                                break;
                }
                if (j == pci_root_num)
                        continue; /* not found */

                info = &pci_root_info[j];
                printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
                       node, link, (u64)start, (u64)end);

                /*
                 * Some broken setups program an MMIO range that overlaps the
                 * fam10h mmconf window, so start and end may need adjusting.
                 */
                if (fam10h_mmconf_end) {
                        int changed = 0;
                        u64 endx = 0;

                        if (start >= fam10h_mmconf_start &&
                            start <= fam10h_mmconf_end) {
                                start = fam10h_mmconf_end + 1;
                                changed = 1;
                        }

                        if (end >= fam10h_mmconf_start &&
                            end <= fam10h_mmconf_end) {
                                end = fam10h_mmconf_start - 1;
                                changed = 1;
                        }

                        if (start < fam10h_mmconf_start &&
                            end > fam10h_mmconf_end) {
                                /* we got a hole */
                                endx = fam10h_mmconf_start - 1;
                                update_res(info, start, endx, IORESOURCE_MEM, 0);
                                update_range(range, start, endx);
                                printk(KERN_CONT " ==> [%llx, %llx]",
                                       (u64)start, endx);
                                start = fam10h_mmconf_end + 1;
                                changed = 1;
                        }

                        if (changed) {
                                if (start <= end) {
                                        printk(KERN_CONT " %s [%llx, %llx]",
                                               endx ? "and" : "==>",
                                               (u64)start, (u64)end);
                                } else {
                                        printk(KERN_CONT "%s\n",
                                               endx ? "" : " ==> none");
                                        continue;
                                }
                        }
                }

                update_res(info, start, end, IORESOURCE_MEM, 1);
                update_range(range, start, end);
                printk(KERN_CONT "\n");
        }

        /* need to take out [4G, TOM2) for RAM */
        /* SYS_CFG */
        address = MSR_K8_SYSCFG;
        rdmsrl(address, val);
        /* TOP_MEM2 is enabled? */
        if (val & (1<<21)) {
                /* TOP_MEM2 */
                address = MSR_K8_TOP_MEM2;
                rdmsrl(address, val);
                end = (val & 0xffffff800000ULL);
                printk(KERN_INFO "TOM2: %016lx aka %ldM\n", end, end>>20);
                update_range(range, 1ULL<<32, end - 1);
        }

        /*
         * Add the left-over mmio ranges to the default node/link?  That is
         * tricky; just record the ranges from start_min to 4G.
         */
        for (j = 0; j < pci_root_num; j++) {
                info = &pci_root_info[j];
                if (info->node == def_node && info->link == def_link)
                        break;
        }
        if (j < pci_root_num) {
                info = &pci_root_info[j];
                for (i = 0; i < RANGE_NUM; i++) {
                        if (!range[i].end)
                                continue;

                        update_res(info, range[i].start, range[i].end,
                                   IORESOURCE_MEM, 1);
                }
        }

        for (i = 0; i < pci_root_num; i++) {
                int res_num;
                int busnum;

                info = &pci_root_info[i];
                res_num = info->res_num;
                busnum = info->bus_min;
                printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n",
                       info->bus_min, info->bus_max, info->node, info->link);
                for (j = 0; j < res_num; j++) {
                        res = &info->res[j];
                        printk(KERN_DEBUG "bus: %02x index %x %s: [%llx, %llx]\n",
                               busnum, j,
                               (res->flags & IORESOURCE_IO) ? "io port" : "mmio",
                               res->start, res->end);
                }
        }

        return 0;
}

#else  /* !CONFIG_X86_64 */

static int __init early_fill_mp_bus_info(void) { return 0; }

#endif /* !CONFIG_X86_64 */

/* common 32/64 bit code */

#define ENABLE_CF8_EXT_CFG      (1ULL << 46)

static void enable_pci_io_ecs(void *unused)
{
        u64 reg;

        rdmsrl(MSR_AMD64_NB_CFG, reg);
        if (!(reg & ENABLE_CF8_EXT_CFG)) {
                reg |= ENABLE_CF8_EXT_CFG;
                wrmsrl(MSR_AMD64_NB_CFG, reg);
        }
}
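
/*
 * Background note (an assumption, not stated in this file): bit 46 of the
 * AMD64 NB_CFG MSR is believed to enable access to extended configuration
 * space (IO ECS) through the legacy CF8h/CFCh I/O mechanism.  It is a
 * per-CPU setting, hence the CPU notifier below that re-runs
 * enable_pci_io_ecs() on each CPU as it comes online.
 */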

static int __cpuinit amd_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata amd_cpu_notifier = {
        .notifier_call  = amd_cpu_notify,
};

static int __init pci_io_ecs_init(void)
{
        int cpu;

        /* assume all cpus from fam10h have IO ECS */
        if (boot_cpu_data.x86 < 0x10)
                return 0;

        register_cpu_notifier(&amd_cpu_notifier);
        for_each_online_cpu(cpu)
                amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
                               (void *)(long)cpu);
        pci_probe |= PCI_HAS_IO_ECS;

        return 0;
}

static int __init amd_postcore_init(void)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return 0;

        early_fill_mp_bus_info();
        pci_io_ecs_init();

        return 0;
}

postcore_initcall(amd_postcore_init);