amd_nb.c
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
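/*
 * Cached copy of each northbridge's GART cache-flush control word (misc
 * device register 0x9c), filled in by amd_cache_gart() and used by
 * amd_flush_garts() below.
 */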
static u32 *flush_words;
static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};
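/*
 * Bus/device ranges scanned for AMD northbridge devices; each entry gives a
 * PCI bus number plus a device number range (dev_base to dev_limit), per
 * struct amd_nb_bus_dev_range in <asm/amd_nb.h>.
 */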
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;
u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
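/*
 * Walk the PCI device list starting after @dev and return the next device
 * matching one of the IDs in @ids, or NULL when none is left. Callers pass
 * the previous hit back in to iterate over all northbridge functions.
 */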
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
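/*
 * Usage sketch: reading an SMN register on node 0. The 0x12345 address is a
 * hypothetical placeholder, not a real SMN register.
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, 0x12345, &val))
 *		pr_info("SMN 0x%x = 0x%x\n", 0x12345, val);
 */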
/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
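/*
 * Worked example with hypothetical inputs: func 0, reg 0x44 and instance_id 2
 * encode below as
 *
 *	ficaa = 1 | (0x44 & 0x3FC) | ((0 & 0x7) << 11) | (2 << 16) = 0x20045
 *
 * i.e. bit 0 is always set by this helper, bits 2-9 carry the register
 * offset, bits 11-13 the PCI function and bits 16-23 the instance ID.
 */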
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
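/*
 * Discover and cache the per-node northbridge devices: count the misc (F3)
 * devices, allocate the amd_northbridges.nb array, pair up the root, misc
 * and link (F4) devices for each node, and set the GART/L3 feature flags.
 * Safe to call more than once; it returns early if the cache is populated.
 */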
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *root, *misc, *link;

	if (amd_northbridges.num)
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = root = NULL;
	for (i = 0; i != amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, amd_root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
/*
 * Ignores subdevice/subvendor, but as far as I can tell they're useless
 * here anyway.
 */
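/*
 * @device holds the vendor ID in its low 16 bits and the device ID in its
 * high 16 bits, i.e. the layout of the first PCI config space dword.
 */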
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end   = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}
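/*
 * L3 partitioning: the link device's register 0x1d4 holds a 4-bit
 * subcache-enable mask per core ID; the helpers below read and update the
 * field belonging to @cpu's core.
 */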
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}
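/*
 * Flush all GARTs: write each node's cached flush word back to misc device
 * register 0x9c with bit 0 set to trigger the flush, then poll every node
 * until the hardware clears bit 0 again.
 */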
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed,
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();
	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);