intel_cacheinfo.c

/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
        unsigned char descriptor;
        char cache_type;
        short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
        { 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
        { 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
        { 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
        { 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
        { 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
        { 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
        { 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
        { 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
        { 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
        { 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
        { 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
        { 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
        { 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
        { 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
        { 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
        { 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
        { 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
        { 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
        { 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
        { 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
        { 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
        { 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
        { 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
        { 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
        { 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
        { 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
        { 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
        { 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
        { 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
        { 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
        { 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
        { 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
        { 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
        { 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
        { 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
        { 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
        { 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
        { 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
        { 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
        { 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
        { 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
        { 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
        { 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
        { 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
        { 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
        { 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
        { 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
        { 0x00, 0, 0}
};
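
/*
 * Illustrative note (not part of the original source): cache_table[] is
 * consulted by init_intel_cacheinfo() below when it walks the CPUID(2)
 * descriptor bytes. For example, a descriptor byte of 0x2c matches the
 * { 0x2c, LVL_1_DATA, 32 } entry above and therefore adds 32 KB to the
 * accumulated L1 data cache size.
 */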

enum _cache_type {
        CACHE_TYPE_NULL = 0,
        CACHE_TYPE_DATA = 1,
        CACHE_TYPE_INST = 2,
        CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
        struct {
                enum _cache_type type:5;
                unsigned int level:3;
                unsigned int is_self_initializing:1;
                unsigned int is_fully_associative:1;
                unsigned int reserved:4;
                unsigned int num_threads_sharing:12;
                unsigned int num_cores_on_die:6;
        } split;
        u32 full;
};

union _cpuid4_leaf_ebx {
        struct {
                unsigned int coherency_line_size:12;
                unsigned int physical_line_partition:10;
                unsigned int ways_of_associativity:10;
        } split;
        u32 full;
};

union _cpuid4_leaf_ecx {
        struct {
                unsigned int number_of_sets:32;
        } split;
        u32 full;
};

struct _cpuid4_info_regs {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        struct amd_northbridge *nb;
};

struct _cpuid4_info {
        struct _cpuid4_info_regs base;
        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short num_cache_leaves;

/*
 * AMD doesn't have CPUID4. Emulate it here to report the same
 * information to the user. This makes some assumptions about the machine:
 * L2 not shared, no SMT etc., which is currently true on AMD CPUs.
 *
 * In theory the TLBs could be reported as a fake type (they are in "dummy").
 * Maybe later.
 */
union l1_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:8;
                unsigned assoc:8;
                unsigned size_in_kb:8;
        };
        unsigned val;
};

union l2_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:4;
                unsigned assoc:4;
                unsigned size_in_kb:16;
        };
        unsigned val;
};

union l3_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:4;
                unsigned assoc:4;
                unsigned res:2;
                unsigned size_encoded:14;
        };
        unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
        [1] = 1,
        [2] = 2,
        [4] = 4,
        [6] = 8,
        [8] = 16,
        [0xa] = 32,
        [0xb] = 48,
        [0xc] = 64,
        [0xd] = 96,
        [0xe] = 128,
        [0xf] = 0xffff /* fully associative - no way to show this currently */
};
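
/*
 * Illustrative note (not part of the original source): assocs[] decodes the
 * 4-bit associativity field reported by CPUID 0x80000006 for L2/L3. For
 * example, an encoded value of 0x6 means 8-way set associative
 * (assocs[6] == 8), and 0xf means fully associative, represented here by
 * the sentinel 0xffff.
 */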

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
                     union _cpuid4_leaf_ebx *ebx,
                     union _cpuid4_leaf_ecx *ecx)
{
        unsigned dummy;
        unsigned line_size, lines_per_tag, assoc, size_in_kb;
        union l1_cache l1i, l1d;
        union l2_cache l2;
        union l3_cache l3;
        union l1_cache *l1 = &l1d;

        eax->full = 0;
        ebx->full = 0;
        ecx->full = 0;

        cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
        cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

        switch (leaf) {
        case 1:
                l1 = &l1i;
                /* fall through */
        case 0:
                if (!l1->val)
                        return;
                assoc = assocs[l1->assoc];
                line_size = l1->line_size;
                lines_per_tag = l1->lines_per_tag;
                size_in_kb = l1->size_in_kb;
                break;
        case 2:
                if (!l2.val)
                        return;
                assoc = assocs[l2.assoc];
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */
                size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
                break;
        case 3:
                if (!l3.val)
                        return;
                assoc = assocs[l3.assoc];
                line_size = l3.line_size;
                lines_per_tag = l3.lines_per_tag;
                size_in_kb = l3.size_encoded * 512;
                if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
                        size_in_kb = size_in_kb >> 1;
                        assoc = assoc >> 1;
                }
                break;
        default:
                return;
        }

        eax->split.is_self_initializing = 1;
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        eax->split.num_threads_sharing = 0;
        eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

        if (assoc == 0xffff)
                eax->split.is_fully_associative = 1;
        ebx->split.coherency_line_size = line_size - 1;
        ebx->split.ways_of_associativity = assoc - 1;
        ebx->split.physical_line_partition = lines_per_tag - 1;
        ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
                (ebx->split.ways_of_associativity + 1) - 1;
}
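
/*
 * Illustrative example (not part of the original source): for a hypothetical
 * 512 KB, 16-way L2 with a 64-byte line size, the emulation above reports
 *	ways_of_associativity = 16 - 1 = 15
 *	coherency_line_size   = 64 - 1 = 63
 *	number_of_sets        = (512 * 1024) / 64 / 16 - 1 = 511
 * which is how a native CPUID(4) leaf would encode the same cache.
 */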

struct _cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
                         unsigned int);
};

#ifdef CONFIG_AMD_NB

/*
 * L3 cache descriptors
 */
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
{
        struct amd_l3_cache *l3 = &nb->l3_cache;
        unsigned int sc0, sc1, sc2, sc3;
        u32 val = 0;

        pci_read_config_dword(nb->misc, 0x1C4, &val);

        /* calculate subcache sizes */
        l3->subcaches[0] = sc0 = !(val & BIT(0));
        l3->subcaches[1] = sc1 = !(val & BIT(4));

        if (boot_cpu_data.x86 == 0x15) {
                l3->subcaches[0] = sc0 += !(val & BIT(1));
                l3->subcaches[1] = sc1 += !(val & BIT(5));
        }

        l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
        l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

        l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
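
/*
 * Illustrative note (not part of the original source): the code above treats
 * a clear bit in PCI config register 0x1C4 as an enabled subcache, so each
 * !(val & BIT(n)) term contributes 1 to that subcache's count. If the largest
 * count computed is 2, the number of usable L3 indices becomes
 * (2 << 10) - 1 = 2047.
 */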

static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
        int node;

        /* only for L3, and not in virtualized environments */
        if (index < 3)
                return;

        node = amd_get_nb_id(smp_processor_id());
        this_leaf->nb = node_to_amd_nb(node);
        if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
                amd_calc_l3_indices(this_leaf->nb);
}

/*
 * Check whether a slot used for disabling an L3 index is occupied.
 * @nb: AMD northbridge that holds the L3 cache
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or a negative value if the slot is free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
        unsigned int reg = 0;

        pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

        /* check whether this slot is activated already */
        if (reg & (3UL << 30))
                return reg & 0xfff;

        return -1;
}
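
/*
 * Illustrative note (not part of the original source): in the disable-slot
 * register read above, a set bit in positions 31:30 marks the slot as in use
 * and the low 12 bits hold the disabled index. A value such as 0x80000123
 * therefore reports index 0x123 as disabled in that slot.
 */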

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
                                  unsigned int slot)
{
        int index;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
        if (index >= 0)
                return sprintf(buf, "%d\n", index);

        return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
                          unsigned int cpu)				\
{									\
        return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
                                 unsigned slot, unsigned long idx)
{
        int i;

        idx |= BIT(30);

        /*
         * disable index in all 4 subcaches
         */
        for (i = 0; i < 4; i++) {
                u32 reg = idx | (i << 20);

                if (!nb->l3_cache.subcaches[i])
                        continue;

                pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

                /*
                 * We need to WBINVD on a core on the node containing the L3
                 * cache whose indices we disable; therefore a simple wbinvd()
                 * is not sufficient.
                 */
                wbinvd_on_cpu(cpu);

                reg |= BIT(31);
                pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
        }
}

/*
 * Disable an L3 cache index by using a disable-slot.
 *
 * @nb:    AMD northbridge that holds the L3 cache
 * @cpu:   a CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
                            unsigned long index)
{
        int ret = 0;

        /* check if @slot is already used or the index is already disabled */
        ret = amd_get_l3_disable_slot(nb, slot);
        if (ret >= 0)
                return -EEXIST;

        if (index > nb->l3_cache.indices)
                return -EINVAL;

        /* check whether the other slot has disabled the same index already */
        if (index == amd_get_l3_disable_slot(nb, !slot))
                return -EEXIST;

        amd_l3_disable_index(nb, cpu, slot, index);

        return 0;
}

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
                                   const char *buf, size_t count,
                                   unsigned int slot)
{
        unsigned long val = 0;
        int cpu, err = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

        if (strict_strtoul(buf, 10, &val) < 0)
                return -EINVAL;

        err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
        if (err) {
                if (err == -EEXIST)
                        pr_warning("L3 slot %d in use/index already disabled!\n",
                                   slot);
                return err;
        }
        return count;
}

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
                           const char *buf, size_t count,		\
                           unsigned int cpu)				\
{									\
        return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
                show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
                show_cache_disable_1, store_cache_disable_1);

static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return -EINVAL;

        return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
                unsigned int cpu)
{
        unsigned long val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return -EINVAL;

        if (strict_strtoul(buf, 16, &val) < 0)
                return -EINVAL;

        if (amd_set_subcaches(cpu, val))
                return -EINVAL;

        return count;
}

static struct _cache_attr subcaches =
        __ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#else	/* CONFIG_AMD_NB */
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB */

static int
__cpuinit cpuid4_cache_lookup_regs(int index,
                                   struct _cpuid4_info_regs *this_leaf)
{
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned edx;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                amd_cpuid4(index, &eax, &ebx, &ecx);
                amd_init_l3_cache(this_leaf, index);
        } else {
                cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
        }

        if (eax.split.type == CACHE_TYPE_NULL)
                return -EIO; /* better error ? */

        this_leaf->eax = eax;
        this_leaf->ebx = ebx;
        this_leaf->ecx = ecx;
        this_leaf->size = (ecx.split.number_of_sets + 1) *
                          (ebx.split.coherency_line_size + 1) *
                          (ebx.split.physical_line_partition + 1) *
                          (ebx.split.ways_of_associativity + 1);
        return 0;
}
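
/*
 * Illustrative example (not part of the original source): a typical 32 KB,
 * 8-way L1 data cache with 64-byte lines and a single line partition is
 * reported by CPUID(4) as number_of_sets = 63, coherency_line_size = 63,
 * physical_line_partition = 0 and ways_of_associativity = 7, so the size
 * computed above is 64 * 64 * 1 * 8 = 32768 bytes.
 */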

static int __cpuinit find_num_cache_leaves(void)
{
        unsigned int eax, ebx, ecx, edx;
        union _cpuid4_leaf_eax cache_eax;
        int i = -1;

        do {
                ++i;
                /* Do cpuid(4) loop to find out num_cache_leaves */
                cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
                cache_eax.full = eax;
        } while (cache_eax.split.type != CACHE_TYPE_NULL);
        return i;
}
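
/*
 * Illustrative note (not part of the original source): CPUID(4) enumeration
 * ends at the first subleaf whose cache type is CACHE_TYPE_NULL, so a CPU
 * exposing L1d, L1i, L2 and L3 leaves makes this function return 4.
 */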

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
        unsigned int cpu = c->cpu_index;
#endif

        if (c->cpuid_level > 3) {
                static int is_initialized;

                if (is_initialized == 0) {
                        /* Init num_cache_leaves from boot CPU */
                        num_cache_leaves = find_num_cache_leaves();
                        is_initialized++;
                }

                /*
                 * Whenever possible use cpuid(4), the deterministic cache
                 * parameters leaf, to find the cache details.
                 */
                for (i = 0; i < num_cache_leaves; i++) {
                        struct _cpuid4_info_regs this_leaf;
                        int retval;

                        retval = cpuid4_cache_lookup_regs(i, &this_leaf);
                        if (retval >= 0) {
                                switch (this_leaf.eax.split.level) {
                                case 1:
                                        if (this_leaf.eax.split.type ==
                                                        CACHE_TYPE_DATA)
                                                new_l1d = this_leaf.size/1024;
                                        else if (this_leaf.eax.split.type ==
                                                        CACHE_TYPE_INST)
                                                new_l1i = this_leaf.size/1024;
                                        break;
                                case 2:
                                        new_l2 = this_leaf.size/1024;
                                        num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                        index_msb = get_count_order(num_threads_sharing);
                                        l2_id = c->apicid >> index_msb;
                                        break;
                                case 3:
                                        new_l3 = this_leaf.size/1024;
                                        num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                        index_msb = get_count_order(
                                                        num_threads_sharing);
                                        l3_id = c->apicid >> index_msb;
                                        break;
                                default:
                                        break;
                                }
                        }
                }
        }

        /*
         * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
         * the trace cache only.
         */
        if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2 call */
                int j, n;
                unsigned int regs[4];
                unsigned char *dp = (unsigned char *)regs;
                int only_trace = 0;

                if (num_cache_leaves != 0 && c->x86 == 15)
                        only_trace = 1;

                /* Number of times to iterate */
                n = cpuid_eax(2) & 0xFF;

                for (i = 0 ; i < n ; i++) {
                        cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                        /* If bit 31 is set, this is an unknown format */
                        for (j = 0 ; j < 3 ; j++)
                                if (regs[j] & (1 << 31))
                                        regs[j] = 0;

                        /* Byte 0 is level count, not a descriptor */
                        for (j = 1 ; j < 16 ; j++) {
                                unsigned char des = dp[j];
                                unsigned char k = 0;

                                /* look up this descriptor in the table */
                                while (cache_table[k].descriptor != 0) {
                                        if (cache_table[k].descriptor == des) {
                                                if (only_trace && cache_table[k].cache_type != LVL_TRACE)
                                                        break;

                                                switch (cache_table[k].cache_type) {
                                                case LVL_1_INST:
                                                        l1i += cache_table[k].size;
                                                        break;
                                                case LVL_1_DATA:
                                                        l1d += cache_table[k].size;
                                                        break;
                                                case LVL_2:
                                                        l2 += cache_table[k].size;
                                                        break;
                                                case LVL_3:
                                                        l3 += cache_table[k].size;
                                                        break;
                                                case LVL_TRACE:
                                                        trace += cache_table[k].size;
                                                        break;
                                                }

                                                break;
                                        }

                                        k++;
                                }
                        }
                }
        }

        if (new_l1d)
                l1d = new_l1d;

        if (new_l1i)
                l1i = new_l1i;

        if (new_l2) {
                l2 = new_l2;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
        }

        if (new_l3) {
                l3 = new_l3;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
        }

        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

        return l2;
}

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP

static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf;
        int ret, i, sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        ret = 0;
        if (index == 3) {
                ret = 1;
                for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);
                        for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
                                if (!cpu_online(sibling))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
        } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
                ret = 1;
                for_each_cpu(i, cpu_sibling_mask(cpu)) {
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);
                        for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                                if (!cpu_online(sibling))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
        }

        return ret;
}

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
        int index_msb, i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        if (c->x86_vendor == X86_VENDOR_AMD) {
                if (cache_shared_amd_cpu_map_setup(cpu, index))
                        return;
        }

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

        if (num_threads_sharing == 1)
                cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
        else {
                index_msb = get_count_order(num_threads_sharing);

                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
                                cpumask_set_cpu(i,
                                        to_cpumask(this_leaf->shared_cpu_map));
                                if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
                                        sibling_leaf =
                                                CPUID4_INFO_IDX(i, index);
                                        cpumask_set_cpu(cpu, to_cpumask(
                                                sibling_leaf->shared_cpu_map));
                                }
                        }
                }
        }
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        int sibling;

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
                cpumask_clear_cpu(cpu,
                                  to_cpumask(sibling_leaf->shared_cpu_map));
        }
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
        int i;

        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);

        kfree(per_cpu(ici_cpuid4_info, cpu));
        per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
        int j, *retval = _retval, cpu = smp_processor_id();

        /* Do cpuid and store the results */
        for (j = 0; j < num_cache_leaves; j++) {
                struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

                *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
                if (unlikely(*retval < 0)) {
                        int i;

                        for (i = 0; i < j; i++)
                                cache_remove_shared_cpu_map(cpu, i);
                        break;
                }
                cache_shared_cpu_map_setup(cpu, j);
        }
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
        int retval;

        if (num_cache_leaves == 0)
                return -ENOENT;

        per_cpu(ici_cpuid4_info, cpu) = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return -ENOMEM;

        smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
        if (retval) {
                kfree(per_cpu(ici_cpuid4_info, cpu));
                per_cpu(ici_cpuid4_info, cpu) = NULL;
        }

        return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
        struct kobject kobj;
        unsigned int cpu;
        unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
                                unsigned int cpu)			\
{									\
        return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
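
/*
 * Illustrative expansion (not part of the original source): the first
 * show_one_plus() instantiation above generates, roughly,
 *
 *	static ssize_t show_level(struct _cpuid4_info *this_leaf, char *buf,
 *				  unsigned int cpu)
 *	{
 *		return sprintf(buf, "%lu\n",
 *			(unsigned long)this_leaf->base.eax.split.level + 0);
 *	}
 *
 * i.e. one sysfs show routine per exported field, with the "+ 1" variants
 * converting the 0-based CPUID encodings back to human-readable counts.
 */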

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
                         unsigned int cpu)
{
        return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
                                        int type, char *buf)
{
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
        int n = 0;

        if (len > 1) {
                const struct cpumask *mask;

                mask = to_cpumask(this_leaf->shared_cpu_map);
                n = type ?
                        cpulist_scnprintf(buf, len-2, mask) :
                        cpumask_scnprintf(buf, len-2, mask);
                buf[n++] = '\n';
                buf[n] = '\0';
        }
        return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
                                          unsigned int cpu)
{
        return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
                                           unsigned int cpu)
{
        return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
                         unsigned int cpu)
{
        switch (this_leaf->base.eax.split.type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return sprintf(buf, "Unknown\n");
        }
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &physical_line_partition.attr,
        &ways_of_associativity.attr,
        &number_of_sets.attr,
        &size.attr,
        &shared_cpu_map.attr,
        &shared_cpu_list.attr,
        NULL
};

#ifdef CONFIG_AMD_NB
static struct attribute ** __cpuinit amd_l3_attrs(void)
{
        static struct attribute **attrs;
        int n;

        if (attrs)
                return attrs;

        n = sizeof (default_attrs) / sizeof (struct attribute *);

        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                n += 2;

        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                n += 1;

        attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
        if (attrs == NULL)
                return attrs = default_attrs;

        for (n = 0; default_attrs[n]; n++)
                attrs[n] = default_attrs[n];

        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
                attrs[n++] = &cache_disable_0.attr;
                attrs[n++] = &cache_disable_1.attr;
        }

        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                attrs[n++] = &subcaches.attr;

        return attrs;
}
#endif

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ?
                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                        buf, this_leaf->cpu) :
                0;
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->store ?
                fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                        buf, count, this_leaf->cpu) :
                0;
        return ret;
}

static const struct sysfs_ops sysfs_ops = {
        .show = show,
        .store = store,
};

static struct kobj_type ktype_cache = {
        .sysfs_ops = &sysfs_ops,
        .default_attrs = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops = &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
        kfree(per_cpu(ici_cache_kobject, cpu));
        kfree(per_cpu(ici_index_kobject, cpu));
        per_cpu(ici_cache_kobject, cpu) = NULL;
        per_cpu(ici_index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
        int err;

        if (num_cache_leaves == 0)
                return -ENOENT;

        err = detect_cache_attributes(cpu);
        if (err)
                return err;

        /* Allocate all required memory */
        per_cpu(ici_cache_kobject, cpu) =
                kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
                goto err_out;

        per_cpu(ici_index_kobject, cpu) = kzalloc(
            sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpuid4_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct device *dev)
{
        unsigned int cpu = dev->id;
        unsigned long i, j;
        struct _index_kobject *this_object;
        struct _cpuid4_info *this_leaf;
        int retval;

        retval = cpuid4_cache_sysfs_init(cpu);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
                                      &ktype_percpu_entry,
                                      &dev->kobj, "%s", "cache");
        if (retval < 0) {
                cpuid4_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < num_cache_leaves; i++) {
                this_object = INDEX_KOBJECT_PTR(cpu, i);
                this_object->cpu = cpu;
                this_object->index = i;

                this_leaf = CPUID4_INFO_IDX(cpu, i);

                ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
                if (this_leaf->base.nb)
                        ktype_cache.default_attrs = amd_l3_attrs();
#endif
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
                                              per_cpu(ici_cache_kobject, cpu),
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++)
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
                        kobject_put(per_cpu(ici_cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

        kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
        return 0;
}
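
/*
 * Illustrative note (not part of the original source): for each online CPU
 * the code above creates a "cache" kobject under the CPU device plus one
 * "indexN" kobject per cache leaf, so the attributes defined earlier show up
 * in paths of the form
 *	/sys/devices/system/cpu/cpu0/cache/index2/size
 * with the AMD-only attributes (cache_disable_*, subcaches) appearing only
 * on leaves that have an attached northbridge descriptor.
 */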

static void __cpuinit cache_remove_dev(struct device *dev)
{
        unsigned int cpu = dev->id;
        unsigned long i;

        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return;
        if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                return;
        cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
        kobject_put(per_cpu(ici_cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;

        dev = get_cpu_device(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
        .notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
        int i;

        if (num_cache_leaves == 0)
                return 0;

        for_each_online_cpu(i) {
                int err;
                struct device *dev = get_cpu_device(i);

                err = cache_add_dev(dev);
                if (err)
                        return err;
        }
        register_hotcpu_notifier(&cacheinfo_cpu_notifier);
        return 0;
}

device_initcall(cache_sysfs_init);

#endif