intel_cacheinfo.c revision f7627e2513987bb5d4e8cb13c4e0a478352141ac
/*
 *	Routines to identify caches on Intel CPUs.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>

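/* Cache level/type tags used to classify the cpuid(2) descriptors in cache_table[] below */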
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table
{
	unsigned char descriptor;	/* cpuid(2) descriptor byte */
	char cache_type;		/* one of the LVL_* tags above */
	short size;			/* in KB (K-uops for trace caches) */
};

/* all the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,     12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,     16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,    1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,     128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,     256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,     512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,    1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,    2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,     512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,     256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,     512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,    1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,    2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,     512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,    1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

enum _cache_type
{
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

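/*
 * These unions mirror the cpuid(4) register layout: the bitfields give
 * named access to EAX/EBX/ECX, while .full exposes the raw 32-bit value.
 */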
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;
};

unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as a fake cache type (they are
   returned in "dummy"). Maybe later. */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};

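/*
 * Map AMD's 4-bit encoded associativity field to an actual way count;
 * 0xf denotes a fully associative cache.
 */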
static const unsigned short assocs[] = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff	/* fully associative */
};

static const unsigned char levels[] = { 1, 1, 2, 3 };	/* leaf -> cache level */
static const unsigned char types[] = { 1, 2, 3, 3 };	/* leaf -> _cache_type */

static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		       union _cpuid4_leaf_ebx *ebx,
		       union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

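	/* leaf numbering follows cpuid(4): 0 = L1d, 1 = L1i, 2 = L2, 3 = L3 */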
	switch (leaf) {
	case 1:
		l1 = &l1i;
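		/* fall through: leaves 0 and 1 share the L1 decode below */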
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
	else
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
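	/* total size = sets * line size * physical line partitions * ways */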
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); /* index into cpu_data[] */
#endif

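	/*
	 * Prefer the deterministic cpuid(4) data gathered below; the
	 * legacy cpuid(2) descriptor scan that follows it remains for
	 * CPUs without cpuid(4) and for the P4 trace cache.
	 */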
	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible, use cpuid(4) (the deterministic
		 * cache parameters leaf) to find the cache details.
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;
			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid(2) if cpuid(4) is supported. For the P4, we
	 * still use cpuid(2) for the trace cache.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports the eax=2 call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;
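		/* view the four 32-bit registers as 16 descriptor bytes */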
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, the register holds an unknown
			   format; check all four registers */
			for (j = 0; j < 4; j++)
				if (regs[j] < 0)
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		cpu_llc_id[cpu] = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		cpu_llc_id[cpu] = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i + l1d));

	return l2;
}

/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x, y)	(&((cpuid4_info[x])[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = cpu_data;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

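		/*
		 * CPUs whose APIC IDs agree above index_msb share this
		 * cache; record each CPU in the others' shared_cpu_map.
		 */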
		for_each_online_cpu(i) {
			if (c[i].apicid >> index_msb ==
			    c[cpu].apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && cpuid4_info[i]) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void free_cache_attributes(unsigned int cpu)
{
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;
	cpumask_t		oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (cpuid4_info[cpu] == NULL)
		return -ENOMEM;

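	/* cpuid must run on the target CPU: temporarily bind this task to it */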
	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	retval = 0;
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
			break;
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval)
		free_cache_attributes(cpu);
	return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject *cache_kobject[NR_CPUS];

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x, y)	(&((index_kobject[x])[y]))

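/*
 * cpuid(4) encodes most of these fields as (value - 1); the "+ val"
 * in show_one_plus() undoes that bias for sysfs.
 */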
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	if (num_cache_leaves == 0)
		return -ENOENT;

	detect_cache_attributes(cpu);
	if (cpuid4_info[cpu] == NULL)
		return -ENOENT;

	/* Allocate all required memory */
	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;

	index_kobject[cpu] = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval = 0;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);
	if (unlikely(retval < 0)) {
		/* don't leak the allocations if registration fails */
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (cpuid4_info[cpu] == NULL)
		return;
	for (i = 0; i < num_cache_leaves; i++) {
		cache_remove_shared_cpu_map(cpu, i);
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	}
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

	for_each_online_cpu(i) {
		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
			(void *)(long)i);
	}

	return 0;
}

device_initcall(cache_sysfs_init);

#endif