patch-2.4.26 linux-2.4.26/arch/x86_64/kernel/setup.c
- Lines: 325
- Date: 2004-04-14 06:05:28.000000000 -0700
- Orig file: linux-2.4.25/arch/x86_64/kernel/setup.c
- Orig date: 2003-11-28 10:26:19.000000000 -0800
diff -urN linux-2.4.25/arch/x86_64/kernel/setup.c linux-2.4.26/arch/x86_64/kernel/setup.c
@@ -54,6 +54,10 @@
#endif
+int swiotlb;
+
+extern int phys_proc_id[NR_CPUS];
+
/*
* Machine setup..
*/
@@ -228,6 +232,8 @@
e820_end_of_ram();
+ check_efer();
+
init_memory_mapping();
#ifdef CONFIG_BLK_DEV_INITRD
@@ -308,8 +314,7 @@
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
- if (!acpi_disabled)
- acpi_boot_init();
+ acpi_boot_init();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
/*
@@ -341,6 +346,12 @@
#ifdef CONFIG_GART_IOMMU
iommu_hole_init();
#endif
+#ifdef CONFIG_SWIOTLB
+ if (!iommu_aperture && end_pfn >= 0xffffffff>>PAGE_SHIFT) {
+ swiotlb_init();
+ swiotlb = 1;
+ }
+#endif
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
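[Aside on the swiotlb hunk above: with 4KB pages on x86-64 (PAGE_SHIFT of 12), 0xffffffff >> PAGE_SHIFT is the page frame number at the 4GB mark, so the condition fires exactly when RAM extends past what a 32-bit DMA mask can reach and no GART aperture is available to remap it. A minimal user-space sketch of that arithmetic, not part of the patch; the end_pfn value is made up:

#include <stdio.h>

int main(void)
{
    const unsigned long page_shift = 12;             /* 4KB pages, as on x86-64 */
    unsigned long boundary_pfn = 0xffffffffUL >> page_shift;
    unsigned long end_pfn = 0x140000;                /* hypothetical 5GB of RAM */

    printf("pfn at the 4GB boundary: 0x%lx\n", boundary_pfn);
    printf("swiotlb needed: %s\n", end_pfn >= boundary_pfn ? "yes" : "no");
    return 0;
}
]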
@@ -404,6 +415,198 @@
}
}
+#define LVL_1_INST 1
+#define LVL_1_DATA 2
+#define LVL_2 3
+#define LVL_3 4
+#define LVL_TRACE 5
+
+struct _cache_table
+{
+ unsigned char descriptor;
+ char cache_type;
+ short size;
+};
+
+/* all the cache descriptor types we care about (no TLB or trace cache entries) */
+static struct _cache_table cache_table[] __initdata =
+{
+ { 0x06, LVL_1_INST, 8 },
+ { 0x08, LVL_1_INST, 16 },
+ { 0x0A, LVL_1_DATA, 8 },
+ { 0x0C, LVL_1_DATA, 16 },
+ { 0x22, LVL_3, 512 },
+ { 0x23, LVL_3, 1024 },
+ { 0x25, LVL_3, 2048 },
+ { 0x29, LVL_3, 4096 },
+ { 0x39, LVL_2, 128 },
+ { 0x3C, LVL_2, 256 },
+ { 0x41, LVL_2, 128 },
+ { 0x42, LVL_2, 256 },
+ { 0x43, LVL_2, 512 },
+ { 0x44, LVL_2, 1024 },
+ { 0x45, LVL_2, 2048 },
+ { 0x66, LVL_1_DATA, 8 },
+ { 0x67, LVL_1_DATA, 16 },
+ { 0x68, LVL_1_DATA, 32 },
+ { 0x70, LVL_TRACE, 12 },
+ { 0x71, LVL_TRACE, 16 },
+ { 0x72, LVL_TRACE, 32 },
+ { 0x79, LVL_2, 128 },
+ { 0x7A, LVL_2, 256 },
+ { 0x7B, LVL_2, 512 },
+ { 0x7C, LVL_2, 1024 },
+ { 0x82, LVL_2, 256 },
+ { 0x83, LVL_2, 512 },
+ { 0x84, LVL_2, 1024 },
+ { 0x85, LVL_2, 2048 },
+ { 0x00, 0, 0}
+};
+
+int select_idle_routine(struct cpuinfo_x86 *c);
+
+static void __init init_intel(struct cpuinfo_x86 *c)
+{
+ unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+ char *p = NULL;
+ u32 eax, dummy;
+
+ unsigned int n;
+
+
+ select_idle_routine(c);
+ if (c->cpuid_level > 1) {
+ /* supports eax=2 call */
+ int i, j, n;
+ int regs[4];
+ unsigned char *dp = (unsigned char *)regs;
+
+ /* Number of times to iterate */
+ n = cpuid_eax(2) & 0xFF;
+
+ for ( i = 0 ; i < n ; i++ ) {
+ cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+ /* If bit 31 is set, this is an unknown format */
+ for ( j = 0 ; j < 3 ; j++ ) {
+ if ( regs[j] < 0 ) regs[j] = 0;
+ }
+
+ /* Byte 0 is level count, not a descriptor */
+ for ( j = 1 ; j < 16 ; j++ ) {
+ unsigned char des = dp[j];
+ unsigned char k = 0;
+
+ /* look up this descriptor in the table */
+ while (cache_table[k].descriptor != 0)
+ {
+ if (cache_table[k].descriptor == des) {
+ switch (cache_table[k].cache_type) {
+ case LVL_1_INST:
+ l1i += cache_table[k].size;
+ break;
+ case LVL_1_DATA:
+ l1d += cache_table[k].size;
+ break;
+ case LVL_2:
+ l2 += cache_table[k].size;
+ break;
+ case LVL_3:
+ l3 += cache_table[k].size;
+ break;
+ case LVL_TRACE:
+ trace += cache_table[k].size;
+ break;
+ }
+ break;
+ }
+
+ k++;
+ }
+ }
+ }
+
+ if ( trace )
+ printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
+ else if ( l1i )
+ printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+ if ( l1d )
+ printk(", L1 D cache: %dK\n", l1d);
+ else
+ printk("\n");
+ if ( l2 )
+ printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+ if ( l3 )
+ printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+ /*
+ * This assumes the L3 cache is shared; it typically lives in
+ * the northbridge. The L1 caches are included by the L2
+ * cache, and so should not be included for the purpose of
+ * SMP switching weights.
+ */
+ c->x86_cache_size = l2 ? l2 : (l1i+l1d);
+ }
+
+ if ( p )
+ strcpy(c->x86_model_id, p);
+
+#ifdef CONFIG_SMP
+ if (test_bit(X86_FEATURE_HT, &c->x86_capability)) {
+ int index_lsb, index_msb, tmp;
+ int initial_apic_id;
+ int cpu = smp_processor_id();
+ u32 ebx, ecx, edx;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+ if (smp_num_siblings == 1) {
+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+ } else if (smp_num_siblings > 1 ) {
+ index_lsb = 0;
+ index_msb = 31;
+ /*
+ * At this point we only support two siblings per
+ * processor package.
+ */
+#define NR_SIBLINGS 2
+ if (smp_num_siblings != NR_SIBLINGS) {
+ printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
+ smp_num_siblings = 1;
+ return;
+ }
+ tmp = smp_num_siblings;
+ while ((tmp & 1) == 0) {
+ tmp >>=1 ;
+ index_lsb++;
+ }
+ tmp = smp_num_siblings;
+ while ((tmp & 0x80000000 ) == 0) {
+ tmp <<=1 ;
+ index_msb--;
+ }
+ if (index_lsb != index_msb )
+ index_msb++;
+ initial_apic_id = ebx >> 24 & 0xff;
+ phys_proc_id[cpu] = initial_apic_id >> index_msb;
+
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
+ }
+
+ }
+#endif
+
+ n = cpuid_eax(0x80000000);
+ if (n >= 0x80000008) {
+ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
+ }
+
+}
+
static int __init init_amd(struct cpuinfo_x86 *c)
{
int r;
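[Aside on the init_intel() hunk above: the least obvious part of the hyper-threading block is how an initial APIC ID becomes a physical package ID. The two while loops compute, in effect, log2 of the sibling count rounded up, and shifting the APIC ID right by that many bits strips the sibling-selection bits. A small stand-alone sketch of the same arithmetic, not part of the patch; the APIC IDs are invented for illustration:

#include <stdio.h>

static int apic_to_package(unsigned int apic_id, unsigned int siblings)
{
    int index_lsb = 0, index_msb = 31;
    unsigned int tmp;

    tmp = siblings;
    while ((tmp & 1) == 0) {            /* count trailing zero bits */
        tmp >>= 1;
        index_lsb++;
    }
    tmp = siblings;
    while ((tmp & 0x80000000u) == 0) {  /* locate the highest set bit */
        tmp <<= 1;
        index_msb--;
    }
    if (index_lsb != index_msb)         /* round up when not a power of two */
        index_msb++;

    return apic_id >> index_msb;        /* drop the sibling bits */
}

int main(void)
{
    /* Two siblings per package: APIC IDs 0/1 map to package 0, 2/3 to package 1. */
    for (unsigned int id = 0; id < 4; id++)
        printf("apic %u -> package %d\n", id, apic_to_package(id, 2));
    return 0;
}

Since the patch limits smp_num_siblings to 2, the shift is always by one bit, so even/odd APIC ID pairs share a package.]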
@@ -433,6 +636,8 @@
if (!strcmp(v, "AuthenticAMD"))
c->x86_vendor = X86_VENDOR_AMD;
+ else if (!strcmp(v, "GenuineIntel"))
+ c->x86_vendor = X86_VENDOR_INTEL;
else
c->x86_vendor = X86_VENDOR_UNKNOWN;
}
@@ -448,7 +653,7 @@
*/
void __init identify_cpu(struct cpuinfo_x86 *c)
{
- int junk, i;
+ int i;
u32 xlvl, tfms;
c->loops_per_jiffy = loops_per_jiffy;
@@ -472,7 +677,7 @@
/* Intel-defined flags: level 0x00000001 */
if ( c->cpuid_level >= 0x00000001 ) {
__u32 misc;
- cpuid(0x00000001, &tfms, &misc, &junk,
+ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
&c->x86_capability[0]);
c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15;
@@ -521,9 +726,12 @@
init_amd(c);
break;
+ case X86_VENDOR_INTEL:
+ init_intel(c);
+ break;
case X86_VENDOR_UNKNOWN:
default:
- /* Not much we can do here... */
+ display_cacheinfo(c);
break;
}
@@ -581,7 +789,7 @@
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
- "fxsr", "sse", "sse2", "ss", NULL, "tm", "ia64", NULL,
+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
/* AMD-defined */
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -600,6 +808,12 @@
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Intel Defined (cpuid 1 and ecx) */
+ "pni", NULL, NULL, "monitor", "ds-cpl", NULL, NULL, "est",
+ "tm2", NULL, "cid", NULL, NULL, "cmpxchg16b", NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static char *x86_power_flags[] = {
"ts", /* temperature sensor */
@@ -636,6 +850,11 @@
seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+#ifdef CONFIG_SMP
+ seq_printf(m, "physical id\t: %d\n",phys_proc_id[c - cpu_data]);
+ seq_printf(m, "siblings\t: %d\n",smp_num_siblings);
+#endif
+
seq_printf(m,
"fpu\t\t: yes\n"
"fpu_exception\t: yes\n"
@@ -660,6 +879,7 @@
seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
+ if (c->x86_phys_bits > 0)
seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
c->x86_phys_bits, c->x86_virt_bits);
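[A final illustration tying two of the hunks above together: cpuid(1) now stores ecx into x86_capability[4], and the new "Intel Defined (cpuid 1 and ecx)" name row is what show_cpuinfo() walks to print those bits as flags. A minimal user-space sketch of that lookup, not part of the patch; the sample ecx value is made up:

#include <stdio.h>

/* Mirrors the ecx flag names added in the patch; unnamed bits stay NULL. */
static const char *ecx_flags[32] = {
    "pni", NULL, NULL, "monitor", "ds-cpl", NULL, NULL, "est",
    "tm2", NULL, "cid", NULL, NULL, "cmpxchg16b", NULL, NULL,
};

int main(void)
{
    unsigned int ecx = (1u << 0) | (1u << 3) | (1u << 7);  /* pni, monitor, est */

    for (int bit = 0; bit < 32; bit++)
        if ((ecx & (1u << bit)) && ecx_flags[bit])
            printf(" %s", ecx_flags[bit]);
    printf("\n");
    return 0;
}
]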