Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile         |  1
-rw-r--r--  arch/powerpc/kernel/align.c          | 34
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c    | 11
-rw-r--r--  arch/powerpc/kernel/crash.c          |  2
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c |  2
-rw-r--r--  arch/powerpc/kernel/fadump.c         |  4
-rw-r--r--  arch/powerpc/kernel/irq.c            |  2
-rw-r--r--  arch/powerpc/kernel/kvm.c            |  4
-rw-r--r--  arch/powerpc/kernel/paca.c           |  3
-rw-r--r--  arch/powerpc/kernel/pci-common.c     | 20
-rw-r--r--  arch/powerpc/kernel/pci-hotplug.c    |  3
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c    |  3
-rw-r--r--  arch/powerpc/kernel/prom.c           | 82
-rw-r--r--  arch/powerpc/kernel/rtas.c           |  2
-rw-r--r--  arch/powerpc/kernel/smp.c            | 31
15 files changed, 69 insertions, 135 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fcc9a89a4695..fab19ec25597 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -2,6 +2,7 @@
 # Makefile for the linux kernel.
 #
 
+CFLAGS_prom.o = -I$(src)/../../../scripts/dtc/libfdt
 CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 94908af308d8..34f55524d456 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -25,14 +25,13 @@
 #include <asm/cputable.h>
 #include <asm/emulated_ops.h>
 #include <asm/switch_to.h>
+#include <asm/disassemble.h>
 
 struct aligninfo {
 	unsigned char len;
 	unsigned char flags;
 };
 
-#define IS_XFORM(inst)	(((inst) >> 26) == 31)
-#define IS_DSFORM(inst)	(((inst) >> 26) >= 56)
 
 #define INVALID	{ 0, 0 }
 
@@ -192,37 +191,6 @@ static struct aligninfo aligninfo[128] = {
 };
 
 /*
- * Create a DSISR value from the instruction
- */
-static inline unsigned make_dsisr(unsigned instr)
-{
-	unsigned dsisr;
-
-
-	/* bits  6:15 --> 22:31 */
-	dsisr = (instr & 0x03ff0000) >> 16;
-
-	if (IS_XFORM(instr)) {
-		/* bits 29:30 --> 15:16 */
-		dsisr |= (instr & 0x00000006) << 14;
-		/* bit 25 --> 17 */
-		dsisr |= (instr & 0x00000040) << 8;
-		/* bits 21:24 --> 18:21 */
-		dsisr |= (instr & 0x00000780) << 3;
-	} else {
-		/* bit 5 --> 17 */
-		dsisr |= (instr & 0x04000000) >> 12;
-		/* bits 1: 4 --> 18:21 */
-		dsisr |= (instr & 0x78000000) >> 17;
-		/* bits 30:31 --> 12:13 */
-		if (IS_DSFORM(instr))
-			dsisr |= (instr & 0x00000003) << 18;
-	}
-
-	return dsisr;
-}
-
-/*
  * The dcbz (data cache block zero) instruction
  * gives an alignment fault if used on non-cacheable
  * memory. We handle the fault mainly for the
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index cba2697406b7..f5995a912213 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -54,6 +54,7 @@
 #endif
 #if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
 #include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
 #endif
 
 #ifdef CONFIG_PPC32
@@ -446,7 +447,9 @@ int main(void)
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+#ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
+#endif
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -468,6 +471,9 @@ int main(void)
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian));
+#endif
 
 	DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
 	DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
@@ -494,7 +500,6 @@ int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
-	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -529,11 +534,13 @@ int main(void)
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
 	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
 	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
 	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
 	DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
+	DEFINE(VCPU_SHADOW_FSCR, offsetof(struct kvm_vcpu, arch.shadow_fscr));
 	DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
 	DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
 	DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
@@ -615,6 +622,7 @@ int main(void)
 #ifdef CONFIG_PPC64
 	SVCPU_FIELD(SVCPU_SLB, slb);
 	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+	SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr);
 #endif
 
 	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
@@ -650,6 +658,7 @@ int main(void)
 #ifdef CONFIG_PPC_BOOK3S_64
 	HSTATE_FIELD(HSTATE_CFAR, cfar);
 	HSTATE_FIELD(HSTATE_PPR, ppr);
+	HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr);
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #else /* CONFIG_PPC_BOOK3S */
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 18d7c80ddeb9..51dbace3269b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -81,7 +81,7 @@ void crash_ipi_callback(struct pt_regs *regs)
 	}
 
 	atomic_inc(&cpus_in_crash);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 
 	/*
 	 * Starting the kdump boot.
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 2d7eeae5b4d0..59e4ba74975d 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -37,7 +37,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
 					    int depth, void *data)
 {
 	const u32 *insts;
-	unsigned long len;
+	int len;
 	int i;
 
 	insts = of_get_flat_dt_prop(node, "hcall-instructions", &len);
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index d55e8986730a..742694c1d852 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -55,9 +55,9 @@ int crash_mem_ranges;
 int __init early_init_dt_scan_fw_dump(unsigned long node,
 			const char *uname, int depth, void *data)
 {
-	__be32 *sections;
+	const __be32 *sections;
 	int i, num_sections;
-	unsigned long size;
+	int size;
 	const int *token;
 
 	if (depth != 1 || strcmp(uname, "rtas") != 0)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ca1cd7459c4a..248ee7e5bebd 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -304,7 +304,7 @@ void notrace restore_interrupts(void)
  * being re-enabled and generally sanitized the lazy irq state,
  * and in the latter case it will leave with interrupts hard
  * disabled and marked as such, so the local_irq_enable() call
- * in cpu_idle() will properly re-enable everything.
+ * in arch_cpu_idle() will properly re-enable everything.
  */
 bool prep_irq_for_idle(void)
 {
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 6a0175297b0d..33aa4ddf597d 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN		0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
@@ -417,7 +417,7 @@ static void kvm_map_magic_page(void *data)
 	ulong out[8];
 
 	in[0] = KVM_MAGIC_PAGE;
-	in[1] = KVM_MAGIC_PAGE;
+	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;
 
 	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ad302f845e5d..d6e195e8cd4c 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -98,6 +98,9 @@ static inline void free_lppacas(void) { }
 /*
  * 3 persistent SLBs are registered here.  The buffer will be zero
  * initially, hence will all be invaild until we actually write them.
+ *
+ * If you make the number of persistent SLB entries dynamic, please also
+ * update PR KVM to flush and restore them accordingly.
  */
 static struct slb_shadow *slb_shadow;
 
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index add166aa806a..b49c72fd7f16 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -221,26 +221,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
 	return NULL;
 }
 
-static ssize_t pci_show_devspec(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct pci_dev *pdev;
-	struct device_node *np;
-
-	pdev = to_pci_dev (dev);
-	np = pci_device_to_OF_node(pdev);
-	if (np == NULL || np->full_name == NULL)
-		return 0;
-	return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-
-/* Add sysfs properties */
-int pcibios_add_platform_entries(struct pci_dev *pdev)
-{
-	return device_create_file(&pdev->dev, &dev_attr_devspec);
-}
-
 /*
  * Reads the interrupt pin to determine if interrupt is use by card.
  * If the interrupt is used, then gets the interrupt line from the
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index c1e17ae68a08..5b789177aa29 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -98,8 +98,7 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
 		max = bus->busn_res.start;
 		for (pass = 0; pass < 2; pass++) {
 			list_for_each_entry(dev, &bus->devices, bus_list) {
-				if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
-				    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+				if (pci_is_bridge(dev))
 					max = pci_scan_bridge(bus, dev, max, pass);
 			}
 		}
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index ea6470c21f4e..44562aa97f16 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -371,8 +371,7 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
 
 	/* Now scan child busses */
 	list_for_each_entry(dev, &bus->devices, bus_list) {
-		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
-		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
+		if (pci_is_bridge(dev)) {
 			of_scan_pci_bridge(dev);
 		}
 	}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 668aa4791fd7..613a860a203c 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -29,11 +29,11 @@
 #include <linux/bitops.h>
 #include <linux/export.h>
 #include <linux/kexec.h>
-#include <linux/debugfs.h>
 #include <linux/irq.h>
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/libfdt.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -118,14 +118,14 @@ static void __init move_device_tree(void)
 	DBG("-> move_device_tree\n");
 
 	start = __pa(initial_boot_params);
-	size = be32_to_cpu(initial_boot_params->totalsize);
+	size = fdt_totalsize(initial_boot_params);
 
 	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
 			overlaps_crashkernel(start, size) ||
 			overlaps_initrd(start, size)) {
 		p = __va(memblock_alloc(size, PAGE_SIZE));
 		memcpy(p, initial_boot_params, size);
-		initial_boot_params = (struct boot_param_header *)p;
+		initial_boot_params = p;
 		DBG("Moved device tree to 0x%p\n", p);
 	}
 
@@ -163,7 +163,7 @@ static struct ibm_pa_feature {
 	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
 };
 
-static void __init scan_features(unsigned long node, unsigned char *ftrs,
+static void __init scan_features(unsigned long node, const unsigned char *ftrs,
 				 unsigned long tablelen,
 				 struct ibm_pa_feature *fp,
 				 unsigned long ft_size)
@@ -202,8 +202,8 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs,
 
 static void __init check_cpu_pa_features(unsigned long node)
 {
-	unsigned char *pa_ftrs;
-	unsigned long tablelen;
+	const unsigned char *pa_ftrs;
+	int tablelen;
 
 	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
 	if (pa_ftrs == NULL)
@@ -216,7 +216,7 @@ static void __init check_cpu_pa_features(unsigned long node)
 #ifdef CONFIG_PPC_STD_MMU_64
 static void __init check_cpu_slb_size(unsigned long node)
 {
-	__be32 *slb_size_ptr;
+	const __be32 *slb_size_ptr;
 
 	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
 	if (slb_size_ptr != NULL) {
@@ -257,7 +257,7 @@ static struct feature_property {
 static inline void identical_pvr_fixup(unsigned long node)
 {
 	unsigned int pvr;
-	char *model = of_get_flat_dt_prop(node, "model", NULL);
+	const char *model = of_get_flat_dt_prop(node, "model", NULL);
 
 	/*
 	 * Since 440GR(x)/440EP(x) processors have the same pvr,
@@ -295,11 +295,11 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 	const __be32 *prop;
 	const __be32 *intserv;
 	int i, nthreads;
-	unsigned long len;
+	int len;
 	int found = -1;
 	int found_thread = 0;
 
@@ -325,9 +325,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 		 * version 2 of the kexec param format adds the phys cpuid of
 		 * booted proc.
 		 */
-		if (be32_to_cpu(initial_boot_params->version) >= 2) {
+		if (fdt_version(initial_boot_params) >= 2) {
 			if (be32_to_cpu(intserv[i]) ==
-			    be32_to_cpu(initial_boot_params->boot_cpuid_phys)) {
+			    fdt_boot_cpuid_phys(initial_boot_params)) {
 				found = boot_cpu_count;
 				found_thread = i;
 			}
@@ -392,7 +392,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
 					 int depth, void *data)
 {
-	unsigned long *lprop;	/* All these set by kernel, so no need to convert endian */
+	const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
 
 	/* Use common scan routine to determine if this is the chosen node */
 	if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
@@ -443,8 +443,9 @@
  */
 static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 {
-	__be32 *dm, *ls, *usm;
-	unsigned long l, n, flags;
+	const __be32 *dm, *ls, *usm;
+	int l;
+	unsigned long n, flags;
 	u64 base, size, memblock_size;
 	unsigned int is_kexec_kdump = 0, rngs;
 
@@ -564,9 +565,12 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 
 static void __init early_reserve_mem_dt(void)
 {
-	unsigned long i, len, dt_root;
+	unsigned long i, dt_root;
+	int len;
 	const __be32 *prop;
 
+	early_init_fdt_scan_reserved_mem();
+
 	dt_root = of_get_flat_dt_root();
 
 	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);
@@ -589,24 +593,14 @@ static void __init early_reserve_mem_dt(void)
 			memblock_reserve(base, size);
 		}
 	}
-
-	early_init_fdt_scan_reserved_mem();
 }
 
 static void __init early_reserve_mem(void)
 {
-	u64 base, size;
 	__be64 *reserve_map;
-	unsigned long self_base;
-	unsigned long self_size;
 
 	reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
-			be32_to_cpu(initial_boot_params->off_mem_rsvmap));
-
-	/* before we do anything, lets reserve the dt blob */
-	self_base = __pa((unsigned long)initial_boot_params);
-	self_size = be32_to_cpu(initial_boot_params->totalsize);
-	memblock_reserve(self_base, self_size);
+			fdt_off_mem_rsvmap(initial_boot_params));
 
 	/* Look for the new "reserved-regions" property in the DT */
 	early_reserve_mem_dt();
@@ -636,26 +630,12 @@ static void __init early_reserve_mem(void)
 			size_32 = be32_to_cpup(reserve_map_32++);
 			if (size_32 == 0)
 				break;
-			/* skip if the reservation is for the blob */
-			if (base_32 == self_base && size_32 == self_size)
-				continue;
 			DBG("reserving: %x -> %x\n", base_32, size_32);
 			memblock_reserve(base_32, size_32);
 		}
 		return;
 	}
 #endif
-	DBG("Processing reserve map\n");
-
-	/* Handle the reserve map in the fdt blob if it exists */
-	while (1) {
-		base = be64_to_cpup(reserve_map++);
-		size = be64_to_cpup(reserve_map++);
-		if (size == 0)
-			break;
-		DBG("reserving: %llx -> %llx\n", base, size);
-		memblock_reserve(base, size);
-	}
 }
 
 void __init early_init_devtree(void *params)
 {
@@ -922,23 +902,3 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
 	return (int)phys_id == get_hard_smp_processor_id(cpu);
 }
-
-#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
-static struct debugfs_blob_wrapper flat_dt_blob;
-
-static int __init export_flat_device_tree(void)
-{
-	struct dentry *d;
-
-	flat_dt_blob.data = initial_boot_params;
-	flat_dt_blob.size = be32_to_cpu(initial_boot_params->totalsize);
-
-	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
-				powerpc_debugfs_root, &flat_dt_blob);
-	if (!d)
-		return 1;
-
-	return 0;
-}
-__initcall(export_flat_device_tree);
-#endif
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8cd5ed049b5d..8b4c857c1421 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1142,7 +1142,7 @@ void __init rtas_initialize(void)
 int __init early_init_dt_scan_rtas(unsigned long node,
 		const char *uname, int depth, void *data)
 {
-	u32 *basep, *entryp, *sizep;
+	const u32 *basep, *entryp, *sizep;
 
 	if (depth != 1 || strcmp(uname, "rtas") != 0)
 		return 0;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6af946e9a984..7753af2d2613 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -745,6 +745,28 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
+#ifdef CONFIG_SCHED_SMT
+/* cpumask of CPUs with asymetric SMT dependancy */
+static const int powerpc_smt_flags(void)
+{
+	int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+
+	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
+		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
+		flags |= SD_ASYM_PACKING;
+	}
+	return flags;
+}
+#endif
+
+static struct sched_domain_topology_level powerpc_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	cpumask_var_t old_mask;
@@ -769,15 +791,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	dump_numa_cpu_topology();
 
-}
+	set_sched_topology(powerpc_topology);
 
-int arch_sd_sibling_asym_packing(void)
-{
-	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
-		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
-		return SD_ASYM_PACKING;
-	}
-	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU