author     James Morris <jmorris@namei.org>    2008-11-14 11:29:12 +1100
committer  James Morris <jmorris@namei.org>    2008-11-14 11:29:12 +1100
commit     2b828925652340277a889cbc11b2d0637f7cdaf7 (patch)
tree       32fcb3d3e466fc419fad2d3717956a5b5ad3d35a /arch/ia64
parent     3a3b7ce9336952ea7b9564d976d068a238976c9d (diff)
parent     58e20d8d344b0ee083febb18c2b021d2427e56ca (diff)
download   linux-next-2b828925652340277a889cbc11b2d0637f7cdaf7.tar.gz
Merge branch 'master' into next
Conflicts:
security/keys/internal.h
security/keys/process_keys.c
security/keys/request_key.c
Fixed conflicts above by using the non 'tsk' versions.
Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'arch/ia64')

-rw-r--r--  arch/ia64/Kconfig                  | 19
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c   |  9
-rw-r--r--  arch/ia64/include/asm/io.h         | 24
-rw-r--r--  arch/ia64/include/asm/machvec.h    | 22
-rw-r--r--  arch/ia64/include/asm/meminit.h    |  1
-rw-r--r--  arch/ia64/include/asm/sal.h        | 15
-rw-r--r--  arch/ia64/include/asm/sn/sn_sal.h  | 45
-rw-r--r--  arch/ia64/kernel/acpi.c            | 29
-rw-r--r--  arch/ia64/kernel/pci-dma.c         |  2
-rw-r--r--  arch/ia64/kernel/setup.c           |  9
-rw-r--r--  arch/ia64/kvm/Kconfig              |  2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c           | 12
-rw-r--r--  arch/ia64/kvm/vcpu.h               |  5
-rw-r--r--  arch/ia64/mm/discontig.c           |  1
-rw-r--r--  arch/ia64/uv/kernel/setup.c        |  6
15 files changed, 119 insertions, 82 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 27eec71429b0..6bd91ed7cd03 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -148,6 +148,7 @@ config IA64_GENERIC
 	select ACPI_NUMA
 	select SWIOTLB
 	select PCI_MSI
+	select DMAR
 	help
 	  This selects the system type of your hardware. A "generic"
 	  kernel will run on any supported IA-64 system. However, if you configure
@@ -585,7 +586,7 @@ source "fs/Kconfig.binfmt"
 endmenu
-menu "Power management and ACPI"
+menu "Power management and ACPI options"
 source "kernel/power/Kconfig"
@@ -641,6 +642,8 @@ source "net/Kconfig"
 source "drivers/Kconfig"
+source "arch/ia64/hp/sim/Kconfig"
+
 config MSPEC
 	tristate "Memory special operations driver"
 	depends on IA64
@@ -652,6 +655,12 @@ config MSPEC
 source "fs/Kconfig"
+source "arch/ia64/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
 source "arch/ia64/kvm/Kconfig"
 source "lib/Kconfig"
@@ -678,11 +687,3 @@ config IRQ_PER_CPU
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
-
-source "arch/ia64/hp/sim/Kconfig"
-
-source "arch/ia64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 88b6e6f3fd88..2769dbfd03bf 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,19 +13,12 @@
  */
 #include <linux/device.h>
+#include <linux/swiotlb.h>
 #include <asm/machvec.h>
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
-extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
-extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
-extern ia64_mv_dma_supported		swiotlb_dma_supported;
-extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
 /* hwiommu declarations & definitions: */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 7f257507cd86..0d9d16e2d949 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -434,28 +434,4 @@ extern void memset_io(volatile void __iomem *s, int c, long n);
 # endif /* __KERNEL__ */
-/*
- * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
- * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
- * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
- * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
- * over BIO-level virtual merging.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#if 1
-#define BIO_VMERGE_BOUNDARY	0
-#else
-/*
- * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
- * replaced by dma_merge_mask() or something of that sort. Note: the only way
- * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
- * expanded into:
- *
- *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
- *
- * which is precisely what we want.
- */
-#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
-#endif
-
 #endif /* _ASM_IA64_IO_H */
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 1ea28bcee33b..59c17e446683 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,6 +11,7 @@
 #define _ASM_IA64_MACHVEC_H
 #include <linux/types.h>
+#include <linux/swiotlb.h>
 /* forward declarations: */
 struct device;
@@ -298,27 +299,6 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # endif /* CONFIG_IA64_GENERIC */
 /*
- * Declare default routines which aren't declared anywhere else:
- */
-extern ia64_mv_dma_init			swiotlb_init;
-extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
-extern ia64_mv_dma_map_single		swiotlb_map_single;
-extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
-extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
-extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
-extern ia64_mv_dma_map_sg		swiotlb_map_sg;
-extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
-extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device	swiotlb_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
-extern ia64_mv_dma_supported		swiotlb_dma_supported;
-
-/*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
  */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 6bc96ee54327..c0cea375620a 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -48,7 +48,6 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
  */
 #define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
 #define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
-#define ORDERROUNDDOWN(n)	((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
 #ifdef CONFIG_NUMA
   extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
index ea310c0812aa..966797a97c94 100644
--- a/arch/ia64/include/asm/sal.h
+++ b/arch/ia64/include/asm/sal.h
@@ -337,11 +337,24 @@ typedef struct sal_log_record_header {
 #define sal_log_severity_fatal		1
 #define sal_log_severity_corrected	2
+/*
+ * Error Recovery Info (ERI) bit decode. From SAL Spec section B.2.2 Table B-3
+ * Error Section Error_Recovery_Info Field Definition.
+ */
+#define ERI_NOT_VALID		0x0	/* Error Recovery Field is not valid */
+#define ERI_NOT_ACCESSIBLE	0x30	/* Resource not accessible */
+#define ERI_CONTAINMENT_WARN	0x22	/* Corrupt data propagated */
+#define ERI_UNCORRECTED_ERROR	0x20	/* Uncorrected error */
+#define ERI_COMPONENT_RESET	0x24	/* Component must be reset */
+#define ERI_CORR_ERROR_LOG	0x21	/* Corrected error, needs logging */
+#define ERI_CORR_ERROR_THRESH	0x29	/* Corrected error threshold exceeded */
+
 /* Definition of log section header structures */
 typedef struct sal_log_sec_header {
 	efi_guid_t guid;		/* Unique Section ID */
 	sal_log_revision_t revision;	/* Major and Minor revision of Section */
-	u16 reserved;
+	u8 error_recovery_info;		/* Platform error recovery status */
+	u8 reserved;
 	u32 len;			/* Section length */
 } sal_log_section_hdr_t;
diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h
index 57e649d388b8..e310fc0135dc 100644
--- a/arch/ia64/include/asm/sn/sn_sal.h
+++ b/arch/ia64/include/asm/sn/sn_sal.h
@@ -90,6 +90,8 @@
 #define SN_SAL_SET_CPU_NUMBER			0x02000068
 #define SN_SAL_KERNEL_LAUNCH_EVENT		0x02000069
+#define SN_SAL_WATCHLIST_ALLOC			0x02000070
+#define SN_SAL_WATCHLIST_FREE			0x02000071
 /*
  * Service-specific constants
@@ -1185,4 +1187,47 @@ ia64_sn_kernel_launch_event(void)
 	SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
 	return rv.status;
 }
+
+union sn_watchlist_u {
+	u64 val;
+	struct {
+		u64	blade	: 16,
+			size	: 32,
+			filler	: 16;
+	};
+};
+
+static inline int
+sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
+		      unsigned long *intr_mmr_offset)
+{
+	struct ia64_sal_retval rv;
+	unsigned long addr;
+	union sn_watchlist_u size_blade;
+	int watchlist;
+
+	addr = (unsigned long)mq;
+	size_blade.size = mq_size;
+	size_blade.blade = blade;
+
+	/*
+	 * bios returns watchlist number or negative error number.
+	 */
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_ALLOC, addr,
+			size_blade.val, (u64)intr_mmr_offset,
+			(u64)&watchlist, 0, 0, 0);
+	if (rv.status < 0)
+		return rv.status;
+
+	return watchlist;
+}
+
+static inline int
+sn_mq_watchlist_free(int blade, int watchlist_num)
+{
+	struct ia64_sal_retval rv;
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_FREE, blade,
+			watchlist_num, 0, 0, 0, 0, 0);
+	return rv.status;
+}
 #endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 0635015d0aaa..bd7acc71e8a9 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -678,6 +678,30 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
 	return 0;
 }
+int __init early_acpi_boot_init(void)
+{
+	int ret;
+
+	/*
+	 * do a partial walk of MADT to determine how many CPUs
+	 * we have including offline CPUs
+	 */
+	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
+		printk(KERN_ERR PREFIX "Can't find MADT\n");
+		return 0;
+	}
+
+	ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
+			acpi_parse_lsapic, NR_CPUS);
+	if (ret < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no LAPIC entries\n");
+
+	return 0;
+}
+
+
+
 int __init acpi_boot_init(void)
 {
@@ -701,11 +725,6 @@ int __init acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing LAPIC address override entry\n");
-	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
-	    < 1)
-		printk(KERN_ERR PREFIX
-		       "Error parsing MADT - no LAPIC entries\n");
-
 	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
 	    < 0)
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 031abbf9c875..dbdb778efa05 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -12,13 +12,11 @@
 #include <asm/machvec.h>
 #include <linux/dma-mapping.h>
-#include <asm/machvec.h>
 #include <asm/system.h>
 #ifdef CONFIG_DMAR
 #include <linux/kernel.h>
-#include <linux/string.h>
 #include <asm/page.h>
 #include <asm/iommu.h>
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ae7911702bf8..865af27c7737 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -359,7 +359,7 @@ reserve_memory (void)
 	}
 #endif
-#ifdef CONFIG_CRASH_KERNEL
+#ifdef CONFIG_CRASH_DUMP
 	if (reserve_elfcorehdr(&rsvd_region[n].start,
 			       &rsvd_region[n].end) == 0)
 		n++;
@@ -561,8 +561,12 @@ setup_arch (char **cmdline_p)
 #ifdef CONFIG_ACPI
 	/* Initialize the ACPI boot-time table parser */
 	acpi_table_init();
+	early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
 		32 : cpus_weight(early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
@@ -853,9 +857,6 @@
 void __init
 setup_per_cpu_areas (void)
 {
 	/* start_kernel() requires this... */
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-	prefill_possible_map();
-#endif
 }
 /*
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 8e99fed6b3fd..f833a0b4188d 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
+	# for device assignment:
+	depends on PCI
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	---help---
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3caac477de9e..af1464f7a6ad 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -673,16 +673,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	vcpu_load(vcpu);
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		vcpu_put(vcpu);
-		return -EAGAIN;
+		r = -EAGAIN;
+		goto out;
 	}
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
 	if (vcpu->mmio_needed) {
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
@@ -690,7 +690,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;
 	}
 	r = __vcpu_run(vcpu, kvm_run);
-
+out:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 341e3fee280c..e9b2a4e121c0 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -384,6 +384,10 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define MODE_IND(psr)	\
 	(((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
+#ifndef CONFIG_SMP
+#define _vmm_raw_spin_lock(x)	do {}while(0)
+#define _vmm_raw_spin_unlock(x)	do {}while(0)
+#else
 #define _vmm_raw_spin_lock(x)	\
 	do {	\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);	\
@@ -403,6 +407,7 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 	do { barrier();	\
 		((spinlock_t *)x)->raw_lock.lock = 0; }	\
 	while (0)
+#endif
 void vmm_spin_lock(spinlock_t *lock);
 void vmm_spin_unlock(spinlock_t *lock);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d8c5fcd89e5b..d85ba98d9008 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -635,7 +635,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
 #endif
 	start = GRANULEROUNDDOWN(start);
-	start = ORDERROUNDDOWN(start);
 	end = GRANULEROUNDUP(end);
 	mem_data[node].max_pfn = max(mem_data[node].max_pfn, end >> PAGE_SHIFT);
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c
index cf5f28ae96c4..7a5ae633198b 100644
--- a/arch/ia64/uv/kernel/setup.c
+++ b/arch/ia64/uv/kernel/setup.c
@@ -19,6 +19,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
 #ifdef CONFIG_IA64_SGI_UV
 int sn_prom_type;
+long sn_partition_id;
+EXPORT_SYMBOL(sn_partition_id);
+long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
+long sn_region_size;
+EXPORT_SYMBOL(sn_region_size);
 #endif
 struct redir_addr {
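
The meminit.h and discontig.c hunks both hinge on the same power-of-two mask arithmetic: GRANULEROUNDDOWN()/GRANULEROUNDUP() align an address down or up to the granule size, and the removed ORDERROUNDDOWN() aligned down to the much larger PAGE_SIZE << MAX_ORDER block, which is why count_node_pages() now starts from the granule-aligned address only. The snippet below is a minimal user-space sketch of that rounding; the ROUND_DOWN/ROUND_UP helpers and the 16 MB / 256 MB sizes are illustrative assumptions, not values taken from this tree.

    #include <stdio.h>

    /* Illustrative stand-ins; real values come from the ia64 kernel config. */
    #define GRANULE_SIZE  (16UL << 20)   /* assumed 16 MB granule, power of two */
    #define ORDER_SIZE    (256UL << 20)  /* assumed PAGE_SIZE << MAX_ORDER block */

    /*
     * Same mask pattern as GRANULEROUNDDOWN/GRANULEROUNDUP (and the removed
     * ORDERROUNDDOWN): for a power-of-two size, clearing the low bits rounds
     * an address down; adding size-1 first rounds it up.
     */
    #define ROUND_DOWN(n, size)  ((n) & ~((size) - 1))
    #define ROUND_UP(n, size)    (((n) + (size) - 1) & ~((size) - 1))

    int main(void)
    {
        unsigned long start = 0x12345678UL;
        unsigned long end   = 0x23456789UL;

        printf("granule round down: %#lx -> %#lx\n", start, ROUND_DOWN(start, GRANULE_SIZE));
        printf("granule round up:   %#lx -> %#lx\n", end,   ROUND_UP(end, GRANULE_SIZE));

        /* The dropped ORDERROUNDDOWN() aligned to a much larger block, so it
         * could pull 'start' well below the granule-aligned value: */
        printf("order round down:   %#lx -> %#lx\n", start, ROUND_DOWN(start, ORDER_SIZE));
        return 0;
    }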