author      Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-09-29 18:27:03 -0700
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-09-29 18:27:03 -0700
commit      d7173493683853047d6efc8d4454c70469a7b033 (patch)
tree        8e6dac16d3104494b5cc2f33756c582ecdc3b753
parent      80d26236c7c583ea2f43b68008ab7e067102c62c (diff)
parent      15c03dd4859ab16f9212238f29dd315654aa94f6 (diff)
download    linux-next-d7173493683853047d6efc8d4454c70469a7b033.tar.gz
Merge 3.12-rc3 into char-misc-next
We need the mei fixes in here so we can apply other updates that
depend on them.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
230 files changed, 2355 insertions, 1253 deletions
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index 6d1c0988cfc7..c67b975c8906 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 8a3d91d47b6a..c559f3f36309 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
diff --git a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index cdcebea9c6f5..066a78b034ca 100644
--- a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -1,14 +1,14 @@
-* Synopsis Designware Mobile Storage Host Controller
+* Synopsys Designware Mobile Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
 differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
+properties used by the Synopsys Designware Mobile Storage Host Controller.
Required Properties: * compatible: should be - - snps,dw-mshc: for controllers compliant with synopsis dw-mshc. + - snps,dw-mshc: for controllers compliant with synopsys dw-mshc. * #address-cells: should be 1. * #size-cells: should be 0. diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt index eabcb4b5db6e..e216af356847 100644 --- a/Documentation/devicetree/bindings/pci/designware-pcie.txt +++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt @@ -1,4 +1,4 @@ -* Synopsis Designware PCIe interface +* Synopsys Designware PCIe interface Required properties: - compatible: should contain "snps,dw-pcie" to identify the diff --git a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt index c5e032c85bf9..c5e032c85bf9 100644 --- a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt +++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 1a036cd972fb..539a23631990 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -3485,6 +3485,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. the unplug protocol never -- do not unplug even if version check succeeds + xen_nopvspin [X86,XEN] + Disables the ticketlock slowpath using Xen PV + optimizations. + xirc2ps_cs= [NET,PCMCIA] Format: <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index a46ddb85e83a..f911e3656209 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207 imac27 IMac 27 Inch auto BIOS setup (default) +Cirrus Logic CS4208 +=================== + mba6 MacBook Air 6,1 and 6,2 + gpio0 Enable GPIO 0 amp + auto BIOS setup (default) + VIA VT17xx/VT18xx/VT20xx ======================== auto BIOS setup (default) diff --git a/MAINTAINERS b/MAINTAINERS index e61c2e83fc2b..284969fa2896 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1812,7 +1812,8 @@ S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE -M: Christian Daudt <csd@broadcom.com> +M: Christian Daudt <bcm@fixthebug.org> +L: bcm-kernel-feedback-list@broadcom.com T: git git://git.github.com/broadcom/bcm11351 S: Maintained F: arch/arm/mach-bcm/ @@ -2639,6 +2640,18 @@ F: include/linux/device-mapper.h F: include/linux/dm-*.h F: include/uapi/linux/dm-*.h +DIGI NEO AND CLASSIC PCI PRODUCTS +M: Lidza Louina <lidza.louina@gmail.com> +L: driverdev-devel@linuxdriverproject.org +S: Maintained +F: drivers/staging/dgnc/ + +DIGI EPCA PCI PRODUCTS +M: Lidza Louina <lidza.louina@gmail.com> +L: driverdev-devel@linuxdriverproject.org +S: Maintained +F: drivers/staging/dgap/ + DIOLAN U2C-12 I2C DRIVER M: Guenter Roeck <linux@roeck-us.net> L: linux-i2c@vger.kernel.org @@ -6595,7 +6608,7 @@ S: Obsolete F: drivers/net/wireless/prism54/ PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER -M: Mikael Pettersson <mikpe@it.uu.se> +M: Mikael Pettersson <mikpelinux@gmail.com> L: linux-ide@vger.kernel.org S: Maintained F: drivers/ata/sata_promise.* @@ -8724,9 +8737,8 @@ F: Documentation/hid/hiddev.txt F: drivers/hid/usbhid/ USB/IP DRIVERS -M: Matt Mooney <mfm@muteddisk.com> L: linux-usb@vger.kernel.org -S: 
Maintained +S: Orphan F: drivers/staging/usbip/ USB ISP116X DRIVER @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 12 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = One Giant Leap for Frogkind # *DOCUMENTATION* diff --git a/arch/Kconfig b/arch/Kconfig index 1feb169274fe..af2cc6eabcc7 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP config HAVE_ARCH_JUMP_LABEL bool -config HAVE_ARCH_MUTEX_CPU_RELAX - bool - config HAVE_RCU_TABLE_FREE bool diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 3f7714d8d2d2..1ad6fb6c094d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2217,8 +2217,7 @@ config NEON config KERNEL_MODE_NEON bool "Support for NEON in kernel mode" - default n - depends on NEON + depends on NEON && AEABI help Say Y to include support for NEON in kernel mode. diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S index 19d6cd6f29f9..3a14ea8fe97e 100644 --- a/arch/arm/crypto/aes-armv4.S +++ b/arch/arm/crypto/aes-armv4.S @@ -148,7 +148,7 @@ AES_Te: @ const AES_KEY *key) { .align 5 ENTRY(AES_encrypt) - sub r3,pc,#8 @ AES_encrypt + adr r3,AES_encrypt stmdb sp!,{r1,r4-r12,lr} mov r12,r0 @ inp mov r11,r2 @@ -381,7 +381,7 @@ _armv4_AES_encrypt: .align 5 ENTRY(private_AES_set_encrypt_key) _armv4_AES_set_encrypt_key: - sub r3,pc,#8 @ AES_set_encrypt_key + adr r3,_armv4_AES_set_encrypt_key teq r0,#0 moveq r0,#-1 beq .Labrt @@ -843,7 +843,7 @@ AES_Td: @ const AES_KEY *key) { .align 5 ENTRY(AES_decrypt) - sub r3,pc,#8 @ AES_decrypt + adr r3,AES_decrypt stmdb sp!,{r1,r4-r12,lr} mov r12,r0 @ inp mov r11,r2 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 7e1f76027f66..72abdc541f38 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -19,6 +19,13 @@ #include <asm/unified.h> #include <asm/compiler.h> +#if __LINUX_ARM_ARCH__ < 6 +#include <asm-generic/uaccess-unaligned.h> +#else +#define __get_user_unaligned __get_user +#define __put_user_unaligned __put_user +#endif + #define VERIFY_READ 0 #define VERIFY_WRITE 1 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 74ad15d1a065..bc6bd9683ba4 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -442,10 +442,10 @@ local_restart: ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine add r1, sp, #S_OFF - cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) +2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back bcs arm_syscall -2: mov why, #0 @ no longer a real syscall + mov why, #0 @ no longer a real syscall b sys_ni_syscall @ not private func #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index de23a9beed13..39f89fbd5111 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -329,10 +329,10 @@ #ifdef CONFIG_CONTEXT_TRACKING .if \save stmdb sp!, {r0-r3, ip, lr} - bl user_exit + bl context_tracking_user_exit ldmia sp!, {r0-r3, ip, lr} .else - bl user_exit + bl context_tracking_user_exit .endif #endif .endm @@ -341,10 +341,10 @@ #ifdef CONFIG_CONTEXT_TRACKING .if \save stmdb sp!, {r0-r3, ip, lr} - bl user_enter + bl context_tracking_user_enter ldmia sp!, {r0-r3, ip, lr} .else - bl user_enter + bl context_tracking_user_enter .endif #endif .endm diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 51680d15ca8e..d445d060e346 100644 --- 
a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -187,7 +187,7 @@ /* * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other - * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and + * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ. */ diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index f25a7e9f8cbc..5f8b95512580 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev, { int i; - /* Make sure that gcc doesn't leave the empty loop body. */ - for (i = 0; i < nelems; i++, sg++) { - if (cpu_needs_post_dma_flush(dev)) + if (cpu_needs_post_dma_flush(dev)) + for (i = 0; i < nelems; i++, sg++) __dma_sync(sg_page(sg), sg->offset, sg->length, direction); - } } static void mips_dma_sync_sg_for_device(struct device *dev, @@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev, { int i; - /* Make sure that gcc doesn't leave the empty loop body. */ - for (i = 0; i < nelems; i++, sg++) { - if (!plat_device_is_coherent(dev)) + if (!plat_device_is_coherent(dev)) + for (i = 0; i < nelems; i++, sg++) __dma_sync(sg_page(sg), sg->offset, sg->length, direction); - } } int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h index eb59bfe23e85..93c9980e1b6b 100644 --- a/arch/openrisc/include/asm/prom.h +++ b/arch/openrisc/include/asm/prom.h @@ -14,53 +14,9 @@ * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ - -#include <linux/of.h> /* linux/of.h gets to determine #include ordering */ - #ifndef _ASM_OPENRISC_PROM_H #define _ASM_OPENRISC_PROM_H -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ -#include <linux/types.h> -#include <asm/irq.h> -#include <linux/irqdomain.h> -#include <linux/atomic.h> -#include <linux/of_irq.h> -#include <linux/of_fdt.h> -#include <linux/of_address.h> -#include <linux/proc_fs.h> -#include <linux/platform_device.h> #define HAVE_ARCH_DEVTREE_FIXUPS -/* Other Prototypes */ -extern int early_uartlite_console(void); - -/* Parse the ibm,dma-window property of an OF node into the busno, phys and - * size parameters. - */ -void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, - unsigned long *busno, unsigned long *phys, unsigned long *size); - -extern void kdump_move_device_tree(void); - -/* Get the MAC address */ -extern const void *of_get_mac_address(struct device_node *np); - -/** - * of_irq_map_pci - Resolve the interrupt for a PCI device - * @pdev: the device whose interrupt is to be resolved - * @out_irq: structure of_irq filled by this function - * - * This function resolves the PCI interrupt for a given PCI device. If a - * device-node exists for a given pci_dev, it will use normal OF tree - * walking. If not, it will implement standard swizzling and walk up the - * PCI tree until an device-node is found, at which point it will finish - * resolving using the OF tree walking. 
- */ -struct pci_dev; -extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); - -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ #endif /* _ASM_OPENRISC_PROM_H */ diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 6a15c968d214..15ca2255f438 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c -src-plat-y := of.c +src-plat-y := of.c epapr.c src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ treeboot-walnut.c cuboot-acadia.c \ cuboot-kilauea.c simpleboot.c \ @@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \ prpmc2800.c src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c -src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c +src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c src-wlib := $(sort $(src-wlib-y)) src-plat := $(sort $(src-plat-y)) diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c new file mode 100644 index 000000000000..c10191006673 --- /dev/null +++ b/arch/powerpc/boot/epapr-wrapper.c @@ -0,0 +1,9 @@ +extern void epapr_platform_init(unsigned long r3, unsigned long r4, + unsigned long r5, unsigned long r6, + unsigned long r7); + +void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + epapr_platform_init(r3, r4, r5, r6, r7); +} diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c index 06c1961bd124..02e91aa2194a 100644 --- a/arch/powerpc/boot/epapr.c +++ b/arch/powerpc/boot/epapr.c @@ -48,8 +48,8 @@ static void platform_fixups(void) fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size); } -void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7) +void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) { epapr_magic = r6; ima_size = r7; diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c index 61d9899aa0d0..62e2f43ec1df 100644 --- a/arch/powerpc/boot/of.c +++ b/arch/powerpc/boot/of.c @@ -26,6 +26,9 @@ static unsigned long claim_base; +void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7); + static void *of_try_claim(unsigned long size) { unsigned long addr = 0; @@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr) } } -void platform_init(unsigned long a1, unsigned long a2, void *promptr) +static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr) { platform_ops.image_hdr = of_image_hdr; platform_ops.malloc = of_try_claim; @@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr) loader_info.initrd_size = a2; } } + +void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* Detect OF vs. 
ePAPR boot */ + if (r5) + of_platform_init(r3, r4, (void *)r5); + else + epapr_platform_init(r3, r4, r5, r6, r7); +} + diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index 6761c746048d..cd7af841ba05 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -148,18 +148,18 @@ make_space=y case "$platform" in pseries) - platformo=$object/of.o + platformo="$object/of.o $object/epapr.o" link_address='0x4000000' ;; maple) - platformo=$object/of.o + platformo="$object/of.o $object/epapr.o" link_address='0x400000' ;; pmac|chrp) - platformo=$object/of.o + platformo="$object/of.o $object/epapr.o" ;; coff) - platformo="$object/crt0.o $object/of.o" + platformo="$object/crt0.o $object/of.o $object/epapr.o" lds=$object/zImage.coff.lds link_address='0x500000' pie= @@ -253,6 +253,7 @@ treeboot-iss4xx-mpic) platformo="$object/treeboot-iss4xx.o" ;; epapr) + platformo="$object/epapr.o $object/epapr-wrapper.o" link_address='0x20000000' pie=-pie ;; diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 0e40843a1c6e..41f13cec8a8f 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS]; extern void irq_ctx_init(void); extern void call_do_softirq(struct thread_info *tp); -extern int call_handle_irq(int irq, void *p1, - struct thread_info *tp, void *func); +extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp); extern void do_IRQ(struct pt_regs *regs); +extern void __do_irq(struct pt_regs *regs); int irq_choose_cpu(const struct cpumask *mask); diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index e378cccfca55..ce4de5aed7b5 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -149,8 +149,6 @@ typedef struct { struct thread_struct { unsigned long ksp; /* Kernel stack pointer */ - unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ - #ifdef CONFIG_PPC64 unsigned long ksp_vsid; #endif @@ -162,6 +160,7 @@ struct thread_struct { #endif #ifdef CONFIG_PPC32 void *pgdir; /* root of page-table tree */ + unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ #endif #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* @@ -321,7 +320,6 @@ struct thread_struct { #else #define INIT_THREAD { \ .ksp = INIT_SP, \ - .ksp_limit = INIT_SP_LIMIT, \ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ .fs = KERNEL_DS, \ .fpr = {{0}}, \ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index d8958be5f31a..502c7a4e73f7 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -80,10 +80,11 @@ int main(void) DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); + DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16)); + DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); #endif /* CONFIG_PPC64 */ DEFINE(KSP, offsetof(struct thread_struct, ksp)); - DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); #ifdef CONFIG_BOOKE DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index c69440cef7af..57d286a78f86 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -441,50 +441,6 @@ void migrate_irqs(void) } #endif -static inline void 
handle_one_irq(unsigned int irq) -{ - struct thread_info *curtp, *irqtp; - unsigned long saved_sp_limit; - struct irq_desc *desc; - - desc = irq_to_desc(irq); - if (!desc) - return; - - /* Switch to the irq stack to handle this */ - curtp = current_thread_info(); - irqtp = hardirq_ctx[smp_processor_id()]; - - if (curtp == irqtp) { - /* We're already on the irq stack, just handle it */ - desc->handle_irq(irq, desc); - return; - } - - saved_sp_limit = current->thread.ksp_limit; - - irqtp->task = curtp->task; - irqtp->flags = 0; - - /* Copy the softirq bits in preempt_count so that the - * softirq checks work in the hardirq context. */ - irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | - (curtp->preempt_count & SOFTIRQ_MASK); - - current->thread.ksp_limit = (unsigned long)irqtp + - _ALIGN_UP(sizeof(struct thread_info), 16); - - call_handle_irq(irq, desc, irqtp, desc->handle_irq); - current->thread.ksp_limit = saved_sp_limit; - irqtp->task = NULL; - - /* Set any flag that may have been set on the - * alternate stack - */ - if (irqtp->flags) - set_bits(irqtp->flags, &curtp->flags); -} - static inline void check_stack_overflow(void) { #ifdef CONFIG_DEBUG_STACKOVERFLOW @@ -501,9 +457,9 @@ static inline void check_stack_overflow(void) #endif } -void do_IRQ(struct pt_regs *regs) +void __do_irq(struct pt_regs *regs) { - struct pt_regs *old_regs = set_irq_regs(regs); + struct irq_desc *desc; unsigned int irq; irq_enter(); @@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs) */ irq = ppc_md.get_irq(); - /* We can hard enable interrupts now */ + /* We can hard enable interrupts now to allow perf interrupts */ may_hard_irq_enable(); /* And finally process it */ - if (irq != NO_IRQ) - handle_one_irq(irq); - else + if (unlikely(irq == NO_IRQ)) __get_cpu_var(irq_stat).spurious_irqs++; + else { + desc = irq_to_desc(irq); + if (likely(desc)) + desc->handle_irq(irq, desc); + } trace_irq_exit(regs); irq_exit(); +} + +void do_IRQ(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + struct thread_info *curtp, *irqtp; + + /* Switch to the irq stack to handle this */ + curtp = current_thread_info(); + irqtp = hardirq_ctx[raw_smp_processor_id()]; + + /* Already there ? */ + if (unlikely(curtp == irqtp)) { + __do_irq(regs); + set_irq_regs(old_regs); + return; + } + + /* Prepare the thread_info in the irq stack */ + irqtp->task = curtp->task; + irqtp->flags = 0; + + /* Copy the preempt_count so that the [soft]irq checks work. 
*/ + irqtp->preempt_count = curtp->preempt_count; + + /* Switch stack and call */ + call_do_irq(regs, irqtp); + + /* Restore stack limit */ + irqtp->task = NULL; + + /* Copy back updates to the thread_info */ + if (irqtp->flags) + set_bits(irqtp->flags, &curtp->flags); + set_irq_regs(old_regs); } @@ -592,28 +586,22 @@ void irq_ctx_init(void) memset((void *)softirq_ctx[i], 0, THREAD_SIZE); tp = softirq_ctx[i]; tp->cpu = i; - tp->preempt_count = 0; memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); tp = hardirq_ctx[i]; tp->cpu = i; - tp->preempt_count = HARDIRQ_OFFSET; } } static inline void do_softirq_onstack(void) { struct thread_info *curtp, *irqtp; - unsigned long saved_sp_limit = current->thread.ksp_limit; curtp = current_thread_info(); irqtp = softirq_ctx[smp_processor_id()]; irqtp->task = curtp->task; irqtp->flags = 0; - current->thread.ksp_limit = (unsigned long)irqtp + - _ALIGN_UP(sizeof(struct thread_info), 16); call_do_softirq(irqtp); - current->thread.ksp_limit = saved_sp_limit; irqtp->task = NULL; /* Set any flag that may have been set on the diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 777d999f563b..2b0ad9845363 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -36,26 +36,41 @@ .text +/* + * We store the saved ksp_limit in the unused part + * of the STACK_FRAME_OVERHEAD + */ _GLOBAL(call_do_softirq) mflr r0 stw r0,4(r1) + lwz r10,THREAD+KSP_LIMIT(r2) + addi r11,r3,THREAD_INFO_GAP stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) mr r1,r3 + stw r10,8(r1) + stw r11,THREAD+KSP_LIMIT(r2) bl __do_softirq + lwz r10,8(r1) lwz r1,0(r1) lwz r0,4(r1) + stw r10,THREAD+KSP_LIMIT(r2) mtlr r0 blr -_GLOBAL(call_handle_irq) +_GLOBAL(call_do_irq) mflr r0 stw r0,4(r1) - mtctr r6 - stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5) - mr r1,r5 - bctrl + lwz r10,THREAD+KSP_LIMIT(r2) + addi r11,r3,THREAD_INFO_GAP + stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) + mr r1,r4 + stw r10,8(r1) + stw r11,THREAD+KSP_LIMIT(r2) + bl __do_irq + lwz r10,8(r1) lwz r1,0(r1) lwz r0,4(r1) + stw r10,THREAD+KSP_LIMIT(r2) mtlr r0 blr diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 971d7e78aff2..e59caf874d05 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq) mtlr r0 blr -_GLOBAL(call_handle_irq) - ld r8,0(r6) +_GLOBAL(call_do_irq) mflr r0 std r0,16(r1) - mtctr r8 - stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5) - mr r1,r5 - bctrl + stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) + mr r1,r4 + bl .__do_irq ld r1,0(r1) ld r0,16(r1) mtlr r0 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 6f428da53e20..96d2fdf3aa9e 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, kregs = (struct pt_regs *) sp; sp -= STACK_FRAME_OVERHEAD; p->thread.ksp = sp; +#ifdef CONFIG_PPC32 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + _ALIGN_UP(sizeof(struct thread_info), 16); - +#endif #ifdef CONFIG_HAVE_HW_BREAKPOINT p->thread.ptrace_bps[0] = NULL; #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 12e656ffe60e..5fe2842e8bab 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt; static cell_t __initdata regbuf[1024]; +static bool rtas_has_query_cpu_stopped; + /* * Error results ... 
some OF calls will return "-1" on error, some @@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void) prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", &val, sizeof(val)); + /* Check if it supports "query-cpu-stopped-state" */ + if (prom_getprop(rtas_node, "query-cpu-stopped-state", + &val, sizeof(val)) != PROM_ERROR) + rtas_has_query_cpu_stopped = true; + #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__) /* PowerVN takeover hack */ prom_rtas_data = base; @@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void) = (void *) LOW_ADDR(__secondary_hold_acknowledge); unsigned long secondary_hold = LOW_ADDR(__secondary_hold); + /* + * On pseries, if RTAS supports "query-cpu-stopped-state", + * we skip this stage, the CPUs will be started by the + * kernel using RTAS. + */ + if ((of_platform == PLATFORM_PSERIES || + of_platform == PLATFORM_PSERIES_LPAR) && + rtas_has_query_cpu_stopped) { + prom_printf("prom_hold_cpus: skipped\n"); + return; + } + prom_debug("prom_hold_cpus: start...\n"); prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); @@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * On non-powermacs, put all CPUs in spin-loops. * * PowerMacs use a different mechanism to spin CPUs + * + * (This must be done after instanciating RTAS) */ if (of_platform != PLATFORM_POWERMAC && of_platform != PLATFORM_OPAL) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index a7ee978fb860..b1faa1593c90 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) */ if ((ra == 1) && !(regs->msr & MSR_PR) \ && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) { +#ifdef CONFIG_PPC32 /* * Check if we will touch kernel sack overflow */ @@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) err = -EINVAL; break; } - +#endif /* CONFIG_PPC32 */ /* * Check if we already set since that means we'll * lose the previous value. diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 1c1771a40250..24f58cb0a543 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -233,18 +233,24 @@ static void __init smp_init_pseries(void) alloc_bootmem_cpumask_var(&of_spin_mask); - /* Mark threads which are still spinning in hold loops. */ - if (cpu_has_feature(CPU_FTR_SMT)) { - for_each_present_cpu(i) { - if (cpu_thread_in_core(i) == 0) - cpumask_set_cpu(i, of_spin_mask); - } - } else { - cpumask_copy(of_spin_mask, cpu_present_mask); + /* + * Mark threads which are still spinning in hold loops + * + * We know prom_init will not have started them if RTAS supports + * query-cpu-stopped-state. 
+ */ + if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) { + if (cpu_has_feature(CPU_FTR_SMT)) { + for_each_present_cpu(i) { + if (cpu_thread_in_core(i) == 0) + cpumask_set_cpu(i, of_spin_mask); + } + } else + cpumask_copy(of_spin_mask, cpu_present_mask); + + cpumask_clear_cpu(boot_cpuid, of_spin_mask); } - cpumask_clear_cpu(boot_cpuid, of_spin_mask); - /* Non-lpar has additional take/give timebase */ if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { smp_ops->give_timebase = rtas_give_timebase; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index dcc6ac2d8026..7143793859fa 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -93,6 +93,7 @@ config S390 select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE select ARCH_SAVE_PAGE_KEYS if HIBERNATION + select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS2 @@ -102,7 +103,6 @@ config S390 select GENERIC_TIME_VSYSCALL_OLD select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 - select HAVE_ARCH_MUTEX_CPU_RELAX select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h index 688271f5f2e4..458c1f7fbc18 100644 --- a/arch/s390/include/asm/mutex.h +++ b/arch/s390/include/asm/mutex.h @@ -7,5 +7,3 @@ */ #include <asm-generic/mutex-dec.h> - -#define arch_mutex_cpu_relax() barrier() diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 0eb37505cab1..ca7821f07260 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -198,6 +198,8 @@ static inline void cpu_relax(void) barrier(); } +#define arch_mutex_cpu_relax() barrier() + static inline void psw_set_key(unsigned int key) { asm volatile("spka 0(%0)" : : "d" (key)); diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 701fe8c59e1f..83e5d216105e 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); extern int arch_spin_trylock_retry(arch_spinlock_t *); extern void arch_spin_relax(arch_spinlock_t *lock); +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return lock.owner_cpu == 0; +} + static inline void arch_spin_lock(arch_spinlock_t *lp) { int old; diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 6aef9fbc09b7..b913915e8e63 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn) return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY; } -static inline unsigned long mfn_to_pfn(unsigned long mfn) +static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn) { unsigned long pfn; - int ret = 0; + int ret; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; - if (unlikely(mfn >= machine_to_phys_nr)) { - pfn = ~0; - goto try_override; - } - pfn = 0; + if (unlikely(mfn >= machine_to_phys_nr)) + return ~0; + /* * The array access can fail (e.g., device space beyond end of RAM). * In such cases it doesn't matter what we return (we return garbage), * but we must handle the fault without crashing! 
*/ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); -try_override: - /* ret might be < 0 if there are no entries in the m2p for mfn */ if (ret < 0) - pfn = ~0; - else if (get_phys_to_machine(pfn) != mfn) + return ~0; + + return pfn; +} + +static inline unsigned long mfn_to_pfn(unsigned long mfn) +{ + unsigned long pfn; + + if (xen_feature(XENFEAT_auto_translated_physmap)) + return mfn; + + pfn = mfn_to_pfn_no_overrides(mfn); + if (get_phys_to_machine(pfn) != mfn) { /* * If this appears to be a foreign mfn (because the pfn * doesn't map back to the mfn), then check the local override @@ -111,6 +119,7 @@ try_override: * m2p_find_override_pfn returns ~0 if it doesn't find anything. */ pfn = m2p_find_override_pfn(mfn, ~0); + } /* * pfn is ~0 if there are no entries in the m2p for mfn or if the diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 8355c84b9729..897783b3302a 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void) err = amd_pmu_init(); break; default: - return 0; + err = -ENOTSUPP; } if (err != 0) { pr_cont("no PMU driver, software events only.\n"); @@ -1883,9 +1883,9 @@ static struct pmu pmu = { void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) { - userpg->cap_usr_time = 0; - userpg->cap_usr_time_zero = 0; - userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; + userpg->cap_user_time = 0; + userpg->cap_user_time_zero = 0; + userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; userpg->pmc_width = x86_pmu.cntval_bits; if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) @@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) return; - userpg->cap_usr_time = 1; + userpg->cap_user_time = 1; userpg->time_mult = this_cpu_read(cyc2ns); userpg->time_shift = CYC2NS_SCALE_FACTOR; userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; if (sched_clock_stable && !check_tsc_disabled()) { - userpg->cap_usr_time_zero = 1; + userpg->cap_user_time_zero = 1; userpg->time_zero = this_cpu_read(cyc2ns_offset); } } diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 9db76c31b3c3..f31a1655d1ff 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void) break; case 55: /* Atom 22nm "Silvermont" */ + case 77: /* Avoton "Silvermont" */ memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 8ed44589b0e4..4118f9f68315 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) box->hrtimer.function = uncore_pmu_hrtimer; } -struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu) +static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node) { struct intel_uncore_box *box; int i, size; size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); - box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); + box = kzalloc_node(size, GFP_KERNEL, node); if (!box) return NULL; @@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu 
*pmu, struct intel_uncore_box *fake_box; int ret = -EINVAL, n; - fake_box = uncore_alloc_box(pmu->type, smp_processor_id()); + fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); if (!fake_box) return -ENOMEM; @@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id } type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; - box = uncore_alloc_box(type, 0); + box = uncore_alloc_box(type, NUMA_NO_NODE); if (!box) return -ENOMEM; @@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id) if (pmu->func_id < 0) pmu->func_id = j; - box = uncore_alloc_box(type, cpu); + box = uncore_alloc_box(type, cpu_to_node(cpu)); if (!box) return -ENOMEM; diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7123b5df479d..af99f71aeb7f 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu) /* need to apply patch? */ if (rev >= mc_amd->hdr.patch_id) { c->microcode = rev; + uci->cpu_sig.rev = rev; return 0; } diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 563ed91e6faa..e643e744e4d8 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { }, { /* Handle problems with rebooting on the Precision M6600. */ .callback = set_pci_reboot, - .ident = "Dell OptiPlex 990", + .ident = "Dell Precision M6600", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), }, }, + { /* Handle problems with rebooting on the Dell PowerEdge C6100. */ + .callback = set_pci_reboot, + .ident = "Dell PowerEdge C6100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "C6100"), + }, + }, + { /* Some C6100 machines were shipped with vendor being 'Dell'. */ + .callback = set_pci_reboot, + .ident = "Dell PowerEdge C6100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell"), + DMI_MATCH(DMI_PRODUCT_NAME, "C6100"), + }, + }, { } }; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 90f6ed127096..c7e22ab29a5a 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void) for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; - if (!(md->attribute & EFI_MEMORY_RUNTIME) && - md->type != EFI_BOOT_SERVICES_CODE && - md->type != EFI_BOOT_SERVICES_DATA) - continue; + if (!(md->attribute & EFI_MEMORY_RUNTIME)) { +#ifdef CONFIG_X86_64 + if (md->type != EFI_BOOT_SERVICES_CODE && + md->type != EFI_BOOT_SERVICES_DATA) +#endif + continue; + } size = md->num_pages << EFI_PAGE_SHIFT; end = md->phys_addr + size; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 8b901e8d782d..a61c7d5811be 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page, unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; - int ret = 0; pfn = page_to_pfn(page); if (!PageHighMem(page)) { @@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page, * frontend pages while they are being shared with the backend, * because mfn_to_pfn (that ends up being called by GUPF) will * return the backend pfn rather than the frontend pfn. 
*/ - ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); - if (ret == 0 && get_phys_to_machine(pfn) == mfn) + pfn = mfn_to_pfn_no_overrides(mfn); + if (get_phys_to_machine(pfn) == mfn) set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); return 0; @@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page, unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; - int ret = 0; pfn = page_to_pfn(page); mfn = get_phys_to_machine(pfn); @@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page, * the original pfn causes mfn_to_pfn(mfn) to return the frontend * pfn again. */ mfn &= ~FOREIGN_FRAME_BIT; - ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); - if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && + pfn = mfn_to_pfn_no_overrides(mfn); + if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && m2p_find_override(mfn) == NULL) set_phys_to_machine(pfn, mfn); diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 253f63fceea1..be6b86078957 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu) } +/* + * Our init of PV spinlocks is split in two init functions due to us + * using paravirt patching and jump labels patching and having to do + * all of this before SMP code is invoked. + * + * The paravirt patching needs to be done _before_ the alternative asm code + * is started, otherwise we would not patch the core kernel code. + */ void __init xen_init_spinlocks(void) { @@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void) return; } - static_key_slow_inc(¶virt_ticketlocks_enabled); - pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); pv_lock_ops.unlock_kick = xen_unlock_kick; } +/* + * While the jump_label init code needs to happend _after_ the jump labels are + * enabled and before SMP is started. Hence we use pre-SMP initcall level + * init. We cannot do it in xen_init_spinlocks as that is done before + * jump labels are activated. 
+ */ +static __init int xen_init_spinlocks_jump(void) +{ + if (!xen_pvspin) + return 0; + + static_key_slow_inc(¶virt_ticketlocks_enabled); + return 0; +} +early_initcall(xen_init_spinlocks_jump); + static __init int xen_parse_nopvspin(char *arg) { xen_pvspin = false; diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c index f40acef80269..a6977e12d574 100644 --- a/drivers/acpi/acpi_ipmi.c +++ b/drivers/acpi/acpi_ipmi.c @@ -39,6 +39,7 @@ #include <linux/ipmi.h> #include <linux/device.h> #include <linux/pnp.h> +#include <linux/spinlock.h> MODULE_AUTHOR("Zhao Yakui"); MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); @@ -57,7 +58,7 @@ struct acpi_ipmi_device { struct list_head head; /* the IPMI request message list */ struct list_head tx_msg_list; - struct mutex tx_msg_lock; + spinlock_t tx_msg_lock; acpi_handle handle; struct pnp_dev *pnp_dev; ipmi_user_t user_interface; @@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, struct kernel_ipmi_msg *msg; struct acpi_ipmi_buffer *buffer; struct acpi_ipmi_device *device; + unsigned long flags; msg = &tx_msg->tx_message; /* @@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, /* Get the msgid */ device = tx_msg->device; - mutex_lock(&device->tx_msg_lock); + spin_lock_irqsave(&device->tx_msg_lock, flags); device->curr_msgid++; tx_msg->tx_msgid = device->curr_msgid; - mutex_unlock(&device->tx_msg_lock); + spin_unlock_irqrestore(&device->tx_msg_lock, flags); } static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, @@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) int msg_found = 0; struct acpi_ipmi_msg *tx_msg; struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; + unsigned long flags; if (msg->user != ipmi_device->user_interface) { dev_warn(&pnp_dev->dev, "Unexpected response is returned. " @@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) ipmi_free_recv_msg(msg); return; } - mutex_lock(&ipmi_device->tx_msg_lock); + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { if (msg->msgid == tx_msg->tx_msgid) { msg_found = 1; @@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) } } - mutex_unlock(&ipmi_device->tx_msg_lock); + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); if (!msg_found) { dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " "returned.\n", msg->msgid); @@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, struct acpi_ipmi_device *ipmi_device = handler_context; int err, rem_time; acpi_status status; + unsigned long flags; /* * IPMI opregion message. 
* IPMI message is firstly written to the BMC and system software @@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, return AE_NO_MEMORY; acpi_format_ipmi_msg(tx_msg, address, value); - mutex_lock(&ipmi_device->tx_msg_lock); + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); - mutex_unlock(&ipmi_device->tx_msg_lock); + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); err = ipmi_request_settime(ipmi_device->user_interface, &tx_msg->addr, tx_msg->tx_msgid, @@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, status = AE_OK; end_label: - mutex_lock(&ipmi_device->tx_msg_lock); + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); list_del(&tx_msg->head); - mutex_unlock(&ipmi_device->tx_msg_lock); + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); kfree(tx_msg); return status; } @@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) INIT_LIST_HEAD(&ipmi_device->head); - mutex_init(&ipmi_device->tx_msg_lock); + spin_lock_init(&ipmi_device->tx_msg_lock); INIT_LIST_HEAD(&ipmi_device->tx_msg_list); ipmi_install_space_handler(ipmi_device); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index fbdb82e70d10..611ce9061dc5 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver) EXPORT_SYMBOL(acpi_bus_register_driver); /** - * acpi_bus_unregister_driver - unregisters a driver with the APIC bus + * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus * @driver: driver to unregister * * Unregisters a driver with the ACPI bus. Searches the namespace for all diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 958ba2a420c3..97f4acb54ad6 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -2,7 +2,7 @@ * sata_promise.c - Promise SATA * * Maintained by: Tejun Heo <tj@kernel.org> - * Mikael Pettersson <mikpe@it.uu.se> + * Mikael Pettersson * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* diff --git a/drivers/base/core.c b/drivers/base/core.c index c7cfadcf6752..34abf4d8a45f 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move); */ void device_shutdown(void) { - struct device *dev; + struct device *dev, *parent; spin_lock(&devices_kset->list_lock); /* @@ -2034,7 +2034,7 @@ void device_shutdown(void) * prevent it from being freed because parent's * lock is to be held */ - get_device(dev->parent); + parent = get_device(dev->parent); get_device(dev); /* * Make sure the device is off the kset list, in the @@ -2044,8 +2044,8 @@ void device_shutdown(void) spin_unlock(&devices_kset->list_lock); /* hold lock to avoid race with probe/release */ - if (dev->parent) - device_lock(dev->parent); + if (parent) + device_lock(parent); device_lock(dev); /* Don't allow any more runtime suspends */ @@ -2063,11 +2063,11 @@ void device_shutdown(void) } device_unlock(dev); - if (dev->parent) - device_unlock(dev->parent); + if (parent) + device_unlock(parent); put_device(dev); - put_device(dev->parent); + put_device(parent); spin_lock(&devices_kset->list_lock); } diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index d2d95ff5353b..edfa2515bc86 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, int err; u32 cp; + memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 639d26b90b91..2b9440384536 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -1193,6 +1193,7 @@ out_passthru: ida_pci_info_struct pciinfo; if (!arg) return -EINVAL; + memset(&pciinfo, 0, sizeof(pciinfo)); pciinfo.bus = host->pci_dev->bus->number; pciinfo.dev_fn = host->pci_dev->devfn; pciinfo.board_id = host->board_id; diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 7a7929ba2658..06189e55b4e5 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) return length; } -ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct tpm_private *priv = TPM_VPRIV(chip); - u8 locality = priv->shr->locality; - - return sprintf(buf, "%d\n", locality); -} - -ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct tpm_private *priv = TPM_VPRIV(chip); - u8 val; - - int rv = kstrtou8(buf, 0, &val); - if (rv) - return rv; - - priv->shr->locality = val; - - return len; -} - static const struct file_operations vtpm_ops = { .owner = THIS_MODULE, .llseek = no_llseek, @@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); -static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality, - tpm_store_locality); static struct attribute *vtpm_attrs[] = { &dev_attr_pubek.attr, @@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = { &dev_attr_cancel.attr, &dev_attr_durations.attr, &dev_attr_timeouts.attr, - &dev_attr_locality.attr, NULL, }; @@ -210,8 +181,6 @@ static struct 
attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs, }; -#define TPM_LONG_TIMEOUT (10 * 60 * HZ) - static const struct tpm_vendor_specific tpm_vtpm = { .status = vtpm_status, .recv = vtpm_recv, @@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = { .miscdev = { .fops = &vtpm_ops, }, - .duration = { - TPM_LONG_TIMEOUT, - TPM_LONG_TIMEOUT, - TPM_LONG_TIMEOUT, - }, }; static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 41c69469ce20..971d796e071d 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -26,6 +26,7 @@ config DW_APB_TIMER_OF config ARMADA_370_XP_TIMER bool + select CLKSRC_OF config ORION_TIMER select CLKSRC_OF diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index 37f5325bec95..b9ddd9e3a2f5 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c @@ -30,6 +30,9 @@ void __init clocksource_of_init(void) clocksource_of_init_fn init_func; for_each_matching_node_and_match(np, __clksrc_of_table, &match) { + if (!of_device_is_available(np)) + continue; + init_func = match->data; init_func(np); } diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index b9c81b7c3a3b..3a5909c12d42 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p) ced->name = dev_name(&p->pdev->dev); ced->features = CLOCK_EVT_FEAT_ONESHOT; ced->rating = 200; - ced->cpumask = cpumask_of(0); + ced->cpumask = cpu_possible_mask; ced->set_next_event = em_sti_clock_event_next; ced->set_mode = em_sti_clock_event_mode; diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 5b34768f4d7c..62b0de6a1837 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) evt->irq); return -EIO; } - irq_set_affinity(evt->irq, cpumask_of(cpu)); } else { enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); } @@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { struct mct_clock_event_device *mevt; + unsigned int cpu; /* * Grab cpu pointer in each case to avoid spurious @@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self, mevt = this_cpu_ptr(&percpu_mct_tick); exynos4_local_timer_setup(&mevt->evt); break; + case CPU_ONLINE: + cpu = (unsigned long)hcpu; + if (mct_int_type == MCT_INT_SPI) + irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu], + cpumask_of(cpu)); + break; case CPU_DYING: mevt = this_cpu_ptr(&percpu_mct_tick); exynos4_local_timer_stop(&mevt->evt); @@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem &percpu_mct_tick); WARN(err, "MCT: can't request IRQ %d (%d)\n", mct_irqs[MCT_L0_IRQ], err); + } else { + irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0)); } err = register_cpu_notifier(&exynos4_mct_cpu_nb); diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index a1260b4549db..d2c3253e015e 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void) { int ret; + /* don't keep reloading if cpufreq_driver exists */ + if (cpufreq_get_current_driver()) + return 0; + if (acpi_disabled) return 0; diff --git a/drivers/cpufreq/cpufreq.c 
b/drivers/cpufreq/cpufreq.c index 89b3c52cd5c3..04548f7023af 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1460,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu) { unsigned int ret_freq = 0; + if (cpufreq_disabled() || !cpufreq_driver) + return -ENOENT; + if (!down_read_trylock(&cpufreq_rwsem)) return 0; diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c index d514c152fd1a..be5380ecdcd4 100644 --- a/drivers/cpufreq/exynos5440-cpufreq.c +++ b/drivers/cpufreq/exynos5440-cpufreq.c @@ -457,7 +457,7 @@ err_free_table: opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); err_put_node: of_node_put(np); - dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); + dev_err(&pdev->dev, "%s: failed initialization\n", __func__); return ret; } diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index b1f8fc69023f..60e84043aa34 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); break; case DRM_MODE_DPMS_OFF: - /* disable audio and video ports */ - reg_write(encoder, REG_ENA_AP, 0x00); + /* disable video ports */ reg_write(encoder, REG_ENA_VP_0, 0x00); reg_write(encoder, REG_ENA_VP_1, 0x00); reg_write(encoder, REG_ENA_VP_2, 0x00); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index df9253d890ee..cdfb9da0e4ce 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4800,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc) if (!mutex_trylock(&dev->struct_mutex)) { if (!mutex_is_locked_by(&dev->struct_mutex, current)) - return SHRINK_STOP; + return 0; if (dev_priv->mm.shrinker_no_lock_stealing) - return SHRINK_STOP; + return 0; unlock = false; } @@ -4901,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc) if (!mutex_trylock(&dev->struct_mutex)) { if (!mutex_is_locked_by(&dev->struct_mutex, current)) - return 0; + return SHRINK_STOP; if (dev_priv->mm.shrinker_no_lock_stealing) - return 0; + return SHRINK_STOP; unlock = false; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index aba9d7498996..dae364f0028c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, /* Seek the first printf which is hits start position */ if (e->pos < e->start) { - len = vsnprintf(NULL, 0, f, args); - if (!__i915_error_seek(e, len)) + va_list tmp; + + va_copy(tmp, args); + if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp))) return; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d8a1d98693e7..e5822e79f912 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4775,6 +4775,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) pipeconf = 0; + if (dev_priv->quirks & QUIRK_PIPEA_FORCE && + I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) + pipeconf |= PIPECONF_ENABLE; + if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { /* Enable pixel doubling when the dot clock is > 90% of the (display) * core speed. 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2151d13772b8..79c14e298ba6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, DRM_DEBUG_KMS("aux_ch native nack\n"); return -EREMOTEIO; case AUX_NATIVE_REPLY_DEFER: - udelay(100); + /* + * For now, just give more slack to branch devices. We + * could check the DPCD for I2C bit rate capabilities, + * and if available, adjust the interval. We could also + * be more careful with DP-to-Legacy adapters where a + * long legacy cable may force very low I2C bit rates. + */ + if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT) + usleep_range(500, 600); + else + usleep_range(300, 400); continue; default: DRM_ERROR("aux_ch invalid native reply 0x%02x\n", diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index f2c6d7909ae2..dd6f84bf6c22 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder, DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); pipe_config->pipe_bpp = 8*3; + /* TV has it's own notion of sync and other mode flags, so clear them. */ + pipe_config->adjusted_mode.flags = 0; + + /* + * FIXME: We don't check whether the input mode is actually what we want + * or whether userspace is doing something stupid. + */ + return true; } diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c index 5db5bbaedae2..bc7fd11ad8be 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c @@ -19,8 +19,6 @@ #include "msm_drv.h" #include "mdp4_kms.h" -#include <mach/iommu.h> - static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); static int mdp4_hw_init(struct msm_kms *kms) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 008d772384c7..b3a2f1629041 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -18,8 +18,6 @@ #include "msm_drv.h" #include "msm_gpu.h" -#include <mach/iommu.h> - static void msm_fb_output_poll_changed(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; @@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, int i, ret; for (i = 0; i < cnt; i++) { + /* TODO maybe some day msm iommu won't require this hack: */ + struct device *msm_iommu_get_ctx(const char *ctx_name); struct device *ctx = msm_iommu_get_ctx(names[i]); if (!ctx) continue; @@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags) * imx drm driver on iMX5 */ dev_err(dev->dev, "failed to load kms\n"); - ret = PTR_ERR(priv->kms); + ret = PTR_ERR(kms); goto fail; } @@ -697,7 +697,7 @@ static struct drm_driver msm_driver = { .gem_vm_ops = &vm_ops, .dumb_create = msm_gem_dumb_create, .dumb_map_offset = msm_gem_dumb_map_offset, - .dumb_destroy = msm_gem_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, #ifdef CONFIG_DEBUG_FS .debugfs_init = msm_debugfs_init, .debugfs_cleanup = msm_debugfs_cleanup, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 29eacfa29cfb..2bae46c66a30 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -319,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, MSM_BO_SCANOUT | MSM_BO_WC, &args->handle); } -int msm_gem_dumb_destroy(struct drm_file 
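The intel_dp.c hunk turns a busy-wait udelay(100) on AUX_NATIVE_REPLY_DEFER into a sleeping usleep_range(), with extra slack when the DPCD says a downstream branch port is present. The decision reduces to one bit test; below is a hedged userspace model with the sleep emulated by nanosleep() (the DPCD offset and bit match the DisplayPort definitions used in the patch).

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DP_DOWNSTREAMPORT_PRESENT 0x005  /* DPCD receiver-capability field */
#define DP_DWN_STRM_PORT_PRESENT  (1 << 0)

static void sleep_us(long us)            /* models usleep_range(us, ...) */
{
    struct timespec ts = { us / 1000000, (us % 1000000) * 1000 };
    nanosleep(&ts, NULL);
}

static void aux_defer_backoff(const uint8_t dpcd[16])
{
    /* Branch devices (DP-to-HDMI/VGA dongles) may run slow I2C buses,
     * so give them more time before retrying the transaction. */
    if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)
        sleep_us(500);
    else
        sleep_us(300);
}

int main(void)
{
    uint8_t dpcd[16] = { 0 };
    dpcd[DP_DOWNSTREAMPORT_PRESENT] = DP_DWN_STRM_PORT_PRESENT;
    aux_defer_backoff(dpcd);
    puts("retried after defer");
    return 0;
}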
*file, struct drm_device *dev, - uint32_t handle) -{ - /* No special work needed, drop the reference and see what falls out */ - return drm_gem_handle_delete(file, handle); -} - int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset) { diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 05ff315e8e9e..b162e98a2953 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = { 25000, 30000, RADEON_SCLK_UP } }; +void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, + u32 *max_clock) +{ + u32 i, clock = 0; + + if ((table == NULL) || (table->count == 0)) { + *max_clock = clock; + return; + } + + for (i = 0; i < table->count; i++) { + if (clock < table->entries[i].clk) + clock = table->entries[i].clk; + } + *max_clock = clock; +} + void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, u32 clock, u16 max_voltage, u16 *voltage) { @@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev, bool disable_mclk_switching; u32 mclk, sclk; u16 vddc, vddci; + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; if ((rdev->pm.dpm.new_active_crtc_count > 1) || btc_dpm_vblank_too_short(rdev)) @@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev, ps->low.vddci = max_limits->vddci; } + /* limit clocks to max supported clocks based on voltage dependency tables */ + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + &max_sclk_vddc); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &max_mclk_vddci); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &max_mclk_vddc); + + if (max_sclk_vddc) { + if (ps->low.sclk > max_sclk_vddc) + ps->low.sclk = max_sclk_vddc; + if (ps->medium.sclk > max_sclk_vddc) + ps->medium.sclk = max_sclk_vddc; + if (ps->high.sclk > max_sclk_vddc) + ps->high.sclk = max_sclk_vddc; + } + if (max_mclk_vddci) { + if (ps->low.mclk > max_mclk_vddci) + ps->low.mclk = max_mclk_vddci; + if (ps->medium.mclk > max_mclk_vddci) + ps->medium.mclk = max_mclk_vddci; + if (ps->high.mclk > max_mclk_vddci) + ps->high.mclk = max_mclk_vddci; + } + if (max_mclk_vddc) { + if (ps->low.mclk > max_mclk_vddc) + ps->low.mclk = max_mclk_vddc; + if (ps->medium.mclk > max_mclk_vddc) + ps->medium.mclk = max_mclk_vddc; + if (ps->high.mclk > max_mclk_vddc) + ps->high.mclk = max_mclk_vddc; + } + /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h index 1a15e0e41950..3b6f12b7760b 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.h +++ b/drivers/gpu/drm/radeon/btc_dpm.h @@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev, struct rv7xx_pl *pl); void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, u32 clock, u16 max_voltage, u16 *voltage); +void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, + u32 *max_clock); void btc_apply_voltage_delta_rules(struct radeon_device *rdev, u16 max_vddc, u16 max_vddci, u16 *vddc, u16 *vddci); diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c 
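btc_get_max_clock_from_voltage_dependency_table() above is a plain maximum scan over a (clock, voltage) table that returns 0 for a missing or empty table, letting callers treat 0 as "no limit"; the *_apply_state_adjust_rules() loops then cap each performance level. Both steps, reduced to standalone C:

#include <stdint.h>
#include <stdio.h>

struct dep_entry { uint32_t clk; uint16_t v; };
struct dep_table { unsigned count; struct dep_entry *entries; };

/* Highest clock the table supports; 0 means "no table, no limit". */
static uint32_t max_clock_from_table(const struct dep_table *t)
{
    uint32_t clock = 0;

    if (!t || !t->count)
        return 0;
    for (unsigned i = 0; i < t->count; i++)
        if (clock < t->entries[i].clk)
            clock = t->entries[i].clk;
    return clock;
}

int main(void)
{
    struct dep_entry e[] = { { 30000, 900 }, { 60000, 1000 }, { 45000, 950 } };
    struct dep_table vddc_sclk = { 3, e };
    uint32_t levels[] = { 20000, 50000, 70000 };
    uint32_t max_sclk = max_clock_from_table(&vddc_sclk);

    for (unsigned i = 0; i < 3; i++)
        if (max_sclk && levels[i] > max_sclk)
            levels[i] = max_sclk;   /* cap to what the voltage table allows */

    for (unsigned i = 0; i < 3; i++)
        printf("level %u: %u\n", i, levels[i]);
    return 0;
}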
index 899627443030..51e947a97edf 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] = }; extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); +extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, + u32 *max_clock); extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, u32 arb_freq_src, u32 arb_freq_dest); extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); @@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_clock_and_voltage_limits *max_limits; bool disable_mclk_switching; u32 sclk, mclk; + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; if ((rdev->pm.dpm.new_active_crtc_count > 1) || @@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, } } + /* limit clocks to max supported clocks based on voltage dependency tables */ + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + &max_sclk_vddc); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &max_mclk_vddci); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &max_mclk_vddc); + + for (i = 0; i < ps->performance_level_count; i++) { + if (max_sclk_vddc) { + if (ps->performance_levels[i].sclk > max_sclk_vddc) + ps->performance_levels[i].sclk = max_sclk_vddc; + } + if (max_mclk_vddci) { + if (ps->performance_levels[i].mclk > max_mclk_vddci) + ps->performance_levels[i].mclk = max_mclk_vddci; + } + if (max_mclk_vddc) { + if (ps->performance_levels[i].mclk > max_mclk_vddc) + ps->performance_levels[i].mclk = max_mclk_vddc; + } + } + /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index adbdb6503b05..d02fd1c045d5 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -2845,10 +2845,8 @@ static void cik_gpu_init(struct radeon_device *rdev) rdev->config.cik.tile_config |= (3 << 0); break; } - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.cik.tile_config |= 1 << 4; - else - rdev->config.cik.tile_config |= 0 << 4; + rdev->config.cik.tile_config |= + ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; rdev->config.cik.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; rdev->config.cik.tile_config |= @@ -4456,8 +4454,8 @@ static int cik_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* size in MB on si */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; rdev->mc.visible_vram_size = rdev->mc.aper_size; si_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); @@ -4735,12 +4733,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev, u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; - char *block = (char *)&mc_client; + char 
block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, + (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; - printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", + printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", protections, vmid, addr, (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", - block, mc_id); + block, mc_client, mc_id); } /** diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 6c398a456d78..f26339028154 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, bool disable_mclk_switching; u32 mclk, sclk; u16 vddc, vddci; + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; if ((rdev->pm.dpm.new_active_crtc_count > 1) || @@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, } } + /* limit clocks to max supported clocks based on voltage dependency tables */ + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + &max_sclk_vddc); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &max_mclk_vddci); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &max_mclk_vddc); + + for (i = 0; i < ps->performance_level_count; i++) { + if (max_sclk_vddc) { + if (ps->performance_levels[i].sclk > max_sclk_vddc) + ps->performance_levels[i].sclk = max_sclk_vddc; + } + if (max_mclk_vddci) { + if (ps->performance_levels[i].mclk > max_mclk_vddci) + ps->performance_levels[i].mclk = max_mclk_vddci; + } + if (max_mclk_vddc) { + if (ps->performance_levels[i].mclk > max_mclk_vddc) + ps->performance_levels[i].mclk = max_mclk_vddc; + } + } + /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 24175717307b..d71333033b2b 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); seq_printf(m, "%u dwords in ring\n", count); - for (j = 0; j <= count; j++) { - i = (rdp + j) & ring->ptr_mask; - seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); + if (ring->ready) { + for (j = 0; j <= count; j++) { + i = (rdp + j) & ring->ptr_mask; + seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); + } } return 0; } diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index e65f211a7be0..5513d8f06252 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(limits->entries[i].usVoltage); + le16_to_cpu(entry->usVoltage); entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); } diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f443010ce90b..b0fa6002af3e 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -257,10 +257,7 @@ 
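The cik_vm_decode_fault() change replaces a cast of &mc_client, which was endian-dependent and not NUL-terminated, with an explicit unpack of the 32-bit client tag into a four-character string. The same decode in isolation (the sample register value is made up):

#include <stdint.h>
#include <stdio.h>

/* Unpack a 32-bit tag like 0x43423030 ("CB00") into a C string. */
static void fourcc_to_str(uint32_t tag, char out[5])
{
    out[0] = tag >> 24;
    out[1] = (tag >> 16) & 0xff;
    out[2] = (tag >> 8) & 0xff;
    out[3] = tag & 0xff;
    out[4] = '\0';          /* the old cast never guaranteed this */
}

int main(void)
{
    char block[5];
    fourcc_to_str(0x43423030, block);   /* illustrative register value */
    printf("VM fault from '%s' (0x%08x)\n", block, 0x43423030);
    return 0;
}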
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator */ - if (ASIC_IS_DCE3(rdev)) { - /* according to the reg specs, this should DCE3.2 only, but in - * practice it seems to cover DCE3.0 as well. - */ + if (ASIC_IS_DCE32(rdev)) { if (dig->dig_encoder == 0) { dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); @@ -276,8 +273,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ } + } else if (ASIC_IS_DCE3(rdev)) { + /* according to the reg specs, this should DCE3.2 only, but in + * practice it seems to cover DCE3.0/3.1 as well. + */ + if (dig->dig_encoder == 0) { + WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); + WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); + WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ + } else { + WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100); + WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); + WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ + } } else { - /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ + /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */ WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | AUDIO_DTO_MODULE(clock / 10)); } diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 5003385a7512..8f7e04538fd6 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = { .wait_for_vblank = &avivo_wait_for_vblank, .set_backlight_level = &atombios_set_backlight_level, .get_backlight_level = &atombios_get_backlight_level, + .hdmi_enable = &r600_hdmi_enable, + .hdmi_setmode = &r600_hdmi_setmode, }, .copy = { .blit = &r600_copy_cpdma, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 404e25d285ba..f79ee184ffd5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); uint16_t data_offset, size; struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; + struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign; uint8_t frev, crev; int i, num_indices; @@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); - + ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*) + ((u8 *)&ss_info->asSS_Info[0]); for (i = 0; i < num_indices; i++) { - if (ss_info->asSS_Info[i].ucSS_Id == id) { + if (ss_assign->ucSS_Id == id) { ss->percentage = - le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); - ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; - ss->step = ss_info->asSS_Info[i].ucSS_Step; - ss->delay = ss_info->asSS_Info[i].ucSS_Delay; - ss->range = ss_info->asSS_Info[i].ucSS_Range; - ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; + le16_to_cpu(ss_assign->usSpreadSpectrumPercentage); + ss->type = ss_assign->ucSpreadSpectrumType; + ss->step = ss_assign->ucSS_Step; + ss->delay = ss_assign->ucSS_Delay; + ss->range = ss_assign->ucSS_Range; + ss->refdiv = ss_assign->ucRecommendedRef_Div; return true; 
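The radeon_atombios.c rework below stops indexing asSS_Info[i]/asSpreadSpectrum[i] directly, because the real record size varies with the table revision, and instead steps through the records with explicit byte strides. A standalone sketch of that walk; the v1-style record layout here is illustrative, and the packed attribute is GCC-specific:

#include <stdint.h>
#include <stdio.h>

/* A v1-style record; real tables carry revision-dependent layouts,
 * which is why the walk must use an explicit per-revision stride. */
struct ss_assign_v1 { uint8_t id; uint16_t percentage; uint8_t step; }
    __attribute__((packed));

static const struct ss_assign_v1 *
find_record(const uint8_t *tbl, size_t stride, unsigned count, uint8_t id)
{
    const uint8_t *p = tbl;

    for (unsigned i = 0; i < count; i++, p += stride)   /* byte stepping */
        if (((const struct ss_assign_v1 *)p)->id == id)
            return (const struct ss_assign_v1 *)p;
    return NULL;
}

int main(void)
{
    struct ss_assign_v1 recs[3] = { { 1, 25, 4 }, { 2, 50, 8 }, { 3, 75, 2 } };
    const struct ss_assign_v1 *r =
        find_record((const uint8_t *)recs, sizeof(recs[0]), 3, 2);

    if (r)
        printf("id %u: %u%%\n", r->id, r->percentage);
    return 0;
}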
} + ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*) + ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT)); } } return false; @@ -1477,6 +1481,12 @@ union asic_ss_info { struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; }; +union asic_ss_assignment { + struct _ATOM_ASIC_SS_ASSIGNMENT v1; + struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2; + struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3; +}; + bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, struct radeon_atom_ss *ss, int id, u32 clock) @@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); uint16_t data_offset, size; union asic_ss_info *ss_info; + union asic_ss_assignment *ss_assign; uint8_t frev, crev; int i, num_indices; @@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT); + ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]); for (i = 0; i < num_indices; i++) { - if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { + if ((ss_assign->v1.ucClockIndication == id) && + (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) { ss->percentage = - le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); - ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; - ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); + le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage); + ss->type = ss_assign->v1.ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz); return true; } + ss_assign = (union asic_ss_assignment *) + ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT)); } break; case 2: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); + ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]); for (i = 0; i < num_indices; i++) { - if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { + if ((ss_assign->v2.ucClockIndication == id) && + (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) { ss->percentage = - le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); - ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; - ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); + le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage); + ss->type = ss_assign->v2.ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz); if ((crev == 2) && ((id == ASIC_INTERNAL_ENGINE_SS) || (id == ASIC_INTERNAL_MEMORY_SS))) ss->rate /= 100; return true; } + ss_assign = (union asic_ss_assignment *) + ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2)); } break; case 3: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); + ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]); for (i = 0; i < num_indices; i++) { - if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { + if ((ss_assign->v3.ucClockIndication == id) && + (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) { ss->percentage = - 
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); - ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; - ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); + le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage); + ss->type = ss_assign->v3.ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz); if ((id == ASIC_INTERNAL_ENGINE_SS) || (id == ASIC_INTERNAL_MEMORY_SS)) ss->rate /= 100; @@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, radeon_atombios_get_igp_ss_overrides(rdev, ss, id); return true; } + ss_assign = (union asic_ss_assignment *) + ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3)); } break; default: diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index ac6ece61a476..66c222836631 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -85,8 +85,9 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) VRAM, also but everything into VRAM on AGP cards to avoid image corruptions */ if (p->ring == R600_RING_TYPE_UVD_INDEX && - (i == 0 || p->rdev->flags & RADEON_IS_AGP)) { - /* TODO: is this still needed for NI+ ? */ + p->rdev->family < CHIP_PALM && + (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { + p->relocs[i].lobj.domain = RADEON_GEM_DOMAIN_VRAM; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e29faa73b574..841d0e09be3e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev, return r; } if ((radeon_testing & 1)) { - radeon_test_moves(rdev); + if (rdev->accel_working) + radeon_test_moves(rdev); + else + DRM_INFO("radeon: acceleration disabled, skipping move tests\n"); } if ((radeon_testing & 2)) { - radeon_test_syncing(rdev); + if (rdev->accel_working) + radeon_test_syncing(rdev); + else + DRM_INFO("radeon: acceleration disabled, skipping sync tests\n"); } if (radeon_benchmarking) { - radeon_benchmark(rdev, radeon_benchmarking); + if (rdev->accel_working) + radeon_benchmark(rdev, radeon_benchmarking); + else + DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 87e1d69e8fdb..ac07ad1d4f8c 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1002,7 +1002,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev) { /* set up the default clocks if the MC ucode is loaded */ if ((rdev->family >= CHIP_BARTS) && - (rdev->family <= CHIP_HAINAN) && + (rdev->family <= CHIP_CAYMAN) && rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, @@ -1046,7 +1046,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev) if (ret) { DRM_ERROR("radeon: dpm resume failed\n"); if ((rdev->family >= CHIP_BARTS) && - (rdev->family <= CHIP_HAINAN) && + (rdev->family <= CHIP_CAYMAN) && rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, @@ -1097,7 +1097,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev) radeon_pm_init_profile(rdev); /* set up the default clocks if the MC ucode is loaded */ if ((rdev->family >= CHIP_BARTS) && - (rdev->family <= CHIP_HAINAN) && + (rdev->family <= CHIP_CAYMAN) && rdev->mc_fw) { if (rdev->pm.default_vddc) 
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, @@ -1183,7 +1183,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) if (ret) { rdev->pm.dpm_enabled = false; if ((rdev->family >= CHIP_BARTS) && - (rdev->family <= CHIP_HAINAN) && + (rdev->family <= CHIP_CAYMAN) && rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 46a25f037b84..18254e1c3e71 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) * packet that is the root issue */ i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; - for (j = 0; j <= (count + 32); j++) { - seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); - i = (i + 1) & ring->ptr_mask; + if (ring->ready) { + for (j = 0; j <= (count + 32); j++) { + seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); + i = (i + 1) & ring->ptr_mask; + } } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 1a01bbff9bfa..a0f11856ddde 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, return -EINVAL; } - /* TODO: is this still necessary on NI+ ? */ - if ((cmd == 0 || cmd == 0x3) && + if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) && (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", start, end); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index cfe5d4d28915..9ace28702c76 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, bool disable_sclk_switching = false; u32 mclk, sclk; u16 vddc, vddci; + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; if ((rdev->pm.dpm.new_active_crtc_count > 1) || @@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, } } + /* limit clocks to max supported clocks based on voltage dependency tables */ + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + &max_sclk_vddc); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &max_mclk_vddci); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &max_mclk_vddc); + + for (i = 0; i < ps->performance_level_count; i++) { + if (max_sclk_vddc) { + if (ps->performance_levels[i].sclk > max_sclk_vddc) + ps->performance_levels[i].sclk = max_sclk_vddc; + } + if (max_mclk_vddci) { + if (ps->performance_levels[i].mclk > max_mclk_vddci) + ps->performance_levels[i].mclk = max_mclk_vddci; + } + if (max_mclk_vddc) { + if (ps->performance_levels[i].mclk > max_mclk_vddc) + ps->performance_levels[i].mclk = max_mclk_vddc; + } + } + /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 7266805d9786..3100fa9cb52f 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev) /* enable VCPU clock */ WREG32(UVD_VCPU_CNTL, 1 << 9); - /* enable UMC */ - WREG32_P(UVD_LMI_CTRL2, 0, ~(1 
<< 8)); + /* enable UMC and NC0 */ + WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13))); /* boot up the VCPU */ WREG32(UVD_SOFT_RESET, 0); diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 4faea979975e..af6edf9b1936 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -194,7 +194,7 @@ int vmbus_connect(void) do { ret = vmbus_negotiate_version(msginfo, version); - if (ret) + if (ret == -ETIMEDOUT) goto cleanup; if (vmbus_connection.conn_state == CONNECTED) diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index 28b03325b872..09988b289622 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -32,13 +32,17 @@ /* * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7) */ +#define WS2008_SRV_MAJOR 1 +#define WS2008_SRV_MINOR 0 +#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR) + #define WIN7_SRV_MAJOR 3 #define WIN7_SRV_MINOR 0 -#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) +#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) #define WIN8_SRV_MAJOR 4 #define WIN8_SRV_MINOR 0 -#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) +#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) /* * Global state maintained for transaction that is being processed. @@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context) struct icmsg_hdr *icmsghdrp; struct icmsg_negotiate *negop = NULL; + int util_fw_version; + int kvp_srv_version; if (kvp_transaction.active) { /* @@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context) if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { /* - * We start with win8 version and if the host cannot - * support that we use the previous version. + * Based on the host, select appropriate + * framework and service versions we will + * negotiate. 
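Both the hv_kvp and hv_util hunks encode service versions as major << 16 | minor and pick the pair to offer based on the negotiated VMBus protocol, so WS2008 hosts get the older framework and service numbers. A standalone model of the selection (the enum is an illustrative stand-in for vmbus_proto_version; the version values mirror the macros in the patch):

#include <stdio.h>

#define SRV_VERSION(maj, min) ((maj) << 16 | (min))

enum host { HOST_WS2008, HOST_WIN7, HOST_WIN8 };  /* models vmbus_proto_version */

static void pick_versions(enum host h, int *fw, int *kvp)
{
    switch (h) {
    case HOST_WS2008:
        *fw  = SRV_VERSION(1, 0);   /* UTIL_WS2K8_FW_VERSION */
        *kvp = SRV_VERSION(1, 0);   /* WS2008_SRV_VERSION    */
        break;
    case HOST_WIN7:
        *fw  = SRV_VERSION(3, 0);   /* UTIL_FW_VERSION  */
        *kvp = SRV_VERSION(3, 0);   /* WIN7_SRV_VERSION */
        break;
    default:                        /* WIN8 and newer */
        *fw  = SRV_VERSION(3, 0);
        *kvp = SRV_VERSION(4, 0);   /* WIN8_SRV_VERSION */
    }
}

int main(void)
{
    int fw, kvp;
    pick_versions(HOST_WS2008, &fw, &kvp);
    printf("fw %d.%d, kvp %d.%d\n", fw >> 16, fw & 0xffff, kvp >> 16, kvp & 0xffff);
    return 0;
}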
*/ - if (vmbus_prep_negotiate_resp(icmsghdrp, negop, - recv_buffer, UTIL_FW_MAJOR_MINOR, - WIN8_SRV_MAJOR_MINOR)) - goto done; - + switch (vmbus_proto_version) { + case (VERSION_WS2008): + util_fw_version = UTIL_WS2K8_FW_VERSION; + kvp_srv_version = WS2008_SRV_VERSION; + break; + case (VERSION_WIN7): + util_fw_version = UTIL_FW_VERSION; + kvp_srv_version = WIN7_SRV_VERSION; + break; + default: + util_fw_version = UTIL_FW_VERSION; + kvp_srv_version = WIN8_SRV_VERSION; + } vmbus_prep_negotiate_resp(icmsghdrp, negop, - recv_buffer, UTIL_FW_MAJOR_MINOR, - WIN7_SRV_MAJOR_MINOR); + recv_buffer, util_fw_version, + kvp_srv_version); } else { kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ @@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context) return; } -done: icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index e4572f3f2834..0c3546224376 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c @@ -26,7 +26,7 @@ #define VSS_MAJOR 5 #define VSS_MINOR 0 -#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR) +#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR) @@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context) if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { vmbus_prep_negotiate_resp(icmsghdrp, negop, - recv_buffer, UTIL_FW_MAJOR_MINOR, - VSS_MAJOR_MINOR); + recv_buffer, UTIL_FW_VERSION, + VSS_VERSION); } else { vss_msg = (struct hv_vss_msg *)&recv_buffer[ sizeof(struct vmbuspipe_hdr) + diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 9113850edb43..62dfd246b948 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -28,17 +28,32 @@ #include <linux/reboot.h> #include <linux/hyperv.h> -#define SHUTDOWN_MAJOR 3 -#define SHUTDOWN_MINOR 0 -#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR) -#define TIMESYNCH_MAJOR 3 -#define TIMESYNCH_MINOR 0 -#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR) +#define SD_MAJOR 3 +#define SD_MINOR 0 +#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR) -#define HEARTBEAT_MAJOR 3 -#define HEARTBEAT_MINOR 0 -#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR) +#define SD_WS2008_MAJOR 1 +#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR) + +#define TS_MAJOR 3 +#define TS_MINOR 0 +#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR) + +#define TS_WS2008_MAJOR 1 +#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR) + +#define HB_MAJOR 3 +#define HB_MINOR 0 +#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR) + +#define HB_WS2008_MAJOR 1 +#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR) + +static int sd_srv_version; +static int ts_srv_version; +static int hb_srv_version; +static int util_fw_version; static void shutdown_onchannelcallback(void *context); static struct hv_util_service util_shutdown = { @@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context) if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { vmbus_prep_negotiate_resp(icmsghdrp, negop, - shut_txf_buf, UTIL_FW_MAJOR_MINOR, - SHUTDOWN_MAJOR_MINOR); + shut_txf_buf, util_fw_version, + sd_srv_version); } else { shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ @@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context) struct icmsg_hdr *icmsghdrp; struct ictimesync_data *timedatap; u8 *time_txf_buf = util_timesynch.recv_buffer; + struct icmsg_negotiate *negop = NULL; vmbus_recvpacket(channel, time_txf_buf, PAGE_SIZE, &recvlen, &requestid); @@ -225,9 +241,10 @@ static 
void timesync_onchannelcallback(void *context) sizeof(struct vmbuspipe_hdr)]; if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { - vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf, - UTIL_FW_MAJOR_MINOR, - TIMESYNCH_MAJOR_MINOR); + vmbus_prep_negotiate_resp(icmsghdrp, negop, + time_txf_buf, + util_fw_version, + ts_srv_version); } else { timedatap = (struct ictimesync_data *)&time_txf_buf[ sizeof(struct vmbuspipe_hdr) + @@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context) struct icmsg_hdr *icmsghdrp; struct heartbeat_msg_data *heartbeat_msg; u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; + struct icmsg_negotiate *negop = NULL; vmbus_recvpacket(channel, hbeat_txf_buf, PAGE_SIZE, &recvlen, &requestid); @@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context) sizeof(struct vmbuspipe_hdr)]; if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { - vmbus_prep_negotiate_resp(icmsghdrp, NULL, - hbeat_txf_buf, UTIL_FW_MAJOR_MINOR, - HEARTBEAT_MAJOR_MINOR); + vmbus_prep_negotiate_resp(icmsghdrp, negop, + hbeat_txf_buf, util_fw_version, + hb_srv_version); } else { heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ @@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev, goto error; hv_set_drvdata(dev, srv); + /* + * Based on the host; initialize the framework and + * service version numbers we will negotiate. + */ + switch (vmbus_proto_version) { + case (VERSION_WS2008): + util_fw_version = UTIL_WS2K8_FW_VERSION; + sd_srv_version = SD_WS2008_VERSION; + ts_srv_version = TS_WS2008_VERSION; + hb_srv_version = HB_WS2008_VERSION; + break; + + default: + util_fw_version = UTIL_FW_VERSION; + sd_srv_version = SD_VERSION; + ts_srv_version = TS_VERSION; + hb_srv_version = HB_VERSION; + } + return 0; error: diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 62c2e32e25ef..98814d12a604 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -525,16 +525,25 @@ static int applesmc_init_smcreg_try(void) { struct applesmc_registers *s = &smcreg; bool left_light_sensor, right_light_sensor; + unsigned int count; u8 tmp[1]; int ret; if (s->init_complete) return 0; - ret = read_register_count(&s->key_count); + ret = read_register_count(&count); if (ret) return ret; + if (s->cache && s->key_count != count) { + pr_warn("key count changed from %d to %d\n", + s->key_count, count); + kfree(s->cache); + s->cache = NULL; + } + s->key_count = count; + if (!s->cache) s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); if (!s->cache) diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index dbecf08399f8..5888feef1ac5 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -98,6 +98,8 @@ #define DW_IC_ERR_TX_ABRT 0x1 +#define DW_IC_TAR_10BITADDR_MASTER BIT(12) + /* * status codes */ @@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; - u32 ic_con; + u32 ic_con, ic_tar = 0; /* Disable the adapter */ __i2c_dw_enable(dev, false); - /* set the slave (target) address */ - dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR); - /* if the slave address is ten bit address, enable 10BITADDR */ ic_con = dw_readl(dev, DW_IC_CON); - if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) + if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { ic_con |= DW_IC_CON_10BITADDR_MASTER; - else + /* + * If I2C_DYNAMIC_TAR_UPDATE is set, the 
10-bit addressing + * mode has to be enabled via bit 12 of IC_TAR register. + * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be + * detected from registers. + */ + ic_tar = DW_IC_TAR_10BITADDR_MASTER; + } else { ic_con &= ~DW_IC_CON_10BITADDR_MASTER; + } + dw_writel(dev, ic_con, DW_IC_CON); + /* + * Set the slave (target) address and enable 10-bit addressing mode + * if applicable. + */ + dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); + /* Enable the adapter */ __i2c_dw_enable(dev, true); diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 8ed79a086f85..1672effbcebb 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, desc = &priv->hw[priv->head]; + /* Initialize the DMA buffer */ + memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer)); + /* Initialize the descriptor */ memset(desc, 0, sizeof(struct ismt_desc)); desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 7f3a47443494..d3e9cc3153a9 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data) ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT; - writel_relaxed(data_reg_lo, + writel(data_reg_lo, drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); - writel_relaxed(data_reg_hi, + writel(data_reg_hi, drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); } else { @@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = { MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); #ifdef CONFIG_OF +#ifdef CONFIG_HAVE_CLK static int mv64xxx_calc_freq(const int tclk, const int n, const int m) { @@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, return false; return true; } +#endif /* CONFIG_HAVE_CLK */ static int mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, struct device *dev) { - const struct of_device_id *device; - struct device_node *np = dev->of_node; - u32 bus_freq, tclk; - int rc = 0; - /* CLK is mandatory when using DT to describe the i2c bus. We * need to know tclk in order to calculate bus clock * factors. 
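With the I2C_DYNAMIC_TAR_UPDATE configuration the DesignWare controller takes the 10-bit addressing mode from bit 12 of IC_TAR per transfer, and since that build option is not discoverable from registers the driver now sets the bit whenever the message uses a 10-bit address. A compact model of composing the two registers (constants as in the patch; the transfer struct is simplified):

#include <stdint.h>
#include <stdio.h>

#define DW_IC_CON_10BITADDR_MASTER (1u << 4)
#define DW_IC_TAR_10BITADDR_MASTER (1u << 12)
#define I2C_M_TEN                  0x0010   /* ten-bit chip address flag */

struct xfer { uint16_t addr; uint16_t flags; };

static void setup_tar(const struct xfer *msg, uint32_t *ic_con, uint32_t *ic_tar)
{
    uint32_t tar_bits = 0;

    if (msg->flags & I2C_M_TEN) {
        *ic_con |= DW_IC_CON_10BITADDR_MASTER;
        /* With I2C_DYNAMIC_TAR_UPDATE the mode must also be set here;
         * setting it when unused is harmless, so set it always. */
        tar_bits = DW_IC_TAR_10BITADDR_MASTER;
    } else {
        *ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
    }
    *ic_tar = msg->addr | tar_bits;
}

int main(void)
{
    struct xfer msg = { .addr = 0x2c5, .flags = I2C_M_TEN };
    uint32_t ic_con = 0, ic_tar = 0;

    setup_tar(&msg, &ic_con, &ic_tar);
    printf("IC_CON=0x%x IC_TAR=0x%x\n", (unsigned)ic_con, (unsigned)ic_tar);
    return 0;
}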
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, /* Have OF but no CLK */ return -ENODEV; #else + const struct of_device_id *device; + struct device_node *np = dev->of_node; + u32 bus_freq, tclk; + int rc = 0; + if (IS_ERR(drv_data->clk)) { rc = -ENODEV; goto out; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 3535f3c0f7b4..3747b9bf67d6 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev) i2c_del_adapter(&i2c->adap); - clk_disable_unprepare(i2c->clk); - if (pdev->dev.of_node && IS_ERR(i2c->pctrl)) s3c24xx_i2c_dt_gpio_free(i2c); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index b39f6f0b45f2..0f12382aa35d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -498,7 +498,7 @@ struct cached_dev { */ atomic_t has_dirty; - struct ratelimit writeback_rate; + struct bch_ratelimit writeback_rate; struct delayed_work writeback_rate_update; /* @@ -507,10 +507,9 @@ struct cached_dev { */ sector_t last_read; - /* Number of writeback bios in flight */ - atomic_t in_flight; + /* Limit number of writeback bios in flight */ + struct semaphore in_flight; struct closure_with_timer writeback; - struct closure_waitlist writeback_wait; struct keybuf writeback_keys; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 8010eed06a51..22d1ae72c282 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search) /* Mergesort */ +static void sort_key_next(struct btree_iter *iter, + struct btree_iter_set *i) +{ + i->k = bkey_next(i->k); + + if (i->k == i->end) + *i = iter->data[--iter->used]; +} + static void btree_sort_fixup(struct btree_iter *iter) { while (iter->used > 1) { struct btree_iter_set *top = iter->data, *i = top + 1; - struct bkey *k; if (iter->used > 2 && btree_iter_cmp(i[0], i[1])) i++; - for (k = i->k; - k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0; - k = bkey_next(k)) - if (top->k > i->k) - __bch_cut_front(top->k, k); - else if (KEY_SIZE(k)) - bch_cut_back(&START_KEY(k), top->k); - - if (top->k < i->k || k == i->k) + if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0) break; - heap_sift(iter, i - top, btree_iter_cmp); + if (!KEY_SIZE(i->k)) { + sort_key_next(iter, i); + heap_sift(iter, i - top, btree_iter_cmp); + continue; + } + + if (top->k > i->k) { + if (bkey_cmp(top->k, i->k) >= 0) + sort_key_next(iter, i); + else + bch_cut_front(top->k, i->k); + + heap_sift(iter, i - top, btree_iter_cmp); + } else { + /* can't happen because of comparison func */ + BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); + bch_cut_back(&START_KEY(i->k), top->k); + } } } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index f9764e61978b..f42fc7ed9cd6 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b) return; err: - bch_cache_set_error(b->c, "io error reading bucket %lu", + bch_cache_set_error(b->c, "io error reading bucket %zu", PTR_BUCKET_NR(b->c, &b->key, 0)); } @@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, return SHRINK_STOP; /* Return -1 if we can't do anything right now */ - if (sc->gfp_mask & __GFP_WAIT) + if (sc->gfp_mask & __GFP_IO) mutex_lock(&c->bucket_lock); else if (!mutex_trylock(&c->bucket_lock)) return -1; diff --git 
a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ba95ab84b2be..8435f81e5d85 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list, bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); pr_debug("%u journal buckets", ca->sb.njournal_buckets); - /* Read journal buckets ordered by golden ratio hash to quickly + /* + * Read journal buckets ordered by golden ratio hash to quickly * find a sequence of buckets with valid journal entries */ for (i = 0; i < ca->sb.njournal_buckets; i++) { @@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list, goto bsearch; } - /* If that fails, check all the buckets we haven't checked + /* + * If that fails, check all the buckets we haven't checked * already */ pr_debug("falling back to linear search"); - for (l = 0; l < ca->sb.njournal_buckets; l++) { - if (test_bit(l, bitmap)) - continue; - + for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); + l < ca->sb.njournal_buckets; + l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1)) if (read_bucket(l)) goto bsearch; - } + + if (list_empty(list)) + continue; bsearch: /* Binary search */ m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); @@ -197,10 +200,12 @@ bsearch: r = m; } - /* Read buckets in reverse order until we stop finding more + /* + * Read buckets in reverse order until we stop finding more * journal entries */ - pr_debug("finishing up"); + pr_debug("finishing up: m %u njournal_buckets %u", + m, ca->sb.njournal_buckets); l = m; while (1) { @@ -228,9 +233,10 @@ bsearch: } } - c->journal.seq = list_entry(list->prev, - struct journal_replay, - list)->j.seq; + if (!list_empty(list)) + c->journal.seq = list_entry(list->prev, + struct journal_replay, + list)->j.seq; return 0; #undef read_bucket @@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca) return; } - switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { + switch (atomic_read(&ja->discard_in_flight)) { case DISCARD_IN_FLIGHT: return; @@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl) if (cl) BUG_ON(!closure_wait(&w->wait, cl)); + closure_flush(&c->journal.io); __journal_try_write(c, true); } } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 786a1a4f74d8..71eb233b9ace 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -997,14 +997,17 @@ static void request_write(struct cached_dev *dc, struct search *s) } else { bch_writeback_add(dc); - if (s->op.flush_journal) { + if (bio->bi_rw & REQ_FLUSH) { /* Also need to send a flush to the backing device */ - s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, - dc->disk.bio_split); + struct bio *flush = bio_alloc_bioset(0, GFP_NOIO, + dc->disk.bio_split); - bio->bi_size = 0; - bio->bi_vcnt = 0; - closure_bio_submit(bio, cl, s->d); + flush->bi_rw = WRITE_FLUSH; + flush->bi_bdev = bio->bi_bdev; + flush->bi_end_io = request_endio; + flush->bi_private = cl; + + closure_bio_submit(flush, cl, s->d); } else { s->op.cache_bio = bio; } diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 4fe6ab2fbe2e..924dcfdae111 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -223,8 +223,13 @@ STORE(__cached_dev) } if (attr == &sysfs_label) { - /* note: endlines are preserved */ - memcpy(dc->sb.label, buf, SB_LABEL_SIZE); + if (size > SB_LABEL_SIZE) + return -EINVAL; + memcpy(dc->sb.label, buf, size); + if 
(size < SB_LABEL_SIZE) + dc->sb.label[size] = '\0'; + if (size && dc->sb.label[size - 1] == '\n') + dc->sb.label[size - 1] = '\0'; bch_write_bdev_super(dc, NULL); if (dc->disk.c) { memcpy(dc->disk.c->uuids[dc->disk.id].label, diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 98eb81159a22..420dad545c7d 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time) stats->last = now ?: 1; } -unsigned bch_next_delay(struct ratelimit *d, uint64_t done) +/** + * bch_next_delay() - increment @d by the amount of work done, and return how + * long to delay until the next time to do some work. + * + * @d - the struct bch_ratelimit to update + * @done - the amount of work done, in arbitrary units + * + * Returns the amount of time to delay by, in jiffies + */ +uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) { uint64_t now = local_clock(); diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 1ae2a73ad85f..ea345c6896f4 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units) (ewma) >> factor; \ }) -struct ratelimit { +struct bch_ratelimit { + /* Next time we want to do some work, in nanoseconds */ uint64_t next; + + /* + * Rate at which we want to do work, in units per nanosecond + * The units here correspond to the units passed to bch_next_delay() + */ unsigned rate; }; -static inline void ratelimit_reset(struct ratelimit *d) +static inline void bch_ratelimit_reset(struct bch_ratelimit *d) { d->next = local_clock(); } -unsigned bch_next_delay(struct ratelimit *d, uint64_t done); +uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done); #define __DIV_SAFE(n, d, zero) \ ({ \ diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 22cbff551628..ba3ee48320f2 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work) static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) { + uint64_t ret; + if (atomic_read(&dc->disk.detaching) || !dc->writeback_percent) return 0; - return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); + ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); + + return min_t(uint64_t, ret, HZ); } /* Background writeback */ @@ -208,7 +212,7 @@ normal_refill: up_write(&dc->writeback_lock); - ratelimit_reset(&dc->writeback_rate); + bch_ratelimit_reset(&dc->writeback_rate); /* Punt to workqueue only so we don't recurse and blow the stack */ continue_at(cl, read_dirty, dirty_wq); @@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl) } bch_keybuf_del(&dc->writeback_keys, w); - atomic_dec_bug(&dc->in_flight); - - closure_wake_up(&dc->writeback_wait); + up(&dc->in_flight); closure_return_with_destructor(cl, dirty_io_destructor); } @@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl) closure_bio_submit(&io->bio, cl, &io->dc->disk); - continue_at(cl, write_dirty_finish, dirty_wq); + continue_at(cl, write_dirty_finish, system_wq); } static void read_dirty_endio(struct bio *bio, int error) @@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl) closure_bio_submit(&io->bio, cl, &io->dc->disk); - continue_at(cl, write_dirty, dirty_wq); + continue_at(cl, write_dirty, system_wq); } static void read_dirty(struct closure *cl) @@ -394,12 +396,8 @@ static void 
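The bcache sysfs label store above no longer copies a full SB_LABEL_SIZE regardless of input: it rejects oversized writes, NUL-terminates short ones, and strips the trailing newline that sysfs writes usually carry. The bounded-copy pattern on its own (the size constant is illustrative):

#include <stdio.h>
#include <string.h>

#define SB_LABEL_SIZE 32

/* Returns 0 on success, -1 if the new label is too long. Like the
 * on-disk field, the label need not be NUL-terminated when exactly full. */
static int set_label(char label[SB_LABEL_SIZE], const char *buf, size_t size)
{
    if (size > SB_LABEL_SIZE)
        return -1;
    memcpy(label, buf, size);
    if (size < SB_LABEL_SIZE)
        label[size] = '\0';
    if (size && label[size - 1] == '\n')   /* echo appends a newline */
        label[size - 1] = '\0';
    return 0;
}

int main(void)
{
    char label[SB_LABEL_SIZE];

    if (set_label(label, "backup0\n", 8) == 0)
        printf("label=\"%s\"\n", label);   /* prints label="backup0" */
    return 0;
}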
read_dirty(struct closure *cl) if (delay > 0 && (KEY_START(&w->key) != dc->last_read || - jiffies_to_msecs(delay) > 50)) { - w->private = NULL; - - closure_delay(&dc->writeback, delay); - continue_at(cl, read_dirty, dirty_wq); - } + jiffies_to_msecs(delay) > 50)) + delay = schedule_timeout_uninterruptible(delay); dc->last_read = KEY_OFFSET(&w->key); @@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl) trace_bcache_writeback(&w->key); - closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); + down(&dc->in_flight); + closure_call(&io->cl, read_dirty_submit, NULL, cl); delay = writeback_delay(dc, KEY_SIZE(&w->key)); - - atomic_inc(&dc->in_flight); - - if (!closure_wait_event(&dc->writeback_wait, cl, - atomic_read(&dc->in_flight) < 64)) - continue_at(cl, read_dirty, dirty_wq); } if (0) { @@ -442,7 +435,11 @@ err: bch_keybuf_del(&dc->writeback_keys, w); } - refill_dirty(cl); + /* + * Wait for outstanding writeback IOs to finish (and keybuf slots to be + * freed) before refilling again + */ + continue_at(cl, refill_dirty, dirty_wq); } /* Init */ @@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc) void bch_cached_dev_writeback_init(struct cached_dev *dc) { + sema_init(&dc->in_flight, 64); closure_init_unlocked(&dc->writeback); init_rwsem(&dc->writeback_lock); @@ -513,7 +511,7 @@ void bch_writeback_exit(void) int __init bch_writeback_init(void) { - dirty_wq = create_singlethread_workqueue("bcache_writeback"); + dirty_wq = create_workqueue("bcache_writeback"); if (!dirty_wq) return -ENOMEM; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index ea49834377c8..2a20986a2fec 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -19,8 +19,6 @@ #define DM_MSG_PREFIX "io" #define DM_IO_MAX_REGIONS BITS_PER_LONG -#define MIN_IOS 16 -#define MIN_BIOS 16 struct dm_io_client { mempool_t *pool; @@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache; struct dm_io_client *dm_io_client_create(void) { struct dm_io_client *client; + unsigned min_ios = dm_get_reserved_bio_based_ios(); client = kmalloc(sizeof(*client), GFP_KERNEL); if (!client) return ERR_PTR(-ENOMEM); - client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); + client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache); if (!client->pool) goto bad; - client->bios = bioset_create(MIN_BIOS, 0); + client->bios = bioset_create(min_ios, 0); if (!client->bios) goto bad; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index b759a127f9c3..de570a558764 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -7,6 +7,7 @@ #include <linux/device-mapper.h> +#include "dm.h" #include "dm-path-selector.h" #include "dm-uevent.h" @@ -116,8 +117,6 @@ struct dm_mpath_io { typedef int (*action_fn) (struct pgpath *pgpath); -#define MIN_IOS 256 /* Mempool size */ - static struct kmem_cache *_mpio_cache; static struct workqueue_struct *kmultipathd, *kmpath_handlerd; @@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg, static struct multipath *alloc_multipath(struct dm_target *ti) { struct multipath *m; + unsigned min_ios = dm_get_reserved_rq_based_ios(); m = kzalloc(sizeof(*m), GFP_KERNEL); if (m) { @@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti) INIT_WORK(&m->trigger_event, trigger_event); init_waitqueue_head(&m->pg_init_wait); mutex_init(&m->work_mutex); - m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); + m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache); if (!m->mpio_pool) { kfree(m); return NULL; @@ 
-1268,6 +1268,7 @@ static int noretry_error(int error) case -EREMOTEIO: case -EILSEQ: case -ENODATA: + case -ENOSPC: return 1; } @@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone, if (!error && !clone->errors) return 0; /* I/O complete */ - if (noretry_error(error)) + if (noretry_error(error)) { + if ((clone->cmd_flags & REQ_WRITE_SAME) && + !clone->q->limits.max_write_same_sectors) { + struct queue_limits *limits; + + /* device doesn't really support WRITE SAME, disable it */ + limits = dm_get_queue_limits(dm_table_get_md(m->ti->table)); + limits->max_write_same_sectors = 0; + } return error; + } if (mpio->pgpath) fail_path(mpio->pgpath); diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3ac415675b6c..4caa8e6d59d7 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, */ INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); - flush_work(&req.work); + flush_workqueue(ps->metadata_wq); return req.result; } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c434e5aab2df..aec57d76db5d 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -725,17 +725,16 @@ static int calc_max_buckets(void) */ static int init_hash_tables(struct dm_snapshot *s) { - sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; + sector_t hash_size, cow_dev_size, max_buckets; /* * Calculate based on the size of the original volume or * the COW volume... */ cow_dev_size = get_dev_size(s->cow->bdev); - origin_dev_size = get_dev_size(s->origin->bdev); max_buckets = calc_max_buckets(); - hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; + hash_size = cow_dev_size >> s->store->chunk_shift; hash_size = min(hash_size, max_buckets); if (hash_size < 64) diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 8ae31e8d3d64..3d404c1371ed 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry, struct dm_stat_percpu *p; /* - * For strict correctness we should use local_irq_disable/enable + * For strict correctness we should use local_irq_save/restore * instead of preempt_disable/enable. * - * This is racy if the driver finishes bios from non-interrupt - * context as well as from interrupt context or from more different - * interrupts. + * preempt_disable/enable is racy if the driver finishes bios + * from non-interrupt context as well as from interrupt context + * or from more different interrupts. * - * However, the race only results in not counting some events, - * so it is acceptable. + * On 64-bit architectures the race only results in not counting some + * events, so it is acceptable. On 32-bit architectures the race could + * cause the counter going off by 2^32, so we need to do proper locking + * there. * * part_stat_lock()/part_stat_unlock() have this race too. 
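The dm-stats comment above is about 64-bit counters on 32-bit machines: an increment compiles to two 32-bit stores, so an interrupt between them can leave a reader off by 2^32, which is why the patch uses local_irq_save() there instead of preempt_disable(). Portable C11 makes the same distinction observable; this sketch only probes lock-freedom, it does not reproduce kernel locking:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t sectors;

int main(void)
{
    if (atomic_is_lock_free(&sectors))
        puts("64-bit atomics are lock-free here: cheap increments suffice");
    else
        puts("not lock-free: the runtime falls back to locking, analogous "
             "to the local_irq_save() path the patch adds for 32-bit kernels");

    atomic_fetch_add(&sectors, 8);     /* one bio worth of sectors */
    printf("sectors=%llu\n", (unsigned long long)atomic_load(&sectors));
    return 0;
}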
*/ +#if BITS_PER_LONG == 32 + unsigned long flags; + local_irq_save(flags); +#else preempt_disable(); +#endif p = &s->stat_percpu[smp_processor_id()][entry]; if (!end) { @@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry, p->ticks[idx] += duration; } +#if BITS_PER_LONG == 32 + local_irq_restore(flags); +#else preempt_enable(); +#endif } static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ed063427d676..2c0cf511ec23 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) * them down to the data device. The thin device's discard * processing will cause mappings to be removed from the btree. */ + ti->discard_zeroes_data_unsupported = true; if (pf.discard_enabled && pf.discard_passdown) { ti->num_discard_bios = 1; @@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) * thin devices' discard limits consistent). */ ti->discards_supported = true; - ti->discard_zeroes_data_unsupported = true; } ti->private = pt; @@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) * They get transferred to the live pool in bind_control_target() * called from pool_preresume(). */ - if (!pt->adjusted_pf.discard_enabled) + if (!pt->adjusted_pf.discard_enabled) { + /* + * Must explicitly disallow stacking discard limits otherwise the + * block layer will stack them if pool's data device has support. + * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the + * user to see that, so make sure to set all discard limits to 0. + */ + limits->discard_granularity = 0; return; + } disable_passdown_if_not_supported(pt); @@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); /* In case the pool supports discards, pass them on. */ + ti->discard_zeroes_data_unsupported = true; if (tc->pool->pf.discard_enabled) { ti->discards_supported = true; ti->num_discard_bios = 1; - ti->discard_zeroes_data_unsupported = true; /* Discard bios must be split on a block boundary */ ti->split_discard_bios = true; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6a5e9ed2fcc3..b3e26c7d1417 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -211,10 +211,55 @@ struct dm_md_mempools { struct bio_set *bs; }; -#define MIN_IOS 256 +#define RESERVED_BIO_BASED_IOS 16 +#define RESERVED_REQUEST_BASED_IOS 256 +#define RESERVED_MAX_IOS 1024 static struct kmem_cache *_io_cache; static struct kmem_cache *_rq_tio_cache; +/* + * Bio-based DM's mempools' reserved IOs set by the user. + */ +static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; + +/* + * Request-based DM's mempools' reserved IOs set by the user. 
+ */ +static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; + +static unsigned __dm_get_reserved_ios(unsigned *reserved_ios, + unsigned def, unsigned max) +{ + unsigned ios = ACCESS_ONCE(*reserved_ios); + unsigned modified_ios = 0; + + if (!ios) + modified_ios = def; + else if (ios > max) + modified_ios = max; + + if (modified_ios) { + (void)cmpxchg(reserved_ios, ios, modified_ios); + ios = modified_ios; + } + + return ios; +} + +unsigned dm_get_reserved_bio_based_ios(void) +{ + return __dm_get_reserved_ios(&reserved_bio_based_ios, + RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS); +} +EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); + +unsigned dm_get_reserved_rq_based_ios(void) +{ + return __dm_get_reserved_ios(&reserved_rq_based_ios, + RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS); +} +EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); + static int __init local_init(void) { int r = -ENOMEM; @@ -2278,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md) } /* + * The queue_limits are only valid as long as you have a reference + * count on 'md'. + */ +struct queue_limits *dm_get_queue_limits(struct mapped_device *md) +{ + BUG_ON(!atomic_read(&md->holders)); + return &md->queue->limits; +} +EXPORT_SYMBOL_GPL(dm_get_queue_limits); + +/* * Fully initialize a request-based queue (->elevator, ->request_fn, etc). */ static int dm_init_request_based_queue(struct mapped_device *md) @@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u if (type == DM_TYPE_BIO_BASED) { cachep = _io_cache; - pool_size = 16; + pool_size = dm_get_reserved_bio_based_ios(); front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); } else if (type == DM_TYPE_REQUEST_BASED) { cachep = _rq_tio_cache; - pool_size = MIN_IOS; + pool_size = dm_get_reserved_rq_based_ios(); front_pad = offsetof(struct dm_rq_clone_bio_info, clone); /* per_bio_data_size is not used. See __bind_mempools(). 
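Aside (editor's sketch): __dm_get_reserved_ios() above is the whole trick, restated here in isolation so the lock-free clamp is easier to follow. Because the parameter is writable at runtime, it is validated at the point of use rather than at write time; when the stored value is 0 or above the cap, cmpxchg() writes a corrected value back at most once, and losing that race is harmless since whichever writer wins has stored a valid value. demo_clamp_param is an invented name.

#include <linux/compiler.h>
#include <linux/atomic.h>

static unsigned demo_clamp_param(unsigned *param, unsigned def, unsigned max)
{
	unsigned v = ACCESS_ONCE(*param);
	unsigned fixed = 0;

	if (!v)			/* 0 means "use the built-in default" */
		fixed = def;
	else if (v > max)	/* cap runaway user values */
		fixed = max;

	if (fixed) {
		(void)cmpxchg(param, v, fixed);
		v = fixed;
	}
	return v;
}

With the module_param() declarations added further down in dm.c, the tunables should appear as /sys/module/dm_mod/parameters/reserved_bio_based_ios and reserved_rq_based_ios (assuming the usual dm_mod module name).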
*/ WARN_ON(per_bio_data_size != 0); } else goto out; - pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); + pools->io_pool = mempool_create_slab_pool(pool_size, cachep); if (!pools->io_pool) goto out; @@ -2924,6 +2980,13 @@ module_exit(dm_exit); module_param(major, uint, 0); MODULE_PARM_DESC(major, "The major number of the device mapper"); + +module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); + +module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"); + MODULE_DESCRIPTION(DM_NAME " driver"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 5e604cc7b4aa..1d1ad7b7e527 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools); /* * Helpers that are used by DM core */ +unsigned dm_get_reserved_bio_based_ios(void); +unsigned dm_get_reserved_rq_based_ios(void); + static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) { return !maxlen || strlen(result) + 1 >= maxlen; diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index 04fd38567729..226c3f3cd3e8 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c @@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev) dev->iamthif_ioctl = false; dev->iamthif_state = MEI_IAMTHIF_IDLE; dev->iamthif_timer = 0; + dev->iamthif_stall_timer = 0; } /** diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 6d0282c08a06..cd2033cd7120 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) if (cl->reading_state != MEI_READ_COMPLETE && !waitqueue_active(&cl->rx_wait)) { + mutex_unlock(&dev->device_lock); if (wait_event_interruptible(cl->rx_wait, - (MEI_READ_COMPLETE == cl->reading_state))) { + cl->reading_state == MEI_READ_COMPLETE || + mei_cl_is_transitioning(cl))) { + if (signal_pending(current)) return -EINTR; return -ERESTARTSYS; diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 6374ed99e233..c8396e582f1c 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl) cl->dev->dev_state == MEI_DEV_ENABLED && cl->state == MEI_FILE_CONNECTED); } +static inline bool mei_cl_is_transitioning(struct mei_cl *cl) +{ + return (MEI_FILE_INITIALIZING == cl->state || + MEI_FILE_DISCONNECTED == cl->state || + MEI_FILE_DISCONNECTING == cl->state); +} bool mei_cl_is_other_connecting(struct mei_cl *cl); int mei_cl_disconnect(struct mei_cl *cl); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index f1c974a0bc0d..9b3a0fb7f265 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev) struct mei_me_client *clients; int b; + dev->me_clients_num = 0; + dev->me_client_presentation_num = 0; + dev->me_client_index = 0; + /* count how many ME clients we have */ for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) dev->me_clients_num++; - if (dev->me_clients_num <= 0) + if (dev->me_clients_num == 0) return; kfree(dev->me_clients); @@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) struct hbm_props_request *prop_req; const size_t len = sizeof(struct 
hbm_props_request); unsigned long next_client_index; - u8 client_num; + unsigned long client_num; client_num = dev->me_client_presentation_num; @@ -680,8 +684,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->dev_state == MEI_DEV_INIT_CLIENTS && dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { dev->init_clients_timer = 0; - dev->me_client_presentation_num = 0; - dev->me_client_index = 0; mei_hbm_me_cl_allocate(dev); dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 92c73118b13c..6197018e2f16 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled) memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); } + /* we're already in reset, cancel the init timer */ + dev->init_clients_timer = 0; + dev->me_clients_num = 0; dev->rd_msg_hdr = 0; dev->wd_pending = false; diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 9aa3b78067d3..c71420ef1e37 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -252,19 +252,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, mutex_unlock(&dev->device_lock); if (wait_event_interruptible(cl->rx_wait, - (MEI_READ_COMPLETE == cl->reading_state || - MEI_FILE_INITIALIZING == cl->state || - MEI_FILE_DISCONNECTED == cl->state || - MEI_FILE_DISCONNECTING == cl->state))) { + MEI_READ_COMPLETE == cl->reading_state || + mei_cl_is_transitioning(cl))) { + if (signal_pending(current)) return -EINTR; return -ERESTARTSYS; } mutex_lock(&dev->device_lock); - if (MEI_FILE_INITIALIZING == cl->state || - MEI_FILE_DISCONNECTED == cl->state || - MEI_FILE_DISCONNECTING == cl->state) { + if (mei_cl_is_transitioning(cl)) { rets = -EBUSY; goto out; } diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 7b918b2fb894..456b322013e2 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -396,9 +396,9 @@ struct mei_device { struct mei_me_client *me_clients; /* Note: memory has to be allocated */ DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); - u8 me_clients_num; - u8 me_client_presentation_num; - u8 me_client_index; + unsigned long me_clients_num; + unsigned long me_client_presentation_num; + unsigned long me_client_index; struct mei_cl wd_cl; enum mei_wd_states wd_state; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e8ccf6c0f08a..bdd64b1b4817 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev) pci_enable_bridge(dev->bus->self); - if (pci_is_enabled(dev)) + if (pci_is_enabled(dev)) { + if (!dev->is_busmaster) { + dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n"); + pci_set_master(dev); + } return; + } + retval = pci_enable_device(dev); if (retval) dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 47c5888461ff..a2e52a0c53c9 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -41,7 +41,6 @@ struct imx_drm_device { struct list_head encoder_list; struct list_head connector_list; struct mutex mutex; - int references; int pipes; struct drm_fbdev_cma *fbhelper; }; @@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void) } } - imxdrm->references++; - return imxdrm->drm; unwind_crtc: @@ -280,8 +277,6 @@ void 
imx_drm_device_put(void) list_for_each_entry(enc, &imxdrm->encoder_list, list) module_put(enc->owner); - imxdrm->references--; - mutex_unlock(&imxdrm->mutex); } EXPORT_SYMBOL_GPL(imx_drm_device_put); @@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, mutex_lock(&imxdrm->mutex); - if (imxdrm->references) { + if (imxdrm->drm->open_count) { ret = -EBUSY; goto err_busy; } @@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder, mutex_lock(&imxdrm->mutex); - if (imxdrm->references) { + if (imxdrm->drm->open_count) { ret = -EBUSY; goto err_busy; } @@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector, mutex_lock(&imxdrm->mutex); - if (imxdrm->references) { + if (imxdrm->drm->open_count) { ret = -EBUSY; goto err_busy; } diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c index 2644edf438c1..c8b43442dc74 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c +++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c @@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob) if (nob > ulsm_nob) return (-EINVAL); - if (copy_to_user (ulsm, lsm, sizeof(ulsm))) + if (copy_to_user (ulsm, lsm, sizeof(*ulsm))) return (-EFAULT); for (i = 0; i < lsm->lsm_stripe_count; i++) { diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c index d7b3c82b5ead..45dfe94199ae 100644 --- a/drivers/staging/octeon-usb/cvmx-usb.c +++ b/drivers/staging/octeon-usb/cvmx-usb.c @@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number, } } - memset(usb, 0, sizeof(usb)); + memset(usb, 0, sizeof(*usb)); usb->init_flags = flags; /* Initialize the USB state structure */ diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c index c7ff2e4d1f23..9832dcbbd07f 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mp.c +++ b/drivers/staging/rtl8188eu/core/rtw_mp.c @@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data) sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop); } - _rtw_memset(data, '\0', sizeof(data)); + _rtw_memset(data, '\0', sizeof(*data)); i = psd_start; while (i < psd_stop) { diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c index 9c2e7a20c09e..ec0028d4e61a 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c @@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter) u8 cut_ver, fab_ver; /* Init Value */ - _rtw_memset(dm_odm, 0, sizeof(dm_odm)); + _rtw_memset(dm_odm, 0, sizeof(*dm_odm)); dm_odm->Adapter = Adapter; diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index cd4100fb3645..95953ebc0279 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev, stop = strncmp(extra, "stop", 4); sscanf(extra, "count =%d, pkt", &count); - _rtw_memset(extra, '\0', sizeof(extra)); + _rtw_memset(extra, '\0', sizeof(*extra)); if (stop == 0) { bStartTest = 0; /* To set Stop */ diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index d3078d200e50..9ca3180ebaa0 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -54,6 +54,7 @@ 
static struct usb_device_id rtw_usb_id_tbl[] = { /*=== Customer ID ===*/ /****** 8188EUS ********/ {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ + {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c index 5bc361b16d4c..56144014b7c9 100644 --- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c +++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c @@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen) /* Get TCB and local buffer from common pool. (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */ skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); + if (!skb) + return RT_STATUS_FAILURE; memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c index d0cf7d8a20e5..8872e0f84f40 100644 --- a/drivers/staging/vt6656/iwctl.c +++ b/drivers/staging/vt6656/iwctl.c @@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info, if (pMgmt == NULL) return -EFAULT; + if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) + return -ENODEV; + buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL); if (buf == NULL) return -ENOMEM; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 536971786ae8..6f9d28182445 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev) memset(pMgmt->abyCurrBSSID, 0, 6); pMgmt->eCurrState = WMAC_STATE_IDLE; + pDevice->flags &= ~DEVICE_FLAGS_OPENED; + device_free_tx_bufs(pDevice); device_free_rx_bufs(pDevice); device_free_int_bufs(pDevice); @@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev) usb_free_urb(pDevice->pInterruptURB); BSSvClearNodeDBTable(pDevice, 0); - pDevice->flags &=(~DEVICE_FLAGS_OPENED); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index fb743a8811bb..14f3e852215d 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n"); for (ii = 0; ii < pDevice->cbTD; ii++) { + if (!pDevice->apTD[ii]) + return NULL; pContext = pDevice->apTD[ii]; if (pContext->bBoolInUse == false) { pContext->bBoolInUse = true; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index c9a9ddd1d0bc..01bf5eb4f238 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; if (canon_change) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); - ldata->line_start = 0; - ldata->canon_head = ldata->read_tail; + ldata->line_start = ldata->canon_head = ldata->read_tail; ldata->erasing = 0; ldata->lnext = 0; } diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 52379e56a31e..44077c0b7670 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf) static int dma_push_rx(struct eg20t_port *priv, int size) { - struct tty_struct *tty; int room; struct uart_port 
*port = &priv->port; struct tty_port *tport = &port->state->port; - port = &priv->port; - tty = tty_port_tty_get(tport); - if (!tty) { - dev_dbg(priv->port.dev, "%s:tty is busy now", __func__); - return 0; - } - room = tty_buffer_request_room(tport, size); if (room < size) dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", size - room); if (!room) - return room; + return 0; tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); port->icount.rx += room; - tty_kref_put(tty); return room; } @@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr) if (tty == NULL) { for (i = 0; error_msg[i] != NULL; i++) dev_err(&priv->pdev->dev, error_msg[i]); + } else { + tty_kref_put(tty); } } diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index d0d972f7e43e..0489a2bdcdf9 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data) static void tegra_uart_stop_rx(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); - struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); + struct tty_struct *tty; struct tty_port *port = &u->state->port; struct dma_tx_state state; unsigned long ier; @@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u) if (!tup->rx_in_progress) return; + tty = tty_port_tty_get(&tup->uport.state->port); + tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */ ier = tup->ier_shadow; diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 03ba081c5772..6fd60fece6b4 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, } return 0; case TCFLSH: + retval = tty_check_change(tty); + if (retval) + return retval; return __tty_perform_flush(tty, arg); default: /* Try the mode commands */ diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig index 4a851e15e58c..77b47d82c9a6 100644 --- a/drivers/usb/chipidea/Kconfig +++ b/drivers/usb/chipidea/Kconfig @@ -1,6 +1,6 @@ config USB_CHIPIDEA tristate "ChipIdea Highspeed Dual Role Controller" - depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET) + depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA help Say Y here if your system has a dual role high speed USB controller based on ChipIdea silicon IP. 
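Aside (editor's note on the USB_CHIPIDEA Kconfig change just above): the three-way OR reads oddly, but a truth table shows (A && B) || (A && !B) || (!A && B) is simply A || B, so the new dependency is equivalent to (USB_EHCI_HCD || USB_GADGET) && HAS_DMA. A throwaway userspace check of that equivalence:

#include <assert.h>

int main(void)
{
	for (int a = 0; a <= 1; a++)
		for (int b = 0; b <= 1; b++)
			assert(((a && b) || (a && !b) || (!a && b)) == (a || b));
	return 0;
}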
Currently, only the diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 74d998d9b45b..be822a2c1776 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", ret); - goto err_clk; + goto err_phy; } } @@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Can't register ci_hdrc platform device, err=%d\n", ret); - goto err_clk; + goto err_phy; } if (data->usbmisc_data) { @@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) disable_device: ci_hdrc_remove_device(data->ci_pdev); +err_phy: + if (data->phy) + usb_phy_shutdown(data->phy); err_clk: clk_disable_unprepare(data->clk); return ret; diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 94626409559a..23763dcec069 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev) dbg_remove_files(ci); free_irq(ci->irq, ci); ci_role_destroy(ci); + kfree(ci->hw_bank.regmap); return 0; } diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 6b4c2f2eb946..9333083dd111 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci) for (i = 0; i < ci->hw_ep_max; i++) { struct ci_hw_ep *hwep = &ci->ci_hw_ep[i]; + if (hwep->pending_td) + free_pending_td(hwep); dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma); } } @@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget, if (ci->platdata->notify_event) ci->platdata->notify_event(ci, CI_HDRC_CONTROLLER_STOPPED_EVENT); - ci->driver = NULL; spin_unlock_irqrestore(&ci->lock, flags); _gadget_stop_activity(&ci->gadget); spin_lock_irqsave(&ci->lock, flags); pm_runtime_put(&ci->gadget.dev); } + ci->driver = NULL; spin_unlock_irqrestore(&ci->lock, flags); return 0; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 737e3c19967b..71dc5d768fa5 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, if ((index & ~USB_DIR_IN) == 0) return 0; ret = findintfep(ps->dev, index); + if (ret < 0) { + /* + * Some not fully compliant Win apps seem to get + * index wrong and have the endpoint number here + * rather than the endpoint address (with the + * correct direction). Win does let this through, + * so we'll not reject it here but leave it to + * the device to not break KVM. But we warn. 
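Aside (editor's sketch of the quirk handled above): an IN endpoint's *address* is its number with USB_DIR_IN (0x80) set, so endpoint 1 IN has address 0x81, while the buggy applications described in the comment pass the bare number 0x01. The retry in the hunk simply flips that direction bit; demo_flip_ep_direction is an invented name.

#include <linux/usb/ch9.h>	/* USB_DIR_IN == 0x80 */

static inline __u8 demo_flip_ep_direction(__u8 index)
{
	return index ^ USB_DIR_IN;	/* 0x01 <-> 0x81 */
}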
+ */ + ret = findintfep(ps->dev, index ^ 0x80); + if (ret >= 0) + dev_info(&ps->dev->dev, + "%s: process %i (%s) requesting ep %02x but needs %02x\n", + __func__, task_pid_nr(current), + current->comm, index, index ^ 0x80); + } if (ret >= 0) ret = checkintf(ps, ret); break; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index dde4c83516a1..e6b682c6c236 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state) unsigned long long u2_pel; int ret; + if (udev->state != USB_STATE_CONFIGURED) + return 0; + /* Convert SEL and PEL stored in ns to us */ u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 997ebe420bc9..2e252aae51ca 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -29,6 +29,7 @@ #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 +#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e struct dwc3_pci { struct device *dev; @@ -189,6 +190,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = { PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 1a66c5baa0d1..0658908d8968 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data { struct ffs_file_perms perms; umode_t root_mode; const char *dev_name; - union { - /* set by ffs_fs_mount(), read by ffs_sb_fill() */ - void *private_data; - /* set by ffs_sb_fill(), read by ffs_fs_mount */ - struct ffs_data *ffs_data; - }; + struct ffs_data *ffs_data; }; static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) { struct ffs_sb_fill_data *data = _data; struct inode *inode; - struct ffs_data *ffs; + struct ffs_data *ffs = data->ffs_data; ENTER(); - /* Initialise data */ - ffs = ffs_data_new(); - if (unlikely(!ffs)) - goto Enomem; - ffs->sb = sb; - ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL); - if (unlikely(!ffs->dev_name)) - goto Enomem; - ffs->file_perms = data->perms; - ffs->private_data = data->private_data; - - /* used by the caller of this function */ - data->ffs_data = ffs; - + data->ffs_data = NULL; sb->s_fs_info = ffs; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; @@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) &data->perms); sb->s_root = d_make_root(inode); if (unlikely(!sb->s_root)) - goto Enomem; + return -ENOMEM; /* EP0 file */ if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, &ffs_ep0_operations, NULL))) - goto Enomem; + return -ENOMEM; return 0; - -Enomem: - return -ENOMEM; } static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) @@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags, struct dentry *rv; int ret; void *ffs_dev; + struct ffs_data *ffs; ENTER(); @@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags, if (unlikely(ret < 0)) return ERR_PTR(ret); + ffs = ffs_data_new(); + if (unlikely(!ffs)) + return ERR_PTR(-ENOMEM); + ffs->file_perms = data.perms; + + ffs->dev_name = kstrdup(dev_name, GFP_KERNEL); + if 
(unlikely(!ffs->dev_name)) { + ffs_data_put(ffs); + return ERR_PTR(-ENOMEM); + } + ffs_dev = functionfs_acquire_dev_callback(dev_name); - if (IS_ERR(ffs_dev)) - return ffs_dev; + if (IS_ERR(ffs_dev)) { + ffs_data_put(ffs); + return ERR_CAST(ffs_dev); + } + ffs->private_data = ffs_dev; + data.ffs_data = ffs; - data.dev_name = dev_name; - data.private_data = ffs_dev; rv = mount_nodev(t, flags, &data, ffs_sb_fill); - - /* data.ffs_data is set by ffs_sb_fill */ - if (IS_ERR(rv)) + if (IS_ERR(rv) && data.ffs_data) { functionfs_release_dev_callback(data.ffs_data); - + ffs_data_put(data.ffs_data); + } return rv; } diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 4449f565d6c6..f2407b2e8a99 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver, } /* Enable USB controller, 83xx or 8536 */ - if (pdata->have_sysif_regs) + if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6) setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4); /* Don't need to set host mode here. It will be done by tdi_reset() */ @@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd, case FSL_USB2_PHY_ULPI: if (pdata->have_sysif_regs && pdata->controller_ver) { /* controller version 1.6 or above */ + clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN); setbits32(non_ehci + FSL_SOC_USB_CTRL, - ULPI_PHY_CLK_SEL); - /* - * Due to controller issue of PHY_CLK_VALID in ULPI - * mode, we set USB_CTRL_USB_EN before checking - * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work. - */ - clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL, - UTMI_PHY_EN, USB_CTRL_USB_EN); + ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN); } portsc |= PORT_PTS_ULPI; break; @@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd, if (pdata->have_sysif_regs && pdata->controller_ver && (phy_mode == FSL_USB2_PHY_ULPI)) { /* check PHY_CLK_VALID to get phy clk valid */ - if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & - PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) { + if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & + PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) || + in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) { printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n"); return -EINVAL; } diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 6bd299e61f58..854c2ec7b699 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = { .remove = usb_hcd_pci_remove, .shutdown = usb_hcd_pci_shutdown, -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM .driver = { .pm = &usb_hcd_pci_pm_ops }, diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c index 60a5de505ca1..adb01d950a16 100644 --- a/drivers/usb/host/imx21-hcd.c +++ b/drivers/usb/host/imx21-hcd.c @@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd, i = DIV_ROUND_UP(wrap_frame( cur_frame - urb->start_frame), urb->interval); - if (urb->transfer_flags & URB_ISO_ASAP) { + + /* Treat underruns as if URB_ISO_ASAP was set */ + if ((urb->transfer_flags & URB_ISO_ASAP) || + i >= urb->number_of_packets) { urb->start_frame = wrap_frame(urb->start_frame + i * urb->interval); i = 0; - } else if (i >= urb->number_of_packets) { - ret = -EXDEV; - goto alloc_dmem_failed; } } } diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 8f6b695af6a4..604cad1bcf9c 100644 --- a/drivers/usb/host/ohci-hcd.c +++ 
b/drivers/usb/host/ohci-hcd.c @@ -216,31 +216,26 @@ static int ohci_urb_enqueue ( frame &= ~(ed->interval - 1); frame |= ed->branch; urb->start_frame = frame; + ed->last_iso = frame + ed->interval * (size - 1); } } else if (ed->type == PIPE_ISOCHRONOUS) { u16 next = ohci_frame_no(ohci) + 1; u16 frame = ed->last_iso + ed->interval; + u16 length = ed->interval * (size - 1); /* Behind the scheduling threshold? */ if (unlikely(tick_before(frame, next))) { - /* USB_ISO_ASAP: Round up to the first available slot */ + /* URB_ISO_ASAP: Round up to the first available slot */ if (urb->transfer_flags & URB_ISO_ASAP) { frame += (next - frame + ed->interval - 1) & -ed->interval; /* - * Not ASAP: Use the next slot in the stream. If - * the entire URB falls before the threshold, fail. + * Not ASAP: Use the next slot in the stream, + * no matter what. */ } else { - if (tick_before(frame + ed->interval * - (urb->number_of_packets - 1), next)) { - retval = -EXDEV; - usb_hcd_unlink_urb_from_ep(hcd, urb); - goto fail; - } - /* * Some OHCI hardware doesn't handle late TDs * correctly. After retiring them it proceeds @@ -251,9 +246,16 @@ static int ohci_urb_enqueue ( urb_priv->td_cnt = DIV_ROUND_UP( (u16) (next - frame), ed->interval); + if (urb_priv->td_cnt >= urb_priv->length) { + ++urb_priv->td_cnt; /* Mark it */ + ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n", + urb, frame, length, + next); + } } } urb->start_frame = frame; + ed->last_iso = frame + length; } /* fill the TDs and link them to the ed; and diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index df4a6707322d..e7f577e63624 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status) __releases(ohci->lock) __acquires(ohci->lock) { - struct device *dev = ohci_to_hcd(ohci)->self.controller; + struct device *dev = ohci_to_hcd(ohci)->self.controller; + struct usb_host_endpoint *ep = urb->ep; + struct urb_priv *urb_priv; + // ASSERT (urb->hcpriv != 0); + restart: urb_free_priv (ohci, urb->hcpriv); urb->hcpriv = NULL; if (likely(status == -EINPROGRESS)) @@ -80,6 +84,21 @@ __acquires(ohci->lock) ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE); ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); } + + /* + * An isochronous URB that is submitted too late won't have any TDs + * (marked by the fact that the td_cnt value is larger than the + * actual number of TDs). If the next URB on this endpoint is like + * that, give it back now.
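Aside (editor's sketch): the frame arithmetic in the hunk above is all modulo 2^16, because OHCI frame numbers are 16 bits and wrap. The driver's tick_before() helper is, to the best of my knowledge, the standard signed-difference comparison shown below; demo_frame_before is an invented name.

#include <linux/types.h>

/* true iff frame a precedes frame b, modulo 2^16 (half-range rule) */
static inline bool demo_frame_before(u16 a, u16 b)
{
	return (s16)(a - b) < 0;
}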
+ */ + if (!list_empty(&ep->urb_list)) { + urb = list_first_entry(&ep->urb_list, struct urb, urb_list); + urb_priv = urb->hcpriv; + if (urb_priv->td_cnt > urb_priv->length) { + status = 0; + goto restart; + } + } } @@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info, td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000); *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci, (data & 0x0FFF) | 0xE000); - td->ed->last_iso = info & 0xffff; } else { td->hwCBP = cpu_to_hc32 (ohci, data); } @@ -996,7 +1014,7 @@ rescan_this: urb_priv->td_cnt++; /* if URB is done, clean up */ - if (urb_priv->td_cnt == urb_priv->length) { + if (urb_priv->td_cnt >= urb_priv->length) { modified = completed = 1; finish_urb(ohci, urb, 0); } @@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td) urb_priv->td_cnt++; /* If all this urb's TDs are done, call complete() */ - if (urb_priv->td_cnt == urb_priv->length) + if (urb_priv->td_cnt >= urb_priv->length) finish_urb(ohci, urb, status); /* clean schedule: unlink EDs that are no longer busy */ diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index c300bd2f7d1c..0f228c46eeda 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = { .remove = usb_hcd_pci_remove, .shutdown = uhci_shutdown, -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM .driver = { .pm = &usb_hcd_pci_pm_ops }, diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 041c6ddb695c..da6f56d996ce 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, } /* Fell behind? */ - if (uhci_frame_before_eq(frame, next)) { + if (!uhci_frame_before_eq(next, frame)) { /* USB_ISO_ASAP: Round up to the first available slot */ if (urb->transfer_flags & URB_ISO_ASAP) @@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, -qh->period; /* - * Not ASAP: Use the next slot in the stream. If - * the entire URB falls before the threshold, fail. + * Not ASAP: Use the next slot in the stream, + * no matter what. */ else if (!uhci_frame_before_eq(next, frame + (urb->number_of_packets - 1) * qh->period)) - return -EXDEV; + dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n", + urb, frame, + (urb->number_of_packets - 1) * + qh->period, + next); } } diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index fae697ed0b70..773a6b28c4f1 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) xhci_queue_stop_endpoint(xhci, slot_id, i, suspend); } - cmd->command_trb = xhci->cmd_ring->enqueue; + cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring); list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list); xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend); xhci_ring_cmd_db(xhci); @@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex) * - Mark a port as being done with device resume, * and ring the endpoint doorbells. * - Stop the Synopsys redriver Compliance Mode polling. + * - Drop and reacquire the xHCI lock, in order to wait for port resume. 
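Aside (editor's sketch): the last bullet above is the interesting one. wait_for_completion_timeout() sleeps, so the xHCI spinlock has to be dropped around the wait and retaken afterwards, which is exactly what the reworked xhci_get_port_status() below does. The same drop-wait-retake shape in isolation, with invented names:

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/jiffies.h>

/* Returns remaining jiffies, 0 on timeout; *flags is the caller's saved state. */
static unsigned long demo_wait_unlocked(spinlock_t *lock, unsigned long *flags,
					struct completion *done, unsigned int ms)
{
	unsigned long left;

	spin_unlock_irqrestore(lock, *flags);	/* may not sleep under a spinlock */
	left = wait_for_completion_timeout(done, msecs_to_jiffies(ms));
	spin_lock_irqsave(lock, *flags);
	return left;
}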
*/ static u32 xhci_get_port_status(struct usb_hcd *hcd, struct xhci_bus_state *bus_state, __le32 __iomem **port_array, - u16 wIndex, u32 raw_port_status) + u16 wIndex, u32 raw_port_status, + unsigned long flags) + __releases(&xhci->lock) + __acquires(&xhci->lock) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); u32 status = 0; @@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, return 0xffffffff; if (time_after_eq(jiffies, bus_state->resume_done[wIndex])) { + int time_left; + xhci_dbg(xhci, "Resume USB2 port %d\n", wIndex + 1); bus_state->resume_done[wIndex] = 0; clear_bit(wIndex, &bus_state->resuming_ports); + + set_bit(wIndex, &bus_state->rexit_ports); xhci_set_link_state(xhci, port_array, wIndex, XDEV_U0); - xhci_dbg(xhci, "set port %d resume\n", - wIndex + 1); - slot_id = xhci_find_slot_id_by_port(hcd, xhci, - wIndex + 1); - if (!slot_id) { - xhci_dbg(xhci, "slot_id is zero\n"); - return 0xffffffff; + + spin_unlock_irqrestore(&xhci->lock, flags); + time_left = wait_for_completion_timeout( + &bus_state->rexit_done[wIndex], + msecs_to_jiffies( + XHCI_MAX_REXIT_TIMEOUT)); + spin_lock_irqsave(&xhci->lock, flags); + + if (time_left) { + slot_id = xhci_find_slot_id_by_port(hcd, + xhci, wIndex + 1); + if (!slot_id) { + xhci_dbg(xhci, "slot_id is zero\n"); + return 0xffffffff; + } + xhci_ring_device(xhci, slot_id); + } else { + int port_status = xhci_readl(xhci, + port_array[wIndex]); + xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", + XHCI_MAX_REXIT_TIMEOUT, + port_status); + status |= USB_PORT_STAT_SUSPEND; + clear_bit(wIndex, &bus_state->rexit_ports); } - xhci_ring_device(xhci, slot_id); + bus_state->port_c_suspend |= 1 << wIndex; bus_state->suspended_ports &= ~(1 << wIndex); } else { @@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; } status = xhci_get_port_status(hcd, bus_state, port_array, - wIndex, temp); + wIndex, temp, flags); if (status == 0xffffffff) goto error; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 53b972c2a09f..83bcd13622c3 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) for (i = 0; i < USB_MAXCHILDREN; ++i) { xhci->bus_state[0].resume_done[i] = 0; xhci->bus_state[1].resume_done[i] = 0; + /* Only the USB 2.0 completions will ever be used. 
*/ + init_completion(&xhci->bus_state[1].rexit_done[i]); } if (scratchpad_alloc(xhci, flags)) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c2d495057eb5..236c3aabe940 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -351,7 +351,7 @@ static struct pci_driver xhci_pci_driver = { /* suspend and resume implemented later */ .shutdown = usb_hcd_pci_shutdown, -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM .driver = { .pm = &usb_hcd_pci_pm_ops }, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 411da1fc7ae8..6bfbd80ec2b9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring) return TRB_TYPE_LINK_LE32(link->control); } +union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring) +{ + /* Enqueue pointer can be left pointing to the link TRB, + * we must handle that + */ + if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control)) + return ring->enq_seg->next->trbs; + return ring->enqueue; +} + /* Updates trb to point to the next TRB in the ring, and updates seg if the next * TRB is in a new segment. This does not skip over link TRBs, and it does not * effect the ring dequeue or enqueue pointers. @@ -859,8 +869,12 @@ remove_finished_td: /* Otherwise ring the doorbell(s) to restart queued transfers */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } - ep->stopped_td = NULL; - ep->stopped_trb = NULL; + + /* Clear stopped_td and stopped_trb if endpoint is not halted */ + if (!(ep->ep_state & EP_HALTED)) { + ep->stopped_td = NULL; + ep->stopped_trb = NULL; + } /* * Drop the lock and complete the URBs in the cancelled TD list. @@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, inc_deq(xhci, xhci->cmd_ring); return; } + /* There is no command to handle if we get a stop event when the + * command ring is empty, event->cmd_trb points to the next + * unset command + */ + if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue) + return; } switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) @@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci, } } + /* + * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or + * RExit to a disconnect state). If so, let the the driver know it's + * out of the RExit state. 
+ */ + if (!DEV_SUPERSPEED(temp) && + test_and_clear_bit(faked_port_index, + &bus_state->rexit_ports)) { + complete(&bus_state->rexit_done[faked_port_index]); + bogus_port_status = true; + goto cleanup; + } + if (hcd->speed != HCD_USB3) xhci_test_and_clear_bit(xhci, port_array, faked_port_index, PORT_PLC); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 49b6edb84a79..1e36dbb48366 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -2598,15 +2598,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, if (command) { cmd_completion = command->completion; cmd_status = &command->status; - command->command_trb = xhci->cmd_ring->enqueue; - - /* Enqueue pointer can be left pointing to the link TRB, - * we must handle that - */ - if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) - command->command_trb = - xhci->cmd_ring->enq_seg->next->trbs; - + command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring); list_add_tail(&command->cmd_list, &virt_dev->cmd_list); } else { cmd_completion = &virt_dev->cmd_completion; @@ -2614,7 +2606,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, } init_completion(cmd_completion); - cmd_trb = xhci->cmd_ring->dequeue; + cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring); if (!ctx_change) ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, udev->slot_id, must_succeed); @@ -3439,14 +3431,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Attempt to submit the Reset Device command to the command ring */ spin_lock_irqsave(&xhci->lock, flags); - reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; - - /* Enqueue pointer can be left pointing to the link TRB, - * we must handle that - */ - if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control)) - reset_device_cmd->command_trb = - xhci->cmd_ring->enq_seg->next->trbs; + reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring); list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); ret = xhci_queue_reset_device(xhci, slot_id); @@ -3650,7 +3635,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) union xhci_trb *cmd_trb; spin_lock_irqsave(&xhci->lock, flags); - cmd_trb = xhci->cmd_ring->dequeue; + cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring); ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); @@ -3785,7 +3770,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) slot_ctx->dev_info >> 27); spin_lock_irqsave(&xhci->lock, flags); - cmd_trb = xhci->cmd_ring->dequeue; + cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring); ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, udev->slot_id); if (ret) { diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 46aa14894148..289fbfbae746 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1412,8 +1412,18 @@ struct xhci_bus_state { unsigned long resume_done[USB_MAXCHILDREN]; /* which ports have started to resume */ unsigned long resuming_ports; + /* Which ports are waiting on RExit to U0 transition. */ + unsigned long rexit_ports; + struct completion rexit_done[USB_MAXCHILDREN]; }; + +/* + * It can take up to 20 ms to transition from RExit to U0 on the + * Intel Lynx Point LP xHCI host. 
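Aside (editor's note, plain arithmetic rather than a change): the comment above quotes a 20 ms hardware transition, but the macro that follows is (20 * 1000) and is handed to msecs_to_jiffies() in xhci_get_port_status(), which takes milliseconds; the driver therefore waits up to 20,000 ms, i.e. 20 seconds. Presumably a deliberate safety margin, but the units are easy to misread:

#include <linux/jiffies.h>

#define DEMO_REXIT_TIMEOUT	(20 * 1000)	/* same literal as above */

static unsigned long demo_rexit_timeout(void)
{
	return msecs_to_jiffies(DEMO_REXIT_TIMEOUT);	/* 20000 ms == 20 s */
}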
+ */ +#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000) + static inline unsigned int hcd_index(struct usb_hcd *hcd) { if (hcd->speed == HCD_USB3) @@ -1840,6 +1850,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command, union xhci_trb *cmd_trb); void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id); +union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring); /* xHCI roothub code */ void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array, diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c index 75dca19bf214..6ac755270ab4 100644 --- a/drivers/video/mmp/hw/mmp_ctrl.c +++ b/drivers/video/mmp/hw/mmp_ctrl.c @@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev) if (IS_ERR(ctrl->clk)) { dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name); ret = -ENOENT; - goto failed_get_clk; + goto failed; } clk_prepare_enable(ctrl->clk); @@ -551,21 +551,8 @@ failed_path_init: path_deinit(path_plat); } - if (ctrl->clk) { - devm_clk_put(ctrl->dev, ctrl->clk); - clk_disable_unprepare(ctrl->clk); - } -failed_get_clk: - devm_free_irq(ctrl->dev, ctrl->irq, ctrl); + clk_disable_unprepare(ctrl->clk); failed: - if (ctrl) { - if (ctrl->reg_base) - devm_iounmap(ctrl->dev, ctrl->reg_base); - devm_release_mem_region(ctrl->dev, res->start, - resource_size(res)); - devm_kfree(ctrl->dev, ctrl); - } - dev_err(&pdev->dev, "device init failed\n"); return ret; diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index d250ed0f806d..27197a8048c0 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host) break; case 3: bits_per_pixel = 32; + break; case 1: default: return -EINVAL; diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c index 7ef079c146e7..c172a5281f9e 100644 --- a/drivers/video/neofb.c +++ b/drivers/video/neofb.c @@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id) if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, info->monspecs.modedb, 16)) { printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); + err = -EINVAL; goto err_map_video; } @@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id) info->fix.smem_len >> 10, info->var.xres, info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); - if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) + err = fb_alloc_cmap(&info->cmap, 256, 0); + if (err < 0) goto err_map_video; err = register_framebuffer(info); diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 171821ddd78d..ba5b40f581f6 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c @@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name, return -EINVAL; } - timing_np = of_find_node_by_name(np, name); + timing_np = of_get_child_by_name(np, name); if (!timing_np) { pr_err("%s: could not find node '%s'\n", of_node_full_name(np), name); @@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np) struct display_timings *disp; if (!np) { - pr_err("%s: no devicenode given\n", of_node_full_name(np)); + pr_err("%s: no device node given\n", of_node_full_name(np)); return NULL; } - timings_np = of_find_node_by_name(np, "display-timings"); + timings_np = of_get_child_by_name(np, "display-timings"); if (!timings_np) { pr_err("%s: could not find display-timings 
node\n", of_node_full_name(np)); diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig index 6c90885b0940..10b25e7cd878 100644 --- a/drivers/video/omap2/displays-new/Kconfig +++ b/drivers/video/omap2/displays-new/Kconfig @@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI config DISPLAY_PANEL_DSI_CM tristate "Generic DSI Command Mode Panel" + depends on BACKLIGHT_CLASS_DEVICE help Driver for generic DSI command mode panels. diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c index 1b60698f141e..ccd9073f706f 100644 --- a/drivers/video/omap2/displays-new/connector-analog-tv.c +++ b/drivers/video/omap2/displays-new/connector-analog-tv.c @@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev) in = omap_dss_find_output(pdata->source); if (in == NULL) { dev_err(&pdev->dev, "Failed to find video source\n"); - return -ENODEV; + return -EPROBE_DEFER; } ddata->in = in; diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c index bc5f8ceda371..63d88ee6dfe4 100644 --- a/drivers/video/omap2/displays-new/connector-dvi.c +++ b/drivers/video/omap2/displays-new/connector-dvi.c @@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev) in = omap_dss_find_output(pdata->source); if (in == NULL) { dev_err(&pdev->dev, "Failed to find video source\n"); - return -ENODEV; + return -EPROBE_DEFER; } ddata->in = in; diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c index c5826716d6ab..9abe2c039ae9 100644 --- a/drivers/video/omap2/displays-new/connector-hdmi.c +++ b/drivers/video/omap2/displays-new/connector-hdmi.c @@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev) in = omap_dss_find_output(pdata->source); if (in == NULL) { dev_err(&pdev->dev, "Failed to find video source\n"); - return -ENODEV; + return -EPROBE_DEFER; } ddata->in = in; diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 02a7340111df..477975009eee 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev) } pm_runtime_enable(&pdev->dev); + pm_runtime_irq_safe(&pdev->dev); r = dispc_runtime_get(); if (r) diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 47ca86c5c6c0..d838ba829459 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) (info->var.bits_per_pixel * info->var.xres_virtual); if (info->var.yres_virtual < info->var.yres) { dev_err(info->device, "virtual vertical size smaller than real\n"); - goto err_find_mode; - } - - /* maximize virtual vertical size for fast scrolling */ - info->var.yres_virtual = info->fix.smem_len * 8 / - (info->var.bits_per_pixel * info->var.xres_virtual); - if (info->var.yres_virtual < info->var.yres) { - dev_err(info->device, "virtual vertical size smaller than real\n"); + rc = -EINVAL; goto err_find_mode; } diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index a50c6e3a7cc4..b232908a6192 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); - scratch_page = 
get_balloon_scratch_page(); - for (i = 0; i < nr_pages; i++) { page = alloc_page(gfp); if (page == NULL) { @@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) scrub_page(page); + /* + * Ballooned out frames are effectively replaced with + * a scratch frame. Ensure direct mappings and the + * p2m are consistent. + */ + scratch_page = get_balloon_scratch_page(); #ifdef CONFIG_XEN_HAVE_PVMMU if (xen_pv_domain() && !PageHighMem(page)) { ret = HYPERVISOR_update_va_mapping( @@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) BUG_ON(ret); } #endif - } - - /* Ensure that ballooned highmem pages don't have kmaps. */ - kmap_flush_unused(); - flush_tlb_all(); - - /* No more mappings: invalidate P2M and add to balloon. */ - for (i = 0; i < nr_pages; i++) { - pfn = mfn_to_pfn(frame_list[i]); if (!xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long p; p = page_to_pfn(scratch_page); __set_phys_to_machine(pfn, pfn_to_mfn(p)); } + put_balloon_scratch_page(); + balloon_append(pfn_to_page(pfn)); } - put_balloon_scratch_page(); + /* Ensure that ballooned highmem pages don't have kmaps. */ + kmap_flush_unused(); + flush_tlb_all(); set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; @@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src) src_p = kmap_atomic(src_bv->bv_page); dst_p = kmap_atomic(dst_bv->bv_page); - memcpy(dst_p + dst_bv->bv_offset, - src_p + src_bv->bv_offset, + memcpy(dst_p + dst_offset, + src_p + src_offset, bytes); kunmap_atomic(dst_p); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 121da2dc3be8..d4e81e4a9b04 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) { int tmp, hangup_needed = 0; struct ocfs2_super *osb = NULL; - char nodestr[8]; + char nodestr[12]; trace_ocfs2_dismount_volume(sb); diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 73feacc49b2e..fd777032c2ba 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct return NULL; } -static int newer_jl_done(struct reiserfs_journal_cnode *cn) -{ - struct super_block *sb = cn->sb; - b_blocknr_t blocknr = cn->blocknr; - - cn = cn->hprev; - while (cn) { - if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist && - atomic_read(&cn->jlist->j_commit_left) != 0) - return 0; - cn = cn->hprev; - } - return 1; -} - static void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **, struct reiserfs_journal_list *, unsigned long, @@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s, reiserfs_warning(s, "clm-2048", "called with wcount %d", atomic_read(&journal->j_wcount)); } - BUG_ON(jl->j_trans_id == 0); /* if flushall == 0, the lock is already held */ if (flushall) { @@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s, return err; } -static int test_transaction(struct super_block *s, - struct reiserfs_journal_list *jl) -{ - struct reiserfs_journal_cnode *cn; - - if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) - return 1; - - cn = jl->j_realblock; - while (cn) { - /* if the blocknr == 0, this has been cleared from the hash, - ** skip it - */ - if (cn->blocknr == 0) { - goto next; - } - if (cn->bh && !newer_jl_done(cn)) - return 0; - next: - cn = cn->next; - cond_resched(); - } - return 0; -} - static int 
write_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl, struct buffer_chunk *chunk) @@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s, break; tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next); } + get_journal_list(jl); + get_journal_list(flush_jl); /* try to find a group of blocks we can flush across all the ** transactions, but only bother if we've actually spanned ** across multiple lists @@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s, ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i); } flush_journal_list(s, flush_jl, 1); + put_journal_list(s, flush_jl); + put_journal_list(s, jl); return 0; } @@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb, return 1; } -static void flush_old_journal_lists(struct super_block *s) -{ - struct reiserfs_journal *journal = SB_JOURNAL(s); - struct reiserfs_journal_list *jl; - struct list_head *entry; - time_t now = get_seconds(); - - while (!list_empty(&journal->j_journal_list)) { - entry = journal->j_journal_list.next; - jl = JOURNAL_LIST_ENTRY(entry); - /* this check should always be run, to send old lists to disk */ - if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) && - atomic_read(&jl->j_commit_left) == 0 && - test_transaction(s, jl)) { - flush_used_journal_lists(s, jl); - } else { - break; - } - } -} - /* ** long and ugly. If flush, will not return until all commit ** blocks and all real buffers in the trans are on disk. @@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, } } } - flush_old_journal_lists(sb); journal->j_current_jl->j_list_bitmap = get_list_bitmap(sb, journal->j_current_jl); diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 7e5aae4bf46f..6eaf5edf1ea1 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct udf_sb_info *sbi = UDF_SB(sb); + struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb); - mutex_lock(&sbi->s_alloc_mutex); - if (sbi->s_lvid_bh) { - struct logicalVolIntegrityDescImpUse *lvidiu = - udf_sb_lvidiu(sbi); + if (lvidiu) { + mutex_lock(&sbi->s_alloc_mutex); if (S_ISDIR(inode->i_mode)) le32_add_cpu(&lvidiu->numDirs, -1); else le32_add_cpu(&lvidiu->numFiles, -1); udf_updated_lvid(sb); + mutex_unlock(&sbi->s_alloc_mutex); } - mutex_unlock(&sbi->s_alloc_mutex); udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1); } @@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err) uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; struct udf_inode_info *iinfo; struct udf_inode_info *dinfo = UDF_I(dir); + struct logicalVolIntegrityDescImpUse *lvidiu; inode = new_inode(sb); @@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err) return NULL; } - if (sbi->s_lvid_bh) { - struct logicalVolIntegrityDescImpUse *lvidiu; - + lvidiu = udf_sb_lvidiu(sb); + if (lvidiu) { iinfo->i_unique = lvid_get_unique_id(sb); mutex_lock(&sbi->s_alloc_mutex); - lvidiu = udf_sb_lvidiu(sbi); if (S_ISDIR(mode)) le32_add_cpu(&lvidiu->numDirs, 1); else diff --git a/fs/udf/super.c b/fs/udf/super.c index 839a2bad7f45..91219385691d 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *); static int udf_statfs(struct dentry *, struct kstatfs *); static int udf_show_options(struct seq_file *, struct dentry *); -struct logicalVolIntegrityDescImpUse 
*udf_sb_lvidiu(struct udf_sb_info *sbi) +struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb) { - struct logicalVolIntegrityDesc *lvid = - (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; - __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions); - __u32 offset = number_of_partitions * 2 * - sizeof(uint32_t)/sizeof(uint8_t); + struct logicalVolIntegrityDesc *lvid; + unsigned int partnum; + unsigned int offset; + + if (!UDF_SB(sb)->s_lvid_bh) + return NULL; + lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data; + partnum = le32_to_cpu(lvid->numOfPartitions); + if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) - + offsetof(struct logicalVolIntegrityDesc, impUse)) / + (2 * sizeof(uint32_t)) < partnum) { + udf_err(sb, "Logical volume integrity descriptor corrupted " + "(numOfPartitions = %u)!\n", partnum); + return NULL; + } + /* The offset is to skip freeSpaceTable and sizeTable arrays */ + offset = partnum * 2 * sizeof(uint32_t); return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); } @@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options) struct udf_options uopt; struct udf_sb_info *sbi = UDF_SB(sb); int error = 0; + struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb); - if (sbi->s_lvid_bh) { - int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev); + if (lvidiu) { + int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev); if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY)) return -EACCES; } @@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb) if (!bh) return; - - mutex_lock(&sbi->s_alloc_mutex); lvid = (struct logicalVolIntegrityDesc *)bh->b_data; - lvidiu = udf_sb_lvidiu(sbi); + lvidiu = udf_sb_lvidiu(sb); + if (!lvidiu) + return; + mutex_lock(&sbi->s_alloc_mutex); lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; udf_time_to_disk_stamp(&lvid->recordingDateAndTime, @@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb) if (!bh) return; + lvid = (struct logicalVolIntegrityDesc *)bh->b_data; + lvidiu = udf_sb_lvidiu(sb); + if (!lvidiu) + return; mutex_lock(&sbi->s_alloc_mutex); - lvid = (struct logicalVolIntegrityDesc *)bh->b_data; - lvidiu = udf_sb_lvidiu(sbi); lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); @@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) if (sbi->s_lvid_bh) { struct logicalVolIntegrityDescImpUse *lvidiu = - udf_sb_lvidiu(sbi); - uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); - uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); - /* uint16_t maxUDFWriteRev = - le16_to_cpu(lvidiu->maxUDFWriteRev); */ + udf_sb_lvidiu(sb); + uint16_t minUDFReadRev; + uint16_t minUDFWriteRev; + if (!lvidiu) { + ret = -EINVAL; + goto error_out; + } + minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); + minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); if (minUDFReadRev > UDF_MAX_READ_VERSION) { udf_err(sb, "minUDFReadRev=%x (max is %x)\n", - le16_to_cpu(lvidiu->minUDFReadRev), + minUDFReadRev, UDF_MAX_READ_VERSION); ret = -EINVAL; goto error_out; @@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf) struct logicalVolIntegrityDescImpUse *lvidiu; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); - if 
(sbi->s_lvid_bh != NULL) - lvidiu = udf_sb_lvidiu(sbi); - else - lvidiu = NULL; - + lvidiu = udf_sb_lvidiu(sb); buf->f_type = UDF_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index ed401e94aa8c..1f32c7bd9f57 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h @@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb) return sb->s_fs_info; } -struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi); +struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb); int udf_compute_nr_groups(struct super_block *sb, u32 partition); diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 88c5ea75ebf6..f1d85cfc0a54 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -628,6 +628,7 @@ xfs_buf_item_unlock( else if (aborted) { ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp)); if (lip->li_flags & XFS_LI_IN_AIL) { + spin_lock(&lip->li_ailp->xa_lock); xfs_trans_ail_delete(lip->li_ailp, lip, SHUTDOWN_LOG_IO_ERROR); } diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 069537c845e5..20bf8e8002d6 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall( /* start with smaller blk num */ forward = nodehdr.forw < nodehdr.back; for (i = 0; i < 2; forward = !forward, i++) { + struct xfs_da3_icnode_hdr thdr; if (forward) blkno = nodehdr.forw; else @@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall( return(error); node = bp->b_addr; - xfs_da3_node_hdr_from_disk(&nodehdr, node); + xfs_da3_node_hdr_from_disk(&thdr, node); xfs_trans_brelse(state->args->trans, bp); - if (count - nodehdr.count >= 0) + if (count - thdr.count >= 0) break; /* fits with at least 25% to spare */ } if (i >= 2) { diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index 1edb5cc3e5f4..18272c766a50 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h @@ -515,7 +515,7 @@ typedef struct xfs_swapext /* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ #define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) #define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64) -#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_eofblocks) +#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks) /* * ioctl commands that replace IRIX syssgi()'s diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 193206ba4358..474807a401c8 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -119,11 +119,6 @@ xfs_inode_free( ip->i_itemp = NULL; } - /* asserts to verify all state is correct here */ - ASSERT(atomic_read(&ip->i_pincount) == 0); - ASSERT(!spin_is_locked(&ip->i_flags_lock)); - ASSERT(!xfs_isiflocked(ip)); - /* * Because we use RCU freeing we need to ensure the inode always * appears to be reclaimed with an invalid inode number when in the @@ -135,6 +130,10 @@ xfs_inode_free( ip->i_ino = 0; spin_unlock(&ip->i_flags_lock); + /* asserts to verify all state is correct here */ + ASSERT(atomic_read(&ip->i_pincount) == 0); + ASSERT(!xfs_isiflocked(ip)); + call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index dabda9521b4b..cc179878fe41 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1970,6 +1970,13 @@ xlog_recover_do_inode_buffer( * magic number. If we don't recognise the magic number in the buffer, then * return a LSN of -1 so that the caller knows it was an unrecognised block and * so can recover the buffer. 
+ * + * Note: we cannot rely solely on magic number matches to determine that the + * buffer has a valid LSN - we also need to verify that it belongs to this + * filesystem, so we need to extract the object's LSN and compare it to that + * which we read from the superblock. If the UUIDs don't match, then we've got a + * stale metadata block from an old filesystem instance that we need to recover + * over the top of. */ static xfs_lsn_t xlog_recover_get_buf_lsn( @@ -1980,6 +1987,8 @@ xlog_recover_get_buf_lsn( __uint16_t magic16; __uint16_t magicda; void *blk = bp->b_addr; + uuid_t *uuid; + xfs_lsn_t lsn = -1; /* v4 filesystems always recover immediately */ if (!xfs_sb_version_hascrc(&mp->m_sb)) @@ -1992,43 +2001,79 @@ xlog_recover_get_buf_lsn( case XFS_ABTB_MAGIC: case XFS_ABTC_MAGIC: case XFS_IBT_CRC_MAGIC: - case XFS_IBT_MAGIC: - return be64_to_cpu( - ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn); + case XFS_IBT_MAGIC: { + struct xfs_btree_block *btb = blk; + + lsn = be64_to_cpu(btb->bb_u.s.bb_lsn); + uuid = &btb->bb_u.s.bb_uuid; + break; + } case XFS_BMAP_CRC_MAGIC: - case XFS_BMAP_MAGIC: - return be64_to_cpu( - ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn); + case XFS_BMAP_MAGIC: { + struct xfs_btree_block *btb = blk; + + lsn = be64_to_cpu(btb->bb_u.l.bb_lsn); + uuid = &btb->bb_u.l.bb_uuid; + break; + } case XFS_AGF_MAGIC: - return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); + lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); + uuid = &((struct xfs_agf *)blk)->agf_uuid; + break; case XFS_AGFL_MAGIC: - return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); + lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); + uuid = &((struct xfs_agfl *)blk)->agfl_uuid; + break; case XFS_AGI_MAGIC: - return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); + lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); + uuid = &((struct xfs_agi *)blk)->agi_uuid; + break; case XFS_SYMLINK_MAGIC: - return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); + lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); + uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid; + break; case XFS_DIR3_BLOCK_MAGIC: case XFS_DIR3_DATA_MAGIC: case XFS_DIR3_FREE_MAGIC: - return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); + lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); + uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; + break; case XFS_ATTR3_RMT_MAGIC: - return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); + lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); + uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; + break; case XFS_SB_MAGIC: - return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); + lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); + uuid = &((struct xfs_dsb *)blk)->sb_uuid; + break; default: break; } + if (lsn != (xfs_lsn_t)-1) { + if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) + goto recover_immediately; + return lsn; + } + magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); switch (magicda) { case XFS_DIR3_LEAF1_MAGIC: case XFS_DIR3_LEAFN_MAGIC: case XFS_DA3_NODE_MAGIC: - return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); + lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); + uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; + break; default: break; } + if (lsn != (xfs_lsn_t)-1) { + if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) + goto recover_immediately; + return lsn; + } + /* * We do individual object checks on dquot and inode buffers as they * have their own individual LSN records. 
Also, we could have a stale diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 653073de09e3..ed419c62dde1 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti); union map_info *dm_get_mapinfo(struct bio *bio); union map_info *dm_get_rq_mapinfo(struct request *rq); +struct queue_limits *dm_get_queue_limits(struct mapped_device *md); + /* * Geometry functions. */ int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); - /*----------------------------------------------------------------- * Functions for manipulating device-mapper tables. *---------------------------------------------------------------*/ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index c68ecfe29441..15da677478dd 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -30,10 +30,13 @@ /* * Framework version for util services. */ +#define UTIL_FW_MINOR 0 + +#define UTIL_WS2K8_FW_MAJOR 1 +#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR) #define UTIL_FW_MAJOR 3 -#define UTIL_FW_MINOR 0 -#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR) +#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR) /* diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 60e95872da29..ecc82b37c4cc 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie { unsigned int generation; }; -enum mem_cgroup_filter_t { - VISIT, /* visit current node */ - SKIP, /* skip the current node and continue traversal */ - SKIP_TREE, /* skip the whole subtree and continue traversal */ -}; - -/* - * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to - * iterate through the hierarchy tree. Each tree element is checked by the - * predicate before it is returned by the iterator. If a filter returns - * SKIP or SKIP_TREE then the iterator code continues traversal (with the - * next node down the hierarchy or the next node that doesn't belong under the - * memcg's subtree). 
- */ -typedef enum mem_cgroup_filter_t -(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root); - #ifdef CONFIG_MEMCG /* * All "charge" functions with gfp_mask should use GFP_KERNEL or @@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage, extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, struct page *oldpage, struct page *newpage, bool migration_ok); -struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, - struct mem_cgroup *prev, - struct mem_cgroup_reclaim_cookie *reclaim, - mem_cgroup_iter_filter cond); - -static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, - struct mem_cgroup *prev, - struct mem_cgroup_reclaim_cookie *reclaim) -{ - return mem_cgroup_iter_cond(root, prev, reclaim, NULL); -} - +struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, + struct mem_cgroup *, + struct mem_cgroup_reclaim_cookie *); void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); /* @@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, mem_cgroup_update_page_stat(page, idx, -1); } -enum mem_cgroup_filter_t -mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, - struct mem_cgroup *root); +unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, + gfp_t gfp_mask, + unsigned long *total_scanned); void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, @@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg, struct page *oldpage, struct page *newpage, bool migration_ok) { } -static inline struct mem_cgroup * -mem_cgroup_iter_cond(struct mem_cgroup *root, - struct mem_cgroup *prev, - struct mem_cgroup_reclaim_cookie *reclaim, - mem_cgroup_iter_filter cond) -{ - /* first call must return non-NULL, second return NULL */ - return (struct mem_cgroup *)(unsigned long)!prev; -} static inline struct mem_cgroup * mem_cgroup_iter(struct mem_cgroup *root, @@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, } static inline -enum mem_cgroup_filter_t -mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, - struct mem_cgroup *root) +unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, + gfp_t gfp_mask, + unsigned long *total_scanned) { - return VISIT; + return 0; } static inline void mem_cgroup_split_huge_fixup(struct page *head) diff --git a/include/linux/mutex.h b/include/linux/mutex.h index ccd4260834c5..bab49da8a0f0 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -15,8 +15,8 @@ #include <linux/spinlock_types.h> #include <linux/linkage.h> #include <linux/lockdep.h> - #include <linux/atomic.h> +#include <asm/processor.h> /* * Simple, straightforward mutexes with strict semantics: @@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); -#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX -#define arch_mutex_cpu_relax() cpu_relax() +#ifndef arch_mutex_cpu_relax +# define arch_mutex_cpu_relax() cpu_relax() #endif #endif diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 535cecf1e02f..fcd63baee5f2 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -1,8 +1,6 @@ #ifndef __OF_IRQ_H #define __OF_IRQ_H -#if defined(CONFIG_OF) -struct of_irq; #include <linux/types.h> #include <linux/errno.h> #include <linux/irq.h> @@ -10,14 +8,6 @@ struct of_irq; #include 
<linux/ioport.h> #include <linux/of.h> -/* - * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC - * implements it differently. However, the prototype is the same for all, - * so declare it here regardless of the CONFIG_OF_IRQ setting. - */ -extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); - -#if defined(CONFIG_OF_IRQ) /** * of_irq - container for device_node/irq_specifier pair for an irq controller * @controller: pointer to interrupt controller device tree node @@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index, extern int of_irq_count(struct device_node *dev); extern int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int nr_irqs); -extern struct device_node *of_irq_find_parent(struct device_node *child); extern void of_irq_init(const struct of_device_id *matches); -#endif /* CONFIG_OF_IRQ */ +#if defined(CONFIG_OF) +/* + * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC + * implements it differently. However, the prototype is the same for all, + * so declare it here regardless of the CONFIG_OF_IRQ setting. + */ +extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); +extern struct device_node *of_irq_find_parent(struct device_node *child); #else /* !CONFIG_OF */ static inline unsigned int irq_of_parse_and_map(struct device_node *dev, diff --git a/include/linux/smp.h b/include/linux/smp.h index cfb7ca094b38..731f5237d5f4 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, static inline void kick_all_cpus_sync(void) { } +static inline void __smp_call_function_single(int cpuid, + struct call_single_data *data, int wait) +{ + on_each_cpu(data->func, data->info, wait); +} + #endif /* !SMP */ /* diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index fa8b3adf9ffb..46d41e8b0dcc 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -1007,4 +1007,6 @@ struct drm_radeon_info { #define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3 #define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2 +#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5 + #endif diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 40a1fb807396..009a655a5d35 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -380,10 +380,13 @@ struct perf_event_mmap_page { union { __u64 capabilities; struct { - __u64 cap_usr_time : 1, - cap_usr_rdpmc : 1, - cap_usr_time_zero : 1, - cap_____res : 61; + __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */ + cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */ + + cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */ + cap_user_time : 1, /* The time_* fields are used */ + cap_user_time_zero : 1, /* The time_zero field is used */ + cap_____res : 59; }; }; @@ -442,12 +445,13 @@ struct perf_event_mmap_page { * ((rem * time_mult) >> time_shift); */ __u64 time_zero; + __u32 size; /* Header size up to __reserved[] fields. */ /* * Hole for extension of the self monitor capabilities */ - __u64 __reserved[119]; /* align to 1k */ + __u8 __reserved[118*8+4]; /* align to 1k. */ /* * Control data for the mmap() data buffer. 
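The reworked capability block above is what lets userspace tell the fixed ABI from the older, ambiguous one: bit 0 is parked as cap_bit0 (always 0), and bit 1, cap_bit0_is_deprecated, reads as 1 on patched kernels. A minimal detection sketch, assuming only the field names defined in this hunk (the helper itself is illustrative, and a real reader would add the usual memory barriers):

	static int rdpmc_is_usable(const struct perf_event_mmap_page *pc)
	{
		/* Fixed layout: bit 1 set, the cap_user_* bits are distinct. */
		if (pc->cap_bit0_is_deprecated)
			return pc->cap_user_rdpmc;
		/*
		 * Legacy layout: bit 0 meant different things on different
		 * kernel versions, so no capability can be inferred from it.
		 */
		return 0;
	}

The new size field plays a similar forward-compatibility role: it tells readers how much of the self-monitoring header the running kernel actually fills in.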
@@ -528,6 +532,7 @@ enum perf_event_type { * u64 len; * u64 pgoff; * char filename[]; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP = 1, diff --git a/ipc/msg.c b/ipc/msg.c index b0d541d42677..9e4310c546ae 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) ipc_rmid(&msg_ids(ns), &s->q_perm); } +static void msg_rcu_free(struct rcu_head *head) +{ + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + struct msg_queue *msq = ipc_rcu_to_struct(p); + + security_msg_queue_free(msq); + ipc_rcu_free(head); +} + /** * newque - Create a new msg queue * @ns: namespace @@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { - ipc_rcu_putref(msq); + ipc_rcu_putref(msq, ipc_rcu_free); return retval; } /* ipc_addid() locks msq upon success. */ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); if (id < 0) { - security_msg_queue_free(msq); - ipc_rcu_putref(msq); + ipc_rcu_putref(msq, msg_rcu_free); return id; } @@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) free_msg(msg); } atomic_sub(msq->q_cbytes, &ns->msg_bytes); - security_msg_queue_free(msq); - ipc_rcu_putref(msq); + ipc_rcu_putref(msq, msg_rcu_free); } /* @@ -717,7 +724,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, rcu_read_lock(); ipc_lock_object(&msq->q_perm); - ipc_rcu_putref(msq); + ipc_rcu_putref(msq, ipc_rcu_free); if (msq->q_perm.deleted) { err = -EIDRM; goto out_unlock0; diff --git a/ipc/sem.c b/ipc/sem.c index 69b6a21f3844..19c8b980d1fe 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -243,6 +243,15 @@ static void merge_queues(struct sem_array *sma) } } +static void sem_rcu_free(struct rcu_head *head) +{ + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + struct sem_array *sma = ipc_rcu_to_struct(p); + + security_sem_free(sma); + ipc_rcu_free(head); +} + /* * If the request contains only one semaphore operation, and there are * no complex transactions pending, lock only the semaphore involved. 
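msg_rcu_free() above and the sem_rcu_free()/shm_rcu_free() counterparts that follow all share one shape: the final ipc_rcu_putref() no longer frees the object and its security blob synchronously but queues a type-specific RCU callback, so lockless readers still under rcu_read_lock() never dereference freed security state. A condensed sketch of the pattern, with a hypothetical "foo" type standing in for msg/sem/shm:

	static void foo_rcu_free(struct rcu_head *head)
	{
		struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);

		security_foo_free(ipc_rcu_to_struct(p));	/* hypothetical LSM hook */
		ipc_rcu_free(head);	/* generic kfree()/vfree() of the ipc_rcu */
	}

	/* destruction path: drop the last reference, defer teardown to RCU */
	ipc_rcu_putref(foo, foo_rcu_free);

Paths that merely drop a temporary reference (do_msgsnd() above, the semctl paths below) pass plain ipc_rcu_free instead, keeping the security teardown on the single destruction path.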
@@ -374,12 +383,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns static inline void sem_lock_and_putref(struct sem_array *sma) { sem_lock(sma, NULL, -1); - ipc_rcu_putref(sma); -} - -static inline void sem_putref(struct sem_array *sma) -{ - ipc_rcu_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); } static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) @@ -458,14 +462,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) sma->sem_perm.security = NULL; retval = security_sem_alloc(sma); if (retval) { - ipc_rcu_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); return retval; } id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); if (id < 0) { - security_sem_free(sma); - ipc_rcu_putref(sma); + ipc_rcu_putref(sma, sem_rcu_free); return id; } ns->used_sems += nsems; @@ -1047,8 +1050,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) wake_up_sem_queue_do(&tasks); ns->used_sems -= sma->sem_nsems; - security_sem_free(sma); - ipc_rcu_putref(sma); + ipc_rcu_putref(sma, sem_rcu_free); } static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) @@ -1292,7 +1294,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, rcu_read_unlock(); sem_io = ipc_alloc(sizeof(ushort)*nsems); if(sem_io == NULL) { - sem_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); return -ENOMEM; } @@ -1328,20 +1330,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, if(nsems > SEMMSL_FAST) { sem_io = ipc_alloc(sizeof(ushort)*nsems); if(sem_io == NULL) { - sem_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); return -ENOMEM; } } if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { - sem_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); err = -EFAULT; goto out_free; } for (i = 0; i < nsems; i++) { if (sem_io[i] > SEMVMX) { - sem_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); err = -ERANGE; goto out_free; } @@ -1629,7 +1631,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) /* step 2: allocate new undo structure */ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); if (!new) { - sem_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); return ERR_PTR(-ENOMEM); } diff --git a/ipc/shm.c b/ipc/shm.c index 2821cdf93adb..d69739610fd4 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) ipc_lock_object(&ipcp->shm_perm); } +static void shm_rcu_free(struct rcu_head *head) +{ + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + struct shmid_kernel *shp = ipc_rcu_to_struct(p); + + security_shm_free(shp); + ipc_rcu_free(head); +} + static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) { ipc_rmid(&shm_ids(ns), &s->shm_perm); @@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) user_shm_unlock(file_inode(shp->shm_file)->i_size, shp->mlock_user); fput (shp->shm_file); - security_shm_free(shp); - ipc_rcu_putref(shp); + ipc_rcu_putref(shp, shm_rcu_free); } /* @@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { - ipc_rcu_putref(shp); + ipc_rcu_putref(shp, ipc_rcu_free); return error; } @@ -566,8 +574,7 @@ no_id: user_shm_unlock(size, shp->mlock_user); fput(file); no_file: - security_shm_free(shp); - ipc_rcu_putref(shp); + 
ipc_rcu_putref(shp, shm_rcu_free); return error; } diff --git a/ipc/util.c b/ipc/util.c index e829da9ed01f..fdb8ae740775 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -474,11 +474,6 @@ void ipc_free(void* ptr, int size) kfree(ptr); } -struct ipc_rcu { - struct rcu_head rcu; - atomic_t refcount; -} ____cacheline_aligned_in_smp; - /** * ipc_rcu_alloc - allocate ipc and rcu space * @size: size desired @@ -505,27 +500,24 @@ int ipc_rcu_getref(void *ptr) return atomic_inc_not_zero(&p->refcount); } -/** - * ipc_schedule_free - free ipc + rcu space - * @head: RCU callback structure for queued work - */ -static void ipc_schedule_free(struct rcu_head *head) -{ - vfree(container_of(head, struct ipc_rcu, rcu)); -} - -void ipc_rcu_putref(void *ptr) +void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)) { struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; if (!atomic_dec_and_test(&p->refcount)) return; - if (is_vmalloc_addr(ptr)) { - call_rcu(&p->rcu, ipc_schedule_free); - } else { - kfree_rcu(p, rcu); - } + call_rcu(&p->rcu, func); +} + +void ipc_rcu_free(struct rcu_head *head) +{ + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + + if (is_vmalloc_addr(p)) + vfree(p); + else + kfree(p); } /** diff --git a/ipc/util.h b/ipc/util.h index c5f3338ba1fa..f2f5036f2eed 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { } static inline void shm_exit_ns(struct ipc_namespace *ns) { } #endif +struct ipc_rcu { + struct rcu_head rcu; + atomic_t refcount; +} ____cacheline_aligned_in_smp; + +#define ipc_rcu_to_struct(p) ((void *)(p+1)) + /* * Structure that holds the parameters needed by the ipc operations * (see after) @@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size); */ void* ipc_rcu_alloc(int size); int ipc_rcu_getref(void *ptr); -void ipc_rcu_putref(void *ptr); +void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)); +void ipc_rcu_free(struct rcu_head *head); struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); diff --git a/kernel/audit.c b/kernel/audit.c index 91e53d04b6a9..7b0e23a740ce 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, sleep_time = timeout_start + audit_backlog_wait_time - jiffies; - if ((long)sleep_time > 0) + if ((long)sleep_time > 0) { wait_for_auditd(sleep_time); - continue; + continue; + } } if (audit_rate_check() && printk_ratelimit()) printk(KERN_WARNING diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c index 247091bf0587..859c8dfd78a1 100644 --- a/kernel/context_tracking.c +++ b/kernel/context_tracking.c @@ -51,6 +51,15 @@ void context_tracking_user_enter(void) unsigned long flags; /* + * Repeat the user_enter() check here because some archs may be calling + * this from asm and if no CPU needs context tracking, they shouldn't + * go further. Repeat the check here until they support the static key + * check. 
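The early return being added at this point (and mirrored in context_tracking_user_exit() just below) is the standard static-key fast path: while the key is false, the test is patched down to straight-line code, so kernels with context tracking compiled in but unused pay essentially nothing on each user/kernel transition. The generic idiom, with an illustrative key name:

	static struct static_key my_feature_enabled = STATIC_KEY_INIT_FALSE;	/* illustrative */

	void hot_entry_point(void)
	{
		if (!static_key_false(&my_feature_enabled))
			return;		/* NOP-patched branch while disabled */
		/* feature-specific slow path runs only once some CPU enables it */
	}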
+ */ + if (!static_key_false(&context_tracking_enabled)) + return; + + /* * Some contexts may involve an exception occuring in an irq, * leading to that nesting: * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() @@ -151,6 +160,9 @@ void context_tracking_user_exit(void) { unsigned long flags; + if (!static_key_false(&context_tracking_enabled)) + return; + if (in_interrupt()) return; diff --git a/kernel/events/core.c b/kernel/events/core.c index dd236b66ca3a..cb4238e85b38 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event, *running = ctx_time - event->tstamp_running; } +static void perf_event_init_userpage(struct perf_event *event) +{ + struct perf_event_mmap_page *userpg; + struct ring_buffer *rb; + + rcu_read_lock(); + rb = rcu_dereference(event->rb); + if (!rb) + goto unlock; + + userpg = rb->user_page; + + /* Allow new userspace to detect that bit 0 is deprecated */ + userpg->cap_bit0_is_deprecated = 1; + userpg->size = offsetof(struct perf_event_mmap_page, __reserved); + +unlock: + rcu_read_unlock(); +} + void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) { } @@ -4044,6 +4064,7 @@ again: ring_buffer_attach(event, rb); rcu_assign_pointer(event->rb, rb); + perf_event_init_userpage(event); perf_event_update_userpage(event); unlock: diff --git a/kernel/params.c b/kernel/params.c index 81c4e78c8f4c..c00d5b502aa4 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -254,11 +254,11 @@ int parse_args(const char *doing, STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul); -STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul); +STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol); STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul); -STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul); +STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol); STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul); -STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul); +STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol); STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul); int param_set_charp(const char *val, const struct kernel_param *kp) diff --git a/kernel/reboot.c b/kernel/reboot.c index 269ed9384cc4..f813b3474646 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid); #endif enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; -int reboot_default; +/* + * This variable is used privately to keep track of whether or not + * reboot_type is still set to its default value (i.e., reboot= hasn't + * been set on the command line). This is needed so that we can + * suppress DMI scanning for reboot quirks. Without it, it's + * impossible to override a faulty reboot quirk without recompiling. 
+ */ +int reboot_default = 1; int reboot_cpu; enum reboot_type reboot_type = BOOT_ACPI; int reboot_force; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 11cd13667359..7c70201fbc61 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) } if (!se) { - cfs_rq->h_load = rq->avg.load_avg_contrib; + cfs_rq->h_load = cfs_rq->runnable_load_avg; cfs_rq->last_h_load_update = now; } @@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) (busiest->load_per_task * SCHED_POWER_SCALE) / busiest->group_power; - if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >= - (scaled_busy_load_per_task * imbn)) { + if (busiest->avg_load + scaled_busy_load_per_task >= + local->avg_load + (scaled_busy_load_per_task * imbn)) { env->imbalance = busiest->load_per_task; return; } @@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * max load less than avg load(as we skip the groups at or below * its cpu_power, while calculating max_load..) */ - if (busiest->avg_load < sds->avg_load) { + if (busiest->avg_load <= sds->avg_load || + local->avg_load >= sds->avg_load) { env->imbalance = 0; return fix_small_imbalance(env, sds); } diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 51c4f34d258e..4431610f049a 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = { .unpark = watchdog_enable, }; -static int watchdog_enable_all_cpus(void) +static void restart_watchdog_hrtimer(void *info) +{ + struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); + int ret; + + /* + * No need to cancel and restart hrtimer if it is currently executing + * because it will reprogram itself with the new period now. + * We should never see it unqueued here because we are running per-cpu + * with interrupts disabled. + */ + ret = hrtimer_try_to_cancel(hrtimer); + if (ret == 1) + hrtimer_start(hrtimer, ns_to_ktime(sample_period), + HRTIMER_MODE_REL_PINNED); +} + +static void update_timers(int cpu) +{ + struct call_single_data data = {.func = restart_watchdog_hrtimer}; + /* + * Make sure that perf event counter will adopt to a new + * sampling period. Updating the sampling period directly would + * be much nicer but we do not have an API for that now so + * let's use a big hammer. + * Hrtimer will adopt the new period on the next tick but this + * might be late already so we have to restart the timer as well. 
+ */ + watchdog_nmi_disable(cpu); + __smp_call_function_single(cpu, &data, 1); + watchdog_nmi_enable(cpu); +} + +static void update_timers_all_cpus(void) +{ + int cpu; + + get_online_cpus(); + preempt_disable(); + for_each_online_cpu(cpu) + update_timers(cpu); + preempt_enable(); + put_online_cpus(); +} + +static int watchdog_enable_all_cpus(bool sample_period_changed) { int err = 0; @@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void) pr_err("Failed to create watchdog threads, disabled\n"); else watchdog_running = 1; + } else if (sample_period_changed) { + update_timers_all_cpus(); } return err; @@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int err, old_thresh, old_enabled; + static DEFINE_MUTEX(watchdog_proc_mutex); + mutex_lock(&watchdog_proc_mutex); old_thresh = ACCESS_ONCE(watchdog_thresh); old_enabled = ACCESS_ONCE(watchdog_user_enabled); err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (err || !write) - return err; + goto out; set_sample_period(); /* @@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write, * watchdog_*_all_cpus() function takes care of this. */ if (watchdog_user_enabled && watchdog_thresh) - err = watchdog_enable_all_cpus(); + err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh); else watchdog_disable_all_cpus(); @@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write, watchdog_thresh = old_thresh; watchdog_user_enabled = old_enabled; } - +out: + mutex_unlock(&watchdog_proc_mutex); return err; } #endif /* CONFIG_SYSCTL */ @@ -554,5 +604,5 @@ void __init lockup_detector_init(void) set_sample_period(); if (watchdog_user_enabled) - watchdog_enable_all_cpus(); + watchdog_enable_all_cpus(false); } diff --git a/lib/kobject.c b/lib/kobject.c index 962175134702..669bf190d4fb 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj) bool kobj_ns_current_may_mount(enum kobj_ns_type type) { - bool may_mount = false; - - if (type == KOBJ_NS_TYPE_NONE) - return true; + bool may_mount = true; spin_lock(&kobj_ns_type_lock); if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && diff --git a/lib/lockref.c b/lib/lockref.c index 677d036cf3c7..6f9d434c1521 100644 --- a/lib/lockref.c +++ b/lib/lockref.c @@ -4,6 +4,22 @@ #ifdef CONFIG_CMPXCHG_LOCKREF /* + * Allow weakly-ordered memory architectures to provide barrier-less + * cmpxchg semantics for lockref updates. + */ +#ifndef cmpxchg64_relaxed +# define cmpxchg64_relaxed cmpxchg64 +#endif + +/* + * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP. + * This is useful for architectures with an expensive cpu_relax(). + */ +#ifndef arch_mutex_cpu_relax +# define arch_mutex_cpu_relax() cpu_relax() +#endif + +/* * Note that the "cmpxchg()" reloads the "old" value for the * failure case. 
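Both fallbacks defined above are deliberately overridable: an architecture only supplies its own version when it can beat the defaults. A sketch of what the overrides might look like on a weakly-ordered machine; the macro bodies are illustrative, and the real definitions belong in the arch headers:

	/* asm/cmpxchg.h: 64-bit cmpxchg without the implied full barrier */
	#define cmpxchg64_relaxed(ptr, old, new) \
		__cmpxchg64_no_barrier((ptr), (old), (new))	/* hypothetical helper */

	/* asm/processor.h: a cheaper spin-wait hint than the default */
	#define arch_mutex_cpu_relax()	barrier()

On strongly-ordered architectures the defaults simply collapse back to cmpxchg64() and cpu_relax(), so generic code such as the CMPXCHG_LOOP below can use the relaxed names unconditionally.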
*/ @@ -14,12 +30,13 @@ while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ struct lockref new = old, prev = old; \ CODE \ - old.lock_count = cmpxchg64(&lockref->lock_count, \ - old.lock_count, new.lock_count); \ + old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \ + old.lock_count, \ + new.lock_count); \ if (likely(old.lock_count == prev.lock_count)) { \ SUCCESS; \ } \ - cpu_relax(); \ + arch_mutex_cpu_relax(); \ } \ } while (0) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d5ff3ce13029..1c52ddbc839b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -39,6 +39,7 @@ #include <linux/limits.h> #include <linux/export.h> #include <linux/mutex.h> +#include <linux/rbtree.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/swapops.h> @@ -160,6 +161,10 @@ struct mem_cgroup_per_zone { struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; + struct rb_node tree_node; /* RB tree node */ + unsigned long long usage_in_excess;/* Set to the value by which */ + /* the soft limit is exceeded*/ + bool on_tree; struct mem_cgroup *memcg; /* Back pointer, we cannot */ /* use container_of */ }; @@ -168,6 +173,26 @@ struct mem_cgroup_per_node { struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; }; +/* + * Cgroups above their limits are maintained in a RB-Tree, independent of + * their hierarchy representation + */ + +struct mem_cgroup_tree_per_zone { + struct rb_root rb_root; + spinlock_t lock; +}; + +struct mem_cgroup_tree_per_node { + struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; +}; + +struct mem_cgroup_tree { + struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; +}; + +static struct mem_cgroup_tree soft_limit_tree __read_mostly; + struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; u64 threshold; @@ -303,22 +328,6 @@ struct mem_cgroup { atomic_t numainfo_events; atomic_t numainfo_updating; #endif - /* - * Protects soft_contributed transitions. - * See mem_cgroup_update_soft_limit - */ - spinlock_t soft_lock; - - /* - * If true then this group has increased parents' children_in_excess - * when it got over the soft limit. - * When a group falls bellow the soft limit, parents' children_in_excess - * is decreased and soft_contributed changed to false. - */ - bool soft_contributed; - - /* Number of children that are in soft limit excess */ - atomic_t children_in_excess; struct mem_cgroup_per_node *nodeinfo[0]; /* WARNING: nodeinfo must be the last member here */ @@ -422,6 +431,7 @@ static bool move_file(void) * limit reclaim to prevent infinite loops, if they ever occur. 
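Each tree is keyed on usage_in_excess, so the rightmost node of a zone's tree is always the memcg furthest over its soft limit. Duplicates are legal and simply fall right, which keeps insertion total even when many groups exceed their limits by the same amount. Condensed from the helpers that follow (NULL checks omitted):

	/* insert: equal keys take the right branch, so insertion never fails */
	if (mz->usage_in_excess < mz_node->usage_in_excess)
		p = &(*p)->rb_left;
	else
		p = &(*p)->rb_right;

	/* select: the worst offender is just rb_last() of the zone's tree */
	mz = rb_entry(rb_last(&mctz->rb_root),
		      struct mem_cgroup_per_zone, tree_node);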
*/ #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 +#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 enum charge_type { MEM_CGROUP_CHARGE_TYPE_CACHE = 0, @@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page) return mem_cgroup_zoneinfo(memcg, nid, zid); } +static struct mem_cgroup_tree_per_zone * +soft_limit_tree_node_zone(int nid, int zid) +{ + return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; +} + +static struct mem_cgroup_tree_per_zone * +soft_limit_tree_from_page(struct page *page) +{ + int nid = page_to_nid(page); + int zid = page_zonenum(page); + + return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; +} + +static void +__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg, + struct mem_cgroup_per_zone *mz, + struct mem_cgroup_tree_per_zone *mctz, + unsigned long long new_usage_in_excess) +{ + struct rb_node **p = &mctz->rb_root.rb_node; + struct rb_node *parent = NULL; + struct mem_cgroup_per_zone *mz_node; + + if (mz->on_tree) + return; + + mz->usage_in_excess = new_usage_in_excess; + if (!mz->usage_in_excess) + return; + while (*p) { + parent = *p; + mz_node = rb_entry(parent, struct mem_cgroup_per_zone, + tree_node); + if (mz->usage_in_excess < mz_node->usage_in_excess) + p = &(*p)->rb_left; + /* + * We can't avoid mem cgroups that are over their soft + * limit by the same amount + */ + else if (mz->usage_in_excess >= mz_node->usage_in_excess) + p = &(*p)->rb_right; + } + rb_link_node(&mz->tree_node, parent, p); + rb_insert_color(&mz->tree_node, &mctz->rb_root); + mz->on_tree = true; +} + +static void +__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, + struct mem_cgroup_per_zone *mz, + struct mem_cgroup_tree_per_zone *mctz) +{ + if (!mz->on_tree) + return; + rb_erase(&mz->tree_node, &mctz->rb_root); + mz->on_tree = false; +} + +static void +mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, + struct mem_cgroup_per_zone *mz, + struct mem_cgroup_tree_per_zone *mctz) +{ + spin_lock(&mctz->lock); + __mem_cgroup_remove_exceeded(memcg, mz, mctz); + spin_unlock(&mctz->lock); +} + + +static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) +{ + unsigned long long excess; + struct mem_cgroup_per_zone *mz; + struct mem_cgroup_tree_per_zone *mctz; + int nid = page_to_nid(page); + int zid = page_zonenum(page); + mctz = soft_limit_tree_from_page(page); + + /* + * Necessary to update all ancestors when hierarchy is used. + * because their event counter is not touched. + */ + for (; memcg; memcg = parent_mem_cgroup(memcg)) { + mz = mem_cgroup_zoneinfo(memcg, nid, zid); + excess = res_counter_soft_limit_excess(&memcg->res); + /* + * We have to update the tree if mz is on RB-tree or + * mem is over its softlimit. + */ + if (excess || mz->on_tree) { + spin_lock(&mctz->lock); + /* if on-tree, remove it */ + if (mz->on_tree) + __mem_cgroup_remove_exceeded(memcg, mz, mctz); + /* + * Insert again. mz->usage_in_excess will be updated. + * If excess is 0, no tree ops. 
+ */ + __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess); + spin_unlock(&mctz->lock); + } + } +} + +static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) +{ + int node, zone; + struct mem_cgroup_per_zone *mz; + struct mem_cgroup_tree_per_zone *mctz; + + for_each_node(node) { + for (zone = 0; zone < MAX_NR_ZONES; zone++) { + mz = mem_cgroup_zoneinfo(memcg, node, zone); + mctz = soft_limit_tree_node_zone(node, zone); + mem_cgroup_remove_exceeded(memcg, mz, mctz); + } + } +} + +static struct mem_cgroup_per_zone * +__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) +{ + struct rb_node *rightmost = NULL; + struct mem_cgroup_per_zone *mz; + +retry: + mz = NULL; + rightmost = rb_last(&mctz->rb_root); + if (!rightmost) + goto done; /* Nothing to reclaim from */ + + mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node); + /* + * Remove the node now but someone else can add it back, + * we will to add it back at the end of reclaim to its correct + * position in the tree. + */ + __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz); + if (!res_counter_soft_limit_excess(&mz->memcg->res) || + !css_tryget(&mz->memcg->css)) + goto retry; +done: + return mz; +} + +static struct mem_cgroup_per_zone * +mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) +{ + struct mem_cgroup_per_zone *mz; + + spin_lock(&mctz->lock); + mz = __mem_cgroup_largest_soft_limit_node(mctz); + spin_unlock(&mctz->lock); + return mz; +} + /* * Implementation Note: reading percpu statistics for memcg. * @@ -822,48 +990,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, } /* - * Called from rate-limited memcg_check_events when enough - * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure - * that all the parents up the hierarchy will be notified that this group - * is in excess or that it is not in excess anymore. mmecg->soft_contributed - * makes the transition a single action whenever the state flips from one to - * the other. - */ -static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg) -{ - unsigned long long excess = res_counter_soft_limit_excess(&memcg->res); - struct mem_cgroup *parent = memcg; - int delta = 0; - - spin_lock(&memcg->soft_lock); - if (excess) { - if (!memcg->soft_contributed) { - delta = 1; - memcg->soft_contributed = true; - } - } else { - if (memcg->soft_contributed) { - delta = -1; - memcg->soft_contributed = false; - } - } - - /* - * Necessary to update all ancestors when hierarchy is used - * because their event counter is not touched. - * We track children even outside the hierarchy for the root - * cgroup because tree walk starting at root should visit - * all cgroups and we want to prevent from pointless tree - * walk if no children is below the limit. - */ - while (delta && (parent = parent_mem_cgroup(parent))) - atomic_add(delta, &parent->children_in_excess); - if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy) - atomic_add(delta, &root_mem_cgroup->children_in_excess); - spin_unlock(&memcg->soft_lock); -} - -/* * Check events in order. 
* */ @@ -886,7 +1012,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) mem_cgroup_threshold(memcg); if (unlikely(do_softlimit)) - mem_cgroup_update_soft_limit(memcg); + mem_cgroup_update_tree(memcg, page); #if MAX_NUMNODES > 1 if (unlikely(do_numainfo)) atomic_inc(&memcg->numainfo_events); @@ -929,15 +1055,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) return memcg; } -static enum mem_cgroup_filter_t -mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root, - mem_cgroup_iter_filter cond) -{ - if (!cond) - return VISIT; - return cond(memcg, root); -} - /* * Returns a next (in a pre-order walk) alive memcg (with elevated css * ref. count) or NULL if the whole root's subtree has been visited. @@ -945,7 +1062,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root, * helper function to be used by mem_cgroup_iter */ static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, - struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond) + struct mem_cgroup *last_visited) { struct cgroup_subsys_state *prev_css, *next_css; @@ -963,31 +1080,11 @@ skip_node: if (next_css) { struct mem_cgroup *mem = mem_cgroup_from_css(next_css); - switch (mem_cgroup_filter(mem, root, cond)) { - case SKIP: + if (css_tryget(&mem->css)) + return mem; + else { prev_css = next_css; goto skip_node; - case SKIP_TREE: - if (mem == root) - return NULL; - /* - * css_rightmost_descendant is not an optimal way to - * skip through a subtree (especially for imbalanced - * trees leaning to right) but that's what we have right - * now. More effective solution would be traversing - * right-up for first non-NULL without calling - * css_next_descendant_pre afterwards. - */ - prev_css = css_rightmost_descendant(next_css); - goto skip_node; - case VISIT: - if (css_tryget(&mem->css)) - return mem; - else { - prev_css = next_css; - goto skip_node; - } - break; } } @@ -1051,7 +1148,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, * @root: hierarchy root * @prev: previously returned memcg, NULL on first invocation * @reclaim: cookie for shared reclaim walks, NULL for full walks - * @cond: filter for visited nodes, NULL for no filter * * Returns references to children of the hierarchy below @root, or * @root itself, or %NULL after a full round-trip. @@ -1064,18 +1160,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, * divide up the memcgs in the hierarchy among all concurrent * reclaimers operating on the same zone and priority. 
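With the filter argument gone, callers return to the canonical round-trip pattern, visible again in the mm/vmscan.c hunks at the end of this patch:

	memcg = mem_cgroup_iter(root, NULL, &reclaim);
	do {
		/* per-group work on mem_cgroup_zone_lruvec(zone, memcg) */
		memcg = mem_cgroup_iter(root, memcg, &reclaim);
	} while (memcg);

Handing the previous group back in is what keeps the css reference counts balanced; mem_cgroup_iter_break() covers early exits from the middle of a walk.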
*/ -struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, +struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, - struct mem_cgroup_reclaim_cookie *reclaim, - mem_cgroup_iter_filter cond) + struct mem_cgroup_reclaim_cookie *reclaim) { struct mem_cgroup *memcg = NULL; struct mem_cgroup *last_visited = NULL; - if (mem_cgroup_disabled()) { - /* first call must return non-NULL, second return NULL */ - return (struct mem_cgroup *)(unsigned long)!prev; - } + if (mem_cgroup_disabled()) + return NULL; if (!root) root = root_mem_cgroup; @@ -1086,9 +1179,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, if (!root->use_hierarchy && root != root_mem_cgroup) { if (prev) goto out_css_put; - if (mem_cgroup_filter(root, root, cond) == VISIT) - return root; - return NULL; + return root; } rcu_read_lock(); @@ -1111,7 +1202,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, last_visited = mem_cgroup_iter_load(iter, root, &seq); } - memcg = __mem_cgroup_iter_next(root, last_visited, cond); + memcg = __mem_cgroup_iter_next(root, last_visited); if (reclaim) { mem_cgroup_iter_update(iter, last_visited, memcg, seq); @@ -1122,11 +1213,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, reclaim->generation = iter->generation; } - /* - * We have finished the whole tree walk or no group has been - * visited because filter told us to skip the root node. - */ - if (!memcg && (prev || (cond && !last_visited))) + if (prev && !memcg) goto out_unlock; } out_unlock: @@ -1767,7 +1854,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, return total; } -#if MAX_NUMNODES > 1 /** * test_mem_cgroup_node_reclaimable * @memcg: the target memcg @@ -1790,6 +1876,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, return false; } +#if MAX_NUMNODES > 1 /* * Always updating the nodemask is not very good - even if we have an empty @@ -1857,50 +1944,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) return node; } +/* + * Check all nodes whether it contains reclaimable pages or not. + * For quick scan, we make use of scan_nodes. This will allow us to skip + * unused nodes. But scan_nodes is lazily updated and may not contain + * enough new information. We need to do double check. + */ +static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) +{ + int nid; + + /* + * quick check...making use of scan_node. + * We can skip unused nodes. + */ + if (!nodes_empty(memcg->scan_nodes)) { + for (nid = first_node(memcg->scan_nodes); + nid < MAX_NUMNODES; + nid = next_node(nid, memcg->scan_nodes)) { + + if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) + return true; + } + } + /* + * Check rest of nodes. + */ + for_each_node_state(nid, N_MEMORY) { + if (node_isset(nid, memcg->scan_nodes)) + continue; + if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) + return true; + } + return false; +} + #else int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) { return 0; } -#endif - -/* - * A group is eligible for the soft limit reclaim under the given root - * hierarchy if - * a) it is over its soft limit - * b) any parent up the hierarchy is over its soft limit - * - * If the given group doesn't have any children over the limit then it - * doesn't make any sense to iterate its subtree.
- */ -enum mem_cgroup_filter_t -mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, - struct mem_cgroup *root) +static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) { - struct mem_cgroup *parent; - - if (!memcg) - memcg = root_mem_cgroup; - parent = memcg; - - if (res_counter_soft_limit_excess(&memcg->res)) - return VISIT; + return test_mem_cgroup_node_reclaimable(memcg, 0, noswap); +} +#endif - /* - * If any parent up to the root in the hierarchy is over its soft limit - * then we have to obey and reclaim from this group as well. - */ - while ((parent = parent_mem_cgroup(parent))) { - if (res_counter_soft_limit_excess(&parent->res)) - return VISIT; - if (parent == root) +static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, + struct zone *zone, + gfp_t gfp_mask, + unsigned long *total_scanned) +{ + struct mem_cgroup *victim = NULL; + int total = 0; + int loop = 0; + unsigned long excess; + unsigned long nr_scanned; + struct mem_cgroup_reclaim_cookie reclaim = { + .zone = zone, + .priority = 0, + }; + + excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT; + + while (1) { + victim = mem_cgroup_iter(root_memcg, victim, &reclaim); + if (!victim) { + loop++; + if (loop >= 2) { + /* + * If we have not been able to reclaim + * anything, it might because there are + * no reclaimable pages under this hierarchy + */ + if (!total) + break; + /* + * We want to do more targeted reclaim. + * excess >> 2 is not to excessive so as to + * reclaim too much, nor too less that we keep + * coming back to reclaim from this cgroup + */ + if (total >= (excess >> 2) || + (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) + break; + } + continue; + } + if (!mem_cgroup_reclaimable(victim, false)) + continue; + total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, + zone, &nr_scanned); + *total_scanned += nr_scanned; + if (!res_counter_soft_limit_excess(&root_memcg->res)) break; } - - if (!atomic_read(&memcg->children_in_excess)) - return SKIP_TREE; - return SKIP; + mem_cgroup_iter_break(root_memcg, victim); + return total; } static DEFINE_SPINLOCK(memcg_oom_lock); @@ -2812,7 +2953,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, unlock_page_cgroup(pc); /* - * "charge_statistics" updated event counter. + * "charge_statistics" updated event counter. Then, check it. + * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. + * if they exceeds softlimit. 
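The reclaim target in mem_cgroup_soft_reclaim() above is intentionally partial: excess is converted from bytes to pages (the >> PAGE_SHIFT), and once at least two full hierarchy walks have completed the loop stops as soon as total >= excess >> 2, that is, after clawing back a quarter of the excess. As a worked example, a hierarchy 64 MiB over its soft limit with 4 KiB pages has excess = 16384 pages, so one soft-limit pass aims for roughly 4096 reclaimed pages instead of trying to erase the whole excess, which bounds the latency any single reclaimer pays.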
*/ memcg_check_events(memcg, page); } @@ -4647,6 +4790,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, return ret; } +unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, + gfp_t gfp_mask, + unsigned long *total_scanned) +{ + unsigned long nr_reclaimed = 0; + struct mem_cgroup_per_zone *mz, *next_mz = NULL; + unsigned long reclaimed; + int loop = 0; + struct mem_cgroup_tree_per_zone *mctz; + unsigned long long excess; + unsigned long nr_scanned; + + if (order > 0) + return 0; + + mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); + /* + * This loop can run a while, specially if mem_cgroup's continuously + * keep exceeding their soft limit and putting the system under + * pressure + */ + do { + if (next_mz) + mz = next_mz; + else + mz = mem_cgroup_largest_soft_limit_node(mctz); + if (!mz) + break; + + nr_scanned = 0; + reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone, + gfp_mask, &nr_scanned); + nr_reclaimed += reclaimed; + *total_scanned += nr_scanned; + spin_lock(&mctz->lock); + + /* + * If we failed to reclaim anything from this memory cgroup + * it is time to move on to the next cgroup + */ + next_mz = NULL; + if (!reclaimed) { + do { + /* + * Loop until we find yet another one. + * + * By the time we get the soft_limit lock + * again, someone might have aded the + * group back on the RB tree. Iterate to + * make sure we get a different mem. + * mem_cgroup_largest_soft_limit_node returns + * NULL if no other cgroup is present on + * the tree + */ + next_mz = + __mem_cgroup_largest_soft_limit_node(mctz); + if (next_mz == mz) + css_put(&next_mz->memcg->css); + else /* next_mz == NULL or other memcg */ + break; + } while (1); + } + __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz); + excess = res_counter_soft_limit_excess(&mz->memcg->res); + /* + * One school of thought says that we should not add + * back the node to the tree if reclaim returns 0. + * But our reclaim could return 0, simply because due + * to priority we are exposing a smaller subset of + * memory to reclaim from. Consider this as a longer + * term TODO. + */ + /* If excess == 0, no tree ops */ + __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess); + spin_unlock(&mctz->lock); + css_put(&mz->memcg->css); + loop++; + /* + * Could not reclaim anything and there are no more + * mem cgroups to try or we seem to be looping without + * reclaiming anything. 
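Callers treat this as a best-effort pre-pass in front of regular zone reclaim, as the mm/vmscan.c hunks further down show:

	nr_soft_scanned = 0;
	nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, sc->order,
							  sc->gfp_mask,
							  &nr_soft_scanned);
	sc->nr_reclaimed += nr_soft_reclaimed;
	sc->nr_scanned += nr_soft_scanned;

Both counters feed the normal reclaim bookkeeping, so any soft-limit progress directly reduces how much the following shrink_zone() pass still has to find.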
+ */ + if (!nr_reclaimed && + (next_mz == NULL || + loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) + break; + } while (!nr_reclaimed); + if (next_mz) + css_put(&next_mz->memcg->css); + return nr_reclaimed; +} + /** * mem_cgroup_force_empty_list - clears LRU of a group * @memcg: group to clear @@ -5911,6 +6146,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) for (zone = 0; zone < MAX_NR_ZONES; zone++) { mz = &pn->zoneinfo[zone]; lruvec_init(&mz->lruvec); + mz->usage_in_excess = 0; + mz->on_tree = false; mz->memcg = memcg; } memcg->nodeinfo[node] = pn; @@ -5966,6 +6203,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) int node; size_t size = memcg_size(); + mem_cgroup_remove_from_trees(memcg); free_css_id(&mem_cgroup_subsys, &memcg->css); for_each_node(node) @@ -6002,6 +6240,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) } EXPORT_SYMBOL(parent_mem_cgroup); +static void __init mem_cgroup_soft_limit_tree_init(void) +{ + struct mem_cgroup_tree_per_node *rtpn; + struct mem_cgroup_tree_per_zone *rtpz; + int tmp, node, zone; + + for_each_node(node) { + tmp = node; + if (!node_state(node, N_NORMAL_MEMORY)) + tmp = -1; + rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); + BUG_ON(!rtpn); + + soft_limit_tree.rb_tree_per_node[node] = rtpn; + + for (zone = 0; zone < MAX_NR_ZONES; zone++) { + rtpz = &rtpn->rb_tree_per_zone[zone]; + rtpz->rb_root = RB_ROOT; + spin_lock_init(&rtpz->lock); + } + } +} + static struct cgroup_subsys_state * __ref mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { @@ -6031,7 +6292,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) mutex_init(&memcg->thresholds_lock); spin_lock_init(&memcg->move_lock); vmpressure_init(&memcg->vmpressure); - spin_lock_init(&memcg->soft_lock); return &memcg->css; @@ -6109,13 +6369,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) mem_cgroup_invalidate_reclaim_iterators(memcg); mem_cgroup_reparent_charges(memcg); - if (memcg->soft_contributed) { - while ((memcg = parent_mem_cgroup(memcg))) - atomic_dec(&memcg->children_in_excess); - - if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy) - atomic_dec(&root_mem_cgroup->children_in_excess); - } mem_cgroup_destroy_all_caches(memcg); vmpressure_cleanup(&memcg->vmpressure); } @@ -6790,6 +7043,7 @@ static int __init mem_cgroup_init(void) { hotcpu_notifier(memcg_cpu_hotplug_callback, 0); enable_swap_cgroup(); + mem_cgroup_soft_limit_tree_init(); memcg_stock_init(); return 0; } diff --git a/mm/mlock.c b/mm/mlock.c index d63802663242..67ba6da7d0e3 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -736,6 +736,7 @@ static int do_mlockall(int flags) /* Ignore errors */ mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); + cond_resched(); } out: return 0; diff --git a/mm/vmscan.c b/mm/vmscan.c index 8ed1b775bdc9..beb35778c69f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -139,23 +139,11 @@ static bool global_reclaim(struct scan_control *sc) { return !sc->target_mem_cgroup; } - -static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc) -{ - struct mem_cgroup *root = sc->target_mem_cgroup; - return !mem_cgroup_disabled() && - mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE; -} #else static bool global_reclaim(struct scan_control *sc) { return true; } - -static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc) -{ - return false; -} #endif unsigned long zone_reclaimable_pages(struct zone *zone) @@ -2176,11 +2164,9 @@ static inline bool 
should_continue_reclaim(struct zone *zone, } } -static int -__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim) +static void shrink_zone(struct zone *zone, struct scan_control *sc) { unsigned long nr_reclaimed, nr_scanned; - int groups_scanned = 0; do { struct mem_cgroup *root = sc->target_mem_cgroup; @@ -2188,17 +2174,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim) .zone = zone, .priority = sc->priority, }; - struct mem_cgroup *memcg = NULL; - mem_cgroup_iter_filter filter = (soft_reclaim) ? - mem_cgroup_soft_reclaim_eligible : NULL; + struct mem_cgroup *memcg; nr_reclaimed = sc->nr_reclaimed; nr_scanned = sc->nr_scanned; - while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) { + memcg = mem_cgroup_iter(root, NULL, &reclaim); + do { struct lruvec *lruvec; - groups_scanned++; lruvec = mem_cgroup_zone_lruvec(zone, memcg); shrink_lruvec(lruvec, sc); @@ -2218,7 +2202,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim) mem_cgroup_iter_break(root, memcg); break; } - } + memcg = mem_cgroup_iter(root, memcg, &reclaim); + } while (memcg); vmpressure(sc->gfp_mask, sc->target_mem_cgroup, sc->nr_scanned - nr_scanned, @@ -2226,37 +2211,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim) } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, sc->nr_scanned - nr_scanned, sc)); - - return groups_scanned; -} - - -static void shrink_zone(struct zone *zone, struct scan_control *sc) -{ - bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc); - unsigned long nr_scanned = sc->nr_scanned; - int scanned_groups; - - scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim); - /* - * memcg iterator might race with other reclaimer or start from - * a incomplete tree walk so the tree walk in __shrink_zone - * might have missed groups that are above the soft limit. Try - * another loop to catch up with others. Do it just once to - * prevent from reclaim latencies when other reclaimers always - * preempt this one. - */ - if (do_soft_reclaim && !scanned_groups) - __shrink_zone(zone, sc, do_soft_reclaim); - - /* - * No group is over the soft limit or those that are do not have - * pages in the zone we are reclaiming so we have to reclaim everybody - */ - if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) { - __shrink_zone(zone, sc, false); - return; - } } /* Returns true if compaction should go ahead for a high-order request */ @@ -2320,6 +2274,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) { struct zoneref *z; struct zone *zone; + unsigned long nr_soft_reclaimed; + unsigned long nr_soft_scanned; bool aborted_reclaim = false; /* @@ -2359,6 +2315,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) continue; } } + /* + * This steals pages from memory cgroups over softlimit + * and returns the number of reclaimed pages and + * scanned pages. This works for global memory pressure + * and balancing, not for a memcg's limit. + */ + nr_soft_scanned = 0; + nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, + sc->order, sc->gfp_mask, + &nr_soft_scanned); + sc->nr_reclaimed += nr_soft_reclaimed; + sc->nr_scanned += nr_soft_scanned; /* need some check for avoid more shrink_zone() */ } @@ -2952,6 +2920,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, { int i; int end_zone = 0; /* Inclusive. 
0 = ZONE_DMA */ + unsigned long nr_soft_reclaimed; + unsigned long nr_soft_scanned; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .priority = DEF_PRIORITY, @@ -3066,6 +3036,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, sc.nr_scanned = 0; + nr_soft_scanned = 0; + /* + * Call soft limit reclaim before calling shrink_zone. + */ + nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, + order, sc.gfp_mask, + &nr_soft_scanned); + sc.nr_reclaimed += nr_soft_reclaimed; + /* * There should be no need to raise the scanning * priority if enough pages are already being scanned diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 47016c304c84..66cad506b8a2 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3975,8 +3975,8 @@ sub string_find_replace { # check for new externs in .h files. if ($realfile =~ /\.h$/ && $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) { - if (WARN("AVOID_EXTERNS", - "extern prototypes should be avoided in .h files\n" . $herecurr) && + if (CHK("AVOID_EXTERNS", + "extern prototypes should be avoided in .h files\n" . $herecurr) && $fix) { $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/; } diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index 98969541cbcc..bea523a5d852 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -139,6 +139,18 @@ static int snd_compr_open(struct inode *inode, struct file *f) static int snd_compr_free(struct inode *inode, struct file *f) { struct snd_compr_file *data = f->private_data; + struct snd_compr_runtime *runtime = data->stream.runtime; + + switch (runtime->state) { + case SNDRV_PCM_STATE_RUNNING: + case SNDRV_PCM_STATE_DRAINING: + case SNDRV_PCM_STATE_PAUSED: + data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP); + break; + default: + break; + } + data->stream.ops->free(&data->stream); kfree(data->stream.runtime->buffer); kfree(data->stream.runtime); @@ -837,7 +849,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device) struct snd_compr *compr; compr = device->device_data; - snd_unregister_device(compr->direction, compr->card, compr->device); + snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card, + compr->device); return 0; } diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index b524f89a1f13..18d972501585 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -111,6 +111,9 @@ enum { /* 0x0009 - 0x0014 -> 12 test regs */ /* 0x0015 - visibility reg */ +/* Cirrus Logic CS4208 */ +#define CS4208_VENDOR_NID 0x24 + /* * Cirrus Logic CS4210 * @@ -223,6 +226,16 @@ static const struct hda_verb cs_coef_init_verbs[] = { {} /* terminator */ }; +static const struct hda_verb cs4208_coef_init_verbs[] = { + {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */ + {0x24, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */ + {0x24, AC_VERB_SET_COEF_INDEX, 0x0033}, + {0x24, AC_VERB_SET_PROC_COEF, 0x0001}, /* A1 ICS */ + {0x24, AC_VERB_SET_COEF_INDEX, 0x0034}, + {0x24, AC_VERB_SET_PROC_COEF, 0x1C01}, /* A1 Enable, A Thresh = 300mV */ + {} /* terminator */ +}; + /* Errata: CS4207 rev C0/C1/C2 Silicon * * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf @@ -295,6 +308,8 @@ static int cs_init(struct hda_codec *codec) /* init_verb sequence for C0/C1/C2 errata*/ snd_hda_sequence_write(codec, cs_errata_init_verbs); snd_hda_sequence_write(codec, cs_coef_init_verbs); + } else if (spec->vendor_nid == CS4208_VENDOR_NID) { + snd_hda_sequence_write(codec, 
cs4208_coef_init_verbs); } snd_hda_gen_init(codec); @@ -434,6 +449,29 @@ static const struct hda_pintbl mba42_pincfgs[] = { {} /* terminator */ }; +static const struct hda_pintbl mba6_pincfgs[] = { + { 0x10, 0x032120f0 }, /* HP */ + { 0x11, 0x500000f0 }, + { 0x12, 0x90100010 }, /* Speaker */ + { 0x13, 0x500000f0 }, + { 0x14, 0x500000f0 }, + { 0x15, 0x770000f0 }, + { 0x16, 0x770000f0 }, + { 0x17, 0x430000f0 }, + { 0x18, 0x43ab9030 }, /* Mic */ + { 0x19, 0x770000f0 }, + { 0x1a, 0x770000f0 }, + { 0x1b, 0x770000f0 }, + { 0x1c, 0x90a00090 }, + { 0x1d, 0x500000f0 }, + { 0x1e, 0x500000f0 }, + { 0x1f, 0x500000f0 }, + { 0x20, 0x500000f0 }, + { 0x21, 0x430000f0 }, + { 0x22, 0x430000f0 }, + {} /* terminator */ +}; + static void cs420x_fixup_gpio_13(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -556,22 +594,23 @@ static int patch_cs420x(struct hda_codec *codec) /* * CS4208 support: - * Its layout is no longer compatible with CS4206/CS4207, and the generic - * parser seems working fairly well, except for trivial fixups. + * Its layout is no longer compatible with CS4206/CS4207 */ enum { + CS4208_MBA6, CS4208_GPIO0, }; static const struct hda_model_fixup cs4208_models[] = { { .id = CS4208_GPIO0, .name = "gpio0" }, + { .id = CS4208_MBA6, .name = "mba6" }, {} }; static const struct snd_pci_quirk cs4208_fixup_tbl[] = { /* codec SSID */ - SND_PCI_QUIRK(0x106b, 0x7100, "MacBookPro 6,1", CS4208_GPIO0), - SND_PCI_QUIRK(0x106b, 0x7200, "MacBookPro 6,2", CS4208_GPIO0), + SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), + SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), {} /* terminator */ }; @@ -588,18 +627,35 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec, } static const struct hda_fixup cs4208_fixups[] = { + [CS4208_MBA6] = { + .type = HDA_FIXUP_PINS, + .v.pins = mba6_pincfgs, + .chained = true, + .chain_id = CS4208_GPIO0, + }, [CS4208_GPIO0] = { .type = HDA_FIXUP_FUNC, .v.func = cs4208_fixup_gpio0, }, }; +/* correct the 0dB offset of input pins */ +static void cs4208_fix_amp_caps(struct hda_codec *codec, hda_nid_t adc) +{ + unsigned int caps; + + caps = query_amp_caps(codec, adc, HDA_INPUT); + caps &= ~(AC_AMPCAP_OFFSET); + caps |= 0x02; + snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps); +} + static int patch_cs4208(struct hda_codec *codec) { struct cs_spec *spec; int err; - spec = cs_alloc_spec(codec, 0); /* no specific w/a */ + spec = cs_alloc_spec(codec, CS4208_VENDOR_NID); if (!spec) return -ENOMEM; @@ -609,6 +665,12 @@ static int patch_cs4208(struct hda_codec *codec) cs4208_fixups); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); + snd_hda_override_wcaps(codec, 0x18, + get_wcaps(codec, 0x18) | AC_WCAP_STEREO); + cs4208_fix_amp_caps(codec, 0x18); + cs4208_fix_amp_caps(codec, 0x1b); + cs4208_fix_amp_caps(codec, 0x1c); + err = cs_parse_auto_config(codec); if (err < 0) goto error; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 3d8cd04455a6..7ea0245fc6bd 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1149,32 +1149,43 @@ static int hdmi_choose_cvt(struct hda_codec *codec, } static void haswell_config_cvts(struct hda_codec *codec, - int pin_id, int mux_id) + hda_nid_t pin_nid, int mux_idx) { struct hdmi_spec *spec = codec->spec; - struct hdmi_spec_per_pin *per_pin; - int pin_idx, mux_idx; - int curr; - int err; + hda_nid_t nid, end_nid; + int cvt_idx, curr; + struct hdmi_spec_per_cvt *per_cvt; - for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { - per_pin = get_pin(spec, 
pin_idx);
+	/* configure all pins, including "no physical connection" ones */
+	end_nid = codec->start_nid + codec->num_nodes;
+	for (nid = codec->start_nid; nid < end_nid; nid++) {
+		unsigned int wid_caps = get_wcaps(codec, nid);
+		unsigned int wid_type = get_wcaps_type(wid_caps);
-		if (pin_idx == pin_id)
+		if (wid_type != AC_WID_PIN)
			continue;
-		curr = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+		if (nid == pin_nid)
+			continue;
+
+		curr = snd_hda_codec_read(codec, nid, 0,
					  AC_VERB_GET_CONNECT_SEL, 0);
+		if (curr != mux_idx)
+			continue;

-		/* Choose another unused converter */
-		if (curr == mux_id) {
-			err = hdmi_choose_cvt(codec, pin_idx, NULL, &mux_idx);
-			if (err < 0)
-				return;
-			snd_printdd("HDMI: choose converter %d for pin %d\n", mux_idx, pin_idx);
-			snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+		/* choose an unassigned converter. The converters in the
+		 * connection list are in the same order as in the codec.
+		 */
+		for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
+			per_cvt = get_cvt(spec, cvt_idx);
+			if (!per_cvt->assigned) {
+				snd_printdd("choose cvt %d for pin nid %d\n",
+					    cvt_idx, nid);
+				snd_hda_codec_write_cache(codec, nid, 0,
					  AC_VERB_SET_CONNECT_SEL,
-					  mux_idx);
+					  cvt_idx);
+				break;
+			}
		}
	}
}

@@ -1216,7 +1227,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,

	/* configure unused pins to choose other converters */
	if (is_haswell(codec))
-		haswell_config_cvts(codec, pin_idx, mux_idx);
+		haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);

	snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bc07d369fac4..0e303b99a47c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3439,6 +3439,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
		/* Set to manual mode */
		val = alc_read_coef_idx(codec, 0x06);
		alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+		/* Enable Line1 input control by verb */
+		val = alc_read_coef_idx(codec, 0x1a);
+		alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
		break;
	}
}

@@ -3531,6 +3534,7 @@ enum {
	ALC269VB_FIXUP_ORDISSIMO_EVE2,
	ALC283_FIXUP_CHROME_BOOK,
	ALC282_FIXUP_ASUS_TX300,
+	ALC283_FIXUP_INT_MIC,
};

static const struct hda_fixup alc269_fixups[] = {
@@ -3790,6 +3794,16 @@ static const struct hda_fixup alc269_fixups[] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc282_fixup_asus_tx300,
	},
+	[ALC283_FIXUP_INT_MIC] = {
+		.type = HDA_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			{0x20, AC_VERB_SET_COEF_INDEX, 0x1a},
+			{0x20, AC_VERB_SET_PROC_COEF, 0x0011},
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+	},
};

static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3874,7 +3888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/tools/lib/lk/debugfs.c b/tools/lib/lk/debugfs.c
index 099e7cd022e4..7c4347962353 100644
--- a/tools/lib/lk/debugfs.c
+++
b/tools/lib/lk/debugfs.c @@ -5,7 +5,6 @@ #include <stdbool.h> #include <sys/vfs.h> #include <sys/mount.h> -#include <linux/magic.h> #include <linux/kernel.h> #include "debugfs.h" diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c index 9570c2b0f83c..b2519e49424f 100644 --- a/tools/perf/arch/x86/util/tsc.c +++ b/tools/perf/arch/x86/util/tsc.c @@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc) int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, struct perf_tsc_conversion *tc) { - bool cap_usr_time_zero; + bool cap_user_time_zero; u32 seq; int i = 0; @@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, tc->time_mult = pc->time_mult; tc->time_shift = pc->time_shift; tc->time_zero = pc->time_zero; - cap_usr_time_zero = pc->cap_usr_time_zero; + cap_user_time_zero = pc->cap_user_time_zero; rmb(); if (pc->lock == seq && !(seq & 1)) break; @@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, } } - if (!cap_usr_time_zero) + if (!cap_user_time_zero) return -EOPNOTSUPP; return 0; diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 423875c999b2..afe377b2884f 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -321,8 +321,6 @@ found: return perf_event__repipe(tool, event_sw, &sample_sw, machine); } -extern volatile int session_done; - static void sig_handler(int sig __maybe_unused) { session_done = 1; diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index c2dff9cb1f2c..9b5f077fee5b 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -101,7 +101,7 @@ static int setup_cpunode_map(void) dir1 = opendir(PATH_SYS_NODE); if (!dir1) - return -1; + return 0; while ((dent1 = readdir(dir1)) != NULL) { if (dent1->d_type != DT_DIR || diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 8e50d8d77419..72eae7498c09 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep) return 0; } -extern volatile int session_done; - static void sig_handler(int sig __maybe_unused) { session_done = 1; @@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep) } } + if (session_done()) + return 0; + if (nr_samples == 0) { ui__error("The %s file has no samples!\n", session->filename); return 0; diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 7f31a3ded1b6..9c333ff3dfeb 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -553,8 +553,6 @@ static struct perf_tool perf_script = { .ordering_requires_timestamps = true, }; -extern volatile int session_done; - static void sig_handler(int sig __maybe_unused) { session_done = 1; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index f5aa6375e3e9..71aa3e35406b 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -16,6 +16,23 @@ #include <sys/mman.h> #include <linux/futex.h> +/* For older distros: */ +#ifndef MAP_STACK +# define MAP_STACK 0x20000 +#endif + +#ifndef MADV_HWPOISON +# define MADV_HWPOISON 100 +#endif + +#ifndef MADV_MERGEABLE +# define MADV_MERGEABLE 12 +#endif + +#ifndef MADV_UNMERGEABLE +# define MADV_UNMERGEABLE 13 +#endif + static size_t syscall_arg__scnprintf_hex(char *bf, size_t size, unsigned long arg, u8 arg_idx __maybe_unused, @@ -1038,6 +1055,7 @@ static int trace__replay(struct trace *trace) trace->tool.sample = 
trace__process_sample;

	trace->tool.mmap = perf_event__process_mmap;
+	trace->tool.mmap2 = perf_event__process_mmap2;
	trace->tool.comm = perf_event__process_comm;
	trace->tool.exit = perf_event__process_exit;
	trace->tool.fork = perf_event__process_fork;
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 214e17e97e5c..5f6f9b3271bb 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -87,7 +87,7 @@ CFLAGS += -Wall
CFLAGS += -Wextra
CFLAGS += -std=gnu99

-EXTLIBS = -lelf -lpthread -lrt -lm
+EXTLIBS = -lelf -lpthread -lrt -lm -ldl

ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
  CFLAGS += -fstack-protector-all
@@ -180,6 +180,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
  CFLAGS += -DLIBELF_MMAP
endif
+ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
+  CFLAGS += -DHAVE_ELF_GETPHDRNUM
+endif

# include ARCH specific config
-include $(src-perf)/arch/$(ARCH)/Makefile
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 708fb8e9822a..d5a8dd44945f 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -61,6 +61,15 @@ int main(void)
}
endef

+define SOURCE_ELF_GETPHDRNUM
+#include <libelf.h>
+int main(void)
+{
+	size_t dst;
+	return elf_getphdrnum(0, &dst);
+}
+endef
+
ifndef NO_SLANG
define SOURCE_SLANG
#include <slang.h>
@@ -210,6 +219,7 @@ define SOURCE_LIBAUDIT

int main(void)
{
+	printf(\"error message: %s\n\", audit_errno_to_name(0));
	return audit_open();
}
endef
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index bfc5a27597d6..7eae5488ecea 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
	end = map__rip_2objdump(map, sym->end);

	offset = line_ip - start;
-	if (offset < 0 || (u64)line_ip > end)
+	if ((u64)line_ip < start || (u64)line_ip > end)
		offset = -1;
	else
		parsed_line = tmp2 + 1;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 3e5f5430a28a..e23bde19d590 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -263,6 +263,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
}

/**
+ * die_is_func_def - Ensure that this DIE is a subprogram and definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+	Dwarf_Attribute attr;
+
+	return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+		dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
+/**
 * die_get_data_member_location - Get the data-member offset
 * @mb_die: a DIE of a member of a data structure
 * @offs: The offset of the member in the data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
{
	struct __addr_die_search_param *ad = data;

+	/*
+	 * Since a declaration entry doesn't have a given pc, this always
+	 * returns a function definition entry.
+ */ if (dwarf_tag(fn_die) == DW_TAG_subprogram && dwarf_haspc(fn_die, ad->addr)) { memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die)); diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index 6ce1717784b7..8658d41697d2 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, int (*callback)(Dwarf_Die *, void *), void *data); +/* Ensure that this DIE is a subprogram and definition (not declaration) */ +extern bool die_is_func_def(Dwarf_Die *dw_die); + /* Compare diename and tname */ extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 26441d0e571b..ce69901176d8 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id, return write_padded(fd, name, name_len + 1, len); } -static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, - u16 misc, int fd) +static int __dsos__write_buildid_table(struct list_head *head, + struct machine *machine, + pid_t pid, u16 misc, int fd) { + char nm[PATH_MAX]; struct dso *pos; dsos__for_each_with_build_id(pos, head) { @@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, if (is_vdso_map(pos->short_name)) { name = (char *) VDSO__MAP_NAME; name_len = sizeof(VDSO__MAP_NAME) + 1; + } else if (dso__is_kcore(pos)) { + machine__mmap_name(machine, nm, sizeof(nm)); + name = nm; + name_len = strlen(nm) + 1; } else { name = pos->long_name; name_len = pos->long_name_len + 1; @@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd) umisc = PERF_RECORD_MISC_GUEST_USER; } - err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, - kmisc, fd); + err = __dsos__write_buildid_table(&machine->kernel_dsos, machine, + machine->pid, kmisc, fd); if (err == 0) - err = __dsos__write_buildid_table(&machine->user_dsos, + err = __dsos__write_buildid_table(&machine->user_dsos, machine, machine->pid, umisc, fd); return err; } @@ -375,23 +381,31 @@ out_free: return err; } -static int dso__cache_build_id(struct dso *dso, const char *debugdir) +static int dso__cache_build_id(struct dso *dso, struct machine *machine, + const char *debugdir) { bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; bool is_vdso = is_vdso_map(dso->short_name); + char *name = dso->long_name; + char nm[PATH_MAX]; - return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), - dso->long_name, debugdir, - is_kallsyms, is_vdso); + if (dso__is_kcore(dso)) { + is_kallsyms = true; + machine__mmap_name(machine, nm, sizeof(nm)); + name = nm; + } + return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name, + debugdir, is_kallsyms, is_vdso); } -static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) +static int __dsos__cache_build_ids(struct list_head *head, + struct machine *machine, const char *debugdir) { struct dso *pos; int err = 0; dsos__for_each_with_build_id(pos, head) - if (dso__cache_build_id(pos, debugdir)) + if (dso__cache_build_id(pos, machine, debugdir)) err = -1; return err; @@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) static int machine__cache_build_ids(struct machine *machine, const char *debugdir) { - int ret = 
__dsos__cache_build_ids(&machine->kernel_dsos, debugdir); - ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); + int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine, + debugdir); + ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir); return ret; } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 46a0d35a05e1..9ff6cf3e9a99 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists) next = rb_first(root); while (next) { + if (session_done()) + break; n = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&n->rb_node_in); diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 933d14f287ca..6188d2876a71 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -792,7 +792,7 @@ static int machine__create_modules(struct machine *machine) modules = path; } - if (symbol__restricted_filename(path, "/proc/modules")) + if (symbol__restricted_filename(modules, "/proc/modules")) return -1; file = fopen(modules, "r"); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index be0329394d56..371476cb8ddc 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -118,7 +118,6 @@ static const Dwfl_Callbacks offline_callbacks = { static int debuginfo__init_offline_dwarf(struct debuginfo *self, const char *path) { - Dwfl_Module *mod; int fd; fd = open(path, O_RDONLY); @@ -129,11 +128,11 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *self, if (!self->dwfl) goto error; - mod = dwfl_report_offline(self->dwfl, "", "", fd); - if (!mod) + self->mod = dwfl_report_offline(self->dwfl, "", "", fd); + if (!self->mod) goto error; - self->dbg = dwfl_module_getdwarf(mod, &self->bias); + self->dbg = dwfl_module_getdwarf(self->mod, &self->bias); if (!self->dbg) goto error; @@ -676,37 +675,42 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) } /* Convert subprogram DIE to trace point */ -static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, - bool retprobe, struct probe_trace_point *tp) +static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod, + Dwarf_Addr paddr, bool retprobe, + struct probe_trace_point *tp) { Dwarf_Addr eaddr, highaddr; - const char *name; - - /* Copy the name of probe point */ - name = dwarf_diename(sp_die); - if (name) { - if (dwarf_entrypc(sp_die, &eaddr) != 0) { - pr_warning("Failed to get entry address of %s\n", - dwarf_diename(sp_die)); - return -ENOENT; - } - if (dwarf_highpc(sp_die, &highaddr) != 0) { - pr_warning("Failed to get end address of %s\n", - dwarf_diename(sp_die)); - return -ENOENT; - } - if (paddr > highaddr) { - pr_warning("Offset specified is greater than size of %s\n", - dwarf_diename(sp_die)); - return -EINVAL; - } - tp->symbol = strdup(name); - if (tp->symbol == NULL) - return -ENOMEM; - tp->offset = (unsigned long)(paddr - eaddr); - } else - /* This function has no name. 
*/ - tp->offset = (unsigned long)paddr; + GElf_Sym sym; + const char *symbol; + + /* Verify the address is correct */ + if (dwarf_entrypc(sp_die, &eaddr) != 0) { + pr_warning("Failed to get entry address of %s\n", + dwarf_diename(sp_die)); + return -ENOENT; + } + if (dwarf_highpc(sp_die, &highaddr) != 0) { + pr_warning("Failed to get end address of %s\n", + dwarf_diename(sp_die)); + return -ENOENT; + } + if (paddr > highaddr) { + pr_warning("Offset specified is greater than size of %s\n", + dwarf_diename(sp_die)); + return -EINVAL; + } + + /* Get an appropriate symbol from symtab */ + symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL); + if (!symbol) { + pr_warning("Failed to find symbol at 0x%lx\n", + (unsigned long)paddr); + return -ENOENT; + } + tp->offset = (unsigned long)(paddr - sym.st_value); + tp->symbol = strdup(symbol); + if (!tp->symbol) + return -ENOMEM; /* Return probe must be on the head of a subprogram */ if (retprobe) { @@ -734,7 +738,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) } /* If not a real subprogram, find a real one */ - if (dwarf_tag(sc_die) != DW_TAG_subprogram) { + if (!die_is_func_def(sc_die)) { if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { pr_warning("Failed to find probe point in any " "functions.\n"); @@ -980,12 +984,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) struct dwarf_callback_param *param = data; struct probe_finder *pf = param->data; struct perf_probe_point *pp = &pf->pev->point; - Dwarf_Attribute attr; /* Check tag and diename */ - if (dwarf_tag(sp_die) != DW_TAG_subprogram || - !die_compare_name(sp_die, pp->function) || - dwarf_attr(sp_die, DW_AT_declaration, &attr)) + if (!die_is_func_def(sp_die) || + !die_compare_name(sp_die, pp->function)) return DWARF_CB_OK; /* Check declared file */ @@ -1151,7 +1153,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) tev = &tf->tevs[tf->ntevs++]; /* Trace point should be converted from subprogram DIE */ - ret = convert_to_trace_point(&pf->sp_die, pf->addr, + ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, pf->pev->point.retprobe, &tev->point); if (ret < 0) return ret; @@ -1183,7 +1185,7 @@ int debuginfo__find_trace_events(struct debuginfo *self, { struct trace_event_finder tf = { .pf = {.pev = pev, .callback = add_probe_trace_event}, - .max_tevs = max_tevs}; + .mod = self->mod, .max_tevs = max_tevs}; int ret; /* Allocate result tevs array */ @@ -1252,7 +1254,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) vl = &af->vls[af->nvls++]; /* Trace point should be converted from subprogram DIE */ - ret = convert_to_trace_point(&pf->sp_die, pf->addr, + ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr, pf->pev->point.retprobe, &vl->point); if (ret < 0) return ret; @@ -1291,6 +1293,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *self, { struct available_var_finder af = { .pf = {.pev = pev, .callback = add_available_vars}, + .mod = self->mod, .max_vls = max_vls, .externs = externs}; int ret; @@ -1474,7 +1477,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data) return 0; } -/* Search function from function name */ +/* Search function definition from function name */ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) { struct dwarf_callback_param *param = data; @@ -1485,7 +1488,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die))) return DWARF_CB_OK; 
-	if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+	if (die_is_func_def(sp_die) &&
	    die_compare_name(sp_die, lr->function)) {
		lf->fname = dwarf_decl_file(sp_die);
		dwarf_decl_line(sp_die, &lr->offset);
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 17e94d0c36f9..3b7d63018960 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -23,6 +23,7 @@ static inline int is_c_varname(const char *name)
/* debug information structure */
struct debuginfo {
	Dwarf		*dbg;
+	Dwfl_Module	*mod;
	Dwfl		*dwfl;
	Dwarf_Addr	bias;
};
@@ -77,6 +78,7 @@ struct probe_finder {

struct trace_event_finder {
	struct probe_finder	pf;
+	Dwfl_Module		*mod;		/* For resolving symbols */
	struct probe_trace_event *tevs;		/* Found trace events */
	int			ntevs;		/* Number of trace events */
	int			max_tevs;	/* Max number of trace events */
@@ -84,6 +86,7 @@ struct trace_event_finder {

struct available_var_finder {
	struct probe_finder	pf;
+	Dwfl_Module		*mod;		/* For resolving symbols */
	struct variable_list	*vls;		/* Found variable lists */
	int			nvls;		/* Number of variable lists */
	int			max_vls;	/* Max no. of variable lists */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 51f5edf2a6d0..70ffa41518f3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -531,6 +531,9 @@ static int flush_sample_queue(struct perf_session *s,
		return 0;

	list_for_each_entry_safe(iter, tmp, head, list) {
+		if (session_done())
+			return 0;
+
		if (iter->timestamp > limit)
			break;

@@ -1160,7 +1163,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
	}
}

-#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1372,10 +1374,13 @@ more:
					    "Processing events...");
	}

+	err = 0;
+	if (session_done())
+		goto out_err;
+
	if (file_pos < file_size)
		goto more;

-	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3aa75fb2225f..04bf7373a7e5 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,

#define perf_session__set_tracepoints_handlers(session, array) \
	__perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done()	(*(volatile int *)(&session_done))
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a7b9ab557380..a9c829be5216 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -8,6 +8,22 @@
#include "symbol.h"
#include "debug.h"

+#ifndef HAVE_ELF_GETPHDRNUM
+static int elf_getphdrnum(Elf *elf, size_t *dst)
+{
+	GElf_Ehdr gehdr;
+	GElf_Ehdr *ehdr;
+
+	ehdr = gelf_getehdr(elf, &gehdr);
+	if (!ehdr)
+		return -1;
+
+	*dst = ehdr->e_phnum;
+
+	return 0;
+}
+#endif
+
#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index fe7a27d67d2b..e9e1c03f927d 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
	char *next = NULL;
	char *addr_str;
	char *mod;
-	char *fmt;
+	char *fmt = NULL;

	line = strtok_r(file, "\n", &next);
	while (line) {
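
The session.h hunks above consolidate the session_done machinery: the per-tool `extern volatile int session_done;` declarations in builtin-inject.c, builtin-report.c and builtin-script.c are dropped in favour of one declaration plus accessor macro in the shared header, so flush_sample_queue() and hists__collapse_resort() can bail out early when the user hits Ctrl-C. The pattern in isolation looks roughly like the sketch below; the busy-wait main() is a hypothetical stand-in for perf's event-processing loop, not code from this patch:

#include <signal.h>

volatile int session_done;
#define session_done()	(*(volatile int *)(&session_done))

static void sig_handler(int sig)
{
	(void)sig;
	session_done = 1;	/* plain store to an int flag: async-signal-safe */
}

int main(void)
{
	signal(SIGINT, sig_handler);
	while (!session_done())
		;	/* process events; the cast through a volatile pointer
			 * forces a fresh load of the flag on every check, so
			 * the compiler cannot hoist it out of the loop */
	return 0;
}
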