author | Mike Pagano <mpagano@gentoo.org> | 2018-12-01 10:07:51 -0500
---|---|---
committer | Mike Pagano <mpagano@gentoo.org> | 2018-12-01 10:07:51 -0500
commit | 3c846c91f5b30fcc7c4adbb519f42d972fd9cfd4 (patch)
tree | 8d5e0e05e0d91d4732b9585bbd3e848793604159
parent | proj/linux-patches: Linux patch 4.19.5 (diff)
download | linux-patches-3c846c91f5b30fcc7c4adbb519f42d972fd9cfd4.tar.gz linux-patches-3c846c91f5b30fcc7c4adbb519f42d972fd9cfd4.tar.bz2 linux-patches-3c846c91f5b30fcc7c4adbb519f42d972fd9cfd4.zip
proj/linux-patches: Linux patch 4.19.6 (tag 4.19-7)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4
-rw-r--r-- | 1005_linux-4.19.6.patch | 4624 |
2 files changed, 4628 insertions(+), 0 deletions(-)
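The patch body follows. Besides the backported fixes, its top-level Makefile hunk bumps SUBLEVEL from 5 to 6, so the patched tree identifies itself as Linux 4.19.6. A minimal sketch of how out-of-tree code can gate on that bump, using the standard version macros from <linux/version.h> (the contents of the conditional block are illustrative only):

```c
/* Minimal sketch: gating on the 4.19.6 sublevel bump made by the
 * Makefile hunk below (SUBLEVEL = 5 -> 6). LINUX_VERSION_CODE and
 * KERNEL_VERSION() are the standard macros from <linux/version.h>;
 * what goes inside the #if block is purely illustrative.
 */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 6)
/* 4.19.6 or later: for example, the bounds-checked
 * __can_get_echo_skb() helper added by this patch is present. */
#endif
```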
diff --git a/0000_README b/0000_README index c0b6ddf9..c4c0a77d 100644 --- a/0000_README +++ b/0000_README @@ -63,6 +63,10 @@ Patch: 1004_linux-4.19.5.patch From: http://www.kernel.org Desc: Linux 4.19.5 +Patch: 1005_linux-4.19.6.patch +From: http://www.kernel.org +Desc: Linux 4.19.6 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1005_linux-4.19.6.patch b/1005_linux-4.19.6.patch new file mode 100644 index 00000000..91b0881a --- /dev/null +++ b/1005_linux-4.19.6.patch @@ -0,0 +1,4624 @@ +diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst +index 30491d91e93d..30187d49dc2c 100644 +--- a/Documentation/admin-guide/security-bugs.rst ++++ b/Documentation/admin-guide/security-bugs.rst +@@ -26,23 +26,35 @@ information is helpful. Any exploit code is very helpful and will not + be released without consent from the reporter unless it has already been + made public. + +-Disclosure +----------- +- +-The goal of the Linux kernel security team is to work with the bug +-submitter to understand and fix the bug. We prefer to publish the fix as +-soon as possible, but try to avoid public discussion of the bug itself +-and leave that to others. +- +-Publishing the fix may be delayed when the bug or the fix is not yet +-fully understood, the solution is not well-tested or for vendor +-coordination. However, we expect these delays to be short, measurable in +-days, not weeks or months. A release date is negotiated by the security +-team working with the bug submitter as well as vendors. However, the +-kernel security team holds the final say when setting a timeframe. The +-timeframe varies from immediate (esp. if it's already publicly known bug) +-to a few weeks. As a basic default policy, we expect report date to +-release date to be on the order of 7 days. ++Disclosure and embargoed information ++------------------------------------ ++ ++The security list is not a disclosure channel. For that, see Coordination ++below. ++ ++Once a robust fix has been developed, the release process starts. Fixes ++for publicly known bugs are released immediately. ++ ++Although our preference is to release fixes for publicly undisclosed bugs ++as soon as they become available, this may be postponed at the request of ++the reporter or an affected party for up to 7 calendar days from the start ++of the release process, with an exceptional extension to 14 calendar days ++if it is agreed that the criticality of the bug requires more time. The ++only valid reason for deferring the publication of a fix is to accommodate ++the logistics of QA and large scale rollouts which require release ++coordination. ++ ++Whilst embargoed information may be shared with trusted individuals in ++order to develop a fix, such information will not be published alongside ++the fix or on any other disclosure channel without the permission of the ++reporter. This includes but is not limited to the original bug report ++and followup discussions (if any), exploits, CVE information or the ++identity of the reporter. ++ ++In other words our only interest is in getting bugs fixed. All other ++information submitted to the security list and any followup discussions ++of the report are treated confidentially even after the embargo has been ++lifted, in perpetuity. + + Coordination + ------------ +@@ -68,7 +80,7 @@ may delay the bug handling. 
If a reporter wishes to have a CVE identifier + assigned ahead of public disclosure, they will need to contact the private + linux-distros list, described above. When such a CVE identifier is known + before a patch is provided, it is desirable to mention it in the commit +-message, though. ++message if the reporter agrees. + + Non-disclosure agreements + ------------------------- +diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt +index 903a78da65be..3a9926f99937 100644 +--- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt ++++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt +@@ -17,7 +17,7 @@ Example: + reg = <1>; + clocks = <&clk32m>; + interrupt-parent = <&gpio4>; +- interrupts = <13 IRQ_TYPE_EDGE_RISING>; ++ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>; + vdd-supply = <®5v0>; + xceiver-supply = <®5v0>; + }; +diff --git a/MAINTAINERS b/MAINTAINERS +index b2f710eee67a..9e9b19ecf6f7 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -13769,6 +13769,7 @@ F: drivers/i2c/busses/i2c-stm32* + + STABLE BRANCH + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> ++M: Sasha Levin <sashal@kernel.org> + L: stable@vger.kernel.org + S: Supported + F: Documentation/process/stable-kernel-rules.rst +diff --git a/Makefile b/Makefile +index a07830185bdf..20cbb8e84650 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 19 +-SUBLEVEL = 5 ++SUBLEVEL = 6 + EXTRAVERSION = + NAME = "People's Front" + +diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h +index e0331e754568..b855f56489ac 100644 +--- a/arch/powerpc/include/asm/io.h ++++ b/arch/powerpc/include/asm/io.h +@@ -285,19 +285,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, + * their hooks, a bitfield is reserved for use by the platform near the + * top of MMIO addresses (not PIO, those have to cope the hard way). + * +- * This bit field is 12 bits and is at the top of the IO virtual +- * addresses PCI_IO_INDIRECT_TOKEN_MASK. ++ * The highest address in the kernel virtual space are: + * +- * The kernel virtual space is thus: ++ * d0003fffffffffff # with Hash MMU ++ * c00fffffffffffff # with Radix MMU + * +- * 0xD000000000000000 : vmalloc +- * 0xD000080000000000 : PCI PHB IO space +- * 0xD000080080000000 : ioremap +- * 0xD0000fffffffffff : end of ioremap region +- * +- * Since the top 4 bits are reserved as the region ID, we use thus +- * the next 12 bits and keep 4 bits available for the future if the +- * virtual address space is ever to be extended. ++ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits ++ * that can be used for the field. 
+ * + * The direct IO mapping operations will then mask off those bits + * before doing the actual access, though that only happen when +@@ -309,8 +303,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, + */ + + #ifdef CONFIG_PPC_INDIRECT_MMIO +-#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul +-#define PCI_IO_IND_TOKEN_SHIFT 48 ++#define PCI_IO_IND_TOKEN_SHIFT 52 ++#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT) + #define PCI_FIX_ADDR(addr) \ + ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) + #define PCI_GET_ADDR_TOKEN(addr) \ +diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h +index 491b0f715d6b..ea1d7c808319 100644 +--- a/arch/powerpc/kvm/trace.h ++++ b/arch/powerpc/kvm/trace.h +@@ -6,8 +6,6 @@ + + #undef TRACE_SYSTEM + #define TRACE_SYSTEM kvm +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE trace + + /* + * Tracepoint for guest mode entry. +@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests, + #endif /* _TRACE_KVM_H */ + + /* This part must be outside protection */ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . ++#define TRACE_INCLUDE_FILE trace ++ + #include <trace/define_trace.h> +diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h +index ac640e81fdc5..3837842986aa 100644 +--- a/arch/powerpc/kvm/trace_booke.h ++++ b/arch/powerpc/kvm/trace_booke.h +@@ -6,8 +6,6 @@ + + #undef TRACE_SYSTEM + #define TRACE_SYSTEM kvm_booke +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE trace_booke + + #define kvm_trace_symbol_exit \ + {0, "CRITICAL"}, \ +@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio, + #endif + + /* This part must be outside protection */ ++ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . ++#define TRACE_INCLUDE_FILE trace_booke ++ + #include <trace/define_trace.h> +diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h +index bcfe8a987f6a..8a1e3b0047f1 100644 +--- a/arch/powerpc/kvm/trace_hv.h ++++ b/arch/powerpc/kvm/trace_hv.h +@@ -9,8 +9,6 @@ + + #undef TRACE_SYSTEM + #define TRACE_SYSTEM kvm_hv +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE trace_hv + + #define kvm_trace_symbol_hcall \ + {H_REMOVE, "H_REMOVE"}, \ +@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, + #endif /* _TRACE_KVM_HV_H */ + + /* This part must be outside protection */ ++ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . ++#define TRACE_INCLUDE_FILE trace_hv ++ + #include <trace/define_trace.h> +diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h +index 2f9a8829552b..46a46d328fbf 100644 +--- a/arch/powerpc/kvm/trace_pr.h ++++ b/arch/powerpc/kvm/trace_pr.h +@@ -8,8 +8,6 @@ + + #undef TRACE_SYSTEM + #define TRACE_SYSTEM kvm_pr +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE trace_pr + + TRACE_EVENT(kvm_book3s_reenter, + TP_PROTO(int r, struct kvm_vcpu *vcpu), +@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit, + #endif /* _TRACE_KVM_H */ + + /* This part must be outside protection */ ++ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . 
++#define TRACE_INCLUDE_FILE trace_pr ++ + #include <trace/define_trace.h> +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c +index 055b211b7126..5500e4edabc6 100644 +--- a/arch/powerpc/mm/numa.c ++++ b/arch/powerpc/mm/numa.c +@@ -1179,7 +1179,7 @@ static long vphn_get_associativity(unsigned long cpu, + + switch (rc) { + case H_FUNCTION: +- printk(KERN_INFO ++ printk_once(KERN_INFO + "VPHN is not supported. Disabling polling...\n"); + stop_topology_update(); + break; +diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile +index 61ec42405ec9..110be14e6122 100644 +--- a/arch/riscv/Makefile ++++ b/arch/riscv/Makefile +@@ -82,4 +82,8 @@ core-y += arch/riscv/kernel/ arch/riscv/mm/ + + libs-y += arch/riscv/lib/ + ++PHONY += vdso_install ++vdso_install: ++ $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ ++ + all: vmlinux +diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c +index 3303ed2cd419..7dd308129b40 100644 +--- a/arch/riscv/kernel/module.c ++++ b/arch/riscv/kernel/module.c +@@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) + { + if (v != (u32)v) { + pr_err("%s: value %016llx out of range for 32-bit field\n", +- me->name, v); ++ me->name, (long long)v); + return -EINVAL; + } + *location = v; +@@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, + if (offset != (s32)offset) { + pr_err( + "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", +- me->name, v, location); ++ me->name, (long long)v, location); + return -EINVAL; + } + +@@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, + if (IS_ENABLED(CMODEL_MEDLOW)) { + pr_err( + "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", +- me->name, v, location); ++ me->name, (long long)v, location); + return -EINVAL; + } + +@@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, + } else { + pr_err( + "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", +- me->name, v, location); ++ me->name, (long long)v, location); + return -EINVAL; + } + +@@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, + } else { + pr_err( + "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", +- me->name, v, location); ++ me->name, (long long)v, location); + return -EINVAL; + } + } +@@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, + if (offset != fill_v) { + pr_err( + "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", +- me->name, v, location); ++ me->name, (long long)v, location); + return -EINVAL; + } + +diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c +index 8527c3e1038b..bfa25814fe5f 100644 +--- a/arch/x86/events/intel/uncore_snb.c ++++ b/arch/x86/events/intel/uncore_snb.c +@@ -15,6 +15,25 @@ + #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 + #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f + #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f ++#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c ++#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 ++#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 ++#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f ++#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f ++#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc ++#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0 ++#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10 ++#define 
PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4 ++#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f ++#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f ++#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2 ++#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30 ++#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18 ++#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6 ++#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31 ++#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33 ++#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca ++#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32 + + /* SNB event control */ + #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff +@@ -569,7 +588,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, +- ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 
0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, + { /* end: all zeroes */ }, + }; + +@@ -618,6 +712,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { + IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ + IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ + IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ ++ IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */ ++ IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */ ++ IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */ ++ IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */ ++ IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */ ++ IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */ ++ IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */ ++ IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */ ++ IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */ ++ IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */ ++ IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */ ++ IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */ ++ IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */ ++ IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */ ++ IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */ ++ IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */ ++ IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */ ++ IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */ ++ IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */ + { /* end marker */ } + }; + +diff --git a/block/bio.c b/block/bio.c +index 41173710430c..c4ef8aa46452 100644 +--- a/block/bio.c ++++ b/block/bio.c +@@ -605,6 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) + if (bio_flagged(bio_src, BIO_THROTTLED)) + bio_set_flag(bio, BIO_THROTTLED); + bio->bi_opf = bio_src->bi_opf; ++ bio->bi_ioprio = bio_src->bi_ioprio; + bio->bi_write_hint = bio_src->bi_write_hint; + bio->bi_iter = bio_src->bi_iter; + bio->bi_io_vec = bio_src->bi_io_vec; +diff --git a/block/bounce.c b/block/bounce.c +index 418677dcec60..abb50e7e5fab 100644 +--- a/block/bounce.c ++++ b/block/bounce.c +@@ -248,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask, + return NULL; + bio->bi_disk = bio_src->bi_disk; + bio->bi_opf = bio_src->bi_opf; ++ bio->bi_ioprio = bio_src->bi_ioprio; + bio->bi_write_hint = bio_src->bi_write_hint; + bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; + bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; +diff --git a/crypto/simd.c b/crypto/simd.c +index ea7240be3001..78e8d037ae2b 100644 +--- a/crypto/simd.c ++++ b/crypto/simd.c +@@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm) + + ctx->cryptd_tfm = cryptd_tfm; + +- reqsize = sizeof(struct skcipher_request); +- reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base); ++ reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm)); ++ reqsize = max(reqsize, 
crypto_skcipher_reqsize(&cryptd_tfm->base)); ++ reqsize += sizeof(struct skcipher_request); + + crypto_skcipher_set_reqsize(tfm, reqsize); + +diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c +index e9fb0bf3c8d2..78f9de260d5f 100644 +--- a/drivers/acpi/acpica/dsopcode.c ++++ b/drivers/acpi/acpica/dsopcode.c +@@ -417,6 +417,10 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, + ACPI_FORMAT_UINT64(obj_desc->region.address), + obj_desc->region.length)); + ++ status = acpi_ut_add_address_range(obj_desc->region.space_id, ++ obj_desc->region.address, ++ obj_desc->region.length, node); ++ + /* Now the address and length are valid for this opregion */ + + obj_desc->region.flags |= AOPOBJ_DATA_VALID; +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c +index f2b6f4da1034..fdabd0b74492 100644 +--- a/drivers/block/floppy.c ++++ b/drivers/block/floppy.c +@@ -4151,10 +4151,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) + bio.bi_end_io = floppy_rb0_cb; + bio_set_op_attrs(&bio, REQ_OP_READ, 0); + ++ init_completion(&cbdata.complete); ++ + submit_bio(&bio); + process_fd_request(); + +- init_completion(&cbdata.complete); + wait_for_completion(&cbdata.complete); + + __free_page(page); +diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c +index b2ff423ad7f8..f4880a4f865b 100644 +--- a/drivers/cpufreq/imx6q-cpufreq.c ++++ b/drivers/cpufreq/imx6q-cpufreq.c +@@ -159,8 +159,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) + /* Ensure the arm clock divider is what we expect */ + ret = clk_set_rate(clks[ARM].clk, new_freq * 1000); + if (ret) { ++ int ret1; ++ + dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); +- regulator_set_voltage_tol(arm_reg, volt_old, 0); ++ ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0); ++ if (ret1) ++ dev_warn(cpu_dev, ++ "failed to restore vddarm voltage: %d\n", ret1); + return ret; + } + +diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c +index 388a929baf95..1a6a77df8a5e 100644 +--- a/drivers/firmware/efi/arm-init.c ++++ b/drivers/firmware/efi/arm-init.c +@@ -265,6 +265,10 @@ void __init efi_init(void) + (params.mmap & ~PAGE_MASK))); + + init_screen_info(); ++ ++ /* ARM does not permit early mappings to persist across paging_init() */ ++ if (IS_ENABLED(CONFIG_ARM)) ++ efi_memmap_unmap(); + } + + static int __init register_gop_device(void) +diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c +index 922cfb813109..a00934d263c5 100644 +--- a/drivers/firmware/efi/arm-runtime.c ++++ b/drivers/firmware/efi/arm-runtime.c +@@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void) + { + u64 mapsize; + +- if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) { ++ if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return 0; + } +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile +index 14c40a7750d1..c51627660dbb 100644 +--- a/drivers/firmware/efi/libstub/Makefile ++++ b/drivers/firmware/efi/libstub/Makefile +@@ -16,7 +16,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \ + cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \ + $(DISABLE_STACKLEAK_PLUGIN) + cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ +- -fno-builtin -fpic -mno-single-pic-base ++ -fno-builtin -fpic \ ++ $(call cc-option,-mno-single-pic-base) + + cflags-$(CONFIG_EFI_ARMSTUB) += 
-I$(srctree)/scripts/dtc/libfdt + +diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c +index 5fc70520e04c..1907db2b38d8 100644 +--- a/drivers/firmware/efi/memmap.c ++++ b/drivers/firmware/efi/memmap.c +@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data) + + void __init efi_memmap_unmap(void) + { ++ if (!efi_enabled(EFI_MEMMAP)) ++ return; ++ + if (!efi.memmap.late) { + unsigned long size; + +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index 25187403e3ac..a8e01d99919c 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -1285,7 +1285,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, + gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); + if (!gdev->descs) { + status = -ENOMEM; +- goto err_free_gdev; ++ goto err_free_ida; + } + + if (chip->ngpio == 0) { +@@ -1413,8 +1413,9 @@ err_free_label: + kfree_const(gdev->label); + err_free_descs: + kfree(gdev->descs); +-err_free_gdev: ++err_free_ida: + ida_simple_remove(&gpio_ida, gdev->id); ++err_free_gdev: + /* failures here can mean systems won't boot... */ + pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, + gdev->base, gdev->base + gdev->ngpio - 1, +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +index 0c791e35acf0..79bd8bd97fae 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +@@ -496,8 +496,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) + { + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + +- amdgpu_dpm_switch_power_profile(adev, +- PP_SMC_POWER_PROFILE_COMPUTE, !idle); ++ if (adev->powerplay.pp_funcs && ++ adev->powerplay.pp_funcs->switch_power_profile) ++ amdgpu_dpm_switch_power_profile(adev, ++ PP_SMC_POWER_PROFILE_COMPUTE, ++ !idle); + } + + bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +index ad151fefa41f..db406a35808f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +@@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin"); + MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); + MODULE_FIRMWARE("amdgpu/verde_mc.bin"); + MODULE_FIRMWARE("amdgpu/oland_mc.bin"); ++MODULE_FIRMWARE("amdgpu/hainan_mc.bin"); + MODULE_FIRMWARE("amdgpu/si58_mc.bin"); + + #define MC_SEQ_MISC0__MT__MASK 0xf0000000 +diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +index 5ae5ed2e62d6..21bc12e02311 100644 +--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c ++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) + else + wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); +- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); ++ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF); + + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); +diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c +index 69dab82a3771..bf589c53b908 100644 +--- a/drivers/gpu/drm/ast/ast_drv.c ++++ b/drivers/gpu/drm/ast/ast_drv.c +@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = { + + MODULE_DEVICE_TABLE(pci, pciidlist); + ++static void 
ast_kick_out_firmware_fb(struct pci_dev *pdev) ++{ ++ struct apertures_struct *ap; ++ bool primary = false; ++ ++ ap = alloc_apertures(1); ++ if (!ap) ++ return; ++ ++ ap->ranges[0].base = pci_resource_start(pdev, 0); ++ ap->ranges[0].size = pci_resource_len(pdev, 0); ++ ++#ifdef CONFIG_X86 ++ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; ++#endif ++ drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary); ++ kfree(ap); ++} ++ + static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + { ++ ast_kick_out_firmware_fb(pdev); ++ + return drm_get_pci_dev(pdev, ent, &driver); + } + +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c +index 5e77d456d9bb..7c6ac3cadb6b 100644 +--- a/drivers/gpu/drm/ast/ast_mode.c ++++ b/drivers/gpu/drm/ast/ast_mode.c +@@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc, + } + ast_bo_unreserve(bo); + ++ ast_set_offset_reg(crtc); + ast_set_start_address_crt1(crtc, (u32)gpu_addr); + + return 0; +@@ -1254,7 +1255,7 @@ static int ast_cursor_move(struct drm_crtc *crtc, + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); + + /* dummy write to fire HWC */ +- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00); ++ ast_show_cursor(crtc); + + return 0; + } +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c +index 9628dd617826..9214c8b02484 100644 +--- a/drivers/gpu/drm/drm_fb_helper.c ++++ b/drivers/gpu/drm/drm_fb_helper.c +@@ -200,6 +200,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) + mutex_lock(&fb_helper->lock); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { ++ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) ++ continue; ++ + ret = __drm_fb_helper_add_one_connector(fb_helper, connector); + if (ret) + goto fail; +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 43ae9de12ba3..c3a64d6a18df 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -2492,6 +2492,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, + uint32_t method1, method2; + int cpp; + ++ if (mem_value == 0) ++ return U32_MAX; ++ + if (!intel_wm_plane_visible(cstate, pstate)) + return 0; + +@@ -2521,6 +2524,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, + uint32_t method1, method2; + int cpp; + ++ if (mem_value == 0) ++ return U32_MAX; ++ + if (!intel_wm_plane_visible(cstate, pstate)) + return 0; + +@@ -2544,6 +2550,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, + { + int cpp; + ++ if (mem_value == 0) ++ return U32_MAX; ++ + if (!intel_wm_plane_visible(cstate, pstate)) + return 0; + +@@ -2998,6 +3007,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); + } + ++static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) ++{ ++ /* ++ * On some SNB machines (Thinkpad X220 Tablet at least) ++ * LP3 usage can cause vblank interrupts to be lost. ++ * The DEIIR bit will go high but it looks like the CPU ++ * never gets interrupted. ++ * ++ * It's not clear whether other interrupt source could ++ * be affected or if this is somehow limited to vblank ++ * interrupts only. To play it safe we disable LP3 ++ * watermarks entirely. 
++ */ ++ if (dev_priv->wm.pri_latency[3] == 0 && ++ dev_priv->wm.spr_latency[3] == 0 && ++ dev_priv->wm.cur_latency[3] == 0) ++ return; ++ ++ dev_priv->wm.pri_latency[3] = 0; ++ dev_priv->wm.spr_latency[3] = 0; ++ dev_priv->wm.cur_latency[3] = 0; ++ ++ DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n"); ++ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); ++ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); ++ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); ++} ++ + static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) + { + intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); +@@ -3014,8 +3051,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); + +- if (IS_GEN6(dev_priv)) ++ if (IS_GEN6(dev_priv)) { + snb_wm_latency_quirk(dev_priv); ++ snb_wm_lp3_irq_quirk(dev_priv); ++ } + } + + static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) +diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c +index ca5aa7fba769..f4d8a730e821 100644 +--- a/drivers/gpu/drm/vc4/vc4_kms.c ++++ b/drivers/gpu/drm/vc4/vc4_kms.c +@@ -216,6 +216,12 @@ static int vc4_atomic_commit(struct drm_device *dev, + return 0; + } + ++ /* We know for sure we don't want an async update here. Set ++ * state->legacy_cursor_update to false to prevent ++ * drm_atomic_helper_setup_commit() from auto-completing ++ * commit->flip_done. ++ */ ++ state->legacy_cursor_update = false; + ret = drm_atomic_helper_setup_commit(state, nonblock); + if (ret) + return ret; +diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c +index 0422ec2b13d2..dc4128bfe2ca 100644 +--- a/drivers/hid/hid-steam.c ++++ b/drivers/hid/hid-steam.c +@@ -23,8 +23,9 @@ + * In order to avoid breaking them this driver creates a layered hidraw device, + * so it can detect when the client is running and then: + * - it will not send any command to the controller. +- * - this input device will be disabled, to avoid double input of the same ++ * - this input device will be removed, to avoid double input of the same + * user action. ++ * When the client is closed, this input device will be created again. 
+ * + * For additional functions, such as changing the right-pad margin or switching + * the led, you can use the user-space tool at: +@@ -113,7 +114,7 @@ struct steam_device { + spinlock_t lock; + struct hid_device *hdev, *client_hdev; + struct mutex mutex; +- bool client_opened, input_opened; ++ bool client_opened; + struct input_dev __rcu *input; + unsigned long quirks; + struct work_struct work_connect; +@@ -279,18 +280,6 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable) + } + } + +-static void steam_update_lizard_mode(struct steam_device *steam) +-{ +- mutex_lock(&steam->mutex); +- if (!steam->client_opened) { +- if (steam->input_opened) +- steam_set_lizard_mode(steam, false); +- else +- steam_set_lizard_mode(steam, lizard_mode); +- } +- mutex_unlock(&steam->mutex); +-} +- + static int steam_input_open(struct input_dev *dev) + { + struct steam_device *steam = input_get_drvdata(dev); +@@ -301,7 +290,6 @@ static int steam_input_open(struct input_dev *dev) + return ret; + + mutex_lock(&steam->mutex); +- steam->input_opened = true; + if (!steam->client_opened && lizard_mode) + steam_set_lizard_mode(steam, false); + mutex_unlock(&steam->mutex); +@@ -313,7 +301,6 @@ static void steam_input_close(struct input_dev *dev) + struct steam_device *steam = input_get_drvdata(dev); + + mutex_lock(&steam->mutex); +- steam->input_opened = false; + if (!steam->client_opened && lizard_mode) + steam_set_lizard_mode(steam, true); + mutex_unlock(&steam->mutex); +@@ -400,7 +387,7 @@ static int steam_battery_register(struct steam_device *steam) + return 0; + } + +-static int steam_register(struct steam_device *steam) ++static int steam_input_register(struct steam_device *steam) + { + struct hid_device *hdev = steam->hdev; + struct input_dev *input; +@@ -414,17 +401,6 @@ static int steam_register(struct steam_device *steam) + return 0; + } + +- /* +- * Unlikely, but getting the serial could fail, and it is not so +- * important, so make up a serial number and go on. 
+- */ +- if (steam_get_serial(steam) < 0) +- strlcpy(steam->serial_no, "XXXXXXXXXX", +- sizeof(steam->serial_no)); +- +- hid_info(hdev, "Steam Controller '%s' connected", +- steam->serial_no); +- + input = input_allocate_device(); + if (!input) + return -ENOMEM; +@@ -492,11 +468,6 @@ static int steam_register(struct steam_device *steam) + goto input_register_fail; + + rcu_assign_pointer(steam->input, input); +- +- /* ignore battery errors, we can live without it */ +- if (steam->quirks & STEAM_QUIRK_WIRELESS) +- steam_battery_register(steam); +- + return 0; + + input_register_fail: +@@ -504,27 +475,88 @@ input_register_fail: + return ret; + } + +-static void steam_unregister(struct steam_device *steam) ++static void steam_input_unregister(struct steam_device *steam) + { + struct input_dev *input; ++ rcu_read_lock(); ++ input = rcu_dereference(steam->input); ++ rcu_read_unlock(); ++ if (!input) ++ return; ++ RCU_INIT_POINTER(steam->input, NULL); ++ synchronize_rcu(); ++ input_unregister_device(input); ++} ++ ++static void steam_battery_unregister(struct steam_device *steam) ++{ + struct power_supply *battery; + + rcu_read_lock(); +- input = rcu_dereference(steam->input); + battery = rcu_dereference(steam->battery); + rcu_read_unlock(); + +- if (battery) { +- RCU_INIT_POINTER(steam->battery, NULL); +- synchronize_rcu(); +- power_supply_unregister(battery); ++ if (!battery) ++ return; ++ RCU_INIT_POINTER(steam->battery, NULL); ++ synchronize_rcu(); ++ power_supply_unregister(battery); ++} ++ ++static int steam_register(struct steam_device *steam) ++{ ++ int ret; ++ ++ /* ++ * This function can be called several times in a row with the ++ * wireless adaptor, without steam_unregister() between them, because ++ * another client send a get_connection_status command, for example. ++ * The battery and serial number are set just once per device. ++ */ ++ if (!steam->serial_no[0]) { ++ /* ++ * Unlikely, but getting the serial could fail, and it is not so ++ * important, so make up a serial number and go on. 
++ */ ++ if (steam_get_serial(steam) < 0) ++ strlcpy(steam->serial_no, "XXXXXXXXXX", ++ sizeof(steam->serial_no)); ++ ++ hid_info(steam->hdev, "Steam Controller '%s' connected", ++ steam->serial_no); ++ ++ /* ignore battery errors, we can live without it */ ++ if (steam->quirks & STEAM_QUIRK_WIRELESS) ++ steam_battery_register(steam); ++ ++ mutex_lock(&steam_devices_lock); ++ list_add(&steam->list, &steam_devices); ++ mutex_unlock(&steam_devices_lock); + } +- if (input) { +- RCU_INIT_POINTER(steam->input, NULL); +- synchronize_rcu(); ++ ++ mutex_lock(&steam->mutex); ++ if (!steam->client_opened) { ++ steam_set_lizard_mode(steam, lizard_mode); ++ ret = steam_input_register(steam); ++ } else { ++ ret = 0; ++ } ++ mutex_unlock(&steam->mutex); ++ ++ return ret; ++} ++ ++static void steam_unregister(struct steam_device *steam) ++{ ++ steam_battery_unregister(steam); ++ steam_input_unregister(steam); ++ if (steam->serial_no[0]) { + hid_info(steam->hdev, "Steam Controller '%s' disconnected", + steam->serial_no); +- input_unregister_device(input); ++ mutex_lock(&steam_devices_lock); ++ list_del(&steam->list); ++ mutex_unlock(&steam_devices_lock); ++ steam->serial_no[0] = 0; + } + } + +@@ -600,6 +632,9 @@ static int steam_client_ll_open(struct hid_device *hdev) + mutex_lock(&steam->mutex); + steam->client_opened = true; + mutex_unlock(&steam->mutex); ++ ++ steam_input_unregister(steam); ++ + return ret; + } + +@@ -609,13 +644,13 @@ static void steam_client_ll_close(struct hid_device *hdev) + + mutex_lock(&steam->mutex); + steam->client_opened = false; +- if (steam->input_opened) +- steam_set_lizard_mode(steam, false); +- else +- steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); + + hid_hw_close(steam->hdev); ++ if (steam->connected) { ++ steam_set_lizard_mode(steam, lizard_mode); ++ steam_input_register(steam); ++ } + } + + static int steam_client_ll_raw_request(struct hid_device *hdev, +@@ -744,11 +779,6 @@ static int steam_probe(struct hid_device *hdev, + } + } + +- mutex_lock(&steam_devices_lock); +- steam_update_lizard_mode(steam); +- list_add(&steam->list, &steam_devices); +- mutex_unlock(&steam_devices_lock); +- + return 0; + + hid_hw_open_fail: +@@ -774,10 +804,6 @@ static void steam_remove(struct hid_device *hdev) + return; + } + +- mutex_lock(&steam_devices_lock); +- list_del(&steam->list); +- mutex_unlock(&steam_devices_lock); +- + hid_destroy_device(steam->client_hdev); + steam->client_opened = false; + cancel_work_sync(&steam->work_connect); +@@ -792,12 +818,14 @@ static void steam_remove(struct hid_device *hdev) + static void steam_do_connect_event(struct steam_device *steam, bool connected) + { + unsigned long flags; ++ bool changed; + + spin_lock_irqsave(&steam->lock, flags); ++ changed = steam->connected != connected; + steam->connected = connected; + spin_unlock_irqrestore(&steam->lock, flags); + +- if (schedule_work(&steam->work_connect) == 0) ++ if (changed && schedule_work(&steam->work_connect) == 0) + dbg_hid("%s: connected=%d event already queued\n", + __func__, connected); + } +@@ -1019,13 +1047,8 @@ static int steam_raw_event(struct hid_device *hdev, + return 0; + rcu_read_lock(); + input = rcu_dereference(steam->input); +- if (likely(input)) { ++ if (likely(input)) + steam_do_input_event(steam, input, data); +- } else { +- dbg_hid("%s: input data without connect event\n", +- __func__); +- steam_do_connect_event(steam, true); +- } + rcu_read_unlock(); + break; + case STEAM_EV_CONNECT: +@@ -1074,7 +1097,10 @@ static int 
steam_param_set_lizard_mode(const char *val, + + mutex_lock(&steam_devices_lock); + list_for_each_entry(steam, &steam_devices, list) { +- steam_update_lizard_mode(steam); ++ mutex_lock(&steam->mutex); ++ if (!steam->client_opened) ++ steam_set_lizard_mode(steam, lizard_mode); ++ mutex_unlock(&steam->mutex); + } + mutex_unlock(&steam_devices_lock); + return 0; +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c +index 5c88706121c1..39134dd305f5 100644 +--- a/drivers/infiniband/hw/hfi1/user_sdma.c ++++ b/drivers/infiniband/hw/hfi1/user_sdma.c +@@ -328,7 +328,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, + u8 opcode, sc, vl; + u16 pkey; + u32 slid; +- int req_queued = 0; + u16 dlid; + u32 selector; + +@@ -392,7 +391,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, + req->data_len = 0; + req->pq = pq; + req->cq = cq; +- req->status = -1; + req->ahg_idx = -1; + req->iov_idx = 0; + req->sent = 0; +@@ -400,12 +398,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, + req->seqcomp = 0; + req->seqsubmitted = 0; + req->tids = NULL; +- req->done = 0; + req->has_error = 0; + INIT_LIST_HEAD(&req->txps); + + memcpy(&req->info, &info, sizeof(info)); + ++ /* The request is initialized, count it */ ++ atomic_inc(&pq->n_reqs); ++ + if (req_opcode(info.ctrl) == EXPECTED) { + /* expected must have a TID info and at least one data vector */ + if (req->data_iovs < 2) { +@@ -500,7 +500,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, + ret = pin_vector_pages(req, &req->iovs[i]); + if (ret) { + req->data_iovs = i; +- req->status = ret; + goto free_req; + } + req->data_len += req->iovs[i].iov.iov_len; +@@ -561,14 +560,10 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, + req->ahg_idx = sdma_ahg_alloc(req->sde); + + set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); +- atomic_inc(&pq->n_reqs); +- req_queued = 1; + /* Send the first N packets in the request to buy us some time */ + ret = user_sdma_send_pkts(req, pcount); +- if (unlikely(ret < 0 && ret != -EBUSY)) { +- req->status = ret; ++ if (unlikely(ret < 0 && ret != -EBUSY)) + goto free_req; +- } + + /* + * It is possible that the SDMA engine would have processed all the +@@ -588,14 +583,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, + while (req->seqsubmitted != req->info.npkts) { + ret = user_sdma_send_pkts(req, pcount); + if (ret < 0) { +- if (ret != -EBUSY) { +- req->status = ret; +- WRITE_ONCE(req->has_error, 1); +- if (READ_ONCE(req->seqcomp) == +- req->seqsubmitted - 1) +- goto free_req; +- return ret; +- } ++ if (ret != -EBUSY) ++ goto free_req; + wait_event_interruptible_timeout( + pq->busy.wait_dma, + (pq->state == SDMA_PKT_Q_ACTIVE), +@@ -606,10 +595,19 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, + *count += idx; + return 0; + free_req: +- user_sdma_free_request(req, true); +- if (req_queued) ++ /* ++ * If the submitted seqsubmitted == npkts, the completion routine ++ * controls the final state. If sequbmitted < npkts, wait for any ++ * outstanding packets to finish before cleaning up. 
++ */ ++ if (req->seqsubmitted < req->info.npkts) { ++ if (req->seqsubmitted) ++ wait_event(pq->busy.wait_dma, ++ (req->seqcomp == req->seqsubmitted - 1)); ++ user_sdma_free_request(req, true); + pq_update(pq); +- set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); ++ set_comp_state(pq, cq, info.comp_idx, ERROR, ret); ++ } + return ret; + } + +@@ -917,7 +915,6 @@ dosend: + ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count); + req->seqsubmitted += count; + if (req->seqsubmitted == req->info.npkts) { +- WRITE_ONCE(req->done, 1); + /* + * The txreq has already been submitted to the HW queue + * so we can free the AHG entry now. Corruption will not +@@ -1365,11 +1362,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, + return idx; + } + +-/* +- * SDMA tx request completion callback. Called when the SDMA progress +- * state machine gets notification that the SDMA descriptors for this +- * tx request have been processed by the DMA engine. Called in +- * interrupt context. ++/** ++ * user_sdma_txreq_cb() - SDMA tx request completion callback. ++ * @txreq: valid sdma tx request ++ * @status: success/failure of request ++ * ++ * Called when the SDMA progress state machine gets notification that ++ * the SDMA descriptors for this tx request have been processed by the ++ * DMA engine. Called in interrupt context. ++ * Only do work on completed sequences. + */ + static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) + { +@@ -1378,7 +1379,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) + struct user_sdma_request *req; + struct hfi1_user_sdma_pkt_q *pq; + struct hfi1_user_sdma_comp_q *cq; +- u16 idx; ++ enum hfi1_sdma_comp_state state = COMPLETE; + + if (!tx->req) + return; +@@ -1391,31 +1392,19 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) + SDMA_DBG(req, "SDMA completion with error %d", + status); + WRITE_ONCE(req->has_error, 1); ++ state = ERROR; + } + + req->seqcomp = tx->seqnum; + kmem_cache_free(pq->txreq_cache, tx); +- tx = NULL; +- +- idx = req->info.comp_idx; +- if (req->status == -1 && status == SDMA_TXREQ_S_OK) { +- if (req->seqcomp == req->info.npkts - 1) { +- req->status = 0; +- user_sdma_free_request(req, false); +- pq_update(pq); +- set_comp_state(pq, cq, idx, COMPLETE, 0); +- } +- } else { +- if (status != SDMA_TXREQ_S_OK) +- req->status = status; +- if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) && +- (READ_ONCE(req->done) || +- READ_ONCE(req->has_error))) { +- user_sdma_free_request(req, false); +- pq_update(pq); +- set_comp_state(pq, cq, idx, ERROR, req->status); +- } +- } ++ ++ /* sequence isn't complete? 
We are done */ ++ if (req->seqcomp != req->info.npkts - 1) ++ return; ++ ++ user_sdma_free_request(req, false); ++ set_comp_state(pq, cq, req->info.comp_idx, state, status); ++ pq_update(pq); + } + + static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) +@@ -1448,6 +1437,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) + if (!node) + continue; + ++ req->iovs[i].node = NULL; ++ + if (unpin) + hfi1_mmu_rb_remove(req->pq->handler, + &node->rb); +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h +index d2bc77f75253..0ae06456c868 100644 +--- a/drivers/infiniband/hw/hfi1/user_sdma.h ++++ b/drivers/infiniband/hw/hfi1/user_sdma.h +@@ -205,8 +205,6 @@ struct user_sdma_request { + /* Writeable fields shared with interrupt */ + u64 seqcomp ____cacheline_aligned_in_smp; + u64 seqsubmitted; +- /* status of the last txreq completed */ +- int status; + + /* Send side fields */ + struct list_head txps ____cacheline_aligned_in_smp; +@@ -228,7 +226,6 @@ struct user_sdma_request { + u16 tididx; + /* progress index moving along the iovs array */ + u8 iov_idx; +- u8 done; + u8 has_error; + + struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ]; +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index 55d33500d55e..5e85f3cca867 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -99,9 +99,7 @@ static int synaptics_mode_cmd(struct psmouse *psmouse, u8 mode) + int synaptics_detect(struct psmouse *psmouse, bool set_properties) + { + struct ps2dev *ps2dev = &psmouse->ps2dev; +- u8 param[4]; +- +- param[0] = 0; ++ u8 param[4] = { 0 }; + + ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); + ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c +index 7b662bd1c7a0..30b15e91d8be 100644 +--- a/drivers/media/i2c/ov5640.c ++++ b/drivers/media/i2c/ov5640.c +@@ -288,10 +288,10 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = { + {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0}, + {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0}, +- {0x300e, 0x45, 0, 0}, {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0}, ++ {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0}, + {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, + {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, +- {0x4837, 0x0a, 0, 0}, {0x4800, 0x04, 0, 0}, {0x3824, 0x02, 0, 0}, ++ {0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0}, + {0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0}, + {0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0}, + {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0}, +@@ -910,6 +910,26 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg, + } + + /* download ov5640 settings to sensor through i2c */ ++static int ov5640_set_timings(struct ov5640_dev *sensor, ++ const struct ov5640_mode_info *mode) ++{ ++ int ret; ++ ++ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact); ++ if (ret < 0) ++ return ret; ++ ++ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact); ++ if (ret < 0) ++ return ret; ++ ++ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot); ++ if (ret < 0) ++ return ret; ++ ++ return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot); ++} ++ + static int ov5640_load_regs(struct ov5640_dev *sensor, + 
const struct ov5640_mode_info *mode) + { +@@ -937,7 +957,13 @@ static int ov5640_load_regs(struct ov5640_dev *sensor, + usleep_range(1000 * delay_ms, 1000 * delay_ms + 100); + } + +- return ret; ++ return ov5640_set_timings(sensor, mode); ++} ++ ++static int ov5640_set_autoexposure(struct ov5640_dev *sensor, bool on) ++{ ++ return ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, ++ BIT(0), on ? 0 : BIT(0)); + } + + /* read exposure, in number of line periods */ +@@ -996,6 +1022,18 @@ static int ov5640_get_gain(struct ov5640_dev *sensor) + return gain & 0x3ff; + } + ++static int ov5640_set_gain(struct ov5640_dev *sensor, int gain) ++{ ++ return ov5640_write_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN, ++ (u16)gain & 0x3ff); ++} ++ ++static int ov5640_set_autogain(struct ov5640_dev *sensor, bool on) ++{ ++ return ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, ++ BIT(1), on ? 0 : BIT(1)); ++} ++ + static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on) + { + int ret; +@@ -1104,12 +1142,25 @@ static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on) + { + int ret; + +- ret = ov5640_mod_reg(sensor, OV5640_REG_MIPI_CTRL00, BIT(5), +- on ? 0 : BIT(5)); +- if (ret) +- return ret; +- ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, +- on ? 0x00 : 0x70); ++ /* ++ * Enable/disable the MIPI interface ++ * ++ * 0x300e = on ? 0x45 : 0x40 ++ * ++ * FIXME: the sensor manual (version 2.03) reports ++ * [7:5] = 000 : 1 data lane mode ++ * [7:5] = 001 : 2 data lanes mode ++ * But this settings do not work, while the following ones ++ * have been validated for 2 data lanes mode. ++ * ++ * [7:5] = 010 : 2 data lanes mode ++ * [4] = 0 : Power up MIPI HS Tx ++ * [3] = 0 : Power up MIPI LS Rx ++ * [2] = 1/0 : MIPI interface enable/disable ++ * [1:0] = 01/00: FIXME: 'debug' ++ */ ++ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, ++ on ? 0x45 : 0x40); + if (ret) + return ret; + +@@ -1333,7 +1384,7 @@ static int ov5640_set_ae_target(struct ov5640_dev *sensor, int target) + return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1F, fast_low); + } + +-static int ov5640_binning_on(struct ov5640_dev *sensor) ++static int ov5640_get_binning(struct ov5640_dev *sensor) + { + u8 temp; + int ret; +@@ -1341,8 +1392,8 @@ static int ov5640_binning_on(struct ov5640_dev *sensor) + ret = ov5640_read_reg(sensor, OV5640_REG_TIMING_TC_REG21, &temp); + if (ret) + return ret; +- temp &= 0xfe; +- return temp ? 
1 : 0; ++ ++ return temp & BIT(0); + } + + static int ov5640_set_binning(struct ov5640_dev *sensor, bool enable) +@@ -1387,30 +1438,6 @@ static int ov5640_set_virtual_channel(struct ov5640_dev *sensor) + return ov5640_write_reg(sensor, OV5640_REG_DEBUG_MODE, temp); + } + +-static int ov5640_set_timings(struct ov5640_dev *sensor, +- const struct ov5640_mode_info *mode) +-{ +- int ret; +- +- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact); +- if (ret < 0) +- return ret; +- +- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact); +- if (ret < 0) +- return ret; +- +- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot); +- if (ret < 0) +- return ret; +- +- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot); +- if (ret < 0) +- return ret; +- +- return 0; +-} +- + static const struct ov5640_mode_info * + ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr, + int width, int height, bool nearest) +@@ -1452,7 +1479,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, + if (ret < 0) + return ret; + prev_shutter = ret; +- ret = ov5640_binning_on(sensor); ++ ret = ov5640_get_binning(sensor); + if (ret < 0) + return ret; + if (ret && mode->id != OV5640_MODE_720P_1280_720 && +@@ -1573,7 +1600,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, + } + + /* set capture gain */ +- ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.gain, cap_gain16); ++ ret = ov5640_set_gain(sensor, cap_gain16); + if (ret) + return ret; + +@@ -1586,7 +1613,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, + } + + /* set exposure */ +- return __v4l2_ctrl_s_ctrl(sensor->ctrls.exposure, cap_shutter); ++ return ov5640_set_exposure(sensor, cap_shutter); + } + + /* +@@ -1594,25 +1621,13 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, + * change mode directly + */ + static int ov5640_set_mode_direct(struct ov5640_dev *sensor, +- const struct ov5640_mode_info *mode, +- s32 exposure) ++ const struct ov5640_mode_info *mode) + { +- int ret; +- + if (!mode->reg_data) + return -EINVAL; + + /* Write capture setting */ +- ret = ov5640_load_regs(sensor, mode); +- if (ret < 0) +- return ret; +- +- /* turn auto gain/exposure back on for direct mode */ +- ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_gain, 1); +- if (ret) +- return ret; +- +- return __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_exp, exposure); ++ return ov5640_load_regs(sensor, mode); + } + + static int ov5640_set_mode(struct ov5640_dev *sensor) +@@ -1620,27 +1635,31 @@ static int ov5640_set_mode(struct ov5640_dev *sensor) + const struct ov5640_mode_info *mode = sensor->current_mode; + const struct ov5640_mode_info *orig_mode = sensor->last_mode; + enum ov5640_downsize_mode dn_mode, orig_dn_mode; +- s32 exposure; ++ bool auto_gain = sensor->ctrls.auto_gain->val == 1; ++ bool auto_exp = sensor->ctrls.auto_exp->val == V4L2_EXPOSURE_AUTO; + int ret; + + dn_mode = mode->dn_mode; + orig_dn_mode = orig_mode->dn_mode; + + /* auto gain and exposure must be turned off when changing modes */ +- ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_gain, 0); +- if (ret) +- return ret; ++ if (auto_gain) { ++ ret = ov5640_set_autogain(sensor, false); ++ if (ret) ++ return ret; ++ } + +- exposure = sensor->ctrls.auto_exp->val; +- ret = ov5640_set_exposure(sensor, V4L2_EXPOSURE_MANUAL); +- if (ret) +- return ret; ++ if (auto_exp) { ++ ret = ov5640_set_autoexposure(sensor, false); ++ if (ret) ++ goto restore_auto_gain; ++ } + + if ((dn_mode == 
SUBSAMPLING && orig_dn_mode == SCALING) || + (dn_mode == SCALING && orig_dn_mode == SUBSAMPLING)) { + /* + * change between subsampling and scaling +- * go through exposure calucation ++ * go through exposure calculation + */ + ret = ov5640_set_mode_exposure_calc(sensor, mode); + } else { +@@ -1648,15 +1667,16 @@ static int ov5640_set_mode(struct ov5640_dev *sensor) + * change inside subsampling or scaling + * download firmware directly + */ +- ret = ov5640_set_mode_direct(sensor, mode, exposure); ++ ret = ov5640_set_mode_direct(sensor, mode); + } +- + if (ret < 0) +- return ret; ++ goto restore_auto_exp_gain; + +- ret = ov5640_set_timings(sensor, mode); +- if (ret < 0) +- return ret; ++ /* restore auto gain and exposure */ ++ if (auto_gain) ++ ov5640_set_autogain(sensor, true); ++ if (auto_exp) ++ ov5640_set_autoexposure(sensor, true); + + ret = ov5640_set_binning(sensor, dn_mode != SCALING); + if (ret < 0) +@@ -1678,6 +1698,15 @@ static int ov5640_set_mode(struct ov5640_dev *sensor) + sensor->last_mode = mode; + + return 0; ++ ++restore_auto_exp_gain: ++ if (auto_exp) ++ ov5640_set_autoexposure(sensor, true); ++restore_auto_gain: ++ if (auto_gain) ++ ov5640_set_autogain(sensor, true); ++ ++ return ret; + } + + static int ov5640_set_framefmt(struct ov5640_dev *sensor, +@@ -1790,23 +1819,69 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on) + if (ret) + goto power_off; + ++ /* We're done here for DVP bus, while CSI-2 needs setup. */ ++ if (sensor->ep.bus_type != V4L2_MBUS_CSI2) ++ return 0; ++ ++ /* ++ * Power up MIPI HS Tx and LS Rx; 2 data lanes mode ++ * ++ * 0x300e = 0x40 ++ * [7:5] = 010 : 2 data lanes mode (see FIXME note in ++ * "ov5640_set_stream_mipi()") ++ * [4] = 0 : Power up MIPI HS Tx ++ * [3] = 0 : Power up MIPI LS Rx ++ * [2] = 0 : MIPI interface disabled ++ */ ++ ret = ov5640_write_reg(sensor, ++ OV5640_REG_IO_MIPI_CTRL00, 0x40); ++ if (ret) ++ goto power_off; ++ ++ /* ++ * Gate clock and set LP11 in 'no packets mode' (idle) ++ * ++ * 0x4800 = 0x24 ++ * [5] = 1 : Gate clock when 'no packets' ++ * [2] = 1 : MIPI bus in LP11 when 'no packets' ++ */ ++ ret = ov5640_write_reg(sensor, ++ OV5640_REG_MIPI_CTRL00, 0x24); ++ if (ret) ++ goto power_off; ++ ++ /* ++ * Set data lanes and clock in LP11 when 'sleeping' ++ * ++ * 0x3019 = 0x70 ++ * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping' ++ * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping' ++ * [4] = 1 : MIPI clock lane in LP11 when 'sleeping' ++ */ ++ ret = ov5640_write_reg(sensor, ++ OV5640_REG_PAD_OUTPUT00, 0x70); ++ if (ret) ++ goto power_off; ++ ++ /* Give lanes some time to coax into LP11 state. */ ++ usleep_range(500, 1000); ++ ++ } else { + if (sensor->ep.bus_type == V4L2_MBUS_CSI2) { +- /* +- * start streaming briefly followed by stream off in +- * order to coax the clock lane into LP-11 state. +- */ +- ret = ov5640_set_stream_mipi(sensor, true); +- if (ret) +- goto power_off; +- usleep_range(1000, 2000); +- ret = ov5640_set_stream_mipi(sensor, false); +- if (ret) +- goto power_off; ++ /* Reset MIPI bus settings to their default values. 
*/ ++ ov5640_write_reg(sensor, ++ OV5640_REG_IO_MIPI_CTRL00, 0x58); ++ ov5640_write_reg(sensor, ++ OV5640_REG_MIPI_CTRL00, 0x04); ++ ov5640_write_reg(sensor, ++ OV5640_REG_PAD_OUTPUT00, 0x00); + } + +- return 0; ++ ov5640_set_power_off(sensor); + } + ++ return 0; ++ + power_off: + ov5640_set_power_off(sensor); + return ret; +@@ -2144,20 +2219,20 @@ static int ov5640_set_ctrl_white_balance(struct ov5640_dev *sensor, int awb) + return ret; + } + +-static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, int exp) ++static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, ++ enum v4l2_exposure_auto_type auto_exposure) + { + struct ov5640_ctrls *ctrls = &sensor->ctrls; +- bool auto_exposure = (exp == V4L2_EXPOSURE_AUTO); ++ bool auto_exp = (auto_exposure == V4L2_EXPOSURE_AUTO); + int ret = 0; + + if (ctrls->auto_exp->is_new) { +- ret = ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, +- BIT(0), auto_exposure ? 0 : BIT(0)); ++ ret = ov5640_set_autoexposure(sensor, auto_exp); + if (ret) + return ret; + } + +- if (!auto_exposure && ctrls->exposure->is_new) { ++ if (!auto_exp && ctrls->exposure->is_new) { + u16 max_exp; + + ret = ov5640_read_reg16(sensor, OV5640_REG_AEC_PK_VTS, +@@ -2177,25 +2252,19 @@ static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, int exp) + return ret; + } + +-static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, int auto_gain) ++static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, bool auto_gain) + { + struct ov5640_ctrls *ctrls = &sensor->ctrls; + int ret = 0; + + if (ctrls->auto_gain->is_new) { +- ret = ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, +- BIT(1), +- ctrls->auto_gain->val ? 0 : BIT(1)); ++ ret = ov5640_set_autogain(sensor, auto_gain); + if (ret) + return ret; + } + +- if (!auto_gain && ctrls->gain->is_new) { +- u16 gain = (u16)ctrls->gain->val; +- +- ret = ov5640_write_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN, +- gain & 0x3ff); +- } ++ if (!auto_gain && ctrls->gain->is_new) ++ ret = ov5640_set_gain(sensor, ctrls->gain->val); + + return ret; + } +@@ -2268,16 +2337,12 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl) + + switch (ctrl->id) { + case V4L2_CID_AUTOGAIN: +- if (!ctrl->val) +- return 0; + val = ov5640_get_gain(sensor); + if (val < 0) + return val; + sensor->ctrls.gain->val = val; + break; + case V4L2_CID_EXPOSURE_AUTO: +- if (ctrl->val == V4L2_EXPOSURE_MANUAL) +- return 0; + val = ov5640_get_exposure(sensor); + if (val < 0) + return val; +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c +index 7bfd366d970d..c4115bae5db1 100644 +--- a/drivers/mmc/host/sdhci-pci-core.c ++++ b/drivers/mmc/host/sdhci-pci-core.c +@@ -12,6 +12,7 @@ + * - JMicron (hardware and technical support) + */ + ++#include <linux/bitfield.h> + #include <linux/string.h> + #include <linux/delay.h> + #include <linux/highmem.h> +@@ -462,6 +463,9 @@ struct intel_host { + u32 dsm_fns; + int drv_strength; + bool d3_retune; ++ bool rpm_retune_ok; ++ u32 glk_rx_ctrl1; ++ u32 glk_tun_val; + }; + + static const guid_t intel_dsm_guid = +@@ -791,6 +795,77 @@ cleanup: + return ret; + } + ++#ifdef CONFIG_PM ++#define GLK_RX_CTRL1 0x834 ++#define GLK_TUN_VAL 0x840 ++#define GLK_PATH_PLL GENMASK(13, 8) ++#define GLK_DLY GENMASK(6, 0) ++/* Workaround firmware failing to restore the tuning value */ ++static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp) ++{ ++ struct sdhci_pci_slot *slot = chip->slots[0]; ++ struct intel_host *intel_host = sdhci_pci_priv(slot); ++ struct sdhci_host *host = slot->host; ++ 
u32 glk_rx_ctrl1; ++ u32 glk_tun_val; ++ u32 dly; ++ ++ if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc)) ++ return; ++ ++ glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1); ++ glk_tun_val = sdhci_readl(host, GLK_TUN_VAL); ++ ++ if (susp) { ++ intel_host->glk_rx_ctrl1 = glk_rx_ctrl1; ++ intel_host->glk_tun_val = glk_tun_val; ++ return; ++ } ++ ++ if (!intel_host->glk_tun_val) ++ return; ++ ++ if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) { ++ intel_host->rpm_retune_ok = true; ++ return; ++ } ++ ++ dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) + ++ (intel_host->glk_tun_val << 1)); ++ if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1)) ++ return; ++ ++ glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly; ++ sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1); ++ ++ intel_host->rpm_retune_ok = true; ++ chip->rpm_retune = true; ++ mmc_retune_needed(host->mmc); ++ pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc)); ++} ++ ++static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp) ++{ ++ if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && ++ !chip->rpm_retune) ++ glk_rpm_retune_wa(chip, susp); ++} ++ ++static int glk_runtime_suspend(struct sdhci_pci_chip *chip) ++{ ++ glk_rpm_retune_chk(chip, true); ++ ++ return sdhci_cqhci_runtime_suspend(chip); ++} ++ ++static int glk_runtime_resume(struct sdhci_pci_chip *chip) ++{ ++ glk_rpm_retune_chk(chip, false); ++ ++ return sdhci_cqhci_runtime_resume(chip); ++} ++#endif ++ + #ifdef CONFIG_ACPI + static int ni_set_max_freq(struct sdhci_pci_slot *slot) + { +@@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { + .resume = sdhci_cqhci_resume, + #endif + #ifdef CONFIG_PM +- .runtime_suspend = sdhci_cqhci_runtime_suspend, +- .runtime_resume = sdhci_cqhci_runtime_resume, ++ .runtime_suspend = glk_runtime_suspend, ++ .runtime_resume = glk_runtime_resume, + #endif + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | +@@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( + device_init_wakeup(&pdev->dev, true); + + if (slot->cd_idx >= 0) { +- ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, ++ ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, + slot->cd_override_level, 0, NULL); ++ if (ret && ret != -EPROBE_DEFER) ++ ret = mmc_gpiod_request_cd(host->mmc, NULL, ++ slot->cd_idx, ++ slot->cd_override_level, ++ 0, NULL); + if (ret == -EPROBE_DEFER) + goto remove; + +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 49163570a63a..3b3f88ffab53 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -477,6 +477,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + } + EXPORT_SYMBOL_GPL(can_put_echo_skb); + ++struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) ++{ ++ struct can_priv *priv = netdev_priv(dev); ++ struct sk_buff *skb = priv->echo_skb[idx]; ++ struct canfd_frame *cf; ++ ++ if (idx >= priv->echo_skb_max) { ++ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", ++ __func__, idx, priv->echo_skb_max); ++ return NULL; ++ } ++ ++ if (!skb) { ++ netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", ++ __func__, idx); ++ return NULL; ++ } ++ ++ /* Using "struct canfd_frame::len" for the frame ++ * length is supported on both CAN and CANFD frames. 
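++ * (struct can_frame stores its "can_dlc" byte at the same offset as
++ * "len" in struct canfd_frame, which is what makes the cast below
++ * safe for classic CAN frames as well.)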
++ */ ++ cf = (struct canfd_frame *)skb->data; ++ *len_ptr = cf->len; ++ priv->echo_skb[idx] = NULL; ++ ++ return skb; ++} ++ + /* + * Get the skb from the stack and loop it back locally + * +@@ -486,22 +514,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); + */ + unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) + { +- struct can_priv *priv = netdev_priv(dev); +- +- BUG_ON(idx >= priv->echo_skb_max); +- +- if (priv->echo_skb[idx]) { +- struct sk_buff *skb = priv->echo_skb[idx]; +- struct can_frame *cf = (struct can_frame *)skb->data; +- u8 dlc = cf->can_dlc; ++ struct sk_buff *skb; ++ u8 len; + +- netif_rx(priv->echo_skb[idx]); +- priv->echo_skb[idx] = NULL; ++ skb = __can_get_echo_skb(dev, idx, &len); ++ if (!skb) ++ return 0; + +- return dlc; +- } ++ netif_rx(skb); + +- return 0; ++ return len; + } + EXPORT_SYMBOL_GPL(can_get_echo_skb); + +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c +index 8e972ef08637..75ce11395ee8 100644 +--- a/drivers/net/can/flexcan.c ++++ b/drivers/net/can/flexcan.c +@@ -135,13 +135,12 @@ + + /* FLEXCAN interrupt flag register (IFLAG) bits */ + /* Errata ERR005829 step7: Reserve first valid MB */ +-#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 +-#define FLEXCAN_TX_MB_OFF_FIFO 9 ++#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 + #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 +-#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 +-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) +-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 +-#define FLEXCAN_IFLAG_MB(x) BIT(x) ++#define FLEXCAN_TX_MB 63 ++#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1) ++#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1) ++#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f) + #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) + #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) + #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) +@@ -259,9 +258,7 @@ struct flexcan_priv { + struct can_rx_offload offload; + + struct flexcan_regs __iomem *regs; +- struct flexcan_mb __iomem *tx_mb; + struct flexcan_mb __iomem *tx_mb_reserved; +- u8 tx_mb_idx; + u32 reg_ctrl_default; + u32 reg_imask1_default; + u32 reg_imask2_default; +@@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev, + static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + const struct flexcan_priv *priv = netdev_priv(dev); ++ struct flexcan_regs __iomem *regs = priv->regs; + struct can_frame *cf = (struct can_frame *)skb->data; + u32 can_id; + u32 data; +@@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de + + if (cf->can_dlc > 0) { + data = be32_to_cpup((__be32 *)&cf->data[0]); +- priv->write(data, &priv->tx_mb->data[0]); ++ priv->write(data, ®s->mb[FLEXCAN_TX_MB].data[0]); + } + if (cf->can_dlc > 4) { + data = be32_to_cpup((__be32 *)&cf->data[4]); +- priv->write(data, &priv->tx_mb->data[1]); ++ priv->write(data, ®s->mb[FLEXCAN_TX_MB].data[1]); + } + + can_put_echo_skb(skb, dev, 0); + +- priv->write(can_id, &priv->tx_mb->can_id); +- priv->write(ctrl, &priv->tx_mb->can_ctrl); ++ priv->write(can_id, ®s->mb[FLEXCAN_TX_MB].can_id); ++ priv->write(ctrl, ®s->mb[FLEXCAN_TX_MB].can_ctrl); + + /* Errata ERR005829 step8: + * Write twice INACTIVE(0x8) code to first MB. 
+@@ -563,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de + static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) + { + struct flexcan_priv *priv = netdev_priv(dev); ++ struct flexcan_regs __iomem *regs = priv->regs; + struct sk_buff *skb; + struct can_frame *cf; + bool rx_errors = false, tx_errors = false; ++ u32 timestamp; ++ ++ timestamp = priv->read(®s->timer) << 16; + + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) +@@ -612,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) + if (tx_errors) + dev->stats.tx_errors++; + +- can_rx_offload_irq_queue_err_skb(&priv->offload, skb); ++ can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + } + + static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) + { + struct flexcan_priv *priv = netdev_priv(dev); ++ struct flexcan_regs __iomem *regs = priv->regs; + struct sk_buff *skb; + struct can_frame *cf; + enum can_state new_state, rx_state, tx_state; + int flt; + struct can_berr_counter bec; ++ u32 timestamp; ++ ++ timestamp = priv->read(®s->timer) << 16; + + flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; + if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { +@@ -652,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) + if (unlikely(new_state == CAN_STATE_BUS_OFF)) + can_bus_off(dev); + +- can_rx_offload_irq_queue_err_skb(&priv->offload, skb); ++ can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + } + + static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) +@@ -720,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, + priv->write(BIT(n - 32), ®s->iflag2); + } else { + priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); +- priv->read(®s->timer); + } + ++ /* Read the Free Running Timer. It is optional but recommended ++ * to unlock Mailbox as soon as possible and make it available ++ * for reception. 
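++ * (Reading a mailbox's control/status word locks that mailbox;
++ * reading the free-running timer is what releases the lock again.)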
++ */ ++ priv->read(®s->timer); ++ + return 1; + } + +@@ -732,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) + struct flexcan_regs __iomem *regs = priv->regs; + u32 iflag1, iflag2; + +- iflag2 = priv->read(®s->iflag2) & priv->reg_imask2_default; +- iflag1 = priv->read(®s->iflag1) & priv->reg_imask1_default & +- ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); ++ iflag2 = priv->read(®s->iflag2) & priv->reg_imask2_default & ++ ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); ++ iflag1 = priv->read(®s->iflag1) & priv->reg_imask1_default; + + return (u64)iflag2 << 32 | iflag1; + } +@@ -746,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) + struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + irqreturn_t handled = IRQ_NONE; +- u32 reg_iflag1, reg_esr; ++ u32 reg_iflag2, reg_esr; + enum can_state last_state = priv->can.state; + +- reg_iflag1 = priv->read(®s->iflag1); +- + /* reception interrupt */ + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + u64 reg_iflag; +@@ -764,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) + break; + } + } else { ++ u32 reg_iflag1; ++ ++ reg_iflag1 = priv->read(®s->iflag1); + if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { + handled = IRQ_HANDLED; + can_rx_offload_irq_offload_fifo(&priv->offload); +@@ -779,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) + } + } + ++ reg_iflag2 = priv->read(®s->iflag2); ++ + /* transmission complete interrupt */ +- if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { ++ if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) { ++ u32 reg_ctrl = priv->read(®s->mb[FLEXCAN_TX_MB].can_ctrl); ++ + handled = IRQ_HANDLED; +- stats->tx_bytes += can_get_echo_skb(dev, 0); ++ stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, ++ 0, reg_ctrl << 16); + stats->tx_packets++; + can_led_event(dev, CAN_LED_EVENT_TX); + + /* after sending a RTR frame MB is in RX mode */ + priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, +- &priv->tx_mb->can_ctrl); +- priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), ®s->iflag1); ++ ®s->mb[FLEXCAN_TX_MB].can_ctrl); ++ priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), ®s->iflag2); + netif_wake_queue(dev); + } + +@@ -931,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev) + reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); + reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | + FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | +- FLEXCAN_MCR_IDAM_C; ++ FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB); + +- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { ++ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) + reg_mcr &= ~FLEXCAN_MCR_FEN; +- reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); +- } else { +- reg_mcr |= FLEXCAN_MCR_FEN | +- FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); +- } ++ else ++ reg_mcr |= FLEXCAN_MCR_FEN; ++ + netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); + priv->write(reg_mcr, ®s->mcr); + +@@ -982,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev) + priv->write(reg_ctrl2, ®s->ctrl2); + } + +- /* clear and invalidate all mailboxes first */ +- for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { +- priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, +- ®s->mb[i].can_ctrl); +- } +- + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { +- for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) ++ for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) { + 
priv->write(FLEXCAN_MB_CODE_RX_EMPTY, + ®s->mb[i].can_ctrl); ++ } ++ } else { ++ /* clear and invalidate unused mailboxes first */ ++ for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) { ++ priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, ++ ®s->mb[i].can_ctrl); ++ } + } + + /* Errata ERR005829: mark first TX mailbox as INACTIVE */ +@@ -1000,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev) + + /* mark TX mailbox as INACTIVE */ + priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, +- &priv->tx_mb->can_ctrl); ++ ®s->mb[FLEXCAN_TX_MB].can_ctrl); + + /* acceptance mask/acceptance code (accept everything) */ + priv->write(0x0, ®s->rxgmask); +@@ -1355,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev) + priv->devtype_data = devtype_data; + priv->reg_xceiver = reg_xceiver; + +- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { +- priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP; ++ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; +- } else { +- priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; ++ else + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; +- } +- priv->tx_mb = ®s->mb[priv->tx_mb_idx]; + +- priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); +- priv->reg_imask2_default = 0; ++ priv->reg_imask1_default = 0; ++ priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); + + priv->offload.mailbox_read = flexcan_mailbox_read; + +diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c +index d94dae216820..727691dd08fb 100644 +--- a/drivers/net/can/rx-offload.c ++++ b/drivers/net/can/rx-offload.c +@@ -209,7 +209,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) + } + EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); + +-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) ++int can_rx_offload_queue_sorted(struct can_rx_offload *offload, ++ struct sk_buff *skb, u32 timestamp) ++{ ++ struct can_rx_offload_cb *cb; ++ unsigned long flags; ++ ++ if (skb_queue_len(&offload->skb_queue) > ++ offload->skb_queue_len_max) ++ return -ENOMEM; ++ ++ cb = can_rx_offload_get_cb(skb); ++ cb->timestamp = timestamp; ++ ++ spin_lock_irqsave(&offload->skb_queue.lock, flags); ++ __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare); ++ spin_unlock_irqrestore(&offload->skb_queue.lock, flags); ++ ++ can_rx_offload_schedule(offload); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); ++ ++unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, ++ unsigned int idx, u32 timestamp) ++{ ++ struct net_device *dev = offload->dev; ++ struct net_device_stats *stats = &dev->stats; ++ struct sk_buff *skb; ++ u8 len; ++ int err; ++ ++ skb = __can_get_echo_skb(dev, idx, &len); ++ if (!skb) ++ return 0; ++ ++ err = can_rx_offload_queue_sorted(offload, skb, timestamp); ++ if (err) { ++ stats->rx_errors++; ++ stats->tx_fifo_errors++; ++ } ++ ++ return len; ++} ++EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); ++ ++int can_rx_offload_queue_tail(struct can_rx_offload *offload, ++ struct sk_buff *skb) + { + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) +@@ -220,7 +267,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b + + return 0; + } +-EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); ++EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); + + static int can_rx_offload_init_queue(struct 
net_device *dev, struct can_rx_offload *offload, unsigned int weight) + { +diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c +index 53e320c92a8b..ddaf46239e39 100644 +--- a/drivers/net/can/spi/hi311x.c ++++ b/drivers/net/can/spi/hi311x.c +@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net) + { + struct hi3110_priv *priv = netdev_priv(net); + struct spi_device *spi = priv->spi; +- unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING; ++ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH; + int ret; + + ret = open_candev(net); +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index 5444e6213d45..64a794be7fcb 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -5997,7 +5997,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, + * for subsequent chanspecs. + */ + channel->flags = IEEE80211_CHAN_NO_HT40 | +- IEEE80211_CHAN_NO_80MHZ; ++ IEEE80211_CHAN_NO_80MHZ | ++ IEEE80211_CHAN_NO_160MHZ; + ch.bw = BRCMU_CHAN_BW_20; + cfg->d11inf.encchspec(&ch); + chaninfo = ch.chspec; +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +index cb5f32c1d705..0b3b1223cff7 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +@@ -6,6 +6,7 @@ + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -29,6 +30,7 @@ + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without +@@ -84,7 +86,7 @@ + #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) + #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ + ACPI_SAR_TABLE_SIZE + 3) +-#define ACPI_WGDS_WIFI_DATA_SIZE 18 ++#define ACPI_WGDS_WIFI_DATA_SIZE 19 + #define ACPI_WRDD_WIFI_DATA_SIZE 2 + #define ACPI_SPLC_WIFI_DATA_SIZE 2 + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +index 48a3611d6a31..4d49a1a3f504 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +@@ -880,7 +880,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) + IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); + + BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * +- ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); ++ ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE); + + BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); + +@@ -915,6 +915,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) + return -ENOENT; + } + ++static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) ++{ ++ return -ENOENT; ++} ++ + static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) + { + return 0; +@@ -941,8 +946,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) + IWL_DEBUG_RADIO(mvm, + "WRDS SAR BIOS table invalid or unavailable. (%d)\n", + ret); +- /* if not available, don't fail and don't bother with EWRD */ +- return 0; ++ /* ++ * If not available, don't fail and don't bother with EWRD. ++ * Return 1 to tell that we can't use WGDS either. 
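++ * (iwl_mvm_up() treats a positive return as "no SAR tables" and
++ * only aborts initialization on a negative error code.)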
++ */ ++ return 1; + } + + ret = iwl_mvm_sar_get_ewrd_table(mvm); +@@ -955,9 +963,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) + /* choose profile 1 (WRDS) as default for both chains */ + ret = iwl_mvm_sar_select_profile(mvm, 1, 1); + +- /* if we don't have profile 0 from BIOS, just skip it */ ++ /* ++ * If we don't have profile 0 from BIOS, just skip it. This ++ * means that SAR Geo will not be enabled either, even if we ++ * have other valid profiles. ++ */ + if (ret == -ENOENT) +- return 0; ++ return 1; + + return ret; + } +@@ -1155,11 +1167,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm) + iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); + + ret = iwl_mvm_sar_init(mvm); +- if (ret) +- goto error; ++ if (ret == 0) { ++ ret = iwl_mvm_sar_geo_init(mvm); ++ } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) { ++ /* ++ * If basic SAR is not available, we check for WGDS, ++ * which should *not* be available either. If it is ++ * available, issue an error, because we can't use SAR ++ * Geo without basic SAR. ++ */ ++ IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); ++ } + +- ret = iwl_mvm_sar_geo_init(mvm); +- if (ret) ++ if (ret < 0) + goto error; + + iwl_mvm_leds_sync(mvm); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index 155cc2ac0120..afed549f5645 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -306,8 +306,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, + goto out; + } + +- if (changed) +- *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); ++ if (changed) { ++ u32 status = le32_to_cpu(resp->status); ++ ++ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || ++ status == MCC_RESP_ILLEGAL); ++ } + + regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, + __le32_to_cpu(resp->n_channels), +@@ -4416,10 +4420,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); + } + +- if (!fw_has_capa(&mvm->fw->ucode_capa, +- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) +- return; +- + /* if beacon filtering isn't on mac80211 does it anyway */ + if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) + return; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +index cf48517944ec..f2579c94ffdb 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +@@ -545,9 +545,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, + } + + IWL_DEBUG_LAR(mvm, +- "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", +- status, mcc, mcc >> 8, mcc & 0xff, +- !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); ++ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", ++ status, mcc, mcc >> 8, mcc & 0xff, n_channels); + + exit: + iwl_free_resp(&cmd); +diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c +index 9e5a9a3112c9..3f4fb4dbbe33 100644 +--- a/drivers/opp/ti-opp-supply.c ++++ b/drivers/opp/ti-opp-supply.c +@@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data) + int ret; + + vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data, +- new_supply_vbb->u_volt); ++ new_supply_vdd->u_volt); ++ ++ if (new_supply_vdd->u_volt_min < vdd_uv) ++ new_supply_vdd->u_volt_min = vdd_uv; + + /* Scaling up? 
Scale voltage before frequency */
+ if (freq > old_freq) {
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+index 4ceb06f8a33c..4edeb4cae72a 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+@@ -830,7 +830,7 @@ static struct meson_bank meson_gxbb_periphs_banks[] = {
+
+ static struct meson_bank meson_gxbb_aobus_banks[] = {
+ /* name first last irq pullen pull dir out in */
+- BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
++ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
+ };
+
+ static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+index 7dae1d7bf6b0..158f618f1695 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+@@ -807,7 +807,7 @@ static struct meson_bank meson_gxl_periphs_banks[] = {
+
+ static struct meson_bank meson_gxl_aobus_banks[] = {
+ /* name first last irq pullen pull dir out in */
+- BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
++ BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
+ };
+
+ static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
+diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
+index 29a458da78db..4f3ab18636a3 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson.c
++++ b/drivers/pinctrl/meson/pinctrl-meson.c
+@@ -192,7 +192,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
+ dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
+
+ meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+- ret = regmap_update_bits(pc->reg_pull, reg,
++ ret = regmap_update_bits(pc->reg_pullen, reg,
+ BIT(bit), 0);
+ if (ret)
+ return ret;
+diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
+index c6d79315218f..86466173114d 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson8.c
++++ b/drivers/pinctrl/meson/pinctrl-meson8.c
+@@ -1053,7 +1053,7 @@ static struct meson_bank meson8_cbus_banks[] = {
+
+ static struct meson_bank meson8_aobus_banks[] = {
+ /* name first last irq pullen pull dir out in */
+- BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
++ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
+ };
+
+ static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
+diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
+index bb2a30964fc6..647ad15d5c3c 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
++++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
+@@ -906,7 +906,7 @@ static struct meson_bank meson8b_cbus_banks[] = {
+
+ static struct meson_bank meson8b_aobus_banks[] = {
+ /* name first last irq pullen pull dir out in */
+- BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
++ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
+ };
+
+ static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index df0c5776d49b..a5a19ff10535 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -257,6 +257,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned char rtc_control;
+
++ /* This is not only a rtc_op, but also called directly */
+ if (!is_valid_irq(cmos->irq))
+ return -EIO;
+
+@@ -452,6 +453,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ unsigned char mon, mday, hrs, min, sec, rtc_control;
+ int ret;
+
++ /* This is not only a rtc_op, but also called directly */
+ if (!is_valid_irq(cmos->irq))
+ return -EIO;
+
+@@ -516,9 +518,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned long flags;
+
+- if (!is_valid_irq(cmos->irq))
+- return -EINVAL;
+-
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ if (enabled)
+@@ -579,6 +578,12 @@ static const struct rtc_class_ops cmos_rtc_ops = {
+ .alarm_irq_enable = cmos_alarm_irq_enable,
+ };
+
++static const struct rtc_class_ops cmos_rtc_ops_no_alarm = {
++ .read_time = cmos_read_time,
++ .set_time = cmos_set_time,
++ .proc = cmos_procfs,
++};
++
+ /*----------------------------------------------------------------*/
+
+ /*
+@@ -855,9 +860,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+ dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
+ goto cleanup1;
+ }
++
++ cmos_rtc.rtc->ops = &cmos_rtc_ops;
++ } else {
++ cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
+ }
+
+- cmos_rtc.rtc->ops = &cmos_rtc_ops;
+ cmos_rtc.rtc->nvram_old_abi = true;
+ retval = rtc_register_device(cmos_rtc.rtc);
+ if (retval)
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index 9f99a0966550..7cb786d76e3c 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -303,6 +303,9 @@ static int pcf2127_i2c_gather_write(void *context,
+ memcpy(buf + 1, val, val_size);
+
+ ret = i2c_master_send(client, buf, val_size + 1);
++
++ kfree(buf);
++
+ if (ret != val_size + 1)
+ return ret < 0 ?
ret : -EIO; + +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +index 8f60f0e04599..410eccf0bc5e 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +@@ -904,11 +904,9 @@ static void start_delivery_v1_hw(struct hisi_sas_dq *dq) + { + struct hisi_hba *hisi_hba = dq->hisi_hba; + struct hisi_sas_slot *s, *s1, *s2 = NULL; +- struct list_head *dq_list; + int dlvry_queue = dq->id; + int wp; + +- dq_list = &dq->list; + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +index 9c5c5a601332..1c4ea58da1ae 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +@@ -1666,11 +1666,9 @@ static void start_delivery_v2_hw(struct hisi_sas_dq *dq) + { + struct hisi_hba *hisi_hba = dq->hisi_hba; + struct hisi_sas_slot *s, *s1, *s2 = NULL; +- struct list_head *dq_list; + int dlvry_queue = dq->id; + int wp; + +- dq_list = &dq->list; + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +index 08b503e274b8..687ff61bba9f 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +@@ -883,11 +883,9 @@ static void start_delivery_v3_hw(struct hisi_sas_dq *dq) + { + struct hisi_hba *hisi_hba = dq->hisi_hba; + struct hisi_sas_slot *s, *s1, *s2 = NULL; +- struct list_head *dq_list; + int dlvry_queue = dq->id; + int wp; + +- dq_list = &dq->list; + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c +index aec5b10a8c85..ca6c3982548d 100644 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c +@@ -700,6 +700,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) + rport = lpfc_ndlp_get_nrport(ndlp); + if (rport) + nrport = rport->remoteport; ++ else ++ nrport = NULL; + spin_unlock(&phba->hbalock); + if (!nrport) + continue; +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index 431742201709..3ad460219fd6 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -152,17 +152,28 @@ static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i) + return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; + } + ++/* If we are not echoing the data, perhaps this is a secret so erase it */ ++static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size) ++{ ++ bool icanon = !!L_ICANON(tty); ++ bool no_echo = !L_ECHO(tty); ++ ++ if (icanon && no_echo) ++ memset(buffer, 0x00, size); ++} ++ + static int tty_copy_to_user(struct tty_struct *tty, void __user *to, + size_t tail, size_t n) + { + struct n_tty_data *ldata = tty->disc_data; + size_t size = N_TTY_BUF_SIZE - tail; +- const void *from = read_buf_addr(ldata, tail); ++ void *from = read_buf_addr(ldata, tail); + int uncopied; + + if (n > size) { + tty_audit_add_data(tty, from, size); + uncopied = copy_to_user(to, from, size); ++ zero_buffer(tty, from, size - uncopied); + if (uncopied) + return uncopied; + to += size; +@@ -171,7 +182,9 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to, + } + + tty_audit_add_data(tty, from, n); +- return copy_to_user(to, from, n); ++ uncopied = copy_to_user(to, from, n); ++ zero_buffer(tty, from, n - uncopied); ++ return 
uncopied; + } + + /** +@@ -1960,11 +1973,12 @@ static int copy_from_read_buf(struct tty_struct *tty, + n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail); + n = min(*nr, n); + if (n) { +- const unsigned char *from = read_buf_addr(ldata, tail); ++ unsigned char *from = read_buf_addr(ldata, tail); + retval = copy_to_user(*b, from, n); + n -= retval; + is_eof = n == 1 && *from == EOF_CHAR(tty); + tty_audit_add_data(tty, from, n); ++ zero_buffer(tty, from, n); + smp_store_release(&ldata->read_tail, ldata->read_tail + n); + /* Turn single EOF into zero-length read */ + if (L_EXTPROC(tty) && ldata->icanon && is_eof && +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c +index c996b6859c5e..ae3ce330200e 100644 +--- a/drivers/tty/tty_buffer.c ++++ b/drivers/tty/tty_buffer.c +@@ -468,11 +468,15 @@ receive_buf(struct tty_port *port, struct tty_buffer *head, int count) + { + unsigned char *p = char_buf_ptr(head, head->read); + char *f = NULL; ++ int n; + + if (~head->flags & TTYB_NORMAL) + f = flag_buf_ptr(head, head->read); + +- return port->client_ops->receive_buf(port, p, f, count); ++ n = port->client_ops->receive_buf(port, p, f, count); ++ if (n > 0) ++ memset(p, 0, n); ++ return n; + } + + /** +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 6e0823790bee..f79979ae482a 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -2847,7 +2847,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1, + USB_PORT_FEAT_C_BH_PORT_RESET); + usb_clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_PORT_LINK_STATE); +- usb_clear_port_feature(hub->hdev, port1, ++ ++ if (udev) ++ usb_clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_CONNECTION); + + /* +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 88c80fcc39f5..fec97465ccac 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -1499,6 +1499,7 @@ static int dwc3_probe(struct platform_device *pdev) + + err5: + dwc3_event_buffers_cleanup(dwc); ++ dwc3_ulpi_exit(dwc); + + err4: + dwc3_free_scratch_buffers(dwc); +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index 1286076a8890..842795856bf4 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -283,8 +283,10 @@ err: + static void dwc3_pci_remove(struct pci_dev *pci) + { + struct dwc3_pci *dwc = pci_get_drvdata(pci); ++ struct pci_dev *pdev = dwc->pci; + +- gpiod_remove_lookup_table(&platform_bytcr_gpios); ++ if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) ++ gpiod_remove_lookup_table(&platform_bytcr_gpios); + #ifdef CONFIG_PM + cancel_work_sync(&dwc->wakeup_work); + #endif +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 2b53194081ba..2de1a3971a26 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1072,7 +1072,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, + /* Now prepare one extra TRB to align transfer size */ + trb = &dep->trb_pool[dep->trb_enqueue]; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, +- maxp - rem, false, 0, ++ maxp - rem, false, 1, + req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); +@@ -1116,7 +1116,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, + /* Now prepare one extra TRB to align transfer size */ + trb = &dep->trb_pool[dep->trb_enqueue]; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, +- false, 0, req->request.stream_id, ++ false, 1, req->request.stream_id, + req->request.short_not_ok, + 
req->request.no_interrupt); + } else if (req->request.zero && req->request.length && +@@ -1132,7 +1132,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, + /* Now prepare one extra TRB to handle ZLP */ + trb = &dep->trb_pool[dep->trb_enqueue]; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, +- false, 0, req->request.stream_id, ++ false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } else { +@@ -2250,7 +2250,7 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, + * with one TRB pending in the ring. We need to manually clear HWO bit + * from that TRB. + */ +- if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { ++ if ((req->zero || req->unaligned) && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) { + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; + return 1; + } +diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c +index 27f00160332e..3c4abb5a1c3f 100644 +--- a/drivers/usb/host/xhci-histb.c ++++ b/drivers/usb/host/xhci-histb.c +@@ -325,14 +325,16 @@ static int xhci_histb_remove(struct platform_device *dev) + struct xhci_hcd_histb *histb = platform_get_drvdata(dev); + struct usb_hcd *hcd = histb->hcd; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct usb_hcd *shared_hcd = xhci->shared_hcd; + + xhci->xhc_state |= XHCI_STATE_REMOVING; + +- usb_remove_hcd(xhci->shared_hcd); ++ usb_remove_hcd(shared_hcd); ++ xhci->shared_hcd = NULL; + device_wakeup_disable(&dev->dev); + + usb_remove_hcd(hcd); +- usb_put_hcd(xhci->shared_hcd); ++ usb_put_hcd(shared_hcd); + + xhci_histb_host_disable(histb); + usb_put_hcd(hcd); +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 12eea73d9f20..94aca1b5ac8a 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -876,7 +876,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + status |= USB_PORT_STAT_SUSPEND; + } + if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && +- !DEV_SUPERSPEED_ANY(raw_port_status)) { ++ !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) { + if ((raw_port_status & PORT_RESET) || + !(raw_port_status & PORT_PE)) + return 0xffffffff; +@@ -921,7 +921,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + time_left = wait_for_completion_timeout( + &bus_state->rexit_done[wIndex], + msecs_to_jiffies( +- XHCI_MAX_REXIT_TIMEOUT)); ++ XHCI_MAX_REXIT_TIMEOUT_MS)); + spin_lock_irqsave(&xhci->lock, flags); + + if (time_left) { +@@ -935,7 +935,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + } else { + int port_status = readl(port->addr); + xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", +- XHCI_MAX_REXIT_TIMEOUT, ++ XHCI_MAX_REXIT_TIMEOUT_MS, + port_status); + status |= USB_PORT_STAT_SUSPEND; + clear_bit(wIndex, &bus_state->rexit_ports); +@@ -1474,15 +1474,18 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + unsigned long flags; + struct xhci_hub *rhub; + struct xhci_port **ports; ++ u32 portsc_buf[USB_MAXCHILDREN]; ++ bool wake_enabled; + + rhub = xhci_get_rhub(hcd); + ports = rhub->ports; + max_ports = rhub->num_ports; + bus_state = &xhci->bus_state[hcd_index(hcd)]; ++ wake_enabled = hcd->self.root_hub->do_remote_wakeup; + + spin_lock_irqsave(&xhci->lock, flags); + +- if (hcd->self.root_hub->do_remote_wakeup) { ++ if (wake_enabled) { + if (bus_state->resuming_ports || /* USB2 */ + bus_state->port_remote_wakeup) { /* USB3 */ + spin_unlock_irqrestore(&xhci->lock, flags); +@@ -1490,26 +1493,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + 
return -EBUSY; + } + } +- +- port_index = max_ports; ++ /* ++ * Prepare ports for suspend, but don't write anything before all ports ++ * are checked and we know bus suspend can proceed ++ */ + bus_state->bus_suspended = 0; ++ port_index = max_ports; + while (port_index--) { +- /* suspend the port if the port is not suspended */ + u32 t1, t2; +- int slot_id; + + t1 = readl(ports[port_index]->addr); + t2 = xhci_port_state_to_neutral(t1); ++ portsc_buf[port_index] = 0; + +- if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) { +- xhci_dbg(xhci, "port %d not suspended\n", port_index); +- slot_id = xhci_find_slot_id_by_port(hcd, xhci, +- port_index + 1); +- if (slot_id) { ++ /* Bail out if a USB3 port has a new device in link training */ ++ if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) { ++ bus_state->bus_suspended = 0; ++ spin_unlock_irqrestore(&xhci->lock, flags); ++ xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); ++ return -EBUSY; ++ } ++ ++ /* suspend ports in U0, or bail out for new connect changes */ ++ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { ++ if ((t1 & PORT_CSC) && wake_enabled) { ++ bus_state->bus_suspended = 0; + spin_unlock_irqrestore(&xhci->lock, flags); +- xhci_stop_device(xhci, slot_id, 1); +- spin_lock_irqsave(&xhci->lock, flags); ++ xhci_dbg(xhci, "Bus suspend bailout, port connect change\n"); ++ return -EBUSY; + } ++ xhci_dbg(xhci, "port %d not suspended\n", port_index); + t2 &= ~PORT_PLS_MASK; + t2 |= PORT_LINK_STROBE | XDEV_U3; + set_bit(port_index, &bus_state->bus_suspended); +@@ -1518,7 +1531,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + * including the USB 3.0 roothub, but only if CONFIG_PM + * is enabled, so also enable remote wake here. + */ +- if (hcd->self.root_hub->do_remote_wakeup) { ++ if (wake_enabled) { + if (t1 & PORT_CONNECT) { + t2 |= PORT_WKOC_E | PORT_WKDISC_E; + t2 &= ~PORT_WKCONN_E; +@@ -1538,7 +1551,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + + t1 = xhci_port_state_to_neutral(t1); + if (t1 != t2) +- writel(t2, ports[port_index]->addr); ++ portsc_buf[port_index] = t2; ++ } ++ ++ /* write port settings, stopping and suspending ports if needed */ ++ port_index = max_ports; ++ while (port_index--) { ++ if (!portsc_buf[port_index]) ++ continue; ++ if (test_bit(port_index, &bus_state->bus_suspended)) { ++ int slot_id; ++ ++ slot_id = xhci_find_slot_id_by_port(hcd, xhci, ++ port_index + 1); ++ if (slot_id) { ++ spin_unlock_irqrestore(&xhci->lock, flags); ++ xhci_stop_device(xhci, slot_id, 1); ++ spin_lock_irqsave(&xhci->lock, flags); ++ } ++ } ++ writel(portsc_buf[port_index], ports[port_index]->addr); + } + hcd->state = HC_STATE_SUSPENDED; + bus_state->next_statechange = jiffies + msecs_to_jiffies(10); +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c +index 71d0d33c3286..60987c787e44 100644 +--- a/drivers/usb/host/xhci-mtk.c ++++ b/drivers/usb/host/xhci-mtk.c +@@ -590,12 +590,14 @@ static int xhci_mtk_remove(struct platform_device *dev) + struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev); + struct usb_hcd *hcd = mtk->hcd; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct usb_hcd *shared_hcd = xhci->shared_hcd; + +- usb_remove_hcd(xhci->shared_hcd); ++ usb_remove_hcd(shared_hcd); ++ xhci->shared_hcd = NULL; + device_init_wakeup(&dev->dev, false); + + usb_remove_hcd(hcd); +- usb_put_hcd(xhci->shared_hcd); ++ usb_put_hcd(shared_hcd); + usb_put_hcd(hcd); + xhci_mtk_sch_exit(mtk); + xhci_mtk_clks_disable(mtk); +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 
51dd8e00c4f8..beeda27b3789 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -231,6 +231,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
+ xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+
++ if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
++ pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
++ pdev->device == 0x9026)
++ xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
++
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Resetting on resume");
+@@ -356,6 +361,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ if (xhci->shared_hcd) {
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_put_hcd(xhci->shared_hcd);
++ xhci->shared_hcd = NULL;
+ }
+
+ /* Workaround for spurious wakeups at shutdown with HSW */
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 94e939249b2b..e5da8ce62914 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -359,14 +359,16 @@ static int xhci_plat_remove(struct platform_device *dev)
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct clk *clk = xhci->clk;
+ struct clk *reg_clk = xhci->reg_clk;
++ struct usb_hcd *shared_hcd = xhci->shared_hcd;
+
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
+
+- usb_remove_hcd(xhci->shared_hcd);
++ usb_remove_hcd(shared_hcd);
++ xhci->shared_hcd = NULL;
+ usb_phy_shutdown(hcd->usb_phy);
+
+ usb_remove_hcd(hcd);
+- usb_put_hcd(xhci->shared_hcd);
++ usb_put_hcd(shared_hcd);
+
+ clk_disable_unprepare(clk);
+ clk_disable_unprepare(reg_clk);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index cd4659703647..9ae17a666bdb 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1517,6 +1517,35 @@ static void handle_device_notification(struct xhci_hcd *xhci,
+ usb_wakeup_notification(udev->parent, udev->portnum);
+ }
+
++/*
++ * Quirk handler for errata seen on the Cavium ThunderX2 processor XHCI
++ * Controller.
++ * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
++ * if a connection to a USB 1 device is followed by another connection
++ * to a USB 2 device.
++ *
++ * Reset the PHY after the USB device is disconnected if device speed
++ * is less than HCD_USB3.
++ * Retry the reset sequence a maximum of 4 times, checking the PLL lock
++ * status.
++ */
++static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
++{
++ struct usb_hcd *hcd = xhci_to_hcd(xhci);
++ u32 pll_lock_check;
++ u32 retry_count = 4;
++
++ do {
++ /* Assert PHY reset */
++ writel(0x6F, hcd->regs + 0x1048);
++ udelay(10);
++ /* De-assert the PHY reset */
++ writel(0x7F, hcd->regs + 0x1048);
++ udelay(200);
++ pll_lock_check = readl(hcd->regs + 0x1070);
++ } while (!(pll_lock_check & 0x1) && --retry_count);
++}
++
+ static void handle_port_status(struct xhci_hcd *xhci,
+ union xhci_trb *event)
+ {
+@@ -1552,6 +1581,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ goto cleanup;
+ }
+
++ /* We might get interrupts after shared_hcd is removed */
++ if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
++ xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
++ bogus_port_status = true;
++ goto cleanup;
++ }
++
+ hcd = port->rhub->hcd;
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+ hcd_portnum = port->hcd_portnum;
+@@ -1635,7 +1671,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ * RExit to a disconnect state).
If so, let the driver know it's
+ * out of the RExit state.
+ */
+- if (!DEV_SUPERSPEED_ANY(portsc) &&
++ if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
+ test_and_clear_bit(hcd_portnum,
+ &bus_state->rexit_ports)) {
+ complete(&bus_state->rexit_done[hcd_portnum]);
+ bogus_port_status = true;
+ goto cleanup;
+ }
+
+- if (hcd->speed < HCD_USB3)
++ if (hcd->speed < HCD_USB3) {
+ xhci_test_and_clear_bit(xhci, port, PORT_PLC);
++ if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
++ (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
++ xhci_cavium_reset_phy_quirk(xhci);
++ }
+
+ cleanup:
+ /* Update event ring dequeue pointer before dropping the lock */
+@@ -2247,6 +2287,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ goto cleanup;
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
++ case COMP_STOPPED_LENGTH_INVALID:
+ goto cleanup;
+ default:
+ xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 4b463e5202a4..b1cce989bd12 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -1240,6 +1240,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
+
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_put_hcd(xhci->shared_hcd);
++ xhci->shared_hcd = NULL;
+ usb_remove_hcd(tegra->hcd);
+ usb_put_hcd(tegra->hcd);
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 0420eefa647a..c928dbbff881 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -719,8 +719,6 @@ static void xhci_stop(struct usb_hcd *hcd)
+
+ /* Only halt host and free memory after both hcds are removed */
+ if (!usb_hcd_is_primary_hcd(hcd)) {
+- /* usb core will free this hcd shortly, unset pointer */
+- xhci->shared_hcd = NULL;
+ mutex_unlock(&xhci->mutex);
+ return;
+ }
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 6230a578324c..e936e4c8af98 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1678,7 +1678,7 @@ struct xhci_bus_state {
+ * It can take up to 20 ms to transition from RExit to U0 on the
+ * Intel Lynx Point LP xHCI host.
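+ * The constant below is expressed in milliseconds; the previous
+ * definition of 20 * 1000 handed 20 seconds to msecs_to_jiffies().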
+ */ +-#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000) ++#define XHCI_MAX_REXIT_TIMEOUT_MS 20 + + static inline unsigned int hcd_index(struct usb_hcd *hcd) + { +@@ -1846,6 +1846,7 @@ struct xhci_hcd { + #define XHCI_SUSPEND_DELAY BIT_ULL(30) + #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) + #define XHCI_ZERO_64B_REGS BIT_ULL(32) ++#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) + + unsigned int num_active_eps; + unsigned int limit_active_eps; +diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c +index b0405d6aac85..48db9a9f13f9 100644 +--- a/fs/9p/vfs_dir.c ++++ b/fs/9p/vfs_dir.c +@@ -76,15 +76,6 @@ static inline int dt_type(struct p9_wstat *mistat) + return rettype; + } + +-static void p9stat_init(struct p9_wstat *stbuf) +-{ +- stbuf->name = NULL; +- stbuf->uid = NULL; +- stbuf->gid = NULL; +- stbuf->muid = NULL; +- stbuf->extension = NULL; +-} +- + /** + * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir + * @filp: opened file structure +@@ -145,12 +136,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) + rdir->tail = n; + } + while (rdir->head < rdir->tail) { +- p9stat_init(&st); + err = p9stat_read(fid->clnt, rdir->buf + rdir->head, + rdir->tail - rdir->head, &st); + if (err) { + p9_debug(P9_DEBUG_VFS, "returned %d\n", err); +- p9stat_free(&st); + return -EIO; + } + reclen = st.size+2; +diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c +index 9a69392f1fb3..d81c148682e7 100644 +--- a/fs/bfs/inode.c ++++ b/fs/bfs/inode.c +@@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) + + s->s_magic = BFS_MAGIC; + +- if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) { ++ if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) || ++ le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) { + printf("Superblock is corrupted\n"); + goto out1; + } +@@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) + sizeof(struct bfs_inode) + + BFS_ROOT_INO - 1; + imap_len = (info->si_lasti / 8) + 1; +- info->si_imap = kzalloc(imap_len, GFP_KERNEL); +- if (!info->si_imap) ++ info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN); ++ if (!info->si_imap) { ++ printf("Cannot allocate %u bytes\n", imap_len); + goto out1; ++ } + for (i = 0; i < BFS_ROOT_INO; i++) + set_bit(i, info->si_imap); + +diff --git a/fs/dax.c b/fs/dax.c +index 0fb270f0a0ef..b0cd1364c68f 100644 +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -217,6 +217,9 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot) + return (void *)entry; + } + ++static void put_unlocked_mapping_entry(struct address_space *mapping, ++ pgoff_t index, void *entry); ++ + /* + * Lookup entry in radix tree, wait for it to become unlocked if it is + * exceptional entry and return it. 
The caller must call +@@ -256,8 +259,10 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping, + revalidate = wait_fn(); + finish_wait(wq, &ewait.wait); + xa_lock_irq(&mapping->i_pages); +- if (revalidate) ++ if (revalidate) { ++ put_unlocked_mapping_entry(mapping, index, entry); + return ERR_PTR(-EAGAIN); ++ } + } + } + +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c +index 8748539c04ed..7f8bb0868c0f 100644 +--- a/fs/gfs2/bmap.c ++++ b/fs/gfs2/bmap.c +@@ -826,7 +826,7 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length, + ret = gfs2_meta_inode_buffer(ip, &dibh); + if (ret) + goto unlock; +- iomap->private = dibh; ++ mp->mp_bh[0] = dibh; + + if (gfs2_is_stuffed(ip)) { + if (flags & IOMAP_WRITE) { +@@ -863,9 +863,6 @@ unstuff: + len = lblock_stop - lblock + 1; + iomap->length = len << inode->i_blkbits; + +- get_bh(dibh); +- mp->mp_bh[0] = dibh; +- + height = ip->i_height; + while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height]) + height++; +@@ -898,8 +895,6 @@ out: + iomap->bdev = inode->i_sb->s_bdev; + unlock: + up_read(&ip->i_rw_mutex); +- if (ret && dibh) +- brelse(dibh); + return ret; + + do_alloc: +@@ -980,9 +975,9 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos, + + static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, + loff_t length, unsigned flags, +- struct iomap *iomap) ++ struct iomap *iomap, ++ struct metapath *mp) + { +- struct metapath mp = { .mp_aheight = 1, }; + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_sbd *sdp = GFS2_SB(inode); + unsigned int data_blocks = 0, ind_blocks = 0, rblocks; +@@ -996,9 +991,9 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, + unstuff = gfs2_is_stuffed(ip) && + pos + length > gfs2_max_stuffed_size(ip); + +- ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp); ++ ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp); + if (ret) +- goto out_release; ++ goto out_unlock; + + alloc_required = unstuff || iomap->type == IOMAP_HOLE; + +@@ -1013,7 +1008,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, + + ret = gfs2_quota_lock_check(ip, &ap); + if (ret) +- goto out_release; ++ goto out_unlock; + + ret = gfs2_inplace_reserve(ip, &ap); + if (ret) +@@ -1038,17 +1033,15 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, + ret = gfs2_unstuff_dinode(ip, NULL); + if (ret) + goto out_trans_end; +- release_metapath(&mp); +- brelse(iomap->private); +- iomap->private = NULL; ++ release_metapath(mp); + ret = gfs2_iomap_get(inode, iomap->offset, iomap->length, +- flags, iomap, &mp); ++ flags, iomap, mp); + if (ret) + goto out_trans_end; + } + + if (iomap->type == IOMAP_HOLE) { +- ret = gfs2_iomap_alloc(inode, iomap, flags, &mp); ++ ret = gfs2_iomap_alloc(inode, iomap, flags, mp); + if (ret) { + gfs2_trans_end(sdp); + gfs2_inplace_release(ip); +@@ -1056,7 +1049,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, + goto out_qunlock; + } + } +- release_metapath(&mp); + if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip)) + iomap->page_done = gfs2_iomap_journaled_page_done; + return 0; +@@ -1069,10 +1061,7 @@ out_trans_fail: + out_qunlock: + if (alloc_required) + gfs2_quota_unlock(ip); +-out_release: +- if (iomap->private) +- brelse(iomap->private); +- release_metapath(&mp); ++out_unlock: + gfs2_write_unlock(inode); + return ret; + } +@@ -1088,10 +1077,10 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, + + trace_gfs2_iomap_start(ip, pos, 
length, flags); + if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) { +- ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap); ++ ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp); + } else { + ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp); +- release_metapath(&mp); ++ + /* + * Silently fall back to buffered I/O for stuffed files or if + * we've hot a hole (see gfs2_file_direct_write). +@@ -1100,6 +1089,11 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, + iomap->type != IOMAP_MAPPED) + ret = -ENOTBLK; + } ++ if (!ret) { ++ get_bh(mp.mp_bh[0]); ++ iomap->private = mp.mp_bh[0]; ++ } ++ release_metapath(&mp); + trace_gfs2_iomap_end(ip, iomap, ret); + return ret; + } +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c +index 6b84ef6ccff3..b041cb8ae383 100644 +--- a/fs/gfs2/ops_fstype.c ++++ b/fs/gfs2/ops_fstype.c +@@ -72,13 +72,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) + if (!sdp) + return NULL; + +- sb->s_fs_info = sdp; + sdp->sd_vfs = sb; + sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats); + if (!sdp->sd_lkstats) { + kfree(sdp); + return NULL; + } ++ sb->s_fs_info = sdp; + + set_bit(SDF_NOJOURNALID, &sdp->sd_flags); + gfs2_tune_init(&sdp->sd_tune); +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c +index fa515d5ea5ba..7b861bbc0b43 100644 +--- a/fs/nfs/callback_proc.c ++++ b/fs/nfs/callback_proc.c +@@ -66,7 +66,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp, + out_iput: + rcu_read_unlock(); + trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status)); +- iput(inode); ++ nfs_iput_and_deactive(inode); + out: + dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status)); + return res->status; +@@ -108,7 +108,7 @@ __be32 nfs4_callback_recall(void *argp, void *resp, + } + trace_nfs4_cb_recall(cps->clp, &args->fh, inode, + &args->stateid, -ntohl(res)); +- iput(inode); ++ nfs_iput_and_deactive(inode); + out: + dprintk("%s: exit with status = %d\n", __func__, ntohl(res)); + return res; +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c +index f033f3a69a3b..75fe92eaa681 100644 +--- a/fs/nfs/delegation.c ++++ b/fs/nfs/delegation.c +@@ -849,16 +849,23 @@ nfs_delegation_find_inode_server(struct nfs_server *server, + const struct nfs_fh *fhandle) + { + struct nfs_delegation *delegation; +- struct inode *res = NULL; ++ struct inode *freeme, *res = NULL; + + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { + spin_lock(&delegation->lock); + if (delegation->inode != NULL && + nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { +- res = igrab(delegation->inode); ++ freeme = igrab(delegation->inode); ++ if (freeme && nfs_sb_active(freeme->i_sb)) ++ res = freeme; + spin_unlock(&delegation->lock); + if (res != NULL) + return res; ++ if (freeme) { ++ rcu_read_unlock(); ++ iput(freeme); ++ rcu_read_lock(); ++ } + return ERR_PTR(-EAGAIN); + } + spin_unlock(&delegation->lock); +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c +index 94b52157bf8d..29dee9630eec 100644 +--- a/fs/notify/fanotify/fanotify.c ++++ b/fs/notify/fanotify/fanotify.c +@@ -115,12 +115,12 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info, + continue; + mark = iter_info->marks[type]; + /* +- * if the event is for a child and this inode doesn't care about +- * events on the child, don't send it! ++ * If the event is for a child and this mark doesn't care about ++ * events on a child, don't send it! 
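++ * A mount mark, for example, never receives FS_EVENT_ON_CHILD
++ * events; only an inode mark that set FS_EVENT_ON_CHILD in its
++ * own mask does.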
+ */ +- if (type == FSNOTIFY_OBJ_TYPE_INODE && +- (event_mask & FS_EVENT_ON_CHILD) && +- !(mark->mask & FS_EVENT_ON_CHILD)) ++ if (event_mask & FS_EVENT_ON_CHILD && ++ (type != FSNOTIFY_OBJ_TYPE_INODE || ++ !(mark->mask & FS_EVENT_ON_CHILD))) + continue; + + marks_mask |= mark->mask; +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c +index f43ea1aad542..170a733454f7 100644 +--- a/fs/notify/fsnotify.c ++++ b/fs/notify/fsnotify.c +@@ -161,9 +161,9 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask + parent = dget_parent(dentry); + p_inode = parent->d_inode; + +- if (unlikely(!fsnotify_inode_watches_children(p_inode))) ++ if (unlikely(!fsnotify_inode_watches_children(p_inode))) { + __fsnotify_update_child_dentry_flags(p_inode); +- else if (p_inode->i_fsnotify_mask & mask) { ++ } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) { + struct name_snapshot name; + + /* we are notifying a parent so come up with the new mask which +@@ -193,7 +193,7 @@ static int send_to_group(struct inode *to_tell, + struct fsnotify_iter_info *iter_info) + { + struct fsnotify_group *group = NULL; +- __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); ++ __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); + __u32 marks_mask = 0; + __u32 marks_ignored_mask = 0; + struct fsnotify_mark *mark; +@@ -324,14 +324,17 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, + struct fsnotify_iter_info iter_info = {}; + struct mount *mnt; + int ret = 0; +- /* global tests shouldn't care about events on child only the specific event */ +- __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); ++ __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); + + if (data_is == FSNOTIFY_EVENT_PATH) + mnt = real_mount(((const struct path *)data)->mnt); + else + mnt = NULL; + ++ /* An event "on child" is not intended for a mount mark */ ++ if (mask & FS_EVENT_ON_CHILD) ++ mnt = NULL; ++ + /* + * Optimization: srcu_read_lock() has a memory barrier which can + * be expensive. It protects walking the *_fsnotify_marks lists. 
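Both fsnotify hunks above swap "mask & ~FS_EVENT_ON_CHILD" for an explicit
whitelist of reportable event bits. A minimal userspace sketch of the
difference, using illustrative mask values rather than the kernel's full set:

#include <stdio.h>

/* Illustrative values; the real masks live in
 * include/linux/fsnotify_backend.h. FS_OPEN, FS_IN_ONESHOT and
 * FS_EVENT_ON_CHILD match the kernel, but ALL_FSNOTIFY_EVENTS is
 * shrunk to a single event bit for this demo. */
#define FS_OPEN             0x00000020
#define FS_IN_ONESHOT       0x80000000
#define FS_EVENT_ON_CHILD   0x08000000
#define ALL_FSNOTIFY_EVENTS FS_OPEN

int main(void)
{
        unsigned int mask = FS_OPEN | FS_IN_ONESHOT | FS_EVENT_ON_CHILD;

        /* Old filter: strips only FS_EVENT_ON_CHILD, so flag bits such
         * as FS_IN_ONESHOT leak through to the backends. */
        printf("old test_mask: %#x\n", mask & ~FS_EVENT_ON_CHILD);

        /* New filter: keeps only bits a backend can actually report. */
        printf("new test_mask: %#x\n", mask & ALL_FSNOTIFY_EVENTS);
        return 0;
}

With these values the old filter yields 0x80000020 while the new one
yields 0x20, which is the behavioral change the hunks make.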
+@@ -389,7 +392,7 @@ static __init int fsnotify_init(void) + { + int ret; + +- BUG_ON(hweight32(ALL_FSNOTIFY_EVENTS) != 23); ++ BUG_ON(hweight32(ALL_FSNOTIFY_BITS) != 23); + + ret = init_srcu_struct(&fsnotify_mark_srcu); + if (ret) +diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h +index a83e1f632eb7..f01623aef2f7 100644 +--- a/include/linux/can/dev.h ++++ b/include/linux/can/dev.h +@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, + + void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx); ++struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); + unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); + void can_free_echo_skb(struct net_device *dev, unsigned int idx); + +diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h +index cb31683bbe15..8268811a697e 100644 +--- a/include/linux/can/rx-offload.h ++++ b/include/linux/can/rx-offload.h +@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload * + int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); + int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); + int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); +-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); ++int can_rx_offload_queue_sorted(struct can_rx_offload *offload, ++ struct sk_buff *skb, u32 timestamp); ++unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, ++ unsigned int idx, u32 timestamp); ++int can_rx_offload_queue_tail(struct can_rx_offload *offload, ++ struct sk_buff *skb); + void can_rx_offload_reset(struct can_rx_offload *offload); + void can_rx_offload_del(struct can_rx_offload *offload); + void can_rx_offload_enable(struct can_rx_offload *offload); +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h +index b8f4182f42f1..4599d1c95f8c 100644 +--- a/include/linux/fsnotify_backend.h ++++ b/include/linux/fsnotify_backend.h +@@ -68,15 +68,20 @@ + + #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM) + ++/* Events that can be reported to backends */ + #define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ + FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ + FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ + FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ +- FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \ +- FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \ ++ FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME) ++ ++/* Extra flags that may be reported with event or control handling of events */ ++#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ + FS_DN_MULTISHOT | FS_EVENT_ON_CHILD) + ++#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS) ++ + struct fsnotify_group; + struct fsnotify_event; + struct fsnotify_mark; +diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h +index 21713dc14ce2..673546ba7342 100644 +--- a/include/linux/pfn_t.h ++++ b/include/linux/pfn_t.h +@@ -10,7 +10,7 @@ + * PFN_DEV - pfn is not covered by system memmap by default + * PFN_MAP - pfn has a dynamic page mapping established by a device driver + */ +-#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) ++#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) + 
#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) + #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) + #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) +diff --git a/include/net/sock.h b/include/net/sock.h +index c64a1cff9eb3..f18dbd6da906 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -1491,6 +1491,7 @@ static inline void lock_sock(struct sock *sk) + lock_sock_nested(sk, 0); + } + ++void __release_sock(struct sock *sk); + void release_sock(struct sock *sk); + + /* BH context may only use the following locking interface. */ +diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c +index ed5d34925ad0..6a4b41484afe 100644 +--- a/kernel/debug/kdb/kdb_io.c ++++ b/kernel/debug/kdb/kdb_io.c +@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize) + int count; + int i; + int diag, dtab_count; +- int key; ++ int key, buf_size, ret; + + + diag = kdbgetintenv("DTABCOUNT", &dtab_count); +@@ -336,9 +336,8 @@ poll_again: + else + p_tmp = tmpbuffer; + len = strlen(p_tmp); +- count = kallsyms_symbol_complete(p_tmp, +- sizeof(tmpbuffer) - +- (p_tmp - tmpbuffer)); ++ buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer); ++ count = kallsyms_symbol_complete(p_tmp, buf_size); + if (tab == 2 && count > 0) { + kdb_printf("\n%d symbols are found.", count); + if (count > dtab_count) { +@@ -350,9 +349,13 @@ poll_again: + } + kdb_printf("\n"); + for (i = 0; i < count; i++) { +- if (WARN_ON(!kallsyms_symbol_next(p_tmp, i))) ++ ret = kallsyms_symbol_next(p_tmp, i, buf_size); ++ if (WARN_ON(!ret)) + break; +- kdb_printf("%s ", p_tmp); ++ if (ret != -E2BIG) ++ kdb_printf("%s ", p_tmp); ++ else ++ kdb_printf("%s... ", p_tmp); + *(p_tmp + len) = '\0'; + } + if (i >= dtab_count) +diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h +index 1e5a502ba4a7..2118d8258b7c 100644 +--- a/kernel/debug/kdb/kdb_private.h ++++ b/kernel/debug/kdb/kdb_private.h +@@ -83,7 +83,7 @@ typedef struct __ksymtab { + unsigned long sym_start; + unsigned long sym_end; + } kdb_symtab_t; +-extern int kallsyms_symbol_next(char *prefix_name, int flag); ++extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size); + extern int kallsyms_symbol_complete(char *prefix_name, int max_len); + + /* Exported Symbols for kernel loadable modules to use. */ +diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c +index 987eb73284d2..b14b0925c184 100644 +--- a/kernel/debug/kdb/kdb_support.c ++++ b/kernel/debug/kdb/kdb_support.c +@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len) + * Parameters: + * prefix_name prefix of a symbol name to lookup + * flag 0 means search from the head, 1 means continue search. ++ * buf_size maximum length that can be written to prefix_name ++ * buffer + * Returns: + * 1 if a symbol matches the given prefix. 
+ * 0 if no string found + */ +-int kallsyms_symbol_next(char *prefix_name, int flag) ++int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size) + { + int prefix_len = strlen(prefix_name); + static loff_t pos; +@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag) + pos = 0; + + while ((name = kdb_walk_kallsyms(&pos))) { +- if (strncmp(name, prefix_name, prefix_len) == 0) { +- strncpy(prefix_name, name, strlen(name)+1); +- return 1; +- } ++ if (!strncmp(name, prefix_name, prefix_len)) ++ return strscpy(prefix_name, name, buf_size); + } + return 0; + } +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 0b760c1369f7..15301ed19da6 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -2662,6 +2662,15 @@ void rcu_check_callbacks(int user) + rcu_bh_qs(); + } + rcu_preempt_check_callbacks(); ++ /* The load-acquire pairs with the store-release setting to true. */ ++ if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) { ++ /* Idle and userspace execution already are quiescent states. */ ++ if (!rcu_is_cpu_rrupt_from_idle() && !user) { ++ set_tsk_need_resched(current); ++ set_preempt_need_resched(); ++ } ++ __this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); ++ } + if (rcu_pending()) + invoke_rcu_core(); + +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 908c9cdae2f0..1162552dc3cc 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -5672,11 +5672,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, + return target; + } + +-static unsigned long cpu_util_wake(int cpu, struct task_struct *p); ++static unsigned long cpu_util_without(int cpu, struct task_struct *p); + +-static unsigned long capacity_spare_wake(int cpu, struct task_struct *p) ++static unsigned long capacity_spare_without(int cpu, struct task_struct *p) + { +- return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0); ++ return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0); + } + + /* +@@ -5736,7 +5736,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, + + avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); + +- spare_cap = capacity_spare_wake(i, p); ++ spare_cap = capacity_spare_without(i, p); + + if (spare_cap > max_spare_cap) + max_spare_cap = spare_cap; +@@ -5887,8 +5887,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p + return prev_cpu; + + /* +- * We need task's util for capacity_spare_wake, sync it up to prev_cpu's +- * last_update_time. ++ * We need task's util for capacity_spare_without, sync it up to ++ * prev_cpu's last_update_time. + */ + if (!(sd_flag & SD_BALANCE_FORK)) + sync_entity_load_avg(&p->se); +@@ -6214,10 +6214,19 @@ static inline unsigned long cpu_util(int cpu) + } + + /* +- * cpu_util_wake: Compute CPU utilization with any contributions from +- * the waking task p removed. ++ * cpu_util_without: compute cpu utilization without any contributions from *p ++ * @cpu: the CPU which utilization is requested ++ * @p: the task which utilization should be discounted ++ * ++ * The utilization of a CPU is defined by the utilization of tasks currently ++ * enqueued on that CPU as well as tasks which are currently sleeping after an ++ * execution on that CPU. ++ * ++ * This method returns the utilization of the specified CPU by discounting the ++ * utilization of the specified task, whenever the task is currently ++ * contributing to the CPU utilization. 
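++ *
++ * Illustrative numbers: with cpu_util == 512 and task_util(p) == 200,
++ * a call while p is still contributing returns 512 - 200 = 312.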
+ */ +-static unsigned long cpu_util_wake(int cpu, struct task_struct *p) ++static unsigned long cpu_util_without(int cpu, struct task_struct *p) + { + struct cfs_rq *cfs_rq; + unsigned int util; +@@ -6229,7 +6238,7 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p) + cfs_rq = &cpu_rq(cpu)->cfs; + util = READ_ONCE(cfs_rq->avg.util_avg); + +- /* Discount task's blocked util from CPU's util */ ++ /* Discount task's util from CPU's util */ + util -= min_t(unsigned int, util, task_util(p)); + + /* +@@ -6238,14 +6247,14 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p) + * a) if *p is the only task sleeping on this CPU, then: + * cpu_util (== task_util) > util_est (== 0) + * and thus we return: +- * cpu_util_wake = (cpu_util - task_util) = 0 ++ * cpu_util_without = (cpu_util - task_util) = 0 + * + * b) if other tasks are SLEEPING on this CPU, which is now exiting + * IDLE, then: + * cpu_util >= task_util + * cpu_util > util_est (== 0) + * and thus we discount *p's blocked utilization to return: +- * cpu_util_wake = (cpu_util - task_util) >= 0 ++ * cpu_util_without = (cpu_util - task_util) >= 0 + * + * c) if other tasks are RUNNABLE on that CPU and + * util_est > cpu_util +@@ -6258,8 +6267,33 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p) + * covered by the following code when estimated utilization is + * enabled. + */ +- if (sched_feat(UTIL_EST)) +- util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); ++ if (sched_feat(UTIL_EST)) { ++ unsigned int estimated = ++ READ_ONCE(cfs_rq->avg.util_est.enqueued); ++ ++ /* ++ * Despite the following checks we still have a small window ++ * for a possible race, when an execl's select_task_rq_fair() ++ * races with LB's detach_task(): ++ * ++ * detach_task() ++ * p->on_rq = TASK_ON_RQ_MIGRATING; ++ * ---------------------------------- A ++ * deactivate_task() \ ++ * dequeue_task() + RaceTime ++ * util_est_dequeue() / ++ * ---------------------------------- B ++ * ++ * The additional check on "current == p" it's required to ++ * properly fix the execl regression and it helps in further ++ * reducing the chances for the above race. ++ */ ++ if (unlikely(task_on_rq_queued(p) || current == p)) { ++ estimated -= min_t(unsigned int, estimated, ++ (_task_util_est(p) | UTIL_AVG_UNCHANGED)); ++ } ++ util = max(util, estimated); ++ } + + /* + * Utilization (estimated) can exceed the CPU capacity, thus let's +diff --git a/mm/memory.c b/mm/memory.c +index c467102a5cbc..5c5df53dbdf9 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3745,10 +3745,36 @@ static vm_fault_t do_fault(struct vm_fault *vmf) + struct vm_area_struct *vma = vmf->vma; + vm_fault_t ret; + +- /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ +- if (!vma->vm_ops->fault) +- ret = VM_FAULT_SIGBUS; +- else if (!(vmf->flags & FAULT_FLAG_WRITE)) ++ /* ++ * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND ++ */ ++ if (!vma->vm_ops->fault) { ++ /* ++ * If we find a migration pmd entry or a none pmd entry, which ++ * should never happen, return SIGBUS ++ */ ++ if (unlikely(!pmd_present(*vmf->pmd))) ++ ret = VM_FAULT_SIGBUS; ++ else { ++ vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, ++ vmf->pmd, ++ vmf->address, ++ &vmf->ptl); ++ /* ++ * Make sure this is not a temporary clearing of pte ++ * by holding ptl and checking again. A R/M/W update ++ * of pte involves: take ptl, clearing the pte so that ++ * we don't have concurrent modification by hardware ++ * followed by an update. 
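++ * Re-checking pte_none() while holding the ptl therefore rules out
++ * observing that transient cleared state.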
++ */ ++ if (unlikely(pte_none(*vmf->pte))) ++ ret = VM_FAULT_SIGBUS; ++ else ++ ret = VM_FAULT_NOPAGE; ++ ++ pte_unmap_unlock(vmf->pte, vmf->ptl); ++ } ++ } else if (!(vmf->flags & FAULT_FLAG_WRITE)) + ret = do_read_fault(vmf); + else if (!(vma->vm_flags & VM_SHARED)) + ret = do_cow_fault(vmf); +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index e2ef1c17942f..b721631d78ab 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -4055,17 +4055,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, + unsigned int cpuset_mems_cookie; + int reserve_flags; + +- /* +- * In the slowpath, we sanity check order to avoid ever trying to +- * reclaim >= MAX_ORDER areas which will never succeed. Callers may +- * be using allocators in order of preference for an area that is +- * too large. +- */ +- if (order >= MAX_ORDER) { +- WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); +- return NULL; +- } +- + /* + * We also sanity check to catch abuse of atomic reserves being used by + * callers that are not in atomic context. +@@ -4359,6 +4348,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, + gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */ + struct alloc_context ac = { }; + ++ /* ++ * There are several places where we assume that the order value is sane ++ * so bail out early if the request is out of bound. ++ */ ++ if (unlikely(order >= MAX_ORDER)) { ++ WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); ++ return NULL; ++ } ++ + gfp_mask &= gfp_allowed_mask; + alloc_mask = gfp_mask; + if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) +@@ -7690,6 +7688,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, + if (PageReserved(page)) + goto unmovable; + ++ /* ++ * If the zone is movable and we have ruled out all reserved ++ * pages then it should be reasonably safe to assume the rest ++ * is movable. ++ */ ++ if (zone_idx(zone) == ZONE_MOVABLE) ++ continue; ++ + /* + * Hugepages are not in LRU lists, but they're movable. 
+ * We need not scan over tail pages bacause we don't +diff --git a/mm/shmem.c b/mm/shmem.c +index 446942677cd4..38d228a30fdc 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2610,9 +2610,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) + inode_lock(inode); + /* We're holding i_mutex so we can access i_size directly */ + +- if (offset < 0) +- offset = -EINVAL; +- else if (offset >= inode->i_size) ++ if (offset < 0 || offset >= inode->i_size) + offset = -ENXIO; + else { + start = offset >> PAGE_SHIFT; +diff --git a/mm/slab.c b/mm/slab.c +index aa76a70e087e..d73c7a4820a4 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -3675,6 +3675,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) + struct kmem_cache *cachep; + void *ret; + ++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) ++ return NULL; + cachep = kmalloc_slab(size, flags); + if (unlikely(ZERO_OR_NULL_PTR(cachep))) + return cachep; +@@ -3710,6 +3712,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, + struct kmem_cache *cachep; + void *ret; + ++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) ++ return NULL; + cachep = kmalloc_slab(size, flags); + if (unlikely(ZERO_OR_NULL_PTR(cachep))) + return cachep; +diff --git a/mm/slab_common.c b/mm/slab_common.c +index fea3376f9816..3a7ac4f15194 100644 +--- a/mm/slab_common.c ++++ b/mm/slab_common.c +@@ -1027,18 +1027,18 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) + { + unsigned int index; + +- if (unlikely(size > KMALLOC_MAX_SIZE)) { +- WARN_ON_ONCE(!(flags & __GFP_NOWARN)); +- return NULL; +- } +- + if (size <= 192) { + if (!size) + return ZERO_SIZE_PTR; + + index = size_index[size_index_elem(size)]; +- } else ++ } else { ++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { ++ WARN_ON(1); ++ return NULL; ++ } + index = fls(size - 1); ++ } + + #ifdef CONFIG_ZONE_DMA + if (unlikely((flags & GFP_DMA))) +diff --git a/mm/z3fold.c b/mm/z3fold.c +index 4b366d181f35..aee9b0b8d907 100644 +--- a/mm/z3fold.c ++++ b/mm/z3fold.c +@@ -99,6 +99,7 @@ struct z3fold_header { + #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) + + #define BUDDY_MASK (0x3) ++#define BUDDY_SHIFT 2 + + /** + * struct z3fold_pool - stores metadata for each z3fold pool +@@ -145,7 +146,7 @@ enum z3fold_page_flags { + MIDDLE_CHUNK_MAPPED, + NEEDS_COMPACTING, + PAGE_STALE, +- UNDER_RECLAIM ++ PAGE_CLAIMED, /* by either reclaim or free */ + }; + + /***************** +@@ -174,7 +175,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, + clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); + clear_bit(NEEDS_COMPACTING, &page->private); + clear_bit(PAGE_STALE, &page->private); +- clear_bit(UNDER_RECLAIM, &page->private); ++ clear_bit(PAGE_CLAIMED, &page->private); + + spin_lock_init(&zhdr->page_lock); + kref_init(&zhdr->refcount); +@@ -223,8 +224,11 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) + unsigned long handle; + + handle = (unsigned long)zhdr; +- if (bud != HEADLESS) +- handle += (bud + zhdr->first_num) & BUDDY_MASK; ++ if (bud != HEADLESS) { ++ handle |= (bud + zhdr->first_num) & BUDDY_MASK; ++ if (bud == LAST) ++ handle |= (zhdr->last_chunks << BUDDY_SHIFT); ++ } + return handle; + } + +@@ -234,6 +238,12 @@ static struct z3fold_header *handle_to_z3fold_header(unsigned long handle) + return (struct z3fold_header *)(handle & PAGE_MASK); + } + ++/* only for LAST bud, returns zero otherwise */ ++static unsigned short handle_to_chunks(unsigned long handle) ++{ ++ return (handle & 
~PAGE_MASK) >> BUDDY_SHIFT; ++} ++ + /* + * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle + * but that doesn't matter. because the masking will result in the +@@ -720,37 +730,39 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) + page = virt_to_page(zhdr); + + if (test_bit(PAGE_HEADLESS, &page->private)) { +- /* HEADLESS page stored */ +- bud = HEADLESS; +- } else { +- z3fold_page_lock(zhdr); +- bud = handle_to_buddy(handle); +- +- switch (bud) { +- case FIRST: +- zhdr->first_chunks = 0; +- break; +- case MIDDLE: +- zhdr->middle_chunks = 0; +- zhdr->start_middle = 0; +- break; +- case LAST: +- zhdr->last_chunks = 0; +- break; +- default: +- pr_err("%s: unknown bud %d\n", __func__, bud); +- WARN_ON(1); +- z3fold_page_unlock(zhdr); +- return; ++ /* if a headless page is under reclaim, just leave. ++ * NB: we use test_and_set_bit for a reason: if the bit ++ * has not been set before, we release this page ++ * immediately so we don't care about its value any more. ++ */ ++ if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) { ++ spin_lock(&pool->lock); ++ list_del(&page->lru); ++ spin_unlock(&pool->lock); ++ free_z3fold_page(page); ++ atomic64_dec(&pool->pages_nr); + } ++ return; + } + +- if (bud == HEADLESS) { +- spin_lock(&pool->lock); +- list_del(&page->lru); +- spin_unlock(&pool->lock); +- free_z3fold_page(page); +- atomic64_dec(&pool->pages_nr); ++ /* Non-headless case */ ++ z3fold_page_lock(zhdr); ++ bud = handle_to_buddy(handle); ++ ++ switch (bud) { ++ case FIRST: ++ zhdr->first_chunks = 0; ++ break; ++ case MIDDLE: ++ zhdr->middle_chunks = 0; ++ break; ++ case LAST: ++ zhdr->last_chunks = 0; ++ break; ++ default: ++ pr_err("%s: unknown bud %d\n", __func__, bud); ++ WARN_ON(1); ++ z3fold_page_unlock(zhdr); + return; + } + +@@ -758,7 +770,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) + atomic64_dec(&pool->pages_nr); + return; + } +- if (test_bit(UNDER_RECLAIM, &page->private)) { ++ if (test_bit(PAGE_CLAIMED, &page->private)) { + z3fold_page_unlock(zhdr); + return; + } +@@ -836,20 +848,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) + } + list_for_each_prev(pos, &pool->lru) { + page = list_entry(pos, struct page, lru); ++ ++ /* this bit could have been set by free, in which case ++ * we pass over to the next page in the pool. 
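++ * The test_and_set_bit() thus makes free and reclaim mutually
++ * exclusive claimants of a page.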
++ */ ++ if (test_and_set_bit(PAGE_CLAIMED, &page->private)) ++ continue; ++ ++ zhdr = page_address(page); + if (test_bit(PAGE_HEADLESS, &page->private)) +- /* candidate found */ + break; + +- zhdr = page_address(page); +- if (!z3fold_page_trylock(zhdr)) ++ if (!z3fold_page_trylock(zhdr)) { ++ zhdr = NULL; + continue; /* can't evict at this point */ ++ } + kref_get(&zhdr->refcount); + list_del_init(&zhdr->buddy); + zhdr->cpu = -1; +- set_bit(UNDER_RECLAIM, &page->private); + break; + } + ++ if (!zhdr) ++ break; ++ + list_del_init(&page->lru); + spin_unlock(&pool->lock); + +@@ -898,6 +920,7 @@ next: + if (test_bit(PAGE_HEADLESS, &page->private)) { + if (ret == 0) { + free_z3fold_page(page); ++ atomic64_dec(&pool->pages_nr); + return 0; + } + spin_lock(&pool->lock); +@@ -905,7 +928,7 @@ next: + spin_unlock(&pool->lock); + } else { + z3fold_page_lock(zhdr); +- clear_bit(UNDER_RECLAIM, &page->private); ++ clear_bit(PAGE_CLAIMED, &page->private); + if (kref_put(&zhdr->refcount, + release_z3fold_page_locked)) { + atomic64_dec(&pool->pages_nr); +@@ -964,7 +987,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) + set_bit(MIDDLE_CHUNK_MAPPED, &page->private); + break; + case LAST: +- addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); ++ addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT); + break; + default: + pr_err("unknown buddy id %d\n", buddy); +diff --git a/net/can/raw.c b/net/can/raw.c +index 1051eee82581..3aab7664933f 100644 +--- a/net/can/raw.c ++++ b/net/can/raw.c +@@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) + } else + ifindex = ro->ifindex; + +- if (ro->fd_frames) { ++ dev = dev_get_by_index(sock_net(sk), ifindex); ++ if (!dev) ++ return -ENXIO; ++ ++ err = -EINVAL; ++ if (ro->fd_frames && dev->mtu == CANFD_MTU) { + if (unlikely(size != CANFD_MTU && size != CAN_MTU)) +- return -EINVAL; ++ goto put_dev; + } else { + if (unlikely(size != CAN_MTU)) +- return -EINVAL; ++ goto put_dev; + } + +- dev = dev_get_by_index(sock_net(sk), ifindex); +- if (!dev) +- return -ENXIO; +- + skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv), + msg->msg_flags & MSG_DONTWAIT, &err); + if (!skb) +diff --git a/net/core/sock.c b/net/core/sock.c +index 3730eb855095..748765e35423 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2317,7 +2317,7 @@ static void __lock_sock(struct sock *sk) + finish_wait(&sk->sk_lock.wq, &wait); + } + +-static void __release_sock(struct sock *sk) ++void __release_sock(struct sock *sk) + __releases(&sk->sk_lock.slock) + __acquires(&sk->sk_lock.slock) + { +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index bbd07736fb0f..a32a0f4cc138 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -2403,16 +2403,10 @@ adjudge_to_death: + sock_hold(sk); + sock_orphan(sk); + +- /* It is the last release_sock in its life. It will remove backlog. */ +- release_sock(sk); +- +- +- /* Now socket is owned by kernel and we acquire BH lock +- * to finish close. No need to check for user refs. +- */ + local_bh_disable(); + bh_lock_sock(sk); +- WARN_ON(sock_owned_by_user(sk)); ++ /* remove backlog if any, without releasing ownership. 
*/ ++ __release_sock(sk); + + percpu_counter_inc(sk->sk_prot->orphan_count); + +@@ -2481,6 +2475,7 @@ adjudge_to_death: + out: + bh_unlock_sock(sk); + local_bh_enable(); ++ release_sock(sk); + sock_put(sk); + } + EXPORT_SYMBOL(tcp_close); +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c +index 1beeea9549fa..b99e73a7e7e0 100644 +--- a/net/llc/af_llc.c ++++ b/net/llc/af_llc.c +@@ -730,7 +730,6 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + struct sk_buff *skb = NULL; + struct sock *sk = sock->sk; + struct llc_sock *llc = llc_sk(sk); +- unsigned long cpu_flags; + size_t copied = 0; + u32 peek_seq = 0; + u32 *seq, skb_len; +@@ -855,9 +854,8 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + goto copy_uaddr; + + if (!(flags & MSG_PEEK)) { +- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); +- sk_eat_skb(sk, skb); +- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); ++ skb_unlink(skb, &sk->sk_receive_queue); ++ kfree_skb(skb); + *seq = 0; + } + +@@ -878,9 +876,8 @@ copy_uaddr: + llc_cmsg_rcv(msg, skb); + + if (!(flags & MSG_PEEK)) { +- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); +- sk_eat_skb(sk, skb); +- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); ++ skb_unlink(skb, &sk->sk_receive_queue); ++ kfree_skb(skb); + *seq = 0; + } + +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index a827a1f562bf..6a28b96e779e 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -499,8 +499,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc, + void sctp_assoc_rm_peer(struct sctp_association *asoc, + struct sctp_transport *peer) + { +- struct list_head *pos; +- struct sctp_transport *transport; ++ struct sctp_transport *transport; ++ struct list_head *pos; ++ struct sctp_chunk *ch; + + pr_debug("%s: association:%p addr:%pISpc\n", + __func__, asoc, &peer->ipaddr.sa); +@@ -564,7 +565,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, + */ + if (!list_empty(&peer->transmitted)) { + struct sctp_transport *active = asoc->peer.active_path; +- struct sctp_chunk *ch; + + /* Reset the transport of each chunk on this list */ + list_for_each_entry(ch, &peer->transmitted, +@@ -586,6 +586,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, + sctp_transport_hold(active); + } + ++ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) ++ if (ch->transport == peer) ++ ch->transport = NULL; ++ + asoc->peer.transport_count--; + + sctp_transport_free(peer); +diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c +index f1df9837f1ac..1ac08dcbf85d 100644 +--- a/net/sunrpc/auth_generic.c ++++ b/net/sunrpc/auth_generic.c +@@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred) + { + struct auth_cred *acred = &container_of(cred, struct generic_cred, + gc_base)->acred; +- bool ret; +- +- get_rpccred(cred); +- ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags); +- put_rpccred(cred); +- +- return ret; ++ return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags); + } + + static const struct rpc_credops generic_credops = { +diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c +index e9394e7adc84..f4eadd3f7350 100644 +--- a/security/selinux/ss/policydb.c ++++ b/security/selinux/ss/policydb.c +@@ -1101,7 +1101,7 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len) + if ((len == 0) || (len == (u32)-1)) + return -EINVAL; + +- str = kmalloc(len + 1, flags); ++ str = kmalloc(len + 1, 
flags | __GFP_NOWARN); + if (!str) + return -ENOMEM; + +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index f8d4a419f3af..467039b342b5 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -1062,8 +1062,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) + runtime->oss.channels = params_channels(params); + runtime->oss.rate = params_rate(params); + +- vfree(runtime->oss.buffer); +- runtime->oss.buffer = vmalloc(runtime->oss.period_bytes); ++ kvfree(runtime->oss.buffer); ++ runtime->oss.buffer = kvzalloc(runtime->oss.period_bytes, GFP_KERNEL); + if (!runtime->oss.buffer) { + err = -ENOMEM; + goto failure; +@@ -2328,7 +2328,7 @@ static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream) + { + struct snd_pcm_runtime *runtime; + runtime = substream->runtime; +- vfree(runtime->oss.buffer); ++ kvfree(runtime->oss.buffer); + runtime->oss.buffer = NULL; + #ifdef CONFIG_SND_PCM_OSS_PLUGINS + snd_pcm_oss_plugin_clear(substream); +diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c +index 0391cb1a4f19..71571d992159 100644 +--- a/sound/core/oss/pcm_plugin.c ++++ b/sound/core/oss/pcm_plugin.c +@@ -66,8 +66,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t + return -ENXIO; + size /= 8; + if (plugin->buf_frames < frames) { +- vfree(plugin->buf); +- plugin->buf = vmalloc(size); ++ kvfree(plugin->buf); ++ plugin->buf = kvzalloc(size, GFP_KERNEL); + plugin->buf_frames = frames; + } + if (!plugin->buf) { +@@ -191,7 +191,7 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin) + if (plugin->private_free) + plugin->private_free(plugin); + kfree(plugin->buf_channels); +- vfree(plugin->buf); ++ kvfree(plugin->buf); + kfree(plugin); + return 0; + } +diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile +index d79ab161cc75..f68b4bc55273 100644 +--- a/tools/power/cpupower/bench/Makefile ++++ b/tools/power/cpupower/bench/Makefile +@@ -9,7 +9,7 @@ endif + ifeq ($(strip $(STATIC)),true) + LIBS = -L../ -L$(OUTPUT) -lm + OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \ +- $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o ++ $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o + else + LIBS = -L../ -L$(OUTPUT) -lm -lcpupower + OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o +diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c +index 1b993fe1ce23..0c0f3e3f0d80 100644 +--- a/tools/power/cpupower/lib/cpufreq.c ++++ b/tools/power/cpupower/lib/cpufreq.c +@@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname, + + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", + cpu, fname); +- return sysfs_read_file(path, buf, buflen); ++ return cpupower_read_sysfs(path, buf, buflen); + } + + /* helper function to write a new value to a /sys file */ +diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c +index 9bd4c7655fdb..852d25462388 100644 +--- a/tools/power/cpupower/lib/cpuidle.c ++++ b/tools/power/cpupower/lib/cpuidle.c +@@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf, + + snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); + +- return sysfs_read_file(path, buf, buflen); ++ return cpupower_read_sysfs(path, buf, buflen); + } + + +diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c 
+index 9c395ec924de..9711d628b0f4 100644 +--- a/tools/power/cpupower/lib/cpupower.c ++++ b/tools/power/cpupower/lib/cpupower.c +@@ -15,7 +15,7 @@ + #include "cpupower.h" + #include "cpupower_intern.h" + +-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) ++unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen) + { + int fd; + ssize_t numread; +@@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re + + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s", + cpu, fname); +- if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) ++ if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0) + return -1; + *result = strtol(linebuf, &endp, 0); + if (endp == linebuf || errno == ERANGE) +diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h +index 92affdfbe417..4887c76d23f8 100644 +--- a/tools/power/cpupower/lib/cpupower_intern.h ++++ b/tools/power/cpupower/lib/cpupower_intern.h +@@ -3,4 +3,4 @@ + #define MAX_LINE_LEN 4096 + #define SYSFS_PATH_MAX 255 + +-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); ++unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen); +diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c +index cffc2c5a778d..ec50d2a95076 100644 +--- a/tools/testing/nvdimm/test/nfit.c ++++ b/tools/testing/nvdimm/test/nfit.c +@@ -139,8 +139,8 @@ static u32 handle[] = { + [6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1), + }; + +-static unsigned long dimm_fail_cmd_flags[NUM_DCR]; +-static int dimm_fail_cmd_code[NUM_DCR]; ++static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)]; ++static int dimm_fail_cmd_code[ARRAY_SIZE(handle)]; + + static const struct nd_intel_smart smart_def = { + .flags = ND_INTEL_SMART_HEALTH_VALID +@@ -203,7 +203,7 @@ struct nfit_test { + unsigned long deadline; + spinlock_t lock; + } ars_state; +- struct device *dimm_dev[NUM_DCR]; ++ struct device *dimm_dev[ARRAY_SIZE(handle)]; + struct nd_intel_smart *smart; + struct nd_intel_smart_threshold *smart_threshold; + struct badrange badrange; +@@ -2678,7 +2678,7 @@ static int nfit_test_probe(struct platform_device *pdev) + u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle; + int i; + +- for (i = 0; i < NUM_DCR; i++) ++ for (i = 0; i < ARRAY_SIZE(handle); i++) + if (nfit_handle == handle[i]) + dev_set_drvdata(nfit_test->dimm_dev[i], + nfit_mem); |
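For reference, a minimal standalone sketch of the kind of sysfs read helper
the cpupower hunks above rename to cpupower_read_sysfs(); the path below is
only an example and error handling is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static unsigned int read_sysfs(const char *path, char *buf, size_t buflen)
{
        ssize_t numread;
        int fd = open(path, O_RDONLY);

        if (fd == -1)
                return 0;
        /* Leave room for the terminating NUL. */
        numread = read(fd, buf, buflen - 1);
        close(fd);
        if (numread < 1)
                return 0;
        buf[numread] = '\0';
        return (unsigned int)numread;
}

int main(void)
{
        char buf[4096];

        if (read_sysfs("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq",
                       buf, sizeof(buf)))
                printf("%s", buf);
        return 0;
}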