-rw-r--r--   0000_README              |     4
-rw-r--r--   1005_linux-5.1.6.patch   | 14203
2 files changed, 14207 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 24316998..7713f537 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-5.1.5.patch
From: http://www.kernel.org
Desc: Linux 5.1.5
+Patch: 1005_linux-5.1.6.patch
+From: http://www.kernel.org
+Desc: Linux 5.1.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1005_linux-5.1.6.patch b/1005_linux-5.1.6.patch
new file mode 100644
index 00000000..897ab6da
--- /dev/null
+++ b/1005_linux-5.1.6.patch
@@ -0,0 +1,14203 @@
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index d1e2bb801e1b..6e97a3f771ef 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -61,6 +61,7 @@ stable kernels.
+ | ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
+ | ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
+ | ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
++| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
+ | ARM | MMU-500 | #841119,#826419 | N/A |
+ | | | | |
+ | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+index 5d181fc3cc18..4a78ba8b85bc 100644
+--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
++++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+@@ -59,7 +59,8 @@ Required properties:
+ one for each entry in reset-names.
+ - reset-names: "phy" for reset of phy block,
+ "common" for phy common block reset,
+- "cfg" for phy's ahb cfg block reset.
++ "cfg" for phy's ahb cfg block reset,
++ "ufsphy" for the PHY reset in the UFS controller.
+
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
+@@ -74,7 +75,8 @@ Required properties:
+ "phy", "common".
+ For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
+ "phy", "common".
+- For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
++ For "qcom,sdm845-qmp-ufs-phy": must contain:
++ "ufsphy".
+
+ - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+diff --git a/Makefile b/Makefile
+index 24a16a544ffd..d8bdd2bb55dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 1
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
+index 07e27f212dc7..d2453e2d3f1f 100644
+--- a/arch/arm/include/asm/cp15.h
++++ b/arch/arm/include/asm/cp15.h
+@@ -68,6 +68,8 @@
+ #define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+ #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
++#define CNTVCT __ACCESS_CP15_64(1, c14)
++
+ extern unsigned long cr_alignment; /* defined in entry-armv.S */
+
+ static inline unsigned long get_cr(void)
+diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
+index a9dd619c6c29..7bdbf5d5c47d 100644
+--- a/arch/arm/vdso/vgettimeofday.c
++++ b/arch/arm/vdso/vgettimeofday.c
+@@ -18,9 +18,9 @@
+ #include <linux/compiler.h>
+ #include <linux/hrtimer.h>
+ #include <linux/time.h>
+-#include <asm/arch_timer.h>
+ #include <asm/barrier.h>
+ #include <asm/bug.h>
++#include <asm/cp15.h>
+ #include <asm/page.h>
+ #include <asm/unistd.h>
+ #include <asm/vdso_datapage.h>
+@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
+ u64 cycle_now;
+ u64 nsec;
+
+- cycle_now = arch_counter_get_cntvct();
++ isb();
++ cycle_now = read_sysreg(CNTVCT);
+
+ cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7e34b9eba5de..d218729ec852 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -517,6 +517,24 @@ config ARM64_ERRATUM_1286807
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_1463225
++ bool "Cortex-A76: Software Step might prevent interrupt recognition"
++ default y
++ help
++ This option adds a workaround for Arm Cortex-A76 erratum 1463225.
++
++ On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
++ of a system call instruction (SVC) can prevent recognition of
++ subsequent interrupts when software stepping is disabled in the
++ exception handler of the system call and either kernel debugging
++ is enabled or VHE is in use.
++
++ Work around the erratum by triggering a dummy step exception
++ when handling a system call from a task that is being stepped
++ in a VHE configuration of the kernel.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+@@ -1347,6 +1365,7 @@ config ARM64_MODULE_PLTS
+
+ config ARM64_PSEUDO_NMI
+ bool "Support for NMI-like interrupts"
++ depends on BROKEN # 1556553607-46531-1-git-send-email-julien.thierry@arm.com
+ select CONFIG_ARM_GIC_V3
+ help
+ Adds support for mimicking Non-Maskable Interrupts through the use of
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index f6a76e43f39e..4389d5d0ca0f 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -61,7 +61,8 @@
+ #define ARM64_HAS_GENERIC_AUTH_ARCH 40
+ #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
+ #define ARM64_HAS_IRQ_PRIO_MASKING 42
++#define ARM64_WORKAROUND_1463225 43
+
+-#define ARM64_NCAPS 43
++#define ARM64_NCAPS 44
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 6fb2214333a2..2d78ea6932b7 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -58,7 +58,7 @@ do { \
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+- int oldval = 0, ret, tmp;
++ int oldval, ret, tmp;
+ u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+
+ pagefault_disable();
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index de70c1eabf33..74ebe9693714 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -478,6 +478,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+ return __pmd_to_phys(pmd);
+ }
+
++static inline void pte_unmap(pte_t *pte) { }
++
+ /* Find an entry in the third-level page table. */
+ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+@@ -486,7 +488,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+
+ #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+ #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+-#define pte_unmap(pte) do { } while (0)
+ #define pte_unmap_nested(pte) do { } while (0)
+
+ #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
+index 2b9a63771eda..f89263c8e11a 100644
+--- a/arch/arm64/include/asm/vdso_datapage.h
++++ b/arch/arm64/include/asm/vdso_datapage.h
+@@ -38,6 +38,7 @@ struct vdso_data {
+ __u32 tz_minuteswest; /* Whacky timezone stuff */
+ __u32 tz_dsttime;
+ __u32 use_syscall;
++ __u32 hrtimer_res;
+ };
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 7f40dcbdd51d..e10e2a5d9ddc 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -94,7 +94,7 @@ int main(void)
+ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
+ DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
+- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
++ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
+ DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
+ DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
+ DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 9950bb0cbd52..87019cd73f22 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -464,6 +464,22 @@ out_printmsg:
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static bool
++has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ u32 midr = read_cpuid_id();
++ /* Cortex-A76 r0p0 - r3p1 */
++ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
++}
++#endif
++
+ static void __maybe_unused
+ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+ {
+@@ -738,6 +754,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .capability = ARM64_WORKAROUND_1165522,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ },
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++ {
++ .desc = "ARM erratum 1463225",
++ .capability = ARM64_WORKAROUND_1463225,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = has_cortex_a76_erratum_1463225,
++ },
+ #endif
+ {
+ }
+diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
+index ea001241bdd4..00f8b8612b69 100644
+--- a/arch/arm64/kernel/cpu_ops.c
++++ b/arch/arm64/kernel/cpu_ops.c
+@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
+ pr_err("%pOF: missing enable-method property\n",
+ dn);
+ }
++ of_node_put(dn);
+ } else {
+ enable_method = acpi_get_enable_method(cpu);
+ if (!enable_method) {
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index b09b6f75f759..06941c1fe418 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+ /*
+- * Randomize the module region over a 4 GB window covering the
++ * Randomize the module region over a 2 GB window covering the
+ * kernel. This reduces the risk of modules leaking information
+ * about the address of the kernel itself, but results in
+ * branches between modules and the core kernel that are
+ * resolved via PLTs. (Branches between modules will be
+ * resolved normally.)
+ */
+- module_range = SZ_4G - (u64)(_end - _stext);
+- module_alloc_base = max((u64)_end + offset - SZ_4G,
++ module_range = SZ_2G - (u64)(_end - _stext);
++ module_alloc_base = max((u64)_end + offset - SZ_2G,
+ (u64)MODULES_VADDR);
+ } else {
+ /*
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index f713e2fc4d75..1e418e69b58c 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
+ * can simply omit this fallback in that case.
+ */
+ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+- module_alloc_base + SZ_4G, GFP_KERNEL,
++ module_alloc_base + SZ_2G, GFP_KERNEL,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
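The kaslr.c and module.c hunks above shrink the module randomization window from 4 GB to 2 GB, keeping every module allocation within plus or minus 2 GB of the kernel image, the reach of 32-bit place-relative references. A simplified sketch of the window arithmetic (illustrative names, not the kernel code):

    #include <stdint.h>

    #define SZ_2G 0x80000000ull

    /* kernel image occupies [kstart, kend); pick the lowest base so
     * that [base, base + SZ_2G) still covers the whole image */
    static uint64_t module_window_base(uint64_t kstart, uint64_t kend,
                                       uint64_t modules_vaddr)
    {
        uint64_t range = SZ_2G - (kend - kstart);  /* randomizable span */
        uint64_t base  = kend - SZ_2G;

        (void)range;  /* a random offset in [0, range) is added upstream */
        return base < modules_vaddr ? modules_vaddr : base;
    }
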
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index 5610ac01c1ec..871c739f060a 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -8,6 +8,7 @@
+ #include <linux/syscalls.h>
+
+ #include <asm/daifflags.h>
++#include <asm/debug-monitors.h>
+ #include <asm/fpsimd.h>
+ #include <asm/syscall.h>
+ #include <asm/thread_info.h>
+@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
+ int syscall_trace_enter(struct pt_regs *regs);
+ void syscall_trace_exit(struct pt_regs *regs);
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static void cortex_a76_erratum_1463225_svc_handler(void)
++{
++ u32 reg, val;
++
++ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
++ return;
++
++ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
++ return;
++
++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
++ reg = read_sysreg(mdscr_el1);
++ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
++ write_sysreg(val, mdscr_el1);
++ asm volatile("msr daifclr, #8");
++ isb();
++
++ /* We will have taken a single-step exception by this point */
++
++ write_sysreg(reg, mdscr_el1);
++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
++}
++#else
++static void cortex_a76_erratum_1463225_svc_handler(void) { }
++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
++
+ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ const syscall_fn_t syscall_table[])
+ {
+@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ regs->orig_x0 = regs->regs[0];
+ regs->syscallno = scno;
+
++ cortex_a76_erratum_1463225_svc_handler();
+ local_daif_restore(DAIF_PROCCTX);
+ user_exit();
+
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index 2d419006ad43..ec0bb588d755 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk)
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+
++ /* Read without the seqlock held by clock_getres() */
++ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
++
+ if (!use_syscall) {
+ /* tkr_mono.cycle_last == tkr_raw.cycle_last */
+ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
+index e8f60112818f..856fee6d3512 100644
+--- a/arch/arm64/kernel/vdso/gettimeofday.S
++++ b/arch/arm64/kernel/vdso/gettimeofday.S
+@@ -308,13 +308,14 @@ ENTRY(__kernel_clock_getres)
+ ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
+ b.ne 1f
+
+- ldr x2, 5f
++ adr vdso_data, _vdso_data
++ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
+ b 2f
+ 1:
+ cmp w0, #CLOCK_REALTIME_COARSE
+ ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+ b.ne 4f
+- ldr x2, 6f
++ ldr x2, 5f
+ 2:
+ cbz x1, 3f
+ stp xzr, x2, [x1]
+@@ -328,8 +329,6 @@ ENTRY(__kernel_clock_getres)
+ svc #0
+ ret
+ 5:
+- .quad CLOCK_REALTIME_RES
+-6:
+ .quad CLOCK_COARSE_RES
+ .cfi_endproc
+ ENDPROC(__kernel_clock_getres)
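The hrtimer_res plumbing above (vdso_datapage.h, asm-offsets.c, vdso.c and this gettimeofday.S hunk) makes the vDSO's clock_getres() report the live hrtimer_resolution instead of a build-time constant. A quick userspace check of the patched behaviour, which now agrees with the syscall path even when high-resolution timers are disabled:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec res;

        clock_getres(CLOCK_MONOTONIC, &res);   /* vDSO fast path */
        printf("monotonic resolution: %ld ns\n", res.tv_nsec);
        return 0;
    }
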
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 78c0a72f822c..674860e3e478 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -249,6 +249,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
++ if (!is_vmalloc_addr(cpu_addr)) {
++ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
++ return __swiotlb_mmap_pfn(vma, pfn, size);
++ }
++
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+@@ -272,6 +277,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
++ if (!is_vmalloc_addr(cpu_addr)) {
++ struct page *page = virt_to_page(cpu_addr);
++ return __swiotlb_get_sgtable_page(sgt, page, size);
++ }
++
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 1a7e92ab69eb..9a6099a2c633 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -810,14 +810,47 @@ void __init hook_debug_fault_code(int nr,
+ debug_fault_info[nr].name = name;
+ }
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static int __exception
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++{
++ if (user_mode(regs))
++ return 0;
++
++ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
++ return 0;
++
++ /*
++ * We've taken a dummy step exception from the kernel to ensure
++ * that interrupts are re-enabled on the syscall path. Return back
++ * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
++ * masked so that we can safely restore the mdscr and get on with
++ * handling the syscall.
++ */
++ regs->pstate |= PSR_D_BIT;
++ return 1;
++}
++#else
++static int __exception
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++{
++ return 0;
++}
++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
++
+ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
+- unsigned int esr,
+- struct pt_regs *regs)
++ unsigned int esr,
++ struct pt_regs *regs)
+ {
+ const struct fault_info *inf = esr_to_debug_fault_info(esr);
+ unsigned long pc = instruction_pointer(regs);
+ int rv;
+
++ if (cortex_a76_erratum_1463225_debug_handler(regs))
++ return 0;
++
+ /*
+ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+ * already disabled to preserve the last enabled/disabled addresses.
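That completes the erratum 1463225 workaround: the SVC path arms a dummy single step with debug exceptions unmasked, immediately takes the step exception, and the debug fault handler above acknowledges it by setting PSTATE.D so the saved MDSCR_EL1 can be restored. The affected scenario is a debugger single-stepping a task across a system call; a hypothetical userspace reproducer outline (illustrative only):

    #include <signal.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {                 /* child: get traced */
            ptrace(PTRACE_TRACEME, 0, 0, 0);
            raise(SIGSTOP);
            getpid();                   /* the SVC that gets stepped */
            _exit(0);
        }
        waitpid(pid, NULL, 0);          /* child stopped itself */
        /* step the child one instruction at a time across the SVC */
        while (ptrace(PTRACE_SINGLESTEP, pid, 0, 0) == 0) {
            int status;

            if (waitpid(pid, &status, 0) != pid || WIFEXITED(status))
                break;
        }
        return 0;
    }
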
+diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
+index 9d9f6f334d3c..3da3e2b1b51b 100644
+--- a/arch/powerpc/boot/addnote.c
++++ b/arch/powerpc/boot/addnote.c
+@@ -223,7 +223,11 @@ main(int ac, char **av)
+ PUT_16(E_PHNUM, np + 2);
+
+ /* write back */
+- lseek(fd, (long) 0, SEEK_SET);
++ i = lseek(fd, (long) 0, SEEK_SET);
++ if (i < 0) {
++ perror("lseek");
++ exit(1);
++ }
+ i = write(fd, buf, n);
+ if (i < 0) {
+ perror("write");
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 3fad8d499767..5321a11c2835 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -968,7 +968,9 @@ start_here_multiplatform:
+
+ /* Restore parameters passed from prom_init/kexec */
+ mr r3,r31
+- bl early_setup /* also sets r13 and SPRG_PACA */
++ LOAD_REG_ADDR(r12, DOTSYM(early_setup))
++ mtctr r12
++ bctrl /* also sets r13 and SPRG_PACA */
+
+ LOAD_REG_ADDR(r3, start_here_common)
+ ld r4,PACAKMSR(r13)
+diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
+index 3c6ab22a0c4e..af3c15a1d41e 100644
+--- a/arch/powerpc/kernel/watchdog.c
++++ b/arch/powerpc/kernel/watchdog.c
+@@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
+
+ static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
+
+-static DEFINE_PER_CPU(struct timer_list, wd_timer);
++static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
+ static DEFINE_PER_CPU(u64, wd_timer_tb);
+
+ /* SMP checker bits */
+@@ -293,21 +293,21 @@ out:
+ nmi_exit();
+ }
+
+-static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
+-{
+- t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
+- if (wd_timer_period_ms > 1000)
+- t->expires = __round_jiffies_up(t->expires, cpu);
+- add_timer_on(t, cpu);
+-}
+-
+-static void wd_timer_fn(struct timer_list *t)
++static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ {
+ int cpu = smp_processor_id();
+
++ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
++ return HRTIMER_NORESTART;
++
++ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
++ return HRTIMER_NORESTART;
++
+ watchdog_timer_interrupt(cpu);
+
+- wd_timer_reset(cpu, t);
++ hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
++
++ return HRTIMER_RESTART;
+ }
+
+ void arch_touch_nmi_watchdog(void)
+@@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void)
+ }
+ EXPORT_SYMBOL(arch_touch_nmi_watchdog);
+
+-static void start_watchdog_timer_on(unsigned int cpu)
+-{
+- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+-
+- per_cpu(wd_timer_tb, cpu) = get_tb();
+-
+- timer_setup(t, wd_timer_fn, TIMER_PINNED);
+- wd_timer_reset(cpu, t);
+-}
+-
+-static void stop_watchdog_timer_on(unsigned int cpu)
+-{
+- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+-
+- del_timer_sync(t);
+-}
+-
+-static int start_wd_on_cpu(unsigned int cpu)
++static void start_watchdog(void *arg)
+ {
++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
+ WARN_ON(1);
+- return 0;
++ return;
+ }
+
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+- return 0;
++ return;
+
+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+- return 0;
++ return;
+
+ wd_smp_lock(&flags);
+ cpumask_set_cpu(cpu, &wd_cpus_enabled);
+@@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu)
+ }
+ wd_smp_unlock(&flags);
+
+- start_watchdog_timer_on(cpu);
++ *this_cpu_ptr(&wd_timer_tb) = get_tb();
+
+- return 0;
++ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer->function = watchdog_timer_fn;
++ hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
++ HRTIMER_MODE_REL_PINNED);
+ }
+
+-static int stop_wd_on_cpu(unsigned int cpu)
++static int start_watchdog_on_cpu(unsigned int cpu)
+ {
++ return smp_call_function_single(cpu, start_watchdog, NULL, true);
++}
++
++static void stop_watchdog(void *arg)
++{
++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+- return 0; /* Can happen in CPU unplug case */
++ return; /* Can happen in CPU unplug case */
+
+- stop_watchdog_timer_on(cpu);
++ hrtimer_cancel(hrtimer);
+
+ wd_smp_lock(&flags);
+ cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+ wd_smp_unlock(&flags);
+
+ wd_smp_clear_cpu_pending(cpu, get_tb());
++}
+
+- return 0;
++static int stop_watchdog_on_cpu(unsigned int cpu)
++{
++ return smp_call_function_single(cpu, stop_watchdog, NULL, true);
+ }
+
+ static void watchdog_calc_timeouts(void)
+@@ -402,7 +400,7 @@ void watchdog_nmi_stop(void)
+ int cpu;
+
+ for_each_cpu(cpu, &wd_cpus_enabled)
+- stop_wd_on_cpu(cpu);
++ stop_watchdog_on_cpu(cpu);
+ }
+
+ void watchdog_nmi_start(void)
+@@ -411,7 +409,7 @@ void watchdog_nmi_start(void)
+
+ watchdog_calc_timeouts();
+ for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
+- start_wd_on_cpu(cpu);
++ start_watchdog_on_cpu(cpu);
+ }
+
+ /*
+@@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void)
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "powerpc/watchdog:online",
+- start_wd_on_cpu, stop_wd_on_cpu);
++ start_watchdog_on_cpu,
++ stop_watchdog_on_cpu);
+ if (err < 0) {
+ pr_warn("could not be initialized");
+ return err;
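The conversion above replaces a pinned timer_list with a per-CPU hrtimer, which also explains the new smp_call_function_single() wrappers: an hrtimer must be armed on the CPU it is meant to fire on. The minimal shape of the pattern (a sketch with illustrative names; period handling as in the hunks):

    static DEFINE_PER_CPU(struct hrtimer, demo_wd_timer);
    static unsigned int period_ms = 100;

    static enum hrtimer_restart demo_wd_fn(struct hrtimer *t)
    {
        /* per-CPU heartbeat work goes here */
        hrtimer_forward_now(t, ms_to_ktime(period_ms));
        return HRTIMER_RESTART;                 /* keep firing */
    }

    static void demo_wd_start(void *arg)        /* runs on the target CPU */
    {
        struct hrtimer *t = this_cpu_ptr(&demo_wd_timer);

        hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        t->function = demo_wd_fn;
        hrtimer_start(t, ms_to_ktime(period_ms), HRTIMER_MODE_REL_PINNED);
    }

    /* from any CPU: smp_call_function_single(cpu, demo_wd_start, NULL, true); */
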
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index f976676004ad..48c9a97eb2c3 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1498,6 +1498,9 @@ int start_topology_update(void)
+ {
+ int rc = 0;
+
++ if (!topology_updates_enabled)
++ return 0;
++
+ if (firmware_has_feature(FW_FEATURE_PRRN)) {
+ if (!prrn_enabled) {
+ prrn_enabled = 1;
+@@ -1531,6 +1534,9 @@ int stop_topology_update(void)
+ {
+ int rc = 0;
+
++ if (!topology_updates_enabled)
++ return 0;
++
+ if (prrn_enabled) {
+ prrn_enabled = 0;
+ #ifdef CONFIG_SMP
+@@ -1588,11 +1594,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
+
+ kbuf[read_len] = '\0';
+
+- if (!strncmp(kbuf, "on", 2))
++ if (!strncmp(kbuf, "on", 2)) {
++ topology_updates_enabled = true;
+ start_topology_update();
+- else if (!strncmp(kbuf, "off", 3))
++ } else if (!strncmp(kbuf, "off", 3)) {
+ stop_topology_update();
+- else
++ topology_updates_enabled = false;
++ } else
+ return -EINVAL;
+
+ return count;
+@@ -1607,9 +1615,7 @@ static const struct file_operations topology_ops = {
+
+ static int topology_update_init(void)
+ {
+- /* Do not poll for changes if disabled at boot */
+- if (topology_updates_enabled)
+- start_topology_update();
++ start_topology_update();
+
+ if (vphn_enabled)
+ topology_schedule_update();
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index b1c37cc3fa98..2d12f0037e3a 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -487,6 +487,11 @@ static int nest_imc_event_init(struct perf_event *event)
+ * Get the base memory addresss for this cpu.
+ */
+ chip_id = cpu_to_chip_id(event->cpu);
++
++ /* Return, if chip_id is not valid */
++ if (chip_id < 0)
++ return -ENODEV;
++
+ pcni = pmu->mem_info;
+ do {
+ if (pcni->id == chip_id) {
+@@ -494,7 +499,7 @@ static int nest_imc_event_init(struct perf_event *event)
+ break;
+ }
+ pcni++;
+- } while (pcni);
++ } while (pcni->vbase != 0);
+
+ if (!flag)
+ return -ENODEV;
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 58a07948c76e..3d27f02695e4 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
+ nr_chips))
+ goto error;
+
+- pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info),
++ pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
+ GFP_KERNEL);
+ if (!pmu_ptr->mem_info)
+ goto error;
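The imc-pmu and opal-imc hunks above work as a pair: the allocation gains one extra zeroed entry, and the lookup loop now terminates on vbase == 0 instead of on the pointer itself (the old while (pcni) condition could never become false). A condensed sketch of the sentinel-terminated lookup:

    struct demo_mem_info { int id; void *vbase; };

    /* tbl ends with a kcalloc()-zeroed sentinel entry */
    static void *find_vbase(struct demo_mem_info *tbl, int chip_id)
    {
        for (; tbl->vbase; tbl++)
            if (tbl->id == chip_id)
                return tbl->vbase;
        return NULL;            /* caller turns this into -ENODEV */
    }
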
+diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
+index 5a286b012043..602e7cc26d11 100644
+--- a/arch/s390/kernel/kexec_elf.c
++++ b/arch/s390/kernel/kexec_elf.c
+@@ -19,10 +19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ struct kexec_buf buf;
+ const Elf_Ehdr *ehdr;
+ const Elf_Phdr *phdr;
++ Elf_Addr entry;
+ int i, ret;
+
+ ehdr = (Elf_Ehdr *)kernel;
+ buf.image = image;
++ if (image->type == KEXEC_TYPE_CRASH)
++ entry = STARTUP_KDUMP_OFFSET;
++ else
++ entry = ehdr->e_entry;
+
+ phdr = (void *)ehdr + ehdr->e_phoff;
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+@@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
+ buf.memsz = phdr->p_memsz;
+
+- if (phdr->p_paddr == 0) {
++ if (entry - phdr->p_paddr < phdr->p_memsz) {
+ data->kernel_buf = buf.buffer;
+ data->memsz += STARTUP_NORMAL_OFFSET;
+
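The new condition is the standard unsigned in-range idiom: with wrapping subtraction, entry - p_paddr < p_memsz holds exactly when p_paddr <= entry < p_paddr + p_memsz, so the kernel segment is now identified as the one containing the entry point rather than the one loaded at address 0. Equivalently (sketch):

    #include <stdbool.h>
    #include <stdint.h>

    static bool seg_contains(uint64_t base, uint64_t size, uint64_t addr)
    {
        return addr - base < size; /* one compare thanks to unsigned wrap */
    }
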
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 8485d6dc2754..9ebd01219812 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
+ return old;
+ }
+
++#ifdef CONFIG_PGSTE
+ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+ {
+ pgd_t *pgd;
+@@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+ pmd = pmd_alloc(mm, pud, addr);
+ return pmd;
+ }
++#endif
+
+ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t new)
+diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
+index 8f9bfbf3cdb1..d6cce65b4871 100644
+--- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
++++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
+@@ -132,7 +132,7 @@ enum {
+
+ static inline u32 sh7786_mm_sel(void)
+ {
+- return __raw_readl(0xFC400020) & 0x7;
++ return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7;
+ }
+
+ #endif /* __CPU_SH7786_H__ */
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index a587805c6687..56e748a7679f 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -47,7 +47,7 @@ export REALMODE_CFLAGS
+ export BITS
+
+ ifdef CONFIG_X86_NEED_RELOCS
+- LDFLAGS_vmlinux := --emit-relocs
++ LDFLAGS_vmlinux := --emit-relocs --discard-none
+ endif
+
+ #
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index d41de9af7a39..6072f92cb8ea 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -578,6 +578,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
++
++ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
+ { },
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
+index 94dc564146ca..37ebf6fc5415 100644
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -775,6 +775,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
++
++ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
+ {},
+ };
+
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index a878e6286e4a..f3f4c2263501 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -89,6 +89,7 @@ static bool test_intel(int idx)
+ case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ case INTEL_FAM6_ICELAKE_MOBILE:
+ if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+ return true;
+ break;
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 321fe5f5d0e9..4d5fcd47ab75 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -61,9 +61,8 @@
+ } while (0)
+
+ #define RELOAD_SEG(seg) { \
+- unsigned int pre = GET_SEG(seg); \
++ unsigned int pre = (seg) | 3; \
+ unsigned int cur = get_user_seg(seg); \
+- pre |= 3; \
+ if (pre != cur) \
+ set_user_seg(seg, pre); \
+ }
+@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext_32 __user *sc)
+ {
+ unsigned int tmpflags, err = 0;
++ u16 gs, fs, es, ds;
+ void __user *buf;
+ u32 tmp;
+
+@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ current->restart_block.fn = do_no_restart_syscall;
+
+ get_user_try {
+- /*
+- * Reload fs and gs if they have changed in the signal
+- * handler. This does not handle long fs/gs base changes in
+- * the handler, but does not clobber them at least in the
+- * normal case.
+- */
+- RELOAD_SEG(gs);
+- RELOAD_SEG(fs);
+- RELOAD_SEG(ds);
+- RELOAD_SEG(es);
++ gs = GET_SEG(gs);
++ fs = GET_SEG(fs);
++ ds = GET_SEG(ds);
++ es = GET_SEG(es);
+
+ COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+ COPY(dx); COPY(cx); COPY(ip); COPY(ax);
+@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ buf = compat_ptr(tmp);
+ } get_user_catch(err);
+
++ /*
++ * Reload fs and gs if they have changed in the signal
++ * handler. This does not handle long fs/gs base changes in
++ * the handler, but does not clobber them at least in the
++ * normal case.
++ */
++ RELOAD_SEG(gs);
++ RELOAD_SEG(fs);
++ RELOAD_SEG(ds);
++ RELOAD_SEG(es);
++
+ err |= fpu__restore_sig(buf, 1);
+
+ force_iret();
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index 05861cc08787..0bbb07eaed6b 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ extern int after_bootmem;
+
++#ifndef CONFIG_UML_X86
+ static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+ {
+ regs->ip = ip;
+@@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+ int3_emulate_jmp(regs, func);
+ }
+-#endif
++#endif /* CONFIG_X86_64 */
++#endif /* !CONFIG_UML_X86 */
+
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 1954dd5552a2..3822cc8ac9d6 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -427,10 +427,11 @@ do { \
+ ({ \
+ __label__ __pu_label; \
+ int __pu_err = -EFAULT; \
+- __typeof__(*(ptr)) __pu_val; \
+- __pu_val = x; \
++ __typeof__(*(ptr)) __pu_val = (x); \
++ __typeof__(ptr) __pu_ptr = (ptr); \
++ __typeof__(size) __pu_size = (size); \
+ __uaccess_begin(); \
+- __put_user_size(__pu_val, (ptr), (size), __pu_label); \
++ __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
+ __pu_err = 0; \
+ __pu_label: \
+ __uaccess_end(); \
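The rewrite above is routine macro hygiene: ptr and size are now captured into typed temporaries exactly once, so an argument with side effects cannot be evaluated twice by the nested macro expansion. A stand-alone illustration of the hazard, using hypothetical macros in the GNU C statement-expression style the kernel relies on:

    #define SQUARE_BAD(x)  ((x) * (x))
    #define SQUARE_OK(x)   ({ __typeof__(x) _x = (x); _x * _x; })

    /* With int v = 3: SQUARE_BAD(v++) expands v++ twice (undefined
     * behaviour); SQUARE_OK(v++) evaluates it once and yields 9. */
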
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 9a79c7808f9c..d7df79fc448c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -667,15 +667,29 @@ void __init alternative_instructions(void)
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+- size_t len)
++ size_t len)
+ {
+ unsigned long flags;
+- local_irq_save(flags);
+- memcpy(addr, opcode, len);
+- local_irq_restore(flags);
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++
++ if (boot_cpu_has(X86_FEATURE_NX) &&
++ is_module_text_address((unsigned long)addr)) {
++ /*
++ * Modules text is marked initially as non-executable, so the
++ * code cannot be running and speculative code-fetches are
++ * prevented. Just change the code.
++ */
++ memcpy(addr, opcode, len);
++ } else {
++ local_irq_save(flags);
++ memcpy(addr, opcode, len);
++ local_irq_restore(flags);
++ sync_core();
++
++ /*
++ * Could also do a CLFLUSH here to speed up CPU recovery; but
++ * that causes hangs on some VIA CPUs.
++ */
++ }
+ return addr;
+ }
+
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index cf25405444ab..415621ddb8a2 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -19,6 +19,8 @@
+
+ #include "cpu.h"
+
++#define APICID_SOCKET_ID_BIT 6
++
+ /*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
+@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
++ /* Socket ID is ApicId[6] for these processors. */
++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++
+ cacheinfo_hygon_init_llc_id(c, cpu, node_id);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 1a7084ba9a3b..9e6a94c208e0 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -712,19 +712,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+
+ barrier();
+ m.status = mce_rdmsrl(msr_ops.status(i));
++
++ /* If this entry is not valid, ignore it */
+ if (!(m.status & MCI_STATUS_VAL))
+ continue;
+
+ /*
+- * Uncorrected or signalled events are handled by the exception
+- * handler when it is enabled, so don't process those here.
+- *
+- * TBD do the same check for MCI_STATUS_EN here?
++ * If we are logging everything (at CPU online) or this
++ * is a corrected error, then we must log it.
+ */
+- if (!(flags & MCP_UC) &&
+- (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
+- continue;
++ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
++ goto log_it;
++
++ /*
++ * Newer Intel systems that support software error
++ * recovery need to make additional checks. Other
++ * CPUs should skip over uncorrected errors, but log
++ * everything else.
++ */
++ if (!mca_cfg.ser) {
++ if (m.status & MCI_STATUS_UC)
++ continue;
++ goto log_it;
++ }
++
++ /* Log "not enabled" (speculative) errors */
++ if (!(m.status & MCI_STATUS_EN))
++ goto log_it;
++
++ /*
++ * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
++ * UC == 1 && PCC == 0 && S == 0
++ */
++ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
++ goto log_it;
++
++ /*
++ * Skip anything else. Presumption is that our read of this
++ * bank is racing with a machine check. Leave the log alone
++ * for do_machine_check() to deal with it.
++ */
++ continue;
+
++log_it:
+ error_seen = true;
+
+ mce_read_aux(&m, i);
+@@ -1451,13 +1481,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
+ static int __mcheck_cpu_mce_banks_init(void)
+ {
+ int i;
+- u8 num_banks = mca_cfg.banks;
+
+- mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
++ mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
+ if (!mce_banks)
+ return -ENOMEM;
+
+- for (i = 0; i < num_banks; i++) {
++ for (i = 0; i < MAX_NR_BANKS; i++) {
+ struct mce_bank *b = &mce_banks[i];
+
+ b->ctl = -1ULL;
+@@ -1471,28 +1500,19 @@ static int __mcheck_cpu_mce_banks_init(void)
+ */
+ static int __mcheck_cpu_cap_init(void)
+ {
+- unsigned b;
+ u64 cap;
++ u8 b;
+
+ rdmsrl(MSR_IA32_MCG_CAP, cap);
+
+ b = cap & MCG_BANKCNT_MASK;
+- if (!mca_cfg.banks)
+- pr_info("CPU supports %d MCE banks\n", b);
+-
+- if (b > MAX_NR_BANKS) {
+- pr_warn("Using only %u machine check banks out of %u\n",
+- MAX_NR_BANKS, b);
++ if (WARN_ON_ONCE(b > MAX_NR_BANKS))
+ b = MAX_NR_BANKS;
+- }
+
+- /* Don't support asymmetric configurations today */
+- WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
+- mca_cfg.banks = b;
++ mca_cfg.banks = max(mca_cfg.banks, b);
+
+ if (!mce_banks) {
+ int err = __mcheck_cpu_mce_banks_init();
+-
+ if (err)
+ return err;
+ }
+@@ -2459,6 +2479,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
+
+ static int __init mcheck_late_init(void)
+ {
++ pr_info("Using %d MCE banks\n", mca_cfg.banks);
++
+ if (mca_cfg.recovery)
+ static_branch_inc(&mcsafe_key);
+
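The restructured poll loop earlier in this file replaces one dense condition with an explicit decision ladder. Condensed into a single predicate (a sketch that keeps only the status-bit logic; MCI_STATUS_* as in the kernel headers):

    static bool should_log(u64 status, bool log_uc, bool ser)
    {
        if (!(status & MCI_STATUS_VAL))
            return false;           /* invalid entry: skip */
        if (log_uc || !(status & MCI_STATUS_UC))
            return true;            /* logging everything, or corrected */
        if (!ser)
            return false;           /* no recovery support: skip UC */
        if (!(status & MCI_STATUS_EN))
            return true;            /* "not enabled" (speculative) */
        /* UCNA: UC == 1 && PCC == 0 && S == 0 (SDM 15.6.3) */
        return !(status & MCI_STATUS_PCC) && !(status & MCI_STATUS_S);
    }
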
+diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
+index 8492ef7d9015..3f82afd0f46f 100644
+--- a/arch/x86/kernel/cpu/mce/inject.c
++++ b/arch/x86/kernel/cpu/mce/inject.c
+@@ -46,8 +46,6 @@
+ static struct mce i_mce;
+ static struct dentry *dfs_inj;
+
+-static u8 n_banks;
+-
+ #define MAX_FLAG_OPT_SIZE 4
+ #define NBCFG 0x44
+
+@@ -570,9 +568,15 @@ err:
+ static int inj_bank_set(void *data, u64 val)
+ {
+ struct mce *m = (struct mce *)data;
++ u8 n_banks;
++ u64 cap;
++
++ /* Get bank count on target CPU so we can handle non-uniform values. */
++ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
++ n_banks = cap & MCG_BANKCNT_MASK;
+
+ if (val >= n_banks) {
+- pr_err("Non-existent MCE bank: %llu\n", val);
++ pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
+ return -EINVAL;
+ }
+
+@@ -665,10 +669,6 @@ static struct dfs_node {
+ static int __init debugfs_init(void)
+ {
+ unsigned int i;
+- u64 cap;
+-
+- rdmsrl(MSR_IA32_MCG_CAP, cap);
+- n_banks = cap & MCG_BANKCNT_MASK;
+
+ dfs_inj = debugfs_create_dir("mce-inject", NULL);
+ if (!dfs_inj)
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 5260185cbf7b..8a4a7823451a 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
+ if (ustate == UCODE_ERROR) {
+ error = -1;
+ break;
+- } else if (ustate == UCODE_OK)
++ } else if (ustate == UCODE_NEW) {
+ apply_microcode_on_target(cpu);
++ }
+ }
+
+ return error;
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index bd553b3af22e..6e0c0ed8e4bf 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -749,6 +749,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ unsigned long end_offset;
+ unsigned long op_offset;
+ unsigned long offset;
++ unsigned long npages;
+ unsigned long size;
+ unsigned long retq;
+ unsigned long *ptr;
+@@ -781,6 +782,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ return 0;
+
+ *tramp_size = size + RET_SIZE + sizeof(void *);
++ npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
+
+ /* Copy ftrace_caller onto the trampoline memory */
+ ret = probe_kernel_read(trampoline, (void *)start_offset, size);
+@@ -825,6 +827,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ /* ALLOC_TRAMP flags lets us know we created it */
+ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+
++ /*
++ * Module allocation needs to be completed by making the page
++ * executable. The page is still writable, which is a security hazard,
++ * but anyhow ftrace breaks W^X completely.
++ */
++ set_memory_x((unsigned long)trampoline, npages);
+ return (unsigned long)trampoline;
+ fail:
+ tramp_free(trampoline, *tramp_size);
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 0469cd078db1..b50ac9c7397b 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -26,9 +26,18 @@ int sysctl_panic_on_stackoverflow;
+ /*
+ * Probabilistic stack overflow check:
+ *
+- * Only check the stack in process context, because everything else
+- * runs on the big interrupt stacks. Checking reliably is too expensive,
+- * so we just check from interrupts.
++ * Regular device interrupts can enter on the following stacks:
++ *
++ * - User stack
++ *
++ * - Kernel task stack
++ *
++ * - Interrupt stack if a device driver reenables interrupts
++ * which should only happen in really old drivers.
++ *
++ * - Debug IST stack
++ *
++ * All other contexts are invalid.
+ */
+ static inline void stack_overflow_check(struct pt_regs *regs)
+ {
+@@ -53,8 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+ return;
+
+ oist = this_cpu_ptr(&orig_ist);
+- estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
+- estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
++ estack_bottom = (u64)oist->ist[DEBUG_STACK];
++ estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN;
+ if (regs->sp >= estack_top && regs->sp <= estack_bottom)
+ return;
+
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index b052e883dd8c..cfa3106faee4 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
+ MODULES_VADDR + get_module_load_offset(),
+ MODULES_END, GFP_KERNEL,
+- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 08dfd4c1a4f9..c8aa58a2bab9 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
+ COPY_SEG_CPL3(cs);
+ COPY_SEG_CPL3(ss);
+
+-#ifdef CONFIG_X86_64
+- /*
+- * Fix up SS if needed for the benefit of old DOSEMU and
+- * CRIU.
+- */
+- if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
+- user_64bit_mode(regs)))
+- force_valid_ss(regs);
+-#endif
+-
+ get_user_ex(tmpflags, &sc->flags);
+ regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+ regs->orig_ax = -1; /* disable syscall checks */
+@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
+ buf = (void __user *)buf_val;
+ } get_user_catch(err);
+
++#ifdef CONFIG_X86_64
++ /*
++ * Fix up SS if needed for the benefit of old DOSEMU and
++ * CRIU.
++ */
++ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
++ force_valid_ss(regs);
++#endif
++
+ err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
+
+ force_iret();
+@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ {
+ struct rt_sigframe __user *frame;
+ void __user *fp = NULL;
++ unsigned long uc_flags;
+ int err = 0;
+
+ frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
+@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ return -EFAULT;
+ }
+
++ uc_flags = frame_uc_flags(regs);
++
+ put_user_try {
+ /* Create the ucontext. */
+- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
++ put_user_ex(uc_flags, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+
+@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
+ {
+ #ifdef CONFIG_X86_X32_ABI
+ struct rt_sigframe_x32 __user *frame;
++ unsigned long uc_flags;
+ void __user *restorer;
+ int err = 0;
+ void __user *fpstate = NULL;
+@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
+ return -EFAULT;
+ }
+
++ uc_flags = frame_uc_flags(regs);
++
+ put_user_try {
+ /* Create the ucontext. */
+- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
++ put_user_ex(uc_flags, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+ put_user_ex(0, &frame->uc.uc__pad0);
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index a5127b2c195f..834659288ba9 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -141,11 +141,11 @@ SECTIONS
+ *(.text.__x86.indirect_thunk)
+ __indirect_thunk_end = .;
+ #endif
+-
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
++ /* End of text section */
++ _etext = .;
++
+ NOTES :text :note
+
+ EXCEPTION_TABLE(16) :text = 0x9090
+diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
+index faa264822cee..007bc654f928 100644
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
+ __kvm_migrate_apic_timer(vcpu);
+ __kvm_migrate_pit_timer(vcpu);
+ }
++
++bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
++{
++ bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
++
++ return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
++}
+diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
+index d5005cc26521..fd210cdd4983 100644
+--- a/arch/x86/kvm/irq.h
++++ b/arch/x86/kvm/irq.h
+@@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
+ return mode != KVM_IRQCHIP_NONE;
+ }
+
++bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
+ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
+ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
+ void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
+index 1495a735b38e..50fa9450fcf1 100644
+--- a/arch/x86/kvm/pmu_amd.c
++++ b/arch/x86/kvm/pmu_amd.c
+@@ -269,10 +269,10 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
+
+ pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
+ pmu->reserved_bits = 0xffffffff00200000ull;
++ pmu->version = 1;
+ /* not applicable to AMD; but clean them to prevent any fall out */
+ pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+ pmu->nr_arch_fixed_counters = 0;
+- pmu->version = 0;
+ pmu->global_status = 0;
+ }
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 406b558abfef..ae6e51828a54 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2024,7 +2024,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ if (!kvm_vcpu_apicv_active(vcpu))
+ return;
+
+- if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
++ /*
++ * Since the host physical APIC id is 8 bits,
++ * we can support host APIC ID upto 255.
++ */
++ if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
+ return;
+
+ entry = READ_ONCE(*(svm->avic_physical_id_cache));
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 0c601d079cd2..8f6f69c26c35 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2792,14 +2792,13 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
+ : "cc", "memory"
+ );
+
+- preempt_enable();
+-
+ if (vmx->msr_autoload.host.nr)
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ if (vmx->msr_autoload.guest.nr)
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+
+ if (vm_fail) {
++ preempt_enable();
+ WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+@@ -2811,6 +2810,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
+ local_irq_enable();
+ if (hw_breakpoint_active())
+ set_debugreg(__this_cpu_read(cpu_dr7), 7);
++ preempt_enable();
+
+ /*
+ * A non-failing VMEntry means we somehow entered guest mode with
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index fed1ab6a825c..6b8575c547ee 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1288,7 +1288,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ u64 efer = msr_info->data;
+
+ if (efer & efer_reserved_bits)
+- return false;
++ return 1;
+
+ if (!msr_info->host_initiated) {
+ if (!__kvm_valid_efer(vcpu, efer))
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index 3b24dc05251c..9d05572370ed 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
+ /* Copy successful. Return zero */
+ .L_done_memcpy_trap:
+ xorl %eax, %eax
++.L_done:
+ ret
+ ENDPROC(__memcpy_mcsafe)
+ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
+@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
+ addl %edx, %ecx
+ .E_trailing_bytes:
+ mov %ecx, %eax
+- ret
++ jmp .L_done
+
+ /*
+ * For write fault handling, given the destination is unaligned,
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 667f1da36208..5eaf67e8314f 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -359,8 +359,6 @@ static noinline int vmalloc_fault(unsigned long address)
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+- WARN_ON_ONCE(in_nmi());
+-
+ /*
+ * Copy kernel mappings over when needed. This can also
+ * happen within a race in page table update. In the later
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 2c53b0f19329..1297e185b8c8 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -2133,14 +2133,19 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
+ */
+ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
+ {
+- unsigned char *uvhub_mask;
+ struct uvhub_desc *uvhub_descs;
++ unsigned char *uvhub_mask = NULL;
+
+ if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+ timeout_us = calculate_destination_timeout();
+
+ uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
++ if (!uvhub_descs)
++ goto fail;
++
+ uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
++ if (!uvhub_mask)
++ goto fail;
+
+ if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
+ goto fail;
+diff --git a/block/bio.c b/block/bio.c
+index 716510ecd7ff..a3c80a6c1fe5 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -776,6 +776,8 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
+
+ if (vec_end_addr + 1 != page_addr + off)
+ return false;
++ if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
++ return false;
+ if (same_page && (vec_end_addr & PAGE_MASK) != page_addr)
+ return false;
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index aa6bc5c02643..c59babca6857 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -413,6 +413,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
+ struct list_head *list, bool run_queue_async)
+ {
+ struct elevator_queue *e;
++ struct request_queue *q = hctx->queue;
++
++ /*
++ * blk_mq_sched_insert_requests() is called from flush plug
++ * context only, and hold one usage counter to prevent queue
++ * from being released.
++ */
++ percpu_ref_get(&q->q_usage_counter);
+
+ e = hctx->queue->elevator;
+ if (e && e->type->ops.insert_requests)
+@@ -426,12 +434,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
+ if (!hctx->dispatch_busy && !e && !run_queue_async) {
+ blk_mq_try_issue_list_directly(hctx, list);
+ if (list_empty(list))
+- return;
++ goto out;
+ }
+ blk_mq_insert_requests(hctx, ctx, list);
+ }
+
+ blk_mq_run_hw_queue(hctx, run_queue_async);
++ out:
++ percpu_ref_put(&q->q_usage_counter);
+ }
+
+ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
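The hunk above brackets the whole insert path with a queue usage reference, the standard percpu_ref guard: pin the queue before work that may outlive the caller's locking context, and balance the put on every exit path (hence the new out: label). In outline (sketch):

    static void guarded_insert(struct request_queue *q)
    {
        percpu_ref_get(&q->q_usage_counter);    /* queue can't be released */
        /* ... insert / dispatch requests, possibly returning early ... */
        percpu_ref_put(&q->q_usage_counter);    /* balanced on every path */
    }
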
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index b0e5e67e20a2..8a41cc5974fe 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2284,15 +2284,65 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
+ }
+ }
+
++static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
++{
++ int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
++
++ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
++ __alignof__(struct blk_mq_hw_ctx)) !=
++ sizeof(struct blk_mq_hw_ctx));
++
++ if (tag_set->flags & BLK_MQ_F_BLOCKING)
++ hw_ctx_size += sizeof(struct srcu_struct);
++
++ return hw_ctx_size;
++}
++
+ static int blk_mq_init_hctx(struct request_queue *q,
+ struct blk_mq_tag_set *set,
+ struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
+ {
+- int node;
++ hctx->queue_num = hctx_idx;
++
++ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
++
++ hctx->tags = set->tags[hctx_idx];
++
++ if (set->ops->init_hctx &&
++ set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
++ goto unregister_cpu_notifier;
+
+- node = hctx->numa_node;
++ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
++ hctx->numa_node))
++ goto exit_hctx;
++ return 0;
++
++ exit_hctx:
++ if (set->ops->exit_hctx)
++ set->ops->exit_hctx(hctx, hctx_idx);
++ unregister_cpu_notifier:
++ blk_mq_remove_cpuhp(hctx);
++ return -1;
++}
++
++static struct blk_mq_hw_ctx *
++blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
++ int node)
++{
++ struct blk_mq_hw_ctx *hctx;
++ gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
++
++ hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
++ if (!hctx)
++ goto fail_alloc_hctx;
++
++ if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
++ goto free_hctx;
++
++ atomic_set(&hctx->nr_active, 0);
+ if (node == NUMA_NO_NODE)
+- node = hctx->numa_node = set->numa_node;
++ node = set->numa_node;
++ hctx->numa_node = node;
+
+ INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ spin_lock_init(&hctx->lock);
+@@ -2300,58 +2350,45 @@ static int blk_mq_init_hctx(struct request_queue *q,
+ hctx->queue = q;
+ hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
+
+- cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
+-
+- hctx->tags = set->tags[hctx_idx];
+-
+ /*
+ * Allocate space for all possible cpus to avoid allocation at
+ * runtime
+ */
+ hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
+- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
++ gfp, node);
+ if (!hctx->ctxs)
+- goto unregister_cpu_notifier;
++ goto free_cpumask;
+
+ if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
++ gfp, node))
+ goto free_ctxs;
+-
+ hctx->nr_ctx = 0;
+
+ spin_lock_init(&hctx->dispatch_wait_lock);
+ init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
+ INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
+
+- if (set->ops->init_hctx &&
+- set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+- goto free_bitmap;
+-
+ hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
++ gfp);
+ if (!hctx->fq)
+- goto exit_hctx;
+-
+- if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
+- goto free_fq;
++ goto free_bitmap;
+
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ init_srcu_struct(hctx->srcu);
++ blk_mq_hctx_kobj_init(hctx);
+
+- return 0;
++ return hctx;
+
+- free_fq:
+- blk_free_flush_queue(hctx->fq);
+- exit_hctx:
+- if (set->ops->exit_hctx)
+- set->ops->exit_hctx(hctx, hctx_idx);
+ free_bitmap:
+ sbitmap_free(&hctx->ctx_map);
+ free_ctxs:
+ kfree(hctx->ctxs);
+- unregister_cpu_notifier:
+- blk_mq_remove_cpuhp(hctx);
+- return -1;
++ free_cpumask:
++ free_cpumask_var(hctx->cpumask);
++ free_hctx:
++ kfree(hctx);
++ fail_alloc_hctx:
++ return NULL;
+ }
+
+ static void blk_mq_init_cpu_queues(struct request_queue *q,
+@@ -2695,51 +2732,25 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+ }
+ EXPORT_SYMBOL(blk_mq_init_sq_queue);
+
+-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+-{
+- int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+-
+- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
+- __alignof__(struct blk_mq_hw_ctx)) !=
+- sizeof(struct blk_mq_hw_ctx));
+-
+- if (tag_set->flags & BLK_MQ_F_BLOCKING)
+- hw_ctx_size += sizeof(struct srcu_struct);
+-
+- return hw_ctx_size;
+-}
+-
+ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
+ struct blk_mq_tag_set *set, struct request_queue *q,
+ int hctx_idx, int node)
+ {
+ struct blk_mq_hw_ctx *hctx;
+
+- hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
+- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+- node);
++ hctx = blk_mq_alloc_hctx(q, set, node);
+ if (!hctx)
+- return NULL;
+-
+- if (!zalloc_cpumask_var_node(&hctx->cpumask,
+- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+- node)) {
+- kfree(hctx);
+- return NULL;
+- }
+-
+- atomic_set(&hctx->nr_active, 0);
+- hctx->numa_node = node;
+- hctx->queue_num = hctx_idx;
++ goto fail;
+
+- if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
+- free_cpumask_var(hctx->cpumask);
+- kfree(hctx);
+- return NULL;
+- }
+- blk_mq_hctx_kobj_init(hctx);
++ if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
++ goto free_hctx;
+
+ return hctx;
++
++ free_hctx:
++ kobject_put(&hctx->kobj);
++ fail:
++ return NULL;
+ }
+
+ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+diff --git a/block/blk.h b/block/blk.h
+index 5d636ee41663..e27fd1512e4b 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -75,7 +75,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
+
+ if (addr1 + vec1->bv_len != addr2)
+ return false;
+- if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
++ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
+ return false;
+ if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
+ return false;
+diff --git a/block/genhd.c b/block/genhd.c
+index 703267865f14..d8dff0b21f7d 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -531,6 +531,18 @@ void blk_free_devt(dev_t devt)
+ }
+ }
+
++/*
++ * We invalidate a devt by storing a NULL pointer for it in the idr.
++ */
++void blk_invalidate_devt(dev_t devt)
++{
++ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
++ spin_lock_bh(&ext_devt_lock);
++ idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
++ spin_unlock_bh(&ext_devt_lock);
++ }
++}
++
+ static char *bdevt_str(dev_t devt, char *buf)
+ {
+ if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
+@@ -793,6 +805,13 @@ void del_gendisk(struct gendisk *disk)
+
+ if (!(disk->flags & GENHD_FL_HIDDEN))
+ blk_unregister_region(disk_devt(disk), disk->minors);
++ /*
++ * Remove the gendisk pointer from the idr so that it cannot be
++ * looked up during the RCU grace period that precedes freeing the
++ * gendisk, preventing use-after-free. Note that the device number
++ * stays "in-use" until the gendisk is really freed.
++ */
++ blk_invalidate_devt(disk_devt(disk));
+
+ kobject_put(disk->part0.holder_dir);
+ kobject_put(disk->slave_dir);
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 8e596a8dff32..aee643ce13d1 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -285,6 +285,13 @@ void delete_partition(struct gendisk *disk, int partno)
+ kobject_put(part->holder_dir);
+ device_del(part_to_dev(part));
+
++ /*
++ * Remove the gendisk pointer from the idr so that it cannot be
++ * looked up during the RCU grace period that precedes freeing the
++ * gendisk, preventing use-after-free. Note that the device number
++ * stays "in-use" until the gendisk is really freed.
++ */
++ blk_invalidate_devt(part_devt(part));
+ hd_struct_kill(part);
+ }
+
+diff --git a/block/sed-opal.c b/block/sed-opal.c
+index e0de4dd448b3..119640897293 100644
+--- a/block/sed-opal.c
++++ b/block/sed-opal.c
+@@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
+ static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
+ struct opal_mbr_data *opal_mbr)
+ {
++ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
++ OPAL_TRUE : OPAL_FALSE;
++
+ const struct opal_step mbr_steps[] = {
+ { opal_discovery0, },
+ { start_admin1LSP_opal_session, &opal_mbr->key },
+- { set_mbr_done, &opal_mbr->enable_disable },
++ { set_mbr_done, &enable_disable },
+ { end_opal_session, },
+ { start_admin1LSP_opal_session, &opal_mbr->key },
+- { set_mbr_enable_disable, &opal_mbr->enable_disable },
++ { set_mbr_enable_disable, &enable_disable },
+ { end_opal_session, },
+ { NULL, }
+ };
+@@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev,
+
+ static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
+ {
+- u8 mbr_done_tf = 1;
++ u8 mbr_done_tf = OPAL_TRUE;
+ const struct opal_step mbrdone_step [] = {
+ { opal_discovery0, },
+ { start_admin1LSP_opal_session, key },
+diff --git a/crypto/hmac.c b/crypto/hmac.c
+index e74730224f0a..4b8c8ee8f15c 100644
+--- a/crypto/hmac.c
++++ b/crypto/hmac.c
+@@ -168,6 +168,8 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
+
+ parent->descsize = sizeof(struct shash_desc) +
+ crypto_shash_descsize(hash);
++ if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE))
++ return -EINVAL;
+
+ ctx->hash = hash;
+ return 0;
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index e48894e002ba..a46c2c162c03 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -1232,18 +1232,24 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
+ /*
+ * set numa proximity domain for smmuv3 device
+ */
+-static void __init arm_smmu_v3_set_proximity(struct device *dev,
++static int __init arm_smmu_v3_set_proximity(struct device *dev,
+ struct acpi_iort_node *node)
+ {
+ struct acpi_iort_smmu_v3 *smmu;
+
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+ if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
+- set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
++ int node = acpi_map_pxm_to_node(smmu->pxm);
++
++ if (node != NUMA_NO_NODE && !node_online(node))
++ return -EINVAL;
++
++ set_dev_node(dev, node);
+ pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
+ smmu->base_address,
+ smmu->pxm);
+ }
++ return 0;
+ }
+ #else
+ #define arm_smmu_v3_set_proximity NULL
+@@ -1318,7 +1324,7 @@ struct iort_dev_config {
+ int (*dev_count_resources)(struct acpi_iort_node *node);
+ void (*dev_init_resources)(struct resource *res,
+ struct acpi_iort_node *node);
+- void (*dev_set_proximity)(struct device *dev,
++ int (*dev_set_proximity)(struct device *dev,
+ struct acpi_iort_node *node);
+ };
+
+@@ -1369,8 +1375,11 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
+ if (!pdev)
+ return -ENOMEM;
+
+- if (ops->dev_set_proximity)
+- ops->dev_set_proximity(&pdev->dev, node);
++ if (ops->dev_set_proximity) {
++ ret = ops->dev_set_proximity(&pdev->dev, node);
++ if (ret)
++ goto dev_put;
++ }
+
+ count = ops->dev_count_resources(node);
+
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 77abe0ec4043..bd533f68b1de 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1031,6 +1031,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ const struct acpi_data_node *data = to_acpi_data_node(fwnode);
+ struct acpi_data_node *dn;
+
++ /*
++ * We can have a combination of device and data nodes, e.g. with
++ * hierarchical _DSD properties. Make sure the adev pointer is
++ * restored before going through data nodes, otherwise we will
++ * be looking for data_nodes below the last device found instead
++ * of the common fwnode shared by device_nodes and data_nodes.
++ */
++ adev = to_acpi_device_node(fwnode);
+ if (adev)
+ head = &adev->data.subnodes;
+ else if (data)
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index f80d298de3fa..8ad20ed0cb7c 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1747,6 +1747,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ if (dev->power.syscore)
+ goto Complete;
+
++ /* Avoid direct_complete to let wakeup_path propagate. */
++ if (device_may_wakeup(dev) || dev->power.wakeup_path)
++ dev->power.direct_complete = false;
++
+ if (dev->power.direct_complete) {
+ if (pm_runtime_status_suspended(dev)) {
+ pm_runtime_disable(dev);
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index d5d6e6e5da3b..62d3aa2b26f6 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -37,6 +37,7 @@
+ #define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
+ #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
+ #define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
++#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
+
+ int btbcm_check_bdaddr(struct hci_dev *hdev)
+ {
+@@ -82,7 +83,8 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
+ !bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
+- !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) {
++ !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
++ !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
+ bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
+ &bda->bdaddr);
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
+index b0b680dd69f4..f5dbeec8e274 100644
+--- a/drivers/bluetooth/btmtkuart.c
++++ b/drivers/bluetooth/btmtkuart.c
+@@ -661,7 +661,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
+ {
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+- u32 baudrate;
++ __le32 baudrate;
+ u8 param;
+ int err;
+
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 237aea34b69f..d3b467792eb3 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -508,6 +508,8 @@ static int qca_open(struct hci_uart *hu)
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type != QCA_WCN3990) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
++ /* Controller needs time to boot up. */
++ msleep(150);
+ } else {
+ hu->init_speed = qcadev->init_speed;
+ hu->oper_speed = qcadev->oper_speed;
+@@ -992,7 +994,8 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+ while (!skb_queue_empty(&qca->txq))
+ usleep_range(100, 200);
+
+- serdev_device_wait_until_sent(hu->serdev,
++ if (hu->serdev)
++ serdev_device_wait_until_sent(hu->serdev,
+ msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+
+ /* Give the controller time to process the request */
+diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
+index b65ff6962899..e9b6ac61fb7f 100644
+--- a/drivers/char/hw_random/omap-rng.c
++++ b/drivers/char/hw_random/omap-rng.c
+@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
+ priv->rng.read = omap_rng_do_read;
+ priv->rng.init = omap_rng_init;
+ priv->rng.cleanup = omap_rng_cleanup;
++ priv->rng.quality = 900;
+
+ priv->rng.priv = (unsigned long)priv;
+ platform_set_drvdata(pdev, priv);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 38c6d1af6d1c..af6e240f98ff 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -777,6 +777,7 @@ static struct crng_state **crng_node_pool __read_mostly;
+ #endif
+
+ static void invalidate_batched_entropy(void);
++static void numa_crng_init(void);
+
+ static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+ static int __init parse_trust_cpu(char *arg)
+@@ -805,7 +806,9 @@ static void crng_initialize(struct crng_state *crng)
+ }
+ crng->state[i] ^= rv;
+ }
+- if (trust_cpu && arch_init) {
++ if (trust_cpu && arch_init && crng == &primary_crng) {
++ invalidate_batched_entropy();
++ numa_crng_init();
+ crng_init = 2;
+ pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+ }
+@@ -2211,8 +2214,8 @@ struct batched_entropy {
+ u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
+ };
+ unsigned int position;
++ spinlock_t batch_lock;
+ };
+-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
+@@ -2222,12 +2225,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
+ */
+-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
++};
++
+ u64 get_random_u64(void)
+ {
+ u64 ret;
+- bool use_lock;
+- unsigned long flags = 0;
++ unsigned long flags;
+ struct batched_entropy *batch;
+ static void *previous;
+
+@@ -2242,28 +2247,25 @@ u64 get_random_u64(void)
+
+ warn_unseeded_randomness(&previous);
+
+- use_lock = READ_ONCE(crng_init) < 2;
+- batch = &get_cpu_var(batched_entropy_u64);
+- if (use_lock)
+- read_lock_irqsave(&batched_entropy_reset_lock, flags);
++ batch = raw_cpu_ptr(&batched_entropy_u64);
++ spin_lock_irqsave(&batch->batch_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ extract_crng((u8 *)batch->entropy_u64);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u64[batch->position++];
+- if (use_lock)
+- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+- put_cpu_var(batched_entropy_u64);
++ spin_unlock_irqrestore(&batch->batch_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(get_random_u64);
+
+-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
++};
+ u32 get_random_u32(void)
+ {
+ u32 ret;
+- bool use_lock;
+- unsigned long flags = 0;
++ unsigned long flags;
+ struct batched_entropy *batch;
+ static void *previous;
+
+@@ -2272,18 +2274,14 @@ u32 get_random_u32(void)
+
+ warn_unseeded_randomness(&previous);
+
+- use_lock = READ_ONCE(crng_init) < 2;
+- batch = &get_cpu_var(batched_entropy_u32);
+- if (use_lock)
+- read_lock_irqsave(&batched_entropy_reset_lock, flags);
++ batch = raw_cpu_ptr(&batched_entropy_u32);
++ spin_lock_irqsave(&batch->batch_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ extract_crng((u8 *)batch->entropy_u32);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u32[batch->position++];
+- if (use_lock)
+- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+- put_cpu_var(batched_entropy_u32);
++ spin_unlock_irqrestore(&batch->batch_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(get_random_u32);
+@@ -2297,12 +2295,19 @@ static void invalidate_batched_entropy(void)
+ int cpu;
+ unsigned long flags;
+
+- write_lock_irqsave(&batched_entropy_reset_lock, flags);
+ for_each_possible_cpu (cpu) {
+- per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+- per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
++ struct batched_entropy *batched_entropy;
++
++ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
++ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
++ batched_entropy->position = 0;
++ spin_unlock(&batched_entropy->batch_lock);
++
++ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
++ spin_lock(&batched_entropy->batch_lock);
++ batched_entropy->position = 0;
++ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
+ }
+- write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index fbeb71953526..05dbfdb9f4af 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -75,7 +75,7 @@ struct ports_driver_data {
+ /* All the console devices handled by this driver */
+ struct list_head consoles;
+ };
+-static struct ports_driver_data pdrvdata;
++static struct ports_driver_data pdrvdata = { .next_vtermno = 1 };
+
+ static DEFINE_SPINLOCK(pdrvdata_lock);
+ static DECLARE_COMPLETION(early_console_added);
+@@ -1394,6 +1394,7 @@ static int add_port(struct ports_device *portdev, u32 id)
+ port->async_queue = NULL;
+
+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
++ port->cons.vtermno = 0;
+
+ port->host_connected = port->guest_connected = false;
+ port->stats = (struct port_stats) { 0 };
+diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+index 4d92b27a6153..7a4c5957939a 100644
+--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
+ DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO),
+- DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3),
+- DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3),
++ DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S3D1),
++ DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3),
+ DEF_MOD("cmt3", 300, R8A774A1_CLK_R),
+ DEF_MOD("cmt2", 301, R8A774A1_CLK_R),
+@@ -143,8 +143,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
+ DEF_MOD("rwdt", 402, R8A774A1_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3),
+- DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3),
+- DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3),
++ DEF_MOD("audmac1", 501, R8A774A1_CLK_S1D2),
++ DEF_MOD("audmac0", 502, R8A774A1_CLK_S1D2),
+ DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1),
+diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+index 34e274f2a273..93dacd826fd0 100644
+--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+@@ -157,7 +157,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
+ DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3),
+
+- DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4),
++ DEF_MOD("audmac0", 502, R8A774C0_CLK_S1D2),
+ DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C),
+ DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C),
+ DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C),
+diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
+index 86842c9fd314..0825cd0ff286 100644
+--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
+@@ -129,8 +129,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ DEF_MOD("msiof2", 209, R8A7795_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A7795_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A7795_CLK_MSO),
+- DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
+- DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
++ DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1),
++ DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
+ DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
+ DEF_MOD("cmt3", 300, R8A7795_CLK_R),
+@@ -153,8 +153,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ DEF_MOD("rwdt", 402, R8A7795_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3),
+- DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3),
+- DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3),
++ DEF_MOD("audmac1", 501, R8A7795_CLK_S1D2),
++ DEF_MOD("audmac0", 502, R8A7795_CLK_S1D2),
+ DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A7795_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A7795_CLK_S3D2),
+diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
+index 12c455859f2c..997cd956f12b 100644
+--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
+@@ -126,8 +126,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
+ DEF_MOD("msiof2", 209, R8A7796_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A7796_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A7796_CLK_MSO),
+- DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3),
+- DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3),
++ DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1),
++ DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
+ DEF_MOD("cmt3", 300, R8A7796_CLK_R),
+ DEF_MOD("cmt2", 301, R8A7796_CLK_R),
+@@ -146,8 +146,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
+ DEF_MOD("rwdt", 402, R8A7796_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3),
+- DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
+- DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
++ DEF_MOD("audmac1", 501, R8A7796_CLK_S1D2),
++ DEF_MOD("audmac0", 502, R8A7796_CLK_S1D2),
+ DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
+diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
+index eb1cca58a1e1..afc9c72fa094 100644
+--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
+@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
+ DEF_MOD("msiof2", 209, R8A77965_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A77965_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A77965_CLK_MSO),
+- DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
+- DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
++ DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1),
++ DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
+
+ DEF_MOD("cmt3", 300, R8A77965_CLK_R),
+@@ -146,8 +146,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
+ DEF_MOD("intc-ex", 407, R8A77965_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3),
+
+- DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3),
+- DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3),
++ DEF_MOD("audmac1", 501, R8A77965_CLK_S1D2),
++ DEF_MOD("audmac0", 502, R8A77965_CLK_S1D2),
+ DEF_MOD("drif7", 508, R8A77965_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A77965_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A77965_CLK_S3D2),
+diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
+index 9a278c75c918..03f445d47ef6 100644
+--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
+@@ -152,7 +152,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
+ DEF_MOD("intc-ex", 407, R8A77990_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77990_CLK_S0D3),
+
+- DEF_MOD("audmac0", 502, R8A77990_CLK_S3D4),
++ DEF_MOD("audmac0", 502, R8A77990_CLK_S1D2),
+ DEF_MOD("drif7", 508, R8A77990_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A77990_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A77990_CLK_S3D2),
+diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
+index eee3874865a9..68707277b17b 100644
+--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
+@@ -133,7 +133,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
+ DEF_MOD("rwdt", 402, R8A77995_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2),
+- DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
++ DEF_MOD("audmac0", 502, R8A77995_CLK_S1D2),
+ DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
+ DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
+ DEF_MOD("thermal", 522, R8A77995_CLK_CP),
+diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
+index 5a67b7869960..355d6a3611db 100644
+--- a/drivers/clk/rockchip/clk-rk3288.c
++++ b/drivers/clk/rockchip/clk-rk3288.c
+@@ -219,7 +219,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
+ PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
+ PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
+
+-PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
++PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
+ PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
+ "sclk_otgphy0_480m" };
+ PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
+@@ -313,13 +313,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
+ COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
+ RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3288_CLKGATE_CON(12), 6, GFLAGS),
+- COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
++ COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
+ RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3288_CLKGATE_CON(12), 7, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
+ RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3288_CLKGATE_CON(12), 8, GFLAGS),
+- GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
++ GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
+ RK3288_CLKGATE_CON(12), 9, GFLAGS),
+ GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
+ RK3288_CLKGATE_CON(12), 10, GFLAGS),
+@@ -420,7 +420,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
+ COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+ RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3288_CLKGATE_CON(3), 11, GFLAGS),
+- MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
++ MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
+ RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
+ GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
+ RK3288_CLKGATE_CON(9), 0, GFLAGS),
+@@ -647,7 +647,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
+ INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
+ RK3288_CLKSEL_CON(22), 7, IFLAGS),
+
+- GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
++ GATE(0, "jtag", "ext_jtag", 0,
+ RK3288_CLKGATE_CON(4), 14, GFLAGS),
+
+ COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
+@@ -656,7 +656,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
+ COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
+ RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
+ RK3288_CLKGATE_CON(3), 6, GFLAGS),
+- GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
++ GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
+ RK3288_CLKGATE_CON(13), 9, GFLAGS),
+ DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
+ RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
+@@ -697,7 +697,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
+ GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
+ GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
+- GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
++ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
+
+ /* ddrctrl [DDR Controller PHY clock] gates */
+ GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
+@@ -837,12 +837,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
+ "pclk_alive_niu",
+ "pclk_pd_pmu",
+ "pclk_pmu_niu",
+- "pclk_core_niu",
+- "pclk_ddrupctl0",
+- "pclk_publ0",
+- "pclk_ddrupctl1",
+- "pclk_publ1",
+ "pmu_hclk_otg0",
++ /* pwm-regulators on some boards need this clock, so keep it critical until clock handoff exists */
++ "pclk_rkpwm",
+ };
+
+ static void __iomem *rk3288_cru_base;
+diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
+index a371c66e72ef..bd9b5fbc443b 100644
+--- a/drivers/clk/zynqmp/divider.c
++++ b/drivers/clk/zynqmp/divider.c
+@@ -31,12 +31,14 @@
+ * struct zynqmp_clk_divider - adjustable divider clock
+ * @hw: handle between common and hardware-specific interfaces
+ * @flags: Hardware specific flags
++ * @is_frac: The divider is a fractional divider
+ * @clk_id: Id of clock
+ * @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
+ */
+ struct zynqmp_clk_divider {
+ struct clk_hw hw;
+ u8 flags;
++ bool is_frac;
+ u32 clk_id;
+ u32 div_type;
+ };
+@@ -116,8 +118,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
+
+ bestdiv = zynqmp_divider_get_val(*prate, rate);
+
+- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
+- (divider->flags & CLK_FRAC))
++ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
+ bestdiv = rate % *prate ? 1 : bestdiv;
+ *prate = rate * bestdiv;
+
+@@ -195,11 +196,13 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
+
+ init.name = name;
+ init.ops = &zynqmp_clk_divider_ops;
+- init.flags = nodes->flag;
++ /* CLK_FRAC is not defined in the common clk framework */
++ init.flags = nodes->flag & ~CLK_FRAC;
+ init.parent_names = parents;
+ init.num_parents = 1;
+
+ /* struct clk_divider assignments */
++ div->is_frac = !!(nodes->flag & CLK_FRAC);
+ div->flags = nodes->type_flag;
+ div->hw.init = &init;
+ div->clk_id = clk_id;
+diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
+index b3f4bd647e9b..988ebc326bdb 100644
+--- a/drivers/cpufreq/armada-8k-cpufreq.c
++++ b/drivers/cpufreq/armada-8k-cpufreq.c
+@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
+ of_node_put(node);
+ return -ENODEV;
+ }
++ of_node_put(node);
+
+ nb_cpus = num_possible_cpus();
+ freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index e10922709d13..bbf79544d0ad 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1098,6 +1098,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
+ cpufreq_global_kobject, "policy%u", cpu);
+ if (ret) {
+ pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
++ kobject_put(&policy->kobj);
+ goto err_free_real_cpus;
+ }
+
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index ffa9adeaba31..9d1d9bf02710 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
+ /* Failure, so roll back. */
+ pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
+
++ kobject_put(&dbs_data->attr_set.kobj);
++
+ policy->governor_data = NULL;
+
+ if (!have_governor_per_policy())
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index a4ff09f91c8f..3e17560b1efe 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -388,11 +388,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
+ ret = imx6ul_opp_check_speed_grading(cpu_dev);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+- return ret;
++ goto put_node;
+
+ dev_err(cpu_dev, "failed to read ocotp: %d\n",
+ ret);
+- return ret;
++ goto put_node;
+ }
+ } else {
+ imx6q_opp_check_speed_grading(cpu_dev);
+diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
+index c2dd43f3f5d8..8d63a6dc8383 100644
+--- a/drivers/cpufreq/kirkwood-cpufreq.c
++++ b/drivers/cpufreq/kirkwood-cpufreq.c
+@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
+ priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
+ if (IS_ERR(priv.cpu_clk)) {
+ dev_err(priv.dev, "Unable to get cpuclk\n");
+- return PTR_ERR(priv.cpu_clk);
++ err = PTR_ERR(priv.cpu_clk);
++ goto out_node;
+ }
+
+ err = clk_prepare_enable(priv.cpu_clk);
+ if (err) {
+ dev_err(priv.dev, "Unable to prepare cpuclk\n");
+- return err;
++ goto out_node;
+ }
+
+ kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
+@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
+ goto out_ddr;
+ }
+
+- of_node_put(np);
+- np = NULL;
+-
+ err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
+- if (!err)
+- return 0;
++ if (err) {
++ dev_err(priv.dev, "Failed to register cpufreq driver\n");
++ goto out_powersave;
++ }
+
+- dev_err(priv.dev, "Failed to register cpufreq driver\n");
++ of_node_put(np);
++ return 0;
+
++out_powersave:
+ clk_disable_unprepare(priv.powersave_clk);
+ out_ddr:
+ clk_disable_unprepare(priv.ddr_clk);
+ out_cpu:
+ clk_disable_unprepare(priv.cpu_clk);
++out_node:
+ of_node_put(np);
+
+ return err;
+diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
+index 75dfbd2a58ea..c7710c149de8 100644
+--- a/drivers/cpufreq/pasemi-cpufreq.c
++++ b/drivers/cpufreq/pasemi-cpufreq.c
+@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+
++ of_node_put(cpu);
+ if (!cpu)
+ goto out;
+
+diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
+index 52f0d91d30c1..9b4ce2eb8222 100644
+--- a/drivers/cpufreq/pmac32-cpufreq.c
++++ b/drivers/cpufreq/pmac32-cpufreq.c
+@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
+ volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
++ of_node_put(volt_gpio_np);
+ if (!voltage_gpio){
+ pr_err("missing cpu-vcore-select gpio\n");
+ return 1;
+@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+
++ of_node_put(volt_gpio_np);
+ pvr = mfspr(SPRN_PVR);
+ has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
+
+diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
+index 41a0f0be3f9f..8414c3a4ea08 100644
+--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
++++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
+@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
+ !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
+ pr_info("invalid CBE regs pointers for cpufreq\n");
++ of_node_put(cpu);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+index a4b5ff2b72f8..f6936bb3b7be 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
+ }
+ } else {
+ /* Since we have the flag final, we can go up to modulo 4 */
+- end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
++ if (areq->nbytes < 4)
++ end = 0;
++ else
++ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+ }
+
+ /* TODO if SGlen % 4 and !op->len then DMA */
+diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
+index de78282b8f44..9c6b5c1d6a1a 100644
+--- a/drivers/crypto/vmx/aesp8-ppc.pl
++++ b/drivers/crypto/vmx/aesp8-ppc.pl
+@@ -1357,7 +1357,7 @@ Loop_ctr32_enc:
+ addi $idx,$idx,16
+ bdnz Loop_ctr32_enc
+
+- vadduwm $ivec,$ivec,$one
++ vadduqm $ivec,$ivec,$one
+ vmr $dat,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+diff --git a/drivers/dax/super.c b/drivers/dax/super.c
+index 0a339b85133e..d7f2257f2568 100644
+--- a/drivers/dax/super.c
++++ b/drivers/dax/super.c
+@@ -73,22 +73,12 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
+ EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+ #endif
+
+-/**
+- * __bdev_dax_supported() - Check if the device supports dax for filesystem
+- * @bdev: block device to check
+- * @blocksize: The block size of the device
+- *
+- * This is a library function for filesystems to check if the block device
+- * can be mounted with dax option.
+- *
+- * Return: true if supported, false if unsupported
+- */
+-bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
++bool __generic_fsdax_supported(struct dax_device *dax_dev,
++ struct block_device *bdev, int blocksize, sector_t start,
++ sector_t sectors)
+ {
+- struct dax_device *dax_dev;
+ bool dax_enabled = false;
+ pgoff_t pgoff, pgoff_end;
+- struct request_queue *q;
+ char buf[BDEVNAME_SIZE];
+ void *kaddr, *end_kaddr;
+ pfn_t pfn, end_pfn;
+@@ -102,21 +92,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+ return false;
+ }
+
+- q = bdev_get_queue(bdev);
+- if (!q || !blk_queue_dax(q)) {
+- pr_debug("%s: error: request queue doesn't support dax\n",
+- bdevname(bdev, buf));
+- return false;
+- }
+-
+- err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
++ err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
+ if (err) {
+ pr_debug("%s: error: unaligned partition for dax\n",
+ bdevname(bdev, buf));
+ return false;
+ }
+
+- last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
++ last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
+ err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
+ if (err) {
+ pr_debug("%s: error: unaligned partition for dax\n",
+@@ -124,20 +107,11 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+ return false;
+ }
+
+- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+- if (!dax_dev) {
+- pr_debug("%s: error: device does not support dax\n",
+- bdevname(bdev, buf));
+- return false;
+- }
+-
+ id = dax_read_lock();
+ len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+ len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
+ dax_read_unlock(id);
+
+- put_dax(dax_dev);
+-
+ if (len < 1 || len2 < 1) {
+ pr_debug("%s: error: dax access failed (%ld)\n",
+ bdevname(bdev, buf), len < 1 ? len : len2);
+@@ -178,6 +152,49 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+ }
+ return true;
+ }
++EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
++
++/**
++ * __bdev_dax_supported() - Check if the device supports dax for filesystem
++ * @bdev: block device to check
++ * @blocksize: The block size of the device
++ *
++ * This is a library function for filesystems to check if the block device
++ * can be mounted with dax option.
++ *
++ * Return: true if supported, false if unsupported
++ */
++bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
++{
++ struct dax_device *dax_dev;
++ struct request_queue *q;
++ char buf[BDEVNAME_SIZE];
++ bool ret;
++ int id;
++
++ q = bdev_get_queue(bdev);
++ if (!q || !blk_queue_dax(q)) {
++ pr_debug("%s: error: request queue doesn't support dax\n",
++ bdevname(bdev, buf));
++ return false;
++ }
++
++ dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
++ if (!dax_dev) {
++ pr_debug("%s: error: device does not support dax\n",
++ bdevname(bdev, buf));
++ return false;
++ }
++
++ id = dax_read_lock();
++ ret = dax_supported(dax_dev, bdev, blocksize, 0,
++ i_size_read(bdev->bd_inode) / 512);
++ dax_read_unlock(id);
++
++ put_dax(dax_dev);
++
++ return ret;
++}
+ EXPORT_SYMBOL_GPL(__bdev_dax_supported);
+ #endif
+
+@@ -303,6 +320,15 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ }
+ EXPORT_SYMBOL_GPL(dax_direct_access);
+
++bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
++ int blocksize, sector_t start, sector_t len)
++{
++ if (!dax_alive(dax_dev))
++ return false;
++
++ return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
++}
++
+ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i)
+ {
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 0ae3de76833b..839621b044f4 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -228,7 +228,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
+ * if is not found. This can happen when both drivers (the governor driver
+ * and the driver that call devfreq_add_device) are built as modules.
+ * devfreq_list_lock should be held by the caller. Returns the matched
+- * governor's pointer.
++ * governor's pointer or an error pointer.
+ */
+ static struct devfreq_governor *try_then_request_governor(const char *name)
+ {
+@@ -254,7 +254,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
+ /* Restore previous state before return */
+ mutex_lock(&devfreq_list_lock);
+ if (err)
+- return NULL;
++ return ERR_PTR(err);
+
+ governor = find_devfreq_governor(name);
+ }
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index fe69dccfa0c0..37a269420435 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1606,7 +1606,11 @@ static void at_xdmac_tasklet(unsigned long data)
+ struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+- BUG_ON(!desc->active_xfer);
++ if (!desc->active_xfer) {
++ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
++ spin_unlock_bh(&atchan->lock);
++ return;
++ }
+
+ txd = &desc->tx_dma_desc;
+
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index eec79fdf27a5..56695ffb5d37 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -966,6 +966,7 @@ static void _stop(struct pl330_thread *thrd)
+ {
+ void __iomem *regs = thrd->dmac->base;
+ u8 insn[6] = {0, 0, 0, 0, 0, 0};
++ u32 inten = readl(regs + INTEN);
+
+ if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
+ UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
+@@ -978,10 +979,13 @@ static void _stop(struct pl330_thread *thrd)
+
+ _emit_KILL(0, insn);
+
+- /* Stop generating interrupts for SEV */
+- writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
+-
+ _execute_DBGINSN(thrd, insn, is_manager(thrd));
++
++ /* clear the event */
++ if (inten & (1 << thrd->ev))
++ writel(1 << thrd->ev, regs + INTCLR);
++ /* Stop generating interrupts for SEV */
++ writel(inten & ~(1 << thrd->ev), regs + INTEN);
+ }
+
+ /* Start doing req 'idx' of thread 'thrd' */
+diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
+index 5ec0dd97b397..1477cce33dbe 100644
+--- a/drivers/dma/tegra210-adma.c
++++ b/drivers/dma/tegra210-adma.c
+@@ -22,7 +22,6 @@
+ #include <linux/of_device.h>
+ #include <linux/of_dma.h>
+ #include <linux/of_irq.h>
+-#include <linux/pm_clock.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/slab.h>
+
+@@ -141,6 +140,7 @@ struct tegra_adma {
+ struct dma_device dma_dev;
+ struct device *dev;
+ void __iomem *base_addr;
++ struct clk *ahub_clk;
+ unsigned int nr_channels;
+ unsigned long rx_requests_reserved;
+ unsigned long tx_requests_reserved;
+@@ -637,8 +637,9 @@ static int tegra_adma_runtime_suspend(struct device *dev)
+ struct tegra_adma *tdma = dev_get_drvdata(dev);
+
+ tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
++ clk_disable_unprepare(tdma->ahub_clk);
+
+- return pm_clk_suspend(dev);
++ return 0;
+ }
+
+ static int tegra_adma_runtime_resume(struct device *dev)
+@@ -646,10 +647,11 @@ static int tegra_adma_runtime_resume(struct device *dev)
+ struct tegra_adma *tdma = dev_get_drvdata(dev);
+ int ret;
+
+- ret = pm_clk_resume(dev);
+- if (ret)
++ ret = clk_prepare_enable(tdma->ahub_clk);
++ if (ret) {
++ dev_err(dev, "ahub clk_enable failed: %d\n", ret);
+ return ret;
+-
++ }
+ tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+
+ return 0;
+@@ -693,13 +695,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+
+- ret = pm_clk_create(&pdev->dev);
+- if (ret)
+- return ret;
+-
+- ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
+- if (ret)
+- goto clk_destroy;
++ tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
++ if (IS_ERR(tdma->ahub_clk)) {
++ dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
++ return PTR_ERR(tdma->ahub_clk);
++ }
+
+ pm_runtime_enable(&pdev->dev);
+
+@@ -776,8 +776,6 @@ rpm_put:
+ pm_runtime_put_sync(&pdev->dev);
+ rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+-clk_destroy:
+- pm_clk_destroy(&pdev->dev);
+
+ return ret;
+ }
+@@ -787,6 +785,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
+ struct tegra_adma *tdma = platform_get_drvdata(pdev);
+ int i;
+
++ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&tdma->dma_dev);
+
+ for (i = 0; i < tdma->nr_channels; ++i)
+@@ -794,7 +793,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+- pm_clk_destroy(&pdev->dev);
+
+ return 0;
+ }
+diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
+index 540e8cd16ee6..db3bcf96b98f 100644
+--- a/drivers/extcon/Kconfig
++++ b/drivers/extcon/Kconfig
+@@ -30,7 +30,7 @@ config EXTCON_ARIZONA
+
+ config EXTCON_AXP288
+ tristate "X-Power AXP288 EXTCON support"
+- depends on MFD_AXP20X && USB_SUPPORT && X86
++ depends on MFD_AXP20X && USB_SUPPORT && X86 && ACPI
+ select USB_ROLE_SWITCH
+ help
+ Say Y here to enable support for USB peripheral detection
+diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
+index da0e9bc4262f..9327479c719c 100644
+--- a/drivers/extcon/extcon-arizona.c
++++ b/drivers/extcon/extcon-arizona.c
+@@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
+ struct arizona_extcon_info *info = platform_get_drvdata(pdev);
+ struct arizona *arizona = info->arizona;
+ int jack_irq_rise, jack_irq_fall;
++ bool change;
++
++ regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
++ ARIZONA_MICD_ENA, 0,
++ &change);
++
++ if (change) {
++ regulator_disable(info->micvdd);
++ pm_runtime_put(info->dev);
++ }
+
+ gpiod_put(info->micd_pol_gpio);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 466da5954a68..62bf9da25e4b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -23,7 +23,7 @@
+ # Makefile for the drm device driver. This driver provides support for the
+ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+-FULL_AMD_PATH=$(src)/..
++FULL_AMD_PATH=$(srctree)/$(src)/..
+ DISPLAY_FOLDER_NAME=display
+ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index ee47c11e92ce..4dee2326b29c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ {
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_fence *fence;
+- struct dma_fence *old, **ptr;
++ struct dma_fence __rcu **ptr;
+ uint32_t seq;
++ int r;
+
+ fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
+ if (fence == NULL)
+@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ seq, flags | AMDGPU_FENCE_FLAG_INT);
+
+ ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
++ if (unlikely(rcu_dereference_protected(*ptr, 1))) {
++ struct dma_fence *old;
++
++ rcu_read_lock();
++ old = dma_fence_get_rcu_safe(ptr);
++ rcu_read_unlock();
++
++ if (old) {
++ r = dma_fence_wait(old, false);
++ dma_fence_put(old);
++ if (r)
++ return r;
++ }
++ }
++
+ /* This function can't be called concurrently anyway, otherwise
+ * emitting the fence would mess up the hardware ring buffer.
+ */
+- old = rcu_dereference_protected(*ptr, 1);
+- if (old && !dma_fence_is_signaled(old)) {
+- DRM_INFO("rcu slot is busy\n");
+- dma_fence_wait(old, false);
+- }
+-
+ rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
+
+ *f = &fence->base;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3082b55b1e77..0886b36c2344 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3587,6 +3587,8 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
+ plane->state = &amdgpu_state->base;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
++ plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
++ plane->state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+ }
+ }
+
+@@ -4953,8 +4955,7 @@ cleanup:
+ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *stream_state)
+ {
+- stream_state->mode_changed =
+- crtc_state->mode_changed || crtc_state->active_changed;
++ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
+ }
+
+ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+@@ -5661,6 +5662,9 @@ skip_modeset:
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
++ /* ABM settings */
++ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
++
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+@@ -5858,7 +5862,9 @@ dm_determine_update_type_for_commit(struct dc *dc,
+ }
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+- struct dc_stream_update stream_update = { 0 };
++ struct dc_stream_update stream_update;
++
++ memset(&stream_update, 0, sizeof(stream_update));
+
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+ old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index a6cda201c964..88fe4fb43bfd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -524,6 +524,14 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_stream_state *link_stream;
+ struct dc_link_settings store_settings = *link_setting;
+
++ link->preferred_link_setting = store_settings;
++
++ /* Retraining with the preferred link settings is only
++ * relevant for the DP signal type
++ */
++ if (!dc_is_dp_signal(link->connector_signal))
++ return;
++
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link) {
+@@ -538,7 +546,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
+
+ link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+- link->preferred_link_setting = store_settings;
++ /* Cannot retrain link if backend is off */
++ if (link_stream->dpms_off)
++ return;
++
+ if (link_stream)
+ decide_link_settings(link_stream, &store_settings);
+
+@@ -1666,6 +1677,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ continue;
+
+ if (stream_update->dpms_off) {
++ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (*stream_update->dpms_off) {
+ core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
+ dc->hwss.optimize_bandwidth(dc, dc->current_state);
+@@ -1673,6 +1685,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ dc->hwss.prepare_bandwidth(dc, dc->current_state);
+ core_link_enable_stream(dc->current_state, pipe_ctx);
+ }
++ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+ }
+
+ if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index ea18e9c2d8ce..419e8de8c0f4 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2074,11 +2074,28 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
+ }
+ }
+
++static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
++{
++
++ uint32_t pxl_clk = timing->pix_clk_100hz;
++
++ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ pxl_clk /= 2;
++ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
++ pxl_clk = pxl_clk * 2 / 3;
++
++ if (timing->display_color_depth == COLOR_DEPTH_101010)
++ pxl_clk = pxl_clk * 10 / 8;
++ else if (timing->display_color_depth == COLOR_DEPTH_121212)
++ pxl_clk = pxl_clk * 12 / 8;
++
++ return pxl_clk;
++}
++
+ static bool dp_active_dongle_validate_timing(
+ const struct dc_crtc_timing *timing,
+ const struct dpcd_caps *dpcd_caps)
+ {
+- unsigned int required_pix_clk_100hz = timing->pix_clk_100hz;
+ const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
+
+ switch (dpcd_caps->dongle_type) {
+@@ -2115,13 +2132,6 @@ static bool dp_active_dongle_validate_timing(
+ return false;
+ }
+
+-
+- /* Check Color Depth and Pixel Clock */
+- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+- required_pix_clk_100hz /= 2;
+- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+- required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3;
+-
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+@@ -2130,14 +2140,11 @@ static bool dp_active_dongle_validate_timing(
+ case COLOR_DEPTH_101010:
+ if (dongle_caps->dp_hdmi_max_bpc < 10)
+ return false;
+- required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8;
+ break;
+ case COLOR_DEPTH_121212:
+ if (dongle_caps->dp_hdmi_max_bpc < 12)
+ return false;
+- required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8;
+ break;
+-
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+@@ -2145,7 +2152,7 @@ static bool dp_active_dongle_validate_timing(
+ return false;
+ }
+
+- if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
++ if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
+ return false;
+
+ return true;
+@@ -2166,7 +2173,7 @@ enum dc_status dc_link_validate_mode_timing(
+ return DC_OK;
+
+ /* Passive Dongle */
+- if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk)
++ if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
+ return DC_EXCEED_DONGLE_CAP;
+
+ /* Active Dongle*/
+@@ -2316,7 +2323,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+ uint32_t denominator;
+
+ bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
+- kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
++ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
+
+ /*
+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+@@ -2736,3 +2743,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+ }
+ }
+
++uint32_t dc_bandwidth_in_kbps_from_timing(
++ const struct dc_crtc_timing *timing)
++{
++ uint32_t bits_per_channel = 0;
++ uint32_t kbps;
++
++ switch (timing->display_color_depth) {
++ case COLOR_DEPTH_666:
++ bits_per_channel = 6;
++ break;
++ case COLOR_DEPTH_888:
++ bits_per_channel = 8;
++ break;
++ case COLOR_DEPTH_101010:
++ bits_per_channel = 10;
++ break;
++ case COLOR_DEPTH_121212:
++ bits_per_channel = 12;
++ break;
++ case COLOR_DEPTH_141414:
++ bits_per_channel = 14;
++ break;
++ case COLOR_DEPTH_161616:
++ bits_per_channel = 16;
++ break;
++ default:
++ break;
++ }
++
++ ASSERT(bits_per_channel != 0);
++
++ kbps = timing->pix_clk_100hz / 10;
++ kbps *= bits_per_channel;
++
++ if (timing->flags.Y_ONLY != 1) {
++ /* Only Y-Only reduces bandwidth, to 1/3 of RGB */
++ kbps *= 3;
++ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ kbps /= 2;
++ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
++ kbps = kbps * 2 / 3;
++ }
++
++ return kbps;
++
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 09d301216076..6809932e80be 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1520,53 +1520,6 @@ static bool decide_fallback_link_setting(
+ return true;
+ }
+
+-static uint32_t bandwidth_in_kbps_from_timing(
+- const struct dc_crtc_timing *timing)
+-{
+- uint32_t bits_per_channel = 0;
+- uint32_t kbps;
+-
+- switch (timing->display_color_depth) {
+- case COLOR_DEPTH_666:
+- bits_per_channel = 6;
+- break;
+- case COLOR_DEPTH_888:
+- bits_per_channel = 8;
+- break;
+- case COLOR_DEPTH_101010:
+- bits_per_channel = 10;
+- break;
+- case COLOR_DEPTH_121212:
+- bits_per_channel = 12;
+- break;
+- case COLOR_DEPTH_141414:
+- bits_per_channel = 14;
+- break;
+- case COLOR_DEPTH_161616:
+- bits_per_channel = 16;
+- break;
+- default:
+- break;
+- }
+-
+- ASSERT(bits_per_channel != 0);
+-
+- kbps = timing->pix_clk_100hz / 10;
+- kbps *= bits_per_channel;
+-
+- if (timing->flags.Y_ONLY != 1) {
+- /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
+- kbps *= 3;
+- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+- kbps /= 2;
+- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+- kbps = kbps * 2 / 3;
+- }
+-
+- return kbps;
+-
+-}
+-
+ static uint32_t bandwidth_in_kbps_from_link_settings(
+ const struct dc_link_settings *link_setting)
+ {
+@@ -1607,7 +1560,7 @@ bool dp_validate_mode_timing(
+ link_setting = &link->verified_link_cap;
+ */
+
+- req_bw = bandwidth_in_kbps_from_timing(timing);
++ req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+
+ if (req_bw <= max_bw) {
+@@ -1641,7 +1594,7 @@ void decide_link_settings(struct dc_stream_state *stream,
+ uint32_t req_bw;
+ uint32_t link_bw;
+
+- req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
++ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ link = stream->link;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 349ab8017776..4c06eb52ab73 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1266,10 +1266,12 @@ bool dc_remove_plane_from_context(
+ * For head pipe detach surfaces from pipe for tail
+ * pipe just zero it out
+ */
+- if (!pipe_ctx->top_pipe) {
++ if (!pipe_ctx->top_pipe ||
++ (!pipe_ctx->top_pipe->top_pipe &&
++ pipe_ctx->top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)) {
+ pipe_ctx->plane_state = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+- } else {
++ } else {
+ memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index 8fc223defed4..a83e1c60f9db 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -252,4 +252,6 @@ bool dc_submit_i2c(
+ uint32_t link_index,
+ struct i2c_command *cmd);
+
++uint32_t dc_bandwidth_in_kbps_from_timing(
++ const struct dc_crtc_timing *timing);
+ #endif /* DC_LINK_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+index 4fe3664fb495..5ecfcb9ee8a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+@@ -377,7 +377,6 @@ static bool acquire(
+ struct dce_aux *engine,
+ struct ddc *ddc)
+ {
+-
+ enum gpio_result result;
+
+ if (!is_engine_available(engine))
+@@ -458,7 +457,8 @@ int dce_aux_transfer(struct ddc_service *ddc,
+ memset(&aux_rep, 0, sizeof(aux_rep));
+
+ aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+- acquire(aux_engine, ddc_pin);
++ if (!acquire(aux_engine, ddc_pin))
++ return -1;
+
+ if (payload->i2c_over_aux)
+ aux_req.type = AUX_TRANSACTION_TYPE_I2C;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+index c7642e748297..ce21a290bf3e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+@@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
+ int *num_part_y,
+ int *num_part_c)
+ {
++ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
++ lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
++
+ int line_size = scl_data->viewport.width < scl_data->recout.width ?
+ scl_data->viewport.width : scl_data->recout.width;
+ int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
+ scl_data->viewport_c.width : scl_data->recout.width;
+- int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+- int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+- int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+- int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
+- int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
++
++ if (line_size == 0)
++ line_size = 1;
++
++ if (line_size_c == 0)
++ line_size_c = 1;
++
++
++ lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
++ memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
++ memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
++ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
+
+ if (lb_config == LB_MEMORY_CONFIG_1) {
+ lb_memory_size = 816;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index d1a8f1c302a9..5b551a544e82 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1008,9 +1008,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
+ * to non-preferred front end. If pipe_ctx->stream is not NULL,
+ * we will use the pipe, so don't disable
+ */
+- if (pipe_ctx->stream != NULL)
++ if (pipe_ctx->stream != NULL &&
++ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
++ pipe_ctx->stream_res.tg))
+ continue;
+
++ /* Disable on the current state so the new one isn't cleared. */
++ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
++
+ dpp->funcs->dpp_reset(dpp);
+
+ pipe_ctx->stream_res.tg = tg;
+@@ -2692,9 +2697,15 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ .rotation = pipe_ctx->plane_state->rotation,
+ .mirror = pipe_ctx->plane_state->horizontal_mirror
+ };
+-
+- pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
+- pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
++ uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
++ uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
++ uint32_t x_offset = min(x_plane, pos_cpy.x);
++ uint32_t y_offset = min(y_plane, pos_cpy.y);
++
++ pos_cpy.x -= x_offset;
++ pos_cpy.y -= y_offset;
++ pos_cpy.x_hotspot += (x_plane - x_offset);
++ pos_cpy.y_hotspot += (y_plane - y_offset);
+
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
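
The cursor hunk above stops the unsigned position from wrapping when the cursor sits above or to the left of the plane: the part of the plane offset the position can absorb moves into the position, the remainder folds into the hotspot. A user-space sketch of that clamping; names and sample coordinates are mine, not the driver's:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/* Mirrors the hunk: shift as much of the plane offset as the cursor
 * position can absorb into the position, fold the rest into the
 * hotspot, so the unsigned position never wraps below zero. */
static void clamp_cursor(unsigned int plane_x, unsigned int plane_y,
                         unsigned int *x, unsigned int *y,
                         unsigned int *hot_x, unsigned int *hot_y)
{
        unsigned int x_off = min_u(plane_x, *x);
        unsigned int y_off = min_u(plane_y, *y);

        *x -= x_off;
        *y -= y_off;
        *hot_x += plane_x - x_off;
        *hot_y += plane_y - y_off;
}

int main(void)
{
        unsigned int x = 5, y = 0, hx = 0, hy = 0;

        clamp_cursor(20, 10, &x, &y, &hx, &hy);   /* plane at (20, 10) */
        printf("pos (%u,%u) hotspot (%u,%u)\n", x, y, hx, hy);
        /* prints: pos (0,0) hotspot (15,10) */
        return 0;
}
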
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 0fbc8fbc3541..a1055413bade 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -1854,6 +1854,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+ coordinates_x, axis_x, curve,
+ MAX_HW_POINTS, tf_pts,
+ mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
++ if (ramp->type == GAMMA_CUSTOM)
++ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
+
+ ret = true;
+
+diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
+index 1b875e5dc0f6..a72e30c0e03d 100644
+--- a/drivers/gpu/drm/arm/display/komeda/Makefile
++++ b/drivers/gpu/drm/arm/display/komeda/Makefile
+@@ -1,8 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0
+
+ ccflags-y := \
+- -I$(src)/../include \
+- -I$(src)
++ -I $(srctree)/$(src)/../include \
++ -I $(srctree)/$(src)
+
+ komeda-y := \
+ komeda_drv.o \
+diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
+index 4985384e51f6..59ffb6b9c745 100644
+--- a/drivers/gpu/drm/drm_atomic_state_helper.c
++++ b/drivers/gpu/drm/drm_atomic_state_helper.c
+@@ -30,6 +30,7 @@
+ #include <drm/drm_connector.h>
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_device.h>
++#include <drm/drm_writeback.h>
+
+ #include <linux/slab.h>
+ #include <linux/dma-fence.h>
+@@ -412,6 +413,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
++
++ if (state->writeback_job)
++ drm_writeback_cleanup_job(state->writeback_job);
+ }
+ EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 05bbc2b622fc..04aa6ccdfb24 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -497,7 +497,7 @@ int drm_dev_init(struct drm_device *dev,
+ BUG_ON(!parent);
+
+ kref_init(&dev->ref);
+- dev->dev = parent;
++ dev->dev = get_device(parent);
+ dev->driver = driver;
+
+ /* no per-device feature limits by default */
+@@ -567,6 +567,7 @@ err_minors:
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_fs_inode_free(dev->anon_inode);
+ err_free:
++ put_device(dev->dev);
+ mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
+@@ -602,6 +603,8 @@ void drm_dev_fini(struct drm_device *dev)
+ drm_minor_free(dev, DRM_MINOR_PRIMARY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+
++ put_device(dev->dev);
++
+ mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
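
The drm_drv.c hunk pairs a get_device() on the parent at init time with a put_device() on both the init error path and final teardown. A miniature user-space analog of that reference pairing; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct parent {
        int refs;
};

static struct parent *get_parent(struct parent *p)
{
        if (p)
                p->refs++;
        return p;
}

static void put_parent(struct parent *p)
{
        if (p && --p->refs == 0) {
                printf("parent freed\n");
                free(p);
        }
}

struct child {
        struct parent *parent;
};

/* Mirrors drm_dev_init(): take the reference where the pointer is
 * stored, and drop it on every exit path that undoes the init. */
static int child_init(struct child *c, struct parent *p, int fail)
{
        c->parent = get_parent(p);
        if (fail) {
                put_parent(c->parent);  /* err_free: unwind the get */
                return -1;
        }
        return 0;
}

static void child_fini(struct child *c)
{
        put_parent(c->parent);          /* matches the get in child_init() */
}

int main(void)
{
        struct parent *p = calloc(1, sizeof(*p));
        struct child c;

        p->refs = 1;
        if (child_init(&c, p, 0) == 0)
                child_fini(&c);
        put_parent(p);                  /* caller's own reference */
        return 0;
}
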
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index 7caa3c7ed978..9701469a6e93 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -577,6 +577,7 @@ put_back_event:
+ file_priv->event_space -= length;
+ list_add(&e->link, &file_priv->event_list);
+ spin_unlock_irq(&dev->event_lock);
++ wake_up_interruptible(&file_priv->event_wait);
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
+index c20e6fe00cb3..2d75032f8159 100644
+--- a/drivers/gpu/drm/drm_writeback.c
++++ b/drivers/gpu/drm/drm_writeback.c
+@@ -268,6 +268,15 @@ void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+ }
+ EXPORT_SYMBOL(drm_writeback_queue_job);
+
++void drm_writeback_cleanup_job(struct drm_writeback_job *job)
++{
++ if (job->fb)
++ drm_framebuffer_put(job->fb);
++
++ kfree(job);
++}
++EXPORT_SYMBOL(drm_writeback_cleanup_job);
++
+ /*
+ * @cleanup_work: deferred cleanup of a writeback job
+ *
+@@ -280,10 +289,9 @@ static void cleanup_work(struct work_struct *work)
+ struct drm_writeback_job *job = container_of(work,
+ struct drm_writeback_job,
+ cleanup_work);
+- drm_framebuffer_put(job->fb);
+- kfree(job);
+-}
+
++ drm_writeback_cleanup_job(job);
++}
+
+ /**
+ * drm_writeback_signal_completion - Signal the completion of a writeback job
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+index 18c27f795cf6..3156450723ba 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+@@ -515,6 +515,9 @@ static int etnaviv_bind(struct device *dev)
+ }
+ drm->dev_private = priv;
+
++ dev->dma_parms = &priv->dma_parms;
++ dma_set_max_seg_size(dev, SZ_2G);
++
+ mutex_init(&priv->gem_lock);
+ INIT_LIST_HEAD(&priv->gem_list);
+ priv->num_gpus = 0;
+@@ -552,6 +555,8 @@ static void etnaviv_unbind(struct device *dev)
+
+ component_unbind_all(dev, drm);
+
++ dev->dma_parms = NULL;
++
+ drm->dev_private = NULL;
+ kfree(priv);
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+index a6a7ded37ef1..6a4ea127c4f1 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+@@ -42,6 +42,7 @@ struct etnaviv_file_private {
+
+ struct etnaviv_drm_private {
+ int num_gpus;
++ struct device_dma_parameters dma_parms;
+ struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+
+ /* list of GEM objects: */
+diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
+index 271fb46d4dd0..ea8324abc784 100644
+--- a/drivers/gpu/drm/i915/gvt/Makefile
++++ b/drivers/gpu/drm/i915/gvt/Makefile
+@@ -5,5 +5,5 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
+ execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
+ fb_decoder.o dmabuf.o page_track.o
+
+-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
++ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
+ i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
+index 56a70c74af4e..b7b1ebdc8190 100644
+--- a/drivers/gpu/drm/msm/Makefile
++++ b/drivers/gpu/drm/msm/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+-ccflags-y := -Idrivers/gpu/drm/msm
+-ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
+-ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
++ccflags-y := -I $(srctree)/$(src)
++ccflags-y += -I $(srctree)/$(src)/disp/dpu1
++ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
+
+ msm-y := \
+ adreno/adreno_device.o \
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index d5f5e56422f5..270da14cba67 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -34,7 +34,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
+ {
+ struct device *dev = &gpu->pdev->dev;
+ const struct firmware *fw;
+- struct device_node *np;
++ struct device_node *np, *mem_np;
+ struct resource r;
+ phys_addr_t mem_phys;
+ ssize_t mem_size;
+@@ -48,11 +48,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
+ if (!np)
+ return -ENODEV;
+
+- np = of_parse_phandle(np, "memory-region", 0);
+- if (!np)
++ mem_np = of_parse_phandle(np, "memory-region", 0);
++ of_node_put(np);
++ if (!mem_np)
+ return -EINVAL;
+
+- ret = of_address_to_resource(np, 0, &r);
++ ret = of_address_to_resource(mem_np, 0, &r);
++ of_node_put(mem_np);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 5aa3307f3f0c..dd2c4d11d0e1 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -1023,13 +1023,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
+ "at idx: %d\n", i);
+- return;
++ goto error;
+ }
+
+ if (!hw_ctl[i]) {
+ DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
+ "at idx: %d\n", i);
+- return;
++ goto error;
+ }
+
+ phys->hw_pp = dpu_enc->hw_pp[i];
+@@ -1042,6 +1042,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ }
+
+ dpu_enc->mode_set_complete = true;
++
++error:
++ dpu_rm_release(&dpu_kms->rm, drm_enc);
+ }
+
+ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+@@ -1547,8 +1550,14 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
+ if (!ctl)
+ continue;
+
+- if (phys->split_role != ENC_ROLE_SLAVE)
++ /*
++ * This is cleared in frame_done worker, which isn't invoked
++ * for async commits. So don't set this for async, since it'll
++ * roll over to the next commit.
++ */
++ if (!async && phys->split_role != ENC_ROLE_SLAVE)
+ set_bit(i, dpu_enc->frame_busy_mask);
++
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
+diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
+index 49c04829cf34..fcf7a83f0e6f 100644
+--- a/drivers/gpu/drm/msm/msm_gem_vma.c
++++ b/drivers/gpu/drm/msm/msm_gem_vma.c
+@@ -85,7 +85,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
+
+ vma->mapped = true;
+
+- if (aspace->mmu)
++ if (aspace && aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, prot);
+
+diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
+index 581404e6544d..378c5dd692b0 100644
+--- a/drivers/gpu/drm/nouveau/Kbuild
++++ b/drivers/gpu/drm/nouveau/Kbuild
+@@ -1,7 +1,7 @@
+-ccflags-y += -I$(src)/include
+-ccflags-y += -I$(src)/include/nvkm
+-ccflags-y += -I$(src)/nvkm
+-ccflags-y += -I$(src)
++ccflags-y += -I $(srctree)/$(src)/include
++ccflags-y += -I $(srctree)/$(src)/include/nvkm
++ccflags-y += -I $(srctree)/$(src)/nvkm
++ccflags-y += -I $(srctree)/$(src)
+
+ # NVKM - HW resource manager
+ #- code also used by various userspace tools/tests
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+index 157b076a1272..38c9c086754b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+@@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
+ struct nvkm_device *device = bar->base.subdev.device;
+ static struct lock_class_key bar1_lock;
+ static struct lock_class_key bar2_lock;
+- u64 start, limit;
++ u64 start, limit, size;
+ int ret;
+
+ ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
+@@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
+
+ /* BAR2 */
+ start = 0x0100000000ULL;
+- limit = start + device->func->resource_size(device, 3);
++ size = device->func->resource_size(device, 3);
++ if (!size)
++ return -ENOMEM;
++ limit = start + size;
+
+ ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
+ &bar2_lock, "bar2", &bar->bar2_vmm);
+@@ -164,7 +167,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
+
+ /* BAR1 */
+ start = 0x0000000000ULL;
+- limit = start + device->func->resource_size(device, 1);
++ size = device->func->resource_size(device, 1);
++ if (!size)
++ return -ENOMEM;
++ limit = start + size;
+
+ ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
+ &bar1_lock, "bar1", &bar->bar1_vmm);
+diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
+index 64fb788b6647..f0fe975ed46c 100644
+--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
++++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
+@@ -1342,12 +1342,9 @@ static int dsi_pll_enable(struct dss_pll *pll)
+ */
+ dsi_enable_scp_clk(dsi);
+
+- if (!dsi->vdds_dsi_enabled) {
+- r = regulator_enable(dsi->vdds_dsi_reg);
+- if (r)
+- goto err0;
+- dsi->vdds_dsi_enabled = true;
+- }
++ r = regulator_enable(dsi->vdds_dsi_reg);
++ if (r)
++ goto err0;
+
+ /* XXX PLL does not come out of reset without this... */
+ dispc_pck_free_enable(dsi->dss->dispc, 1);
+@@ -1372,36 +1369,25 @@ static int dsi_pll_enable(struct dss_pll *pll)
+
+ return 0;
+ err1:
+- if (dsi->vdds_dsi_enabled) {
+- regulator_disable(dsi->vdds_dsi_reg);
+- dsi->vdds_dsi_enabled = false;
+- }
++ regulator_disable(dsi->vdds_dsi_reg);
+ err0:
+ dsi_disable_scp_clk(dsi);
+ dsi_runtime_put(dsi);
+ return r;
+ }
+
+-static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
++static void dsi_pll_disable(struct dss_pll *pll)
+ {
++ struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
++
+ dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
+- if (disconnect_lanes) {
+- WARN_ON(!dsi->vdds_dsi_enabled);
+- regulator_disable(dsi->vdds_dsi_reg);
+- dsi->vdds_dsi_enabled = false;
+- }
++
++ regulator_disable(dsi->vdds_dsi_reg);
+
+ dsi_disable_scp_clk(dsi);
+ dsi_runtime_put(dsi);
+
+- DSSDBG("PLL uninit done\n");
+-}
+-
+-static void dsi_pll_disable(struct dss_pll *pll)
+-{
+- struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
+-
+- dsi_pll_uninit(dsi, true);
++ DSSDBG("PLL disable done\n");
+ }
+
+ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
+@@ -4096,11 +4082,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
+
+ r = dss_pll_enable(&dsi->pll);
+ if (r)
+- goto err0;
++ return r;
+
+ r = dsi_configure_dsi_clocks(dsi);
+ if (r)
+- goto err1;
++ goto err0;
+
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
+ dsi->module_id == 0 ?
+@@ -4108,6 +4094,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
+
+ DSSDBG("PLL OK\n");
+
++ if (!dsi->vdds_dsi_enabled) {
++ r = regulator_enable(dsi->vdds_dsi_reg);
++ if (r)
++ goto err1;
++
++ dsi->vdds_dsi_enabled = true;
++ }
++
+ r = dsi_cio_init(dsi);
+ if (r)
+ goto err2;
+@@ -4136,10 +4130,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
+ err3:
+ dsi_cio_uninit(dsi);
+ err2:
+- dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
++ regulator_disable(dsi->vdds_dsi_reg);
++ dsi->vdds_dsi_enabled = false;
+ err1:
+- dss_pll_disable(&dsi->pll);
++ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
+ err0:
++ dss_pll_disable(&dsi->pll);
++
+ return r;
+ }
+
+@@ -4158,7 +4155,12 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
+
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
+ dsi_cio_uninit(dsi);
+- dsi_pll_uninit(dsi, disconnect_lanes);
++ dss_pll_disable(&dsi->pll);
++
++ if (disconnect_lanes) {
++ regulator_disable(dsi->vdds_dsi_reg);
++ dsi->vdds_dsi_enabled = false;
++ }
+ }
+
+ static int dsi_display_enable(struct omap_dss_device *dssdev)
+diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
+index 9da94d10782a..d37e3c001e24 100644
+--- a/drivers/gpu/drm/omapdrm/omap_connector.c
++++ b/drivers/gpu/drm/omapdrm/omap_connector.c
+@@ -36,18 +36,22 @@ struct omap_connector {
+ };
+
+ static void omap_connector_hpd_notify(struct drm_connector *connector,
+- struct omap_dss_device *src,
+ enum drm_connector_status status)
+ {
+- if (status == connector_status_disconnected) {
+- /*
+- * If the source is an HDMI encoder, notify it of disconnection.
+- * This is required to let the HDMI encoder reset any internal
+- * state related to connection status, such as the CEC address.
+- */
+- if (src && src->type == OMAP_DISPLAY_TYPE_HDMI &&
+- src->ops->hdmi.lost_hotplug)
+- src->ops->hdmi.lost_hotplug(src);
++ struct omap_connector *omap_connector = to_omap_connector(connector);
++ struct omap_dss_device *dssdev;
++
++ if (status != connector_status_disconnected)
++ return;
++
++ /*
++ * Notify all devices in the pipeline of disconnection. This is required

++ * to let the HDMI encoders reset their internal state related to
++ * connection status, such as the CEC address.
++ */
++ for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
++ if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
++ dssdev->ops->hdmi.lost_hotplug(dssdev);
+ }
+ }
+
+@@ -67,7 +71,7 @@ static void omap_connector_hpd_cb(void *cb_data,
+ if (old_status == status)
+ return;
+
+- omap_connector_hpd_notify(connector, omap_connector->hpd, status);
++ omap_connector_hpd_notify(connector, status);
+
+ drm_kms_helper_hotplug_event(dev);
+ }
+@@ -128,7 +132,7 @@ static enum drm_connector_status omap_connector_detect(
+ ? connector_status_connected
+ : connector_status_disconnected;
+
+- omap_connector_hpd_notify(connector, dssdev->src, status);
++ omap_connector_hpd_notify(connector, status);
+ } else {
+ switch (omap_connector->display->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+index 87fa316e1d7b..58ccf648b70f 100644
+--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
++++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+@@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
+ /* Send Command GRAM memory write (no parameters) */
+ dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
+
++ /* Wait a short while to let the panel be ready before the 1st frame */
++ mdelay(10);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
+index b9baefdba38a..1c318ad32a8c 100644
+--- a/drivers/gpu/drm/pl111/pl111_versatile.c
++++ b/drivers/gpu/drm/pl111/pl111_versatile.c
+@@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
+ ret = vexpress_muxfpga_init();
+ if (ret) {
+ dev_err(dev, "unable to initialize muxfpga driver\n");
++ of_node_put(np);
+ return ret;
+ }
+
+@@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "can't find the sysreg device, deferring\n");
++ of_node_put(np);
+ return -EPROBE_DEFER;
+ }
+ map = dev_get_drvdata(&pdev->dev);
+ if (!map) {
+ dev_err(dev, "sysreg has not yet probed\n");
+ platform_device_put(pdev);
++ of_node_put(np);
+ return -EPROBE_DEFER;
+ }
+ } else {
+ map = syscon_node_to_regmap(np);
+ }
++ of_node_put(np);
+
+ if (IS_ERR(map)) {
+ dev_err(dev, "no Versatile syscon regmap\n");
+diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
+index 7ef97b2a6eda..033f44e46daf 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
++++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
+@@ -283,7 +283,7 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
+ * divider.
+ */
+ fout = fvco / (1 << e) / div7;
+- div = DIV_ROUND_CLOSEST(fout, target);
++ div = max(1UL, DIV_ROUND_CLOSEST(fout, target));
+ diff = abs(fout / div - target);
+
+ if (diff < pll->diff) {
+@@ -485,9 +485,13 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
+ }
+
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
+- /* Turn on the LVDS PHY. */
++ /*
++ * Turn on the LVDS PHY. On D3, the LVEN and LVRES bit must be
++ * set at the same time, so don't write the register yet.
++ */
+ lvdcr0 |= LVDCR0_LVEN;
+- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
++ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_PWD))
++ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ }
+
+ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
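
The one-line divider change in rcar_lvds_d3_e3_pll_calc() guards against a zero divider: when fout is less than half the target, DIV_ROUND_CLOSEST rounds to zero and the following fout / div divides by zero. A standalone check of the clamped arithmetic; the sample frequencies are made up:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned long fout = 20000000;    /* candidate PLL output, Hz */
        unsigned long target = 65000000;  /* requested dot clock, Hz */
        unsigned long div;

        /* Without the clamp, fout < target / 2 rounds the divider to 0
         * and the subsequent fout / div faults. */
        div = MAX(1UL, DIV_ROUND_CLOSEST(fout, target));
        printf("div=%lu diff=%lu\n", div,
               fout / div > target ? fout / div - target
                                   : target - fout / div);
        return 0;
}
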
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index 7136fc91c603..e75f77ff8e0f 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -341,8 +341,8 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
+ u32 block_space, start_delay;
+ u32 tcon_div;
+
+- tcon->dclk_min_div = 4;
+- tcon->dclk_max_div = 127;
++ tcon->dclk_min_div = SUN6I_DSI_TCON_DIV;
++ tcon->dclk_max_div = SUN6I_DSI_TCON_DIV;
+
+ sun4i_tcon0_mode_set_common(tcon, mode);
+
+diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+index 318994cd1b85..869e0aedf343 100644
+--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+@@ -358,7 +358,13 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
+ static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode)
+ {
+- return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
++ u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
++ u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
++
++ if (delay > mode->vtotal)
++ delay = delay % mode->vtotal;
++
++ return max_t(u16, delay, 1);
+ }
+
+ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
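
The new start-delay computation clamps the porch-derived start value to [8, 100] and wraps the delay back into the frame instead of letting it exceed vtotal. The arithmetic in isolation, with illustrative mode numbers rather than any specific panel's timings:

#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        /* illustrative mode, not a real panel's timings */
        int vtotal = 530, vdisplay = 480, vsync_end = 500;
        int start = clamp_int(vtotal - vdisplay - 10, 8, 100);
        int delay = vtotal - (vsync_end - vdisplay) + start;

        if (delay > vtotal)
                delay %= vtotal;        /* wrap back into the frame */
        if (delay < 1)
                delay = 1;

        printf("start=%d delay=%d (vtotal=%d)\n", start, delay, vtotal);
        return 0;
}
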
+diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+index a07090579f84..5c3ad5be0690 100644
+--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+@@ -13,6 +13,8 @@
+ #include <drm/drm_encoder.h>
+ #include <drm/drm_mipi_dsi.h>
+
++#define SUN6I_DSI_TCON_DIV 4
++
+ struct sun6i_dsi {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
+index 43a3b68d90a2..998d75be9e16 100644
+--- a/drivers/gpu/drm/tinydrm/ili9225.c
++++ b/drivers/gpu/drm/tinydrm/ili9225.c
+@@ -301,7 +301,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
+ mipi->enabled = false;
+ }
+
+-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
++static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ size_t num)
+ {
+ struct spi_device *spi = mipi->spi;
+@@ -311,11 +311,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
+
+ gpiod_set_value_cansleep(mipi->dc, 0);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
+- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
++ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
+ if (ret || !num)
+ return ret;
+
+- if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
++ if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+ bpw = 16;
+
+ gpiod_set_value_cansleep(mipi->dc, 1);
+diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
+index 918f77c7de34..295cbcbc2bb6 100644
+--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
++++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
+@@ -153,16 +153,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
+ */
+ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+ {
++ u8 *cmdbuf;
+ int ret;
+
++ /* SPI requires dma-safe buffers */
++ cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
++ if (!cmdbuf)
++ return -ENOMEM;
++
+ mutex_lock(&mipi->cmdlock);
+- ret = mipi->command(mipi, cmd, data, len);
++ ret = mipi->command(mipi, cmdbuf, data, len);
+ mutex_unlock(&mipi->cmdlock);
+
++ kfree(cmdbuf);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(mipi_dbi_command_buf);
+
++/* This should only be used by mipi_dbi_command() */
++int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
++{
++ u8 *buf;
++ int ret;
++
++ buf = kmemdup(data, len, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
++
++ kfree(buf);
++
++ return ret;
++}
++EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
++
+ /**
+ * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
+ * @dst: The destination buffer
+@@ -774,18 +800,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
+ return 0;
+ }
+
+-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
++static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
+ u8 *parameters, size_t num)
+ {
+- unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
++ unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
+ int ret;
+
+- if (mipi_dbi_command_is_read(mipi, cmd))
++ if (mipi_dbi_command_is_read(mipi, *cmd))
+ return -ENOTSUPP;
+
+- MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
++ MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
+
+- ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
++ ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
+ if (ret || !num)
+ return ret;
+
+@@ -794,7 +820,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
+
+ /* MIPI DBI Type C Option 3 */
+
+-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
++static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
+ u8 *data, size_t len)
+ {
+ struct spi_device *spi = mipi->spi;
+@@ -803,7 +829,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+ struct spi_transfer tr[2] = {
+ {
+ .speed_hz = speed_hz,
+- .tx_buf = &cmd,
++ .tx_buf = cmd,
+ .len = 1,
+ }, {
+ .speed_hz = speed_hz,
+@@ -821,8 +847,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+ * Support non-standard 24-bit and 32-bit Nokia read commands which
+ * start with a dummy clock, so we need to read an extra byte.
+ */
+- if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
+- cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
++ if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
++ *cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
+ if (!(len == 3 || len == 4))
+ return -EINVAL;
+
+@@ -852,7 +878,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+ data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
+ }
+
+- MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
++ MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
+
+ err_free:
+ kfree(buf);
+@@ -860,7 +886,7 @@ err_free:
+ return ret;
+ }
+
+-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
++static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
+ u8 *par, size_t num)
+ {
+ struct spi_device *spi = mipi->spi;
+@@ -868,18 +894,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
+ u32 speed_hz;
+ int ret;
+
+- if (mipi_dbi_command_is_read(mipi, cmd))
++ if (mipi_dbi_command_is_read(mipi, *cmd))
+ return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
+
+- MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
++ MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
+
+ gpiod_set_value_cansleep(mipi->dc, 0);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
+- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
++ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
+ if (ret || !num)
+ return ret;
+
+- if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
++ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ bpw = 16;
+
+ gpiod_set_value_cansleep(mipi->dc, 1);
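
The mipi-dbi hunks switch the command byte from a stack variable to a heap buffer because the SPI transfer may be backed by DMA, which cannot target stack memory. A user-space analog of the kmemdup() pattern; the transport is stubbed and the DCS opcode is just an example:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Stand-in for the transport that must not see stack memory. */
static int send_buf(const uint8_t *buf, size_t len)
{
        printf("sending %zu byte(s), first=0x%02x\n", len, buf[0]);
        return 0;
}

/* Analog of the kmemdup() dance above: copy the caller's (possibly
 * stack-resident) byte into heap memory before handing it off. */
static int send_command(uint8_t cmd)
{
        uint8_t *heap_cmd = malloc(1);
        int ret;

        if (!heap_cmd)
                return -1;
        *heap_cmd = cmd;
        ret = send_buf(heap_cmd, 1);
        free(heap_cmd);
        return ret;
}

int main(void)
{
        return send_command(0x2c);   /* MIPI_DCS_WRITE_MEMORY_START */
}
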
+diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
+index f0afcec72c34..30ae1c74edaa 100644
+--- a/drivers/gpu/drm/v3d/v3d_drv.c
++++ b/drivers/gpu/drm/v3d/v3d_drv.c
+@@ -312,14 +312,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
+ if (ret)
+ goto dev_destroy;
+
+- v3d_irq_init(v3d);
++ ret = v3d_irq_init(v3d);
++ if (ret)
++ goto gem_destroy;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+- goto gem_destroy;
++ goto irq_disable;
+
+ return 0;
+
++irq_disable:
++ v3d_irq_disable(v3d);
+ gem_destroy:
+ v3d_gem_destroy(drm);
+ dev_destroy:
+diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
+index fdda3037f7af..2fdb456b72d3 100644
+--- a/drivers/gpu/drm/v3d/v3d_drv.h
++++ b/drivers/gpu/drm/v3d/v3d_drv.h
+@@ -310,7 +310,7 @@ void v3d_reset(struct v3d_dev *v3d);
+ void v3d_invalidate_caches(struct v3d_dev *v3d);
+
+ /* v3d_irq.c */
+-void v3d_irq_init(struct v3d_dev *v3d);
++int v3d_irq_init(struct v3d_dev *v3d);
+ void v3d_irq_enable(struct v3d_dev *v3d);
+ void v3d_irq_disable(struct v3d_dev *v3d);
+ void v3d_irq_reset(struct v3d_dev *v3d);
+diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
+index 69338da70ddc..29d746cfce57 100644
+--- a/drivers/gpu/drm/v3d/v3d_irq.c
++++ b/drivers/gpu/drm/v3d/v3d_irq.c
+@@ -156,7 +156,7 @@ v3d_hub_irq(int irq, void *arg)
+ return status;
+ }
+
+-void
++int
+ v3d_irq_init(struct v3d_dev *v3d)
+ {
+ int ret, core;
+@@ -173,13 +173,22 @@ v3d_irq_init(struct v3d_dev *v3d)
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_hub_irq, IRQF_SHARED,
+ "v3d_hub", v3d);
++ if (ret)
++ goto fail;
++
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
+ v3d_irq, IRQF_SHARED,
+ "v3d_core0", v3d);
+ if (ret)
+- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
++ goto fail;
+
+ v3d_irq_enable(v3d);
++ return 0;
++
++fail:
++ if (ret != -EPROBE_DEFER)
++ dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
++ return ret;
+ }
+
+ void
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 860e21ec6a49..63a43726cce0 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -218,13 +218,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
+ * Add a usage to the temporary parser table.
+ */
+
+-static int hid_add_usage(struct hid_parser *parser, unsigned usage)
++static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
+ {
+ if (parser->local.usage_index >= HID_MAX_USAGES) {
+ hid_err(parser->device, "usage index exceeded\n");
+ return -1;
+ }
+ parser->local.usage[parser->local.usage_index] = usage;
++ parser->local.usage_size[parser->local.usage_index] = size;
+ parser->local.collection_index[parser->local.usage_index] =
+ parser->collection_stack_ptr ?
+ parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
+@@ -486,10 +487,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
+ return 0;
+ }
+
+- if (item->size <= 2)
+- data = (parser->global.usage_page << 16) + data;
+-
+- return hid_add_usage(parser, data);
++ return hid_add_usage(parser, data, item->size);
+
+ case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
+
+@@ -498,9 +496,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
+ return 0;
+ }
+
+- if (item->size <= 2)
+- data = (parser->global.usage_page << 16) + data;
+-
+ parser->local.usage_minimum = data;
+ return 0;
+
+@@ -511,9 +506,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
+ return 0;
+ }
+
+- if (item->size <= 2)
+- data = (parser->global.usage_page << 16) + data;
+-
+ count = data - parser->local.usage_minimum;
+ if (count + parser->local.usage_index >= HID_MAX_USAGES) {
+ /*
+@@ -533,7 +525,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
+ }
+
+ for (n = parser->local.usage_minimum; n <= data; n++)
+- if (hid_add_usage(parser, n)) {
++ if (hid_add_usage(parser, n, item->size)) {
+ dbg_hid("hid_add_usage failed\n");
+ return -1;
+ }
+@@ -547,6 +539,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
+ return 0;
+ }
+
++/*
++ * Concatenate Usage Pages into Usages where relevant:
++ * As per specification, 6.2.2.8: "When the parser encounters a main item it
++ * concatenates the last declared Usage Page with a Usage to form a complete
++ * usage value."
++ */
++
++static void hid_concatenate_usage_page(struct hid_parser *parser)
++{
++ int i;
++
++ for (i = 0; i < parser->local.usage_index; i++)
++ if (parser->local.usage_size[i] <= 2)
++ parser->local.usage[i] += parser->global.usage_page << 16;
++}
++
+ /*
+ * Process a main item.
+ */
+@@ -556,6 +564,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
+ __u32 data;
+ int ret;
+
++ hid_concatenate_usage_page(parser);
++
+ data = item_udata(item);
+
+ switch (item->tag) {
+@@ -765,6 +775,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
+ __u32 data;
+ int i;
+
++ hid_concatenate_usage_page(parser);
++
+ data = item_udata(item);
+
+ switch (item->tag) {
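
hid_concatenate_usage_page() defers folding the Usage Page into 16-bit (or shorter) usages until a main item closes, so a Usage Page declared after the Usage still applies, as section 6.2.2.8 of the HID spec requires. A self-contained sketch of that bit arithmetic; the struct layout is simplified from the kernel's parser:

#include <stdio.h>
#include <stdint.h>

#define MAX_USAGES 8

struct parser {
        uint32_t usage[MAX_USAGES];
        uint8_t  usage_size[MAX_USAGES];  /* bytes in the report item */
        int      usage_index;
        uint16_t usage_page;
};

/* Mirrors hid_concatenate_usage_page(): short usages get the current
 * Usage Page folded into the top 16 bits only once a main item closes,
 * so a page declared after the usage still takes effect. */
static void concatenate_usage_page(struct parser *p)
{
        int i;

        for (i = 0; i < p->usage_index; i++)
                if (p->usage_size[i] <= 2)
                        p->usage[i] += (uint32_t)p->usage_page << 16;
}

int main(void)
{
        struct parser p = { .usage_index = 0 };

        p.usage[p.usage_index] = 0x30;          /* Usage (X), 1-byte item */
        p.usage_size[p.usage_index++] = 1;
        p.usage_page = 0x01;                    /* Generic Desktop, declared late */

        concatenate_usage_page(&p);
        printf("usage 0x%08x\n", (unsigned int)p.usage[0]);  /* 0x00010030 */
        return 0;
}
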
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 199cc256e9d9..e74fa990ba13 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -836,13 +836,16 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
+
+ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
+ {
++ const u8 ping_byte = 0x5a;
++ u8 ping_data[3] = { 0, 0, ping_byte };
+ struct hidpp_report response;
+ int ret;
+
+- ret = hidpp_send_fap_command_sync(hidpp,
++ ret = hidpp_send_rap_command_sync(hidpp,
++ REPORT_ID_HIDPP_SHORT,
+ HIDPP_PAGE_ROOT_IDX,
+ CMD_ROOT_GET_PROTOCOL_VERSION,
+- NULL, 0, &response);
++ ping_data, sizeof(ping_data), &response);
+
+ if (ret == HIDPP_ERROR_INVALID_SUBID) {
+ hidpp->protocol_major = 1;
+@@ -862,8 +865,14 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
+ if (ret)
+ return ret;
+
+- hidpp->protocol_major = response.fap.params[0];
+- hidpp->protocol_minor = response.fap.params[1];
++ if (response.rap.params[2] != ping_byte) {
++ hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n",
++ __func__, response.rap.params[2], ping_byte);
++ return -EPROTO;
++ }
++
++ hidpp->protocol_major = response.rap.params[0];
++ hidpp->protocol_minor = response.rap.params[1];
+
+ return ret;
+ }
+@@ -1012,7 +1021,11 @@ static int hidpp_map_battery_level(int capacity)
+ {
+ if (capacity < 11)
+ return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+- else if (capacity < 31)
++ /*
++ * The spec says this should be < 31 but some devices report 30
++ * with brand new batteries and Windows reports 30 as "Good".
++ */
++ else if (capacity < 30)
+ return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (capacity < 81)
+ return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
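
The battery hunk lowers the LOW threshold from the spec's 31 to 30 because some devices report 30 with a fresh battery. A sketch of the visible part of the mapping; the bands above 81% are not shown in the hunk, so the sketch lumps them together:

#include <stdio.h>

static const char *map_battery_level(int capacity)
{
        if (capacity < 11)
                return "critical";
        else if (capacity < 30)   /* was < 31; 30 shows up on new batteries */
                return "low";
        else if (capacity < 81)
                return "normal";
        return "above normal";    /* higher bands elided in the hunk above */
}

int main(void)
{
        int caps[] = { 10, 29, 30, 80, 81 };

        for (unsigned int i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
                printf("%3d%% -> %s\n", caps[i], map_battery_level(caps[i]));
        return 0;
}
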
+diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
+index 73c681162653..623736d2a7c1 100644
+--- a/drivers/hwmon/f71805f.c
++++ b/drivers/hwmon/f71805f.c
+@@ -96,17 +96,23 @@ superio_select(int base, int ld)
+ outb(ld, base + 1);
+ }
+
+-static inline void
++static inline int
+ superio_enter(int base)
+ {
++ if (!request_muxed_region(base, 2, DRVNAME))
++ return -EBUSY;
++
+ outb(0x87, base);
+ outb(0x87, base);
++
++ return 0;
+ }
+
+ static inline void
+ superio_exit(int base)
+ {
+ outb(0xaa, base);
++ release_region(base, 2);
+ }
+
+ /*
+@@ -1561,7 +1567,7 @@ exit:
+ static int __init f71805f_find(int sioaddr, unsigned short *address,
+ struct f71805f_sio_data *sio_data)
+ {
+- int err = -ENODEV;
++ int err;
+ u16 devid;
+
+ static const char * const names[] = {
+@@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
+ "F71872F/FG or F71806F/FG",
+ };
+
+- superio_enter(sioaddr);
++ err = superio_enter(sioaddr);
++ if (err)
++ return err;
+
++ err = -ENODEV;
+ devid = superio_inw(sioaddr, SIO_REG_MANID);
+ if (devid != SIO_FINTEK_ID)
+ goto exit;
+diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
+index d1a3f2040c00..58eee8fa3e6d 100644
+--- a/drivers/hwmon/pc87427.c
++++ b/drivers/hwmon/pc87427.c
+@@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
+ #define LD_IN 1
+ #define LD_TEMP 1
+
++static inline int superio_enter(int sioaddr)
++{
++ if (!request_muxed_region(sioaddr, 2, DRVNAME))
++ return -EBUSY;
++ return 0;
++}
++
+ static inline void superio_outb(int sioaddr, int reg, int val)
+ {
+ outb(reg, sioaddr);
+@@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr)
+ {
+ outb(0x02, sioaddr);
+ outb(0x02, sioaddr + 1);
++ release_region(sioaddr, 2);
+ }
+
+ /*
+@@ -1195,7 +1203,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
+ {
+ u16 val;
+ u8 cfg, cfg_b;
+- int i, err = 0;
++ int i, err;
++
++ err = superio_enter(sioaddr);
++ if (err)
++ return err;
+
+ /* Identify device */
+ val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID);
+diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
+index c0775084dde0..60e193f2e970 100644
+--- a/drivers/hwmon/smsc47b397.c
++++ b/drivers/hwmon/smsc47b397.c
+@@ -72,14 +72,19 @@ static inline void superio_select(int ld)
+ superio_outb(0x07, ld);
+ }
+
+-static inline void superio_enter(void)
++static inline int superio_enter(void)
+ {
++ if (!request_muxed_region(REG, 2, DRVNAME))
++ return -EBUSY;
++
+ outb(0x55, REG);
++ return 0;
+ }
+
+ static inline void superio_exit(void)
+ {
+ outb(0xAA, REG);
++ release_region(REG, 2);
+ }
+
+ #define SUPERIO_REG_DEVID 0x20
+@@ -300,8 +305,12 @@ static int __init smsc47b397_find(void)
+ u8 id, rev;
+ char *name;
+ unsigned short addr;
++ int err;
++
++ err = superio_enter();
++ if (err)
++ return err;
+
+- superio_enter();
+ id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
+
+ switch (id) {
+diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
+index c7b6a425e2c0..5eeac9853d0a 100644
+--- a/drivers/hwmon/smsc47m1.c
++++ b/drivers/hwmon/smsc47m1.c
+@@ -73,16 +73,21 @@ superio_inb(int reg)
+ /* logical device for fans is 0x0A */
+ #define superio_select() superio_outb(0x07, 0x0A)
+
+-static inline void
++static inline int
+ superio_enter(void)
+ {
++ if (!request_muxed_region(REG, 2, DRVNAME))
++ return -EBUSY;
++
+ outb(0x55, REG);
++ return 0;
+ }
+
+ static inline void
+ superio_exit(void)
+ {
+ outb(0xAA, REG);
++ release_region(REG, 2);
+ }
+
+ #define SUPERIO_REG_ACT 0x30
+@@ -531,8 +536,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
+ {
+ u8 val;
+ unsigned short addr;
++ int err;
++
++ err = superio_enter();
++ if (err)
++ return err;
+
+- superio_enter();
+ val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
+
+ /*
+@@ -608,13 +617,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
+ static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
+ {
+ if ((sio_data->activate & 0x01) == 0) {
+- superio_enter();
+- superio_select();
+-
+- pr_info("Disabling device\n");
+- superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+-
+- superio_exit();
++ if (!superio_enter()) {
++ superio_select();
++ pr_info("Disabling device\n");
++ superio_outb(SUPERIO_REG_ACT, sio_data->activate);
++ superio_exit();
++ } else {
++ pr_warn("Failed to disable device\n");
++ }
+ }
+ }
+
+diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
+index 3a6bfa51cb94..95d5e8ec8b7f 100644
+--- a/drivers/hwmon/vt1211.c
++++ b/drivers/hwmon/vt1211.c
+@@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn)
+ outb(ldn, sio_cip + 1);
+ }
+
+-static inline void superio_enter(int sio_cip)
++static inline int superio_enter(int sio_cip)
+ {
++ if (!request_muxed_region(sio_cip, 2, DRVNAME))
++ return -EBUSY;
++
+ outb(0x87, sio_cip);
+ outb(0x87, sio_cip);
++
++ return 0;
+ }
+
+ static inline void superio_exit(int sio_cip)
+ {
+ outb(0xaa, sio_cip);
++ release_region(sio_cip, 2);
+ }
+
+ /* ---------------------------------------------------------------------
+@@ -1282,11 +1288,14 @@ EXIT:
+
+ static int __init vt1211_find(int sio_cip, unsigned short *address)
+ {
+- int err = -ENODEV;
++ int err;
+ int devid;
+
+- superio_enter(sio_cip);
++ err = superio_enter(sio_cip);
++ if (err)
++ return err;
+
++ err = -ENODEV;
+ devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
+ if (devid != SIO_VT1211_ID)
+ goto EXIT;
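
The five hwmon hunks above (f71805f, pc87427, smsc47b397, smsc47m1, vt1211) share one shape: superio_enter() now claims the config ports with request_muxed_region() and can fail, so every caller checks the return value and releases the region on all exit paths. A user-space sketch of that control flow with the port I/O stubbed out; error numbers are inlined for readability:

#include <stdio.h>

/* Stubbed port claim so the control flow runs in user space; the real
 * drivers call request_muxed_region()/release_region(). */
static int region_busy;

static int superio_enter(void)
{
        if (region_busy)
                return -16;          /* -EBUSY */
        /* outb(0x87, base); outb(0x87, base); */
        return 0;
}

static void superio_exit(void)
{
        /* outb(0xaa, base); */
}

static int probe(void)
{
        int err = superio_enter();

        if (err)
                return err;          /* region held by someone else */

        err = -19;                   /* -ENODEV until the chip ID matches */
        /* ... read and match the device ID here ... */
        err = 0;

        superio_exit();
        return err;
}

int main(void)
{
        printf("probe: %d\n", probe());
        return 0;
}
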
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index 76db6e5cc296..9ca21a8dfcd7 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -809,6 +809,7 @@ config STM32_DFSDM_ADC
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select STM32_DFSDM_CORE
+ select REGMAP_MMIO
++ select IIO_BUFFER
+ select IIO_BUFFER_HW_CONSUMER
+ help
+ Select this option to support ADCSigma delta modulator for
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index 54d9978b2740..a4310600a853 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -62,7 +62,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ struct spi_transfer t = {
+ .tx_buf = data,
+ .len = size + 1,
+- .cs_change = sigma_delta->bus_locked,
++ .cs_change = sigma_delta->keep_cs_asserted,
+ };
+ struct spi_message m;
+ int ret;
+@@ -218,6 +218,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+
+ spi_bus_lock(sigma_delta->spi->master);
+ sigma_delta->bus_locked = true;
++ sigma_delta->keep_cs_asserted = true;
+ reinit_completion(&sigma_delta->completion);
+
+ ret = ad_sigma_delta_set_mode(sigma_delta, mode);
+@@ -235,9 +236,10 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+ ret = 0;
+ }
+ out:
++ sigma_delta->keep_cs_asserted = false;
++ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ sigma_delta->bus_locked = false;
+ spi_bus_unlock(sigma_delta->spi->master);
+- ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+
+ return ret;
+ }
+@@ -290,6 +292,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+
+ spi_bus_lock(sigma_delta->spi->master);
+ sigma_delta->bus_locked = true;
++ sigma_delta->keep_cs_asserted = true;
+ reinit_completion(&sigma_delta->completion);
+
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
+@@ -299,9 +302,6 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ ret = wait_for_completion_interruptible_timeout(
+ &sigma_delta->completion, HZ);
+
+- sigma_delta->bus_locked = false;
+- spi_bus_unlock(sigma_delta->spi->master);
+-
+ if (ret == 0)
+ ret = -EIO;
+ if (ret < 0)
+@@ -322,7 +322,10 @@ out:
+ sigma_delta->irq_dis = true;
+ }
+
++ sigma_delta->keep_cs_asserted = false;
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
++ sigma_delta->bus_locked = false;
++ spi_bus_unlock(sigma_delta->spi->master);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret)
+@@ -359,6 +362,8 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
+
+ spi_bus_lock(sigma_delta->spi->master);
+ sigma_delta->bus_locked = true;
++ sigma_delta->keep_cs_asserted = true;
++
+ ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
+ if (ret)
+ goto err_unlock;
+@@ -387,6 +392,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
+ sigma_delta->irq_dis = true;
+ }
+
++ sigma_delta->keep_cs_asserted = false;
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+
+ sigma_delta->bus_locked = false;
+diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
+index 0ad63592cc3c..1e47bef72bb7 100644
+--- a/drivers/iio/adc/ti-ads7950.c
++++ b/drivers/iio/adc/ti-ads7950.c
+@@ -56,6 +56,9 @@ struct ti_ads7950_state {
+ struct spi_message ring_msg;
+ struct spi_message scan_single_msg;
+
++ /* Lock to protect the spi xfer buffers */
++ struct mutex slock;
++
+ struct regulator *reg;
+ unsigned int vref_mv;
+
+@@ -268,6 +271,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
+ struct ti_ads7950_state *st = iio_priv(indio_dev);
+ int ret;
+
++ mutex_lock(&st->slock);
+ ret = spi_sync(st->spi, &st->ring_msg);
+ if (ret < 0)
+ goto out;
+@@ -276,6 +280,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
+ iio_get_time_ns(indio_dev));
+
+ out:
++ mutex_unlock(&st->slock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+@@ -286,7 +291,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
+ struct ti_ads7950_state *st = iio_priv(indio_dev);
+ int ret, cmd;
+
+- mutex_lock(&indio_dev->mlock);
++ mutex_lock(&st->slock);
+
+ cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings;
+ st->single_tx = cmd;
+@@ -298,7 +303,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
+ ret = st->single_rx;
+
+ out:
+- mutex_unlock(&indio_dev->mlock);
++ mutex_unlock(&st->slock);
+
+ return ret;
+ }
+@@ -432,16 +437,19 @@ static int ti_ads7950_probe(struct spi_device *spi)
+ if (ACPI_COMPANION(&spi->dev))
+ st->vref_mv = TI_ADS7950_VA_MV_ACPI_DEFAULT;
+
++ mutex_init(&st->slock);
++
+ st->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->reg)) {
+ dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
+- return PTR_ERR(st->reg);
++ ret = PTR_ERR(st->reg);
++ goto error_destroy_mutex;
+ }
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable regulator \"vref\"\n");
+- return ret;
++ goto error_destroy_mutex;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+@@ -463,6 +471,8 @@ error_cleanup_ring:
+ iio_triggered_buffer_cleanup(indio_dev);
+ error_disable_reg:
+ regulator_disable(st->reg);
++error_destroy_mutex:
++ mutex_destroy(&st->slock);
+
+ return ret;
+ }
+@@ -475,6 +485,7 @@ static int ti_ads7950_remove(struct spi_device *spi)
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ regulator_disable(st->reg);
++ mutex_destroy(&st->slock);
+
+ return 0;
+ }
+diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
+index 645f2e3975db..e38f704d88b7 100644
+--- a/drivers/iio/common/ssp_sensors/ssp_iio.c
++++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
+@@ -81,7 +81,7 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
+ unsigned int len, int64_t timestamp)
+ {
+ __le32 time;
+- int64_t calculated_time;
++ int64_t calculated_time = 0;
+ struct ssp_sensor_data *spd = iio_priv(indio_dev);
+
+ if (indio_dev->scan_bytes == 0)
+diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
+index 3de7f4426ac4..86abba5827a2 100644
+--- a/drivers/iio/magnetometer/hmc5843_i2c.c
++++ b/drivers/iio/magnetometer/hmc5843_i2c.c
+@@ -58,8 +58,13 @@ static const struct regmap_config hmc5843_i2c_regmap_config = {
+ static int hmc5843_i2c_probe(struct i2c_client *cli,
+ const struct i2c_device_id *id)
+ {
++ struct regmap *regmap = devm_regmap_init_i2c(cli,
++ &hmc5843_i2c_regmap_config);
++ if (IS_ERR(regmap))
++ return PTR_ERR(regmap);
++
+ return hmc5843_common_probe(&cli->dev,
+- devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config),
++ regmap,
+ id->driver_data, id->name);
+ }
+
+diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
+index 535f03a70d63..79b2b707f90e 100644
+--- a/drivers/iio/magnetometer/hmc5843_spi.c
++++ b/drivers/iio/magnetometer/hmc5843_spi.c
+@@ -58,6 +58,7 @@ static const struct regmap_config hmc5843_spi_regmap_config = {
+ static int hmc5843_spi_probe(struct spi_device *spi)
+ {
+ int ret;
++ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ spi->mode = SPI_MODE_3;
+@@ -67,8 +68,12 @@ static int hmc5843_spi_probe(struct spi_device *spi)
+ if (ret)
+ return ret;
+
++ regmap = devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config);
++ if (IS_ERR(regmap))
++ return PTR_ERR(regmap);
++
+ return hmc5843_common_probe(&spi->dev,
+- devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config),
++ regmap,
+ id->driver_data, id->name);
+ }
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 68c997be2429..c54da16df0be 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1173,18 +1173,31 @@ static inline bool cma_any_addr(const struct sockaddr *addr)
+ return cma_zero_addr(addr) || cma_loopback_addr(addr);
+ }
+
+-static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
++static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
+ {
+ if (src->sa_family != dst->sa_family)
+ return -1;
+
+ switch (src->sa_family) {
+ case AF_INET:
+- return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
+- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+- case AF_INET6:
+- return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
+- &((struct sockaddr_in6 *) dst)->sin6_addr);
++ return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
++ ((struct sockaddr_in *)dst)->sin_addr.s_addr;
++ case AF_INET6: {
++ struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
++ struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
++ bool link_local;
++
++ if (ipv6_addr_cmp(&src_addr6->sin6_addr,
++ &dst_addr6->sin6_addr))
++ return 1;
++ link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
++ IPV6_ADDR_LINKLOCAL;
++ /* Link local must match their scope_ids */
++ return link_local ? (src_addr6->sin6_scope_id !=
++ dst_addr6->sin6_scope_id) :
++ 0;
++ }
++
+ default:
+ return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
+ &((struct sockaddr_ib *) dst)->sib_addr);
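
The cma_addr_cmp() change treats two equal link-local IPv6 addresses as different when their scope IDs differ, since fe80::/10 addresses are only meaningful per interface. A user-space sketch of the comparison; note the kernel tests the destination's address type, while this sketch tests the first argument's:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Two link-local addresses only match when their scope IDs
 * (interface indexes) also match. */
static int addr6_cmp(const struct sockaddr_in6 *a,
                     const struct sockaddr_in6 *b)
{
        if (memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(a->sin6_addr)))
                return 1;
        if (IN6_IS_ADDR_LINKLOCAL(&a->sin6_addr))
                return a->sin6_scope_id != b->sin6_scope_id;
        return 0;
}

int main(void)
{
        struct sockaddr_in6 a = { .sin6_family = AF_INET6, .sin6_scope_id = 2 };
        struct sockaddr_in6 b = { .sin6_family = AF_INET6, .sin6_scope_id = 3 };

        inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
        inet_pton(AF_INET6, "fe80::1", &b.sin6_addr);

        printf("same addr, different scope: %s\n",
               addr6_cmp(&a, &b) ? "different" : "equal");
        return 0;
}
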
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 4d232bdf9e97..689ba6bc2ca9 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -457,6 +457,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
+ skb_reset_transport_header(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
++ if (!skb)
++ return NULL;
+ }
+ t4_set_arp_err_handler(skb, NULL, NULL);
+ return skb;
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index faaaac8fbc55..3af5eb10a5ff 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -805,7 +805,8 @@ static int create_workqueues(struct hfi1_devdata *dd)
+ ppd->hfi1_wq =
+ alloc_workqueue(
+ "hfi%d_%d",
+- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
++ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
++ WQ_MEM_RECLAIM,
+ HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
+ dd->unit, pidx);
+ if (!ppd->hfi1_wq)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
+index b3c8c45ec1e3..64e0c69b69c5 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -70,7 +70,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
+ HNS_ROCE_VLAN_SL_BIT_MASK) <<
+ HNS_ROCE_VLAN_SL_SHIFT;
+
+- ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
++ ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) <<
+ HNS_ROCE_PORT_NUM_SHIFT));
+ ah->av.gid_index = grh->sgid_index;
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 0aa10ebda5d9..91669e35c6ca 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -711,6 +711,15 @@ struct pf_frame {
+ int depth;
+ };
+
++static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
++{
++ if (!mmkey)
++ return false;
++ if (mmkey->type == MLX5_MKEY_MW)
++ return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
++ return mmkey->key == key;
++}
++
+ static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
+ {
+ struct mlx5_ib_mw *mw;
+@@ -760,7 +769,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+
+ next_mr:
+ mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
+- if (!mmkey || mmkey->key != key) {
++ if (!mkey_is_eq(mmkey, key)) {
+ mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
+ ret = -EFAULT;
+ goto srcu_unlock;
+diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
+index 42f0f25e396c..ec89fbd06c53 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mr.c
++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
+@@ -199,6 +199,12 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+ buf = map[0]->buf;
+
+ for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
++ if (num_buf >= RXE_BUF_PER_MAP) {
++ map++;
++ buf = map[0]->buf;
++ num_buf = 0;
++ }
++
+ vaddr = page_address(sg_page_iter_page(&sg_iter));
+ if (!vaddr) {
+ pr_warn("null vaddr\n");
+@@ -211,11 +217,6 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+ num_buf++;
+ buf++;
+
+- if (num_buf >= RXE_BUF_PER_MAP) {
+- map++;
+- buf = map[0]->buf;
+- num_buf = 0;
+- }
+ }
+ }
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 5002838ea476..f8986effcb50 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -327,10 +327,11 @@ static int bch_allocator_thread(void *arg)
+ * possibly issue discards to them, then we add the bucket to
+ * the free list:
+ */
+- while (!fifo_empty(&ca->free_inc)) {
++ while (1) {
+ long bucket;
+
+- fifo_pop(&ca->free_inc, bucket);
++ if (!fifo_pop(&ca->free_inc, bucket))
++ break;
+
+ if (ca->discard) {
+ mutex_unlock(&ca->set->bucket_lock);
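
The allocator hunk makes fifo_pop()'s return value the loop condition instead of a separate fifo_empty() test, so emptiness is checked at the moment of removal; the lock is dropped and retaken inside the loop body, which can make a standalone emptiness check go stale. A toy ring buffer showing the shape; capacity and values are arbitrary:

#include <stdio.h>
#include <stdbool.h>

static int ring[4];
static unsigned int head, tail;

static bool fifo_pop(int *out)
{
        if (head == tail)
                return false;
        *out = ring[tail++ % 4];
        return true;
}

int main(void)
{
        int bucket;

        ring[head++ % 4] = 7;
        ring[head++ % 4] = 9;

        /* Pop as the loop condition, as in the hunk above: the
         * emptiness test and the removal are a single step from the
         * caller's point of view. */
        while (fifo_pop(&bucket))
                printf("freed bucket %d\n", bucket);
        return 0;
}
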
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index d3725c17ce3a..6c94fa007796 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -317,6 +317,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
+ }
+ }
+
++bool is_discard_enabled(struct cache_set *s)
++{
++ struct cache *ca;
++ unsigned int i;
++
++ for_each_cache(ca, s, i)
++ if (ca->discard)
++ return true;
++
++ return false;
++}
++
+ int bch_journal_replay(struct cache_set *s, struct list_head *list)
+ {
+ int ret = 0, keys = 0, entries = 0;
+@@ -330,9 +342,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
+ list_for_each_entry(i, list, list) {
+ BUG_ON(i->pin && atomic_read(i->pin) != 1);
+
+- cache_set_err_on(n != i->j.seq, s,
+-"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+- n, i->j.seq - 1, start, end);
++ if (n != i->j.seq) {
++ if (n == start && is_discard_enabled(s))
++ pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
++ n, i->j.seq - 1, start, end);
++ else {
++ pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
++ n, i->j.seq - 1, start, end);
++ ret = -EIO;
++ goto err;
++ }
++ }
+
+ for (k = i->j.start;
+ k < bset_bkey_last(&i->j);
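
bch_journal_replay() now distinguishes two kinds of sequence gaps: a gap at the very start of the replay range is expected when discard is enabled (discarded journal buckets legitimately disappear) and is only logged, while a gap in the middle means entries were really lost and replay aborts with -EIO instead of tripping the old cache_set_err_on(). A compilable sketch of that classification:

	#include <stdbool.h>
	#include <stdio.h>

	/* A gap at the very start is tolerable when discard may have
	 * eaten those buckets; a gap mid-replay means lost entries. */
	static int check_seq(unsigned long long expect,
			     unsigned long long got,
			     unsigned long long start, bool discard_enabled)
	{
		if (expect == got)
			return 0;
		if (expect == start && discard_enabled) {
			printf("entries %llu-%llu may be discarded, continuing\n",
			       expect, got - 1);
			return 0;
		}
		printf("entries %llu-%llu missing, abort replay\n",
		       expect, got - 1);
		return -1;	/* caller fails the cache set instead */
	}

	int main(void)
	{
		check_seq(10, 12, 10, true);	/* leading gap: tolerated */
		check_seq(13, 15, 10, true);	/* mid-replay gap: fatal */
		return 0;
	}
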
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 171d5e0f698b..e489d2459569 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1775,13 +1775,15 @@ err:
+ return NULL;
+ }
+
+-static void run_cache_set(struct cache_set *c)
++static int run_cache_set(struct cache_set *c)
+ {
+ const char *err = "cannot allocate memory";
+ struct cached_dev *dc, *t;
+ struct cache *ca;
+ struct closure cl;
+ unsigned int i;
++ LIST_HEAD(journal);
++ struct journal_replay *l;
+
+ closure_init_stack(&cl);
+
+@@ -1869,7 +1871,9 @@ static void run_cache_set(struct cache_set *c)
+ if (j->version < BCACHE_JSET_VERSION_UUID)
+ __uuid_write(c);
+
+- bch_journal_replay(c, &journal);
++ err = "bcache: replay journal failed";
++ if (bch_journal_replay(c, &journal))
++ goto err;
+ } else {
+ pr_notice("invalidating existing data");
+
+@@ -1937,11 +1941,19 @@ static void run_cache_set(struct cache_set *c)
+ flash_devs_run(c);
+
+ set_bit(CACHE_SET_RUNNING, &c->flags);
+- return;
++ return 0;
+ err:
++ while (!list_empty(&journal)) {
++ l = list_first_entry(&journal, struct journal_replay, list);
++ list_del(&l->list);
++ kfree(l);
++ }
++
+ closure_sync(&cl);
+ /* XXX: test this, it's broken */
+ bch_cache_set_error(c, "%s", err);
++
++ return -EIO;
+ }
+
+ static bool can_attach_cache(struct cache *ca, struct cache_set *c)
+@@ -2005,8 +2017,11 @@ found:
+ ca->set->cache[ca->sb.nr_this_dev] = ca;
+ c->cache_by_alloc[c->caches_loaded++] = ca;
+
+- if (c->caches_loaded == c->sb.nr_in_set)
+- run_cache_set(c);
++ if (c->caches_loaded == c->sb.nr_in_set) {
++ err = "failed to run cache set";
++ if (run_cache_set(c) < 0)
++ goto err;
++ }
+
+ return NULL;
+ err:
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index cde3b49b2a91..350cf0451456 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -880,13 +880,17 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
+ }
+ EXPORT_SYMBOL_GPL(dm_table_set_type);
+
++/* validate the dax capability of the target device span */
+ static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+- sector_t start, sector_t len, void *data)
++ sector_t start, sector_t len, void *data)
+ {
+- return bdev_dax_supported(dev->bdev, PAGE_SIZE);
++ int blocksize = *(int *) data;
++
++ return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
++ start, len);
+ }
+
+-static bool dm_table_supports_dax(struct dm_table *t)
++bool dm_table_supports_dax(struct dm_table *t, int blocksize)
+ {
+ struct dm_target *ti;
+ unsigned i;
+@@ -899,7 +903,8 @@ static bool dm_table_supports_dax(struct dm_table *t)
+ return false;
+
+ if (!ti->type->iterate_devices ||
+- !ti->type->iterate_devices(ti, device_supports_dax, NULL))
++ !ti->type->iterate_devices(ti, device_supports_dax,
++ &blocksize))
+ return false;
+ }
+
+@@ -979,7 +984,7 @@ static int dm_table_determine_type(struct dm_table *t)
+ verify_bio_based:
+ /* We must use this table as bio-based */
+ t->type = DM_TYPE_BIO_BASED;
+- if (dm_table_supports_dax(t) ||
++ if (dm_table_supports_dax(t, PAGE_SIZE) ||
+ (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
+ t->type = DM_TYPE_DAX_BIO_BASED;
+ } else {
+@@ -1905,7 +1910,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ }
+ blk_queue_write_cache(q, wc, fua);
+
+- if (dm_table_supports_dax(t))
++ if (dm_table_supports_dax(t, PAGE_SIZE))
+ blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
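
dm_table_supports_dax() now takes the block size and hands it to the per-device check through the opaque data pointer of iterate_devices(), the usual device-mapper way to parameterize a capability probe. A sketch of the callback pattern with stand-in types; the all-devices-must-pass semantics below mirror how this caller uses the result, not any particular dm target's implementation:

	#include <stdbool.h>
	#include <stdio.h>

	struct dev { int max_blocksize; };
	struct target { struct dev *devs; int ndevs; };

	typedef int (*iterate_fn)(struct dev *dev, void *data);

	/* Stand-in for ti->type->iterate_devices(): applies fn to each
	 * underlying device, failing fast on the first zero return. */
	static int iterate_devices(struct target *t, iterate_fn fn, void *data)
	{
		for (int i = 0; i < t->ndevs; i++)
			if (!fn(&t->devs[i], data))
				return 0;
		return 1;
	}

	/* The capability test gets its parameter via the opaque pointer. */
	static int device_supports_dax(struct dev *dev, void *data)
	{
		int blocksize = *(int *)data;

		return blocksize <= dev->max_blocksize;
	}

	int main(void)
	{
		struct dev devs[] = { { 4096 }, { 512 } };
		struct target t = { devs, 2 };
		int blocksize = 4096;

		printf("%d\n",
		       iterate_devices(&t, device_supports_dax, &blocksize));
		return 0;
	}
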
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 08e7d412af95..1cacf02633ec 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1105,6 +1105,25 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+ return ret;
+ }
+
++static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
++ int blocksize, sector_t start, sector_t len)
++{
++ struct mapped_device *md = dax_get_private(dax_dev);
++ struct dm_table *map;
++ int srcu_idx;
++ bool ret;
++
++ map = dm_get_live_table(md, &srcu_idx);
++ if (!map)
++ return false;
++
++ ret = dm_table_supports_dax(map, blocksize);
++
++ dm_put_live_table(md, srcu_idx);
++
++ return ret;
++}
++
+ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+ {
+@@ -3194,6 +3213,7 @@ static const struct block_device_operations dm_blk_dops = {
+
+ static const struct dax_operations dm_dax_ops = {
+ .direct_access = dm_dax_direct_access,
++ .dax_supported = dm_dax_supported,
+ .copy_from_iter = dm_dax_copy_from_iter,
+ .copy_to_iter = dm_dax_copy_to_iter,
+ };
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index 2d539b82ec08..17e3db54404c 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -72,6 +72,7 @@ bool dm_table_bio_based(struct dm_table *t);
+ bool dm_table_request_based(struct dm_table *t);
+ void dm_table_free_md_mempools(struct dm_table *t);
+ struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
++bool dm_table_supports_dax(struct dm_table *t, int blocksize);
+
+ void dm_lock_md_type(struct mapped_device *md);
+ void dm_unlock_md_type(struct mapped_device *md);
+diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
+index 15b6b9c0a2e4..9c163f658aaf 100644
+--- a/drivers/media/common/videobuf2/videobuf2-core.c
++++ b/drivers/media/common/videobuf2/videobuf2-core.c
+@@ -672,6 +672,11 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+ return -EBUSY;
+ }
+
++ if (q->waiting_in_dqbuf && *count) {
++ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
++ return -EBUSY;
++ }
++
+ if (*count == 0 || q->num_buffers != 0 ||
+ (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
+ /*
+@@ -807,6 +812,10 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+ }
+
+ if (!q->num_buffers) {
++ if (q->waiting_in_dqbuf && *count) {
++ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
++ return -EBUSY;
++ }
+ memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+ q->memory = memory;
+ q->waiting_for_buffers = !q->is_output;
+@@ -1659,6 +1668,11 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+ for (;;) {
+ int ret;
+
++ if (q->waiting_in_dqbuf) {
++ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
++ return -EBUSY;
++ }
++
+ if (!q->streaming) {
+ dprintk(1, "streaming off, will not wait for buffers\n");
+ return -EINVAL;
+@@ -1686,6 +1700,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+ return -EAGAIN;
+ }
+
++ q->waiting_in_dqbuf = 1;
+ /*
+ * We are streaming and blocking, wait for another buffer to
+ * become ready or for streamoff. Driver's lock is released to
+@@ -1706,6 +1721,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+ * the locks or return an error if one occurred.
+ */
+ call_void_qop(q, wait_finish, q);
++ q->waiting_in_dqbuf = 0;
+ if (ret) {
+ dprintk(1, "sleep was interrupted\n");
+ return ret;
+@@ -2585,6 +2601,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
+ if (!data)
+ return -EINVAL;
+
++ if (q->waiting_in_dqbuf) {
++ dprintk(3, "another dup()ped fd is %s\n",
++ read ? "reading" : "writing");
++ return -EBUSY;
++ }
++
+ /*
+ * Initialize emulator on first call.
+ */
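
All four videobuf2 hunks hang off one new flag: waiting_in_dqbuf is set around the blocking wait so a second caller on a dup()ped fd gets -EBUSY instead of racing the first waiter for the same buffer, and REQBUFS/CREATE_BUFS and the file-I/O emulator refuse to reconfigure the queue while someone is parked in that wait. The guard in miniature (in vb2 the flag is serialized by the driver lock, elided here):

	#include <stdio.h>

	#define EBUSY 16

	struct queue {
		int waiting_in_dqbuf;	/* at most one blocking waiter */
		int streaming;
	};

	/* A second caller bounces with -EBUSY instead of racing the
	 * first; the flag is cleared once the wait finishes. */
	static int wait_for_done_buf(struct queue *q)
	{
		if (q->waiting_in_dqbuf)
			return -EBUSY;
		q->waiting_in_dqbuf = 1;
		/* ... drop lock, sleep for a done buffer, retake lock ... */
		q->waiting_in_dqbuf = 0;
		return 0;
	}

	int main(void)
	{
		struct queue q = { .streaming = 1 };

		printf("%d\n", wait_for_done_buf(&q));	/* 0 */
		q.waiting_in_dqbuf = 1;		/* a dup()ped fd is waiting */
		printf("%d\n", wait_for_done_buf(&q));	/* -16 */
		return 0;
	}
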
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index 123f2a33738b..403f42806455 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -309,6 +309,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
+ u16 u16tmp;
+ u32 tuner_frequency_khz, target_mclk;
+ s32 s32tmp;
++ static const struct reg_sequence reset_buf[] = {
++ {0x07, 0x80}, {0x07, 0x00}
++ };
+
+ dev_dbg(&client->dev,
+ "delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
+@@ -321,11 +324,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
+ }
+
+ /* reset */
+- ret = regmap_write(dev->regmap, 0x07, 0x80);
+- if (ret)
+- goto err;
+-
+- ret = regmap_write(dev->regmap, 0x07, 0x00);
++ ret = regmap_multi_reg_write(dev->regmap, reset_buf, 2);
+ if (ret)
+ goto err;
+
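The m88ds3103 reset pulse becomes data: two writes to register 0x07 expressed as a reg_sequence table and issued with a single regmap_multi_reg_write(), halving the error handling and letting regmap batch the bus traffic. How the pattern looks in a driver, as a sketch assuming the regmap is already set up (struct reg_sequence and regmap_multi_reg_write() are the stock regmap API):

	#include <linux/kernel.h>
	#include <linux/regmap.h>

	/* Reset pulse as data: write 0x80 then 0x00 to register 0x07. */
	static const struct reg_sequence reset_seq[] = {
		{ 0x07, 0x80 },
		{ 0x07, 0x00 },
	};

	static int chip_reset(struct regmap *map)
	{
		/* One call replaces two regmap_write()s and their checks. */
		return regmap_multi_reg_write(map, reset_seq,
					      ARRAY_SIZE(reset_seq));
	}
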
+diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
+index feacd8da421d..d55d8f169dca 100644
+--- a/drivers/media/dvb-frontends/si2165.c
++++ b/drivers/media/dvb-frontends/si2165.c
+@@ -275,18 +275,20 @@ static u32 si2165_get_fe_clk(struct si2165_state *state)
+
+ static int si2165_wait_init_done(struct si2165_state *state)
+ {
+- int ret = -EINVAL;
++ int ret;
+ u8 val = 0;
+ int i;
+
+ for (i = 0; i < 3; ++i) {
+- si2165_readreg8(state, REG_INIT_DONE, &val);
++ ret = si2165_readreg8(state, REG_INIT_DONE, &val);
++ if (ret < 0)
++ return ret;
+ if (val == 0x01)
+ return 0;
+ usleep_range(1000, 50000);
+ }
+ dev_err(&state->client->dev, "init_done was not set\n");
+- return ret;
++ return -EINVAL;
+ }
+
+ static int si2165_upload_firmware_block(struct si2165_state *state,
+diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
+index 799acce803fe..a1e9a980a445 100644
+--- a/drivers/media/i2c/ov2659.c
++++ b/drivers/media/i2c/ov2659.c
+@@ -1117,8 +1117,10 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
+ if (ov2659_formats[index].code == mf->code)
+ break;
+
+- if (index < 0)
+- return -EINVAL;
++ if (index < 0) {
++ index = 0;
++ mf->code = ov2659_formats[index].code;
++ }
+
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->field = V4L2_FIELD_NONE;
+diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
+index f9359b11fa5c..de7d9790f054 100644
+--- a/drivers/media/i2c/ov6650.c
++++ b/drivers/media/i2c/ov6650.c
+@@ -810,9 +810,16 @@ static int ov6650_video_probe(struct i2c_client *client)
+ u8 pidh, pidl, midh, midl;
+ int ret;
+
++ priv->clk = v4l2_clk_get(&client->dev, NULL);
++ if (IS_ERR(priv->clk)) {
++ ret = PTR_ERR(priv->clk);
++ dev_err(&client->dev, "v4l2_clk request err: %d\n", ret);
++ return ret;
++ }
++
+ ret = ov6650_s_power(&priv->subdev, 1);
+ if (ret < 0)
+- return ret;
++ goto eclkput;
+
+ msleep(20);
+
+@@ -849,6 +856,11 @@ static int ov6650_video_probe(struct i2c_client *client)
+
+ done:
+ ov6650_s_power(&priv->subdev, 0);
++ if (!ret)
++ return 0;
++eclkput:
++ v4l2_clk_put(priv->clk);
++
+ return ret;
+ }
+
+@@ -991,18 +1003,9 @@ static int ov6650_probe(struct i2c_client *client,
+ priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
+ priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+- priv->clk = v4l2_clk_get(&client->dev, NULL);
+- if (IS_ERR(priv->clk)) {
+- ret = PTR_ERR(priv->clk);
+- goto eclkget;
+- }
+-
+ ret = ov6650_video_probe(client);
+- if (ret) {
+- v4l2_clk_put(priv->clk);
+-eclkget:
++ if (ret)
+ v4l2_ctrl_handler_free(&priv->hdl);
+- }
+
+ return ret;
+ }
+diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
+index a7d26b294eb5..e65693c2aad5 100644
+--- a/drivers/media/i2c/ov7670.c
++++ b/drivers/media/i2c/ov7670.c
+@@ -1664,6 +1664,7 @@ static int ov7670_s_power(struct v4l2_subdev *sd, int on)
+
+ if (on) {
+ ov7670_power_on (sd);
++ ov7670_init(sd, 0);
+ ov7670_apply_fmt(sd);
+ ov7675_apply_framerate(sd);
+ v4l2_ctrl_handler_setup(&info->hdl);
+diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
+index 5817d9cde4d0..6d8e4afe9673 100644
+--- a/drivers/media/pci/saa7146/hexium_gemini.c
++++ b/drivers/media/pci/saa7146/hexium_gemini.c
+@@ -270,9 +270,8 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
+ /* enable i2c-port pins */
+ saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
+
+- hexium->i2c_adapter = (struct i2c_adapter) {
+- .name = "hexium gemini",
+- };
++ strscpy(hexium->i2c_adapter.name, "hexium gemini",
++ sizeof(hexium->i2c_adapter.name));
+ saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
+ if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
+ DEB_S("cannot register i2c-device. skipping.\n");
+diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
+index 0a05176c18ab..a794f9e5f990 100644
+--- a/drivers/media/pci/saa7146/hexium_orion.c
++++ b/drivers/media/pci/saa7146/hexium_orion.c
+@@ -231,9 +231,8 @@ static int hexium_probe(struct saa7146_dev *dev)
+ saa7146_write(dev, DD1_STREAM_B, 0x00000000);
+ saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
+
+- hexium->i2c_adapter = (struct i2c_adapter) {
+- .name = "hexium orion",
+- };
++ strscpy(hexium->i2c_adapter.name, "hexium orion",
++ sizeof(hexium->i2c_adapter.name));
+ saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
+ if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
+ DEB_S("cannot register i2c-device. skipping.\n");
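
Both hexium drivers used a compound-literal assignment to set the adapter name, which zeroed every other i2c_adapter field the saa7146 core had already filled in; strscpy() copies just the name and guarantees NUL termination. The difference in a compilable miniature, with a local stand-in since strscpy() itself is kernel-only:

	#include <stdio.h>
	#include <string.h>

	struct adapter { void *algo_data; char name[16]; };

	/* Minimal stand-in for the kernel's strscpy(): bounded copy
	 * that always NUL-terminates (unlike strncpy). */
	static void xstrscpy(char *dst, const char *src, size_t size)
	{
		snprintf(dst, size, "%s", src);
	}

	int main(void)
	{
		int cookie = 42;
		struct adapter a = { .algo_data = &cookie };

		/* a = (struct adapter){ .name = "hexium gemini" };
		 * would have zeroed algo_data along with everything. */
		xstrscpy(a.name, "hexium gemini", sizeof(a.name));
		printf("%s %p\n", a.name, a.algo_data);	/* pointer survives */
		return 0;
	}
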
+diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
+index b4f396c2e72c..eaa86737fa04 100644
+--- a/drivers/media/platform/coda/coda-bit.c
++++ b/drivers/media/platform/coda/coda-bit.c
+@@ -2010,6 +2010,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
+ /* Clear decode success flag */
+ coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
+
++ /* Clear error return value */
++ coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
++
+ trace_coda_dec_pic_run(ctx, meta);
+
+ coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+index d022c65bb34c..e20b340855e7 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+@@ -388,7 +388,7 @@ static void mtk_vdec_worker(struct work_struct *work)
+ }
+ buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+- buf.size = (size_t)src_buf->planes[0].bytesused;
++ buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
+ if (!buf.va) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
+@@ -1155,10 +1155,10 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
+
+ src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+- src_mem.size = (size_t)src_buf->planes[0].bytesused;
++ src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
+ mtk_v4l2_debug(2,
+ "[%d] buf id=%d va=%p dma=%pad size=%zx",
+- ctx->id, src_buf->index,
++ ctx->id, src_buf->vb2_buf.index,
+ src_mem.va, &src_mem.dma_addr,
+ src_mem.size);
+
+@@ -1182,7 +1182,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
+ }
+ mtk_v4l2_debug(ret ? 0 : 1,
+ "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+- ctx->id, src_buf->index,
++ ctx->id, src_buf->vb2_buf.index,
+ src_mem.size, ret, res_chg);
+ return;
+ }
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+index c6b48b5925fb..50351adafc47 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+@@ -894,7 +894,7 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
+- dst_buf->planes[0].bytesused = 0;
++ dst_buf->vb2_buf.planes[0].bytesused = 0;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ }
+ } else {
+@@ -947,7 +947,7 @@ static int mtk_venc_encode_header(void *priv)
+
+ bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+- bs_buf.size = (size_t)dst_buf->planes[0].length;
++ bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;
+
+ mtk_v4l2_debug(1,
+ "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
+@@ -976,7 +976,7 @@ static int mtk_venc_encode_header(void *priv)
+ }
+
+ ctx->state = MTK_STATE_HEADER;
+- dst_buf->planes[0].bytesused = enc_result.bs_size;
++ dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ return 0;
+@@ -1107,12 +1107,12 @@ static void mtk_venc_worker(struct work_struct *work)
+
+ if (ret) {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+- dst_buf->planes[0].bytesused = 0;
++ dst_buf->vb2_buf.planes[0].bytesused = 0;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ mtk_v4l2_err("venc_if_encode failed=%d", ret);
+ } else {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+- dst_buf->planes[0].bytesused = enc_result.bs_size;
++ dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
+ enc_result.bs_size);
+diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
+index 5fe5b38fa901..922855b6025c 100644
+--- a/drivers/media/platform/stm32/stm32-dcmi.c
++++ b/drivers/media/platform/stm32/stm32-dcmi.c
+@@ -811,6 +811,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
+
+ sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
+ if (!sd_fmt) {
++ if (!dcmi->num_of_sd_formats)
++ return -ENODATA;
++
+ sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
+ pix->pixelformat = sd_fmt->fourcc;
+ }
+@@ -989,6 +992,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
+
+ sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
+ if (!sd_fmt) {
++ if (!dcmi->num_of_sd_formats)
++ return -ENODATA;
++
+ sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
+ pix->pixelformat = sd_fmt->fourcc;
+ }
+@@ -1645,7 +1651,7 @@ static int dcmi_probe(struct platform_device *pdev)
+ dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(dcmi->rstc)) {
+ dev_err(&pdev->dev, "Could not get reset control\n");
+- return -ENODEV;
++ return PTR_ERR(dcmi->rstc);
+ }
+
+ /* Get bus characteristics from devicetree */
+@@ -1660,7 +1666,7 @@ static int dcmi_probe(struct platform_device *pdev)
+ of_node_put(np);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+- return -ENODEV;
++ return ret;
+ }
+
+ if (ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+@@ -1673,8 +1679,9 @@ static int dcmi_probe(struct platform_device *pdev)
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+- dev_err(&pdev->dev, "Could not get irq\n");
+- return -ENODEV;
++ if (irq != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "Could not get irq\n");
++ return irq;
+ }
+
+ dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -1694,12 +1701,13 @@ static int dcmi_probe(struct platform_device *pdev)
+ dev_name(&pdev->dev), dcmi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+- return -ENODEV;
++ return ret;
+ }
+
+ mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(mclk)) {
+- dev_err(&pdev->dev, "Unable to get mclk\n");
++ if (PTR_ERR(mclk) != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "Unable to get mclk\n");
+ return PTR_ERR(mclk);
+ }
+
+diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/platform/vicodec/codec-fwht.c
+index d1d6085da9f1..cf469a1191aa 100644
+--- a/drivers/media/platform/vicodec/codec-fwht.c
++++ b/drivers/media/platform/vicodec/codec-fwht.c
+@@ -46,8 +46,12 @@ static const uint8_t zigzag[64] = {
+ 63,
+ };
+
+-
+-static int rlc(const s16 *in, __be16 *output, int blocktype)
++/*
++ * noinline_for_stack to work around
++ * https://bugs.llvm.org/show_bug.cgi?id=38809
++ */
++static int noinline_for_stack
++rlc(const s16 *in, __be16 *output, int blocktype)
+ {
+ s16 block[8 * 8];
+ s16 *wp = block;
+@@ -106,8 +110,8 @@ static int rlc(const s16 *in, __be16 *output, int blocktype)
+ * This function will worst-case increase rlc_in by 65*2 bytes:
+ * one s16 value for the header and 8 * 8 coefficients of type s16.
+ */
+-static u16 derlc(const __be16 **rlc_in, s16 *dwht_out,
+- const __be16 *end_of_input)
++static noinline_for_stack u16
++derlc(const __be16 **rlc_in, s16 *dwht_out, const __be16 *end_of_input)
+ {
+ /* header */
+ const __be16 *input = *rlc_in;
+@@ -240,8 +244,9 @@ static void dequantize_inter(s16 *coeff)
+ *coeff <<= *quant;
+ }
+
+-static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
+- unsigned int input_step, bool intra)
++static void noinline_for_stack fwht(const u8 *block, s16 *output_block,
++ unsigned int stride,
++ unsigned int input_step, bool intra)
+ {
+ /* we'll need more than 8 bits for the transformed coefficients */
+ s32 workspace1[8], workspace2[8];
+@@ -373,7 +378,8 @@ static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
+ * Furthermore values can be negative... This is just a version that
+ * works with 16 signed data
+ */
+-static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
++static void noinline_for_stack
++fwht16(const s16 *block, s16 *output_block, int stride, int intra)
+ {
+ /* we'll need more than 8 bits for the transformed coefficients */
+ s32 workspace1[8], workspace2[8];
+@@ -456,7 +462,8 @@ static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
+ }
+ }
+
+-static void ifwht(const s16 *block, s16 *output_block, int intra)
++static noinline_for_stack void
++ifwht(const s16 *block, s16 *output_block, int intra)
+ {
+ /*
+ * we'll need more than 8 bits for the transformed coefficients
+@@ -604,9 +611,9 @@ static int var_inter(const s16 *old, const s16 *new)
+ return ret;
+ }
+
+-static int decide_blocktype(const u8 *cur, const u8 *reference,
+- s16 *deltablock, unsigned int stride,
+- unsigned int input_step)
++static noinline_for_stack int
++decide_blocktype(const u8 *cur, const u8 *reference, s16 *deltablock,
++ unsigned int stride, unsigned int input_step)
+ {
+ s16 tmp[64];
+ s16 old[64];
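
Tagging the FWHT helpers noinline_for_stack keeps each one's s16[64] scratch arrays in its own short-lived frame; per the comment above, the referenced LLVM bug could otherwise inline them all into one caller and merge the scratch blocks into a single oversized frame. A small demonstration of the annotation, assuming the kernel's definition (it expands to the noinline attribute):

	#include <stdio.h>

	#define noinline_for_stack __attribute__((noinline))

	/* Each helper owns a 128-byte scratch block; noinline keeps
	 * those frames live one at a time instead of merged into the
	 * caller's frame by an over-eager inliner. */
	static noinline_for_stack int stage(const short *in, short *out)
	{
		short scratch[64];

		for (int i = 0; i < 64; i++)
			scratch[i] = (short)(in[i] + 1);
		for (int i = 0; i < 64; i++)
			out[i] = scratch[i];
		return 0;
	}

	int main(void)
	{
		short a[64] = { 1 }, b[64], c[64];

		stage(a, b);	/* scratch frame reclaimed here... */
		stage(b, c);	/* ...before this one is created */
		printf("%d\n", c[0]);	/* 3 */
		return 0;
	}
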
+diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
+index d7636fe9e174..8788369e59a0 100644
+--- a/drivers/media/platform/vicodec/vicodec-core.c
++++ b/drivers/media/platform/vicodec/vicodec-core.c
+@@ -159,12 +159,10 @@ static int device_process(struct vicodec_ctx *ctx,
+ struct vb2_v4l2_buffer *dst_vb)
+ {
+ struct vicodec_dev *dev = ctx->dev;
+- struct vicodec_q_data *q_dst;
+ struct v4l2_fwht_state *state = &ctx->state;
+ u8 *p_src, *p_dst;
+ int ret;
+
+- q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (ctx->is_enc)
+ p_src = vb2_plane_vaddr(&src_vb->vb2_buf, 0);
+ else
+@@ -186,8 +184,10 @@ static int device_process(struct vicodec_ctx *ctx,
+ return ret;
+ vb2_set_plane_payload(&dst_vb->vb2_buf, 0, ret);
+ } else {
++ struct vicodec_q_data *q_dst;
+ unsigned int comp_frame_size = ntohl(ctx->state.header.size);
+
++ q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (comp_frame_size > ctx->comp_max_size)
+ return -EINVAL;
+ state->info = q_dst->info;
+@@ -196,11 +196,6 @@ static int device_process(struct vicodec_ctx *ctx,
+ return ret;
+ vb2_set_plane_payload(&dst_vb->vb2_buf, 0, q_dst->sizeimage);
+ }
+-
+- dst_vb->sequence = q_dst->sequence++;
+- dst_vb->flags &= ~V4L2_BUF_FLAG_LAST;
+- v4l2_m2m_buf_copy_metadata(src_vb, dst_vb, !ctx->is_enc);
+-
+ return 0;
+ }
+
+@@ -274,16 +269,22 @@ static void device_run(void *priv)
+ struct vicodec_ctx *ctx = priv;
+ struct vicodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+- struct vicodec_q_data *q_src;
++ struct vicodec_q_data *q_src, *q_dst;
+ u32 state;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
++ q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ state = VB2_BUF_STATE_DONE;
+ if (device_process(ctx, src_buf, dst_buf))
+ state = VB2_BUF_STATE_ERROR;
++ else
++ dst_buf->sequence = q_dst->sequence++;
++ dst_buf->flags &= ~V4L2_BUF_FLAG_LAST;
++ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, !ctx->is_enc);
++
+ ctx->last_dst_buf = dst_buf;
+
+ spin_lock(ctx->lock);
+@@ -1338,8 +1339,11 @@ static int vicodec_start_streaming(struct vb2_queue *q,
+ chroma_div = info->width_div * info->height_div;
+ q_data->sequence = 0;
+
+- ctx->last_src_buf = NULL;
+- ctx->last_dst_buf = NULL;
++ if (V4L2_TYPE_IS_OUTPUT(q->type))
++ ctx->last_src_buf = NULL;
++ else
++ ctx->last_dst_buf = NULL;
++
+ state->gop_cnt = 0;
+
+ if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
+diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
+index 0ba30756e1e4..d8cd5f5cb10d 100644
+--- a/drivers/media/platform/video-mux.c
++++ b/drivers/media/platform/video-mux.c
+@@ -419,9 +419,14 @@ static int video_mux_probe(struct platform_device *pdev)
+ vmux->active = -1;
+ vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads),
+ GFP_KERNEL);
++ if (!vmux->pads)
++ return -ENOMEM;
++
+ vmux->format_mbus = devm_kcalloc(dev, num_pads,
+ sizeof(*vmux->format_mbus),
+ GFP_KERNEL);
++ if (!vmux->format_mbus)
++ return -ENOMEM;
+
+ for (i = 0; i < num_pads; i++) {
+ vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
+diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
+index 34dcaca45d8b..dd47821fc661 100644
+--- a/drivers/media/platform/vim2m.c
++++ b/drivers/media/platform/vim2m.c
+@@ -1262,6 +1262,15 @@ static int vim2m_release(struct file *file)
+ return 0;
+ }
+
++static void vim2m_device_release(struct video_device *vdev)
++{
++ struct vim2m_dev *dev = container_of(vdev, struct vim2m_dev, vfd);
++
++ v4l2_device_unregister(&dev->v4l2_dev);
++ v4l2_m2m_release(dev->m2m_dev);
++ kfree(dev);
++}
++
+ static const struct v4l2_file_operations vim2m_fops = {
+ .owner = THIS_MODULE,
+ .open = vim2m_open,
+@@ -1277,7 +1286,7 @@ static const struct video_device vim2m_videodev = {
+ .fops = &vim2m_fops,
+ .ioctl_ops = &vim2m_ioctl_ops,
+ .minor = -1,
+- .release = video_device_release_empty,
++ .release = vim2m_device_release,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+ };
+
+@@ -1298,13 +1307,13 @@ static int vim2m_probe(struct platform_device *pdev)
+ struct video_device *vfd;
+ int ret;
+
+- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+- return ret;
++ goto error_free;
+
+ atomic_set(&dev->num_inst, 0);
+ mutex_init(&dev->dev_mutex);
+@@ -1317,7 +1326,7 @@ static int vim2m_probe(struct platform_device *pdev)
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+- goto unreg_v4l2;
++ goto error_v4l2;
+ }
+
+ video_set_drvdata(vfd, dev);
+@@ -1330,7 +1339,7 @@ static int vim2m_probe(struct platform_device *pdev)
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+- goto unreg_dev;
++ goto error_dev;
+ }
+
+ #ifdef CONFIG_MEDIA_CONTROLLER
+@@ -1346,27 +1355,29 @@ static int vim2m_probe(struct platform_device *pdev)
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+- goto unreg_m2m;
++ goto error_m2m;
+ }
+
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
+- goto unreg_m2m_mc;
++ goto error_m2m_mc;
+ }
+ #endif
+ return 0;
+
+ #ifdef CONFIG_MEDIA_CONTROLLER
+-unreg_m2m_mc:
++error_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+-unreg_m2m:
++error_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+ #endif
+-unreg_dev:
++error_dev:
+ video_unregister_device(&dev->vfd);
+-unreg_v4l2:
++error_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
++error_free:
++ kfree(dev);
+
+ return ret;
+ }
+@@ -1382,9 +1393,7 @@ static int vim2m_remove(struct platform_device *pdev)
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+ media_device_cleanup(&dev->mdev);
+ #endif
+- v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+- v4l2_device_unregister(&dev->v4l2_dev);
+
+ return 0;
+ }
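
The vim2m rework is a lifetime fix: the char device can stay open in userspace long after platform remove(), so per-device state moves from devm_kzalloc() (freed at remove) to plain kzalloc() freed in the video_device release callback, which fires only when the last reference drops. The refcount-driven teardown shape in a compilable sketch with stand-in types:

	#include <stdio.h>
	#include <stdlib.h>

	struct vdev {
		int refs;
		void (*release)(struct vdev *v);
	};

	struct mydev { struct vdev vfd; /* plus v4l2/m2m state */ };

	/* Final teardown runs at the last reference drop, possibly
	 * long after remove() if userspace kept the node open. */
	static void mydev_release(struct vdev *v)
	{
		struct mydev *dev = (struct mydev *)v;	/* vfd is first */

		printf("freeing device state\n");
		free(dev);
	}

	static void vdev_put(struct vdev *v)
	{
		if (--v->refs == 0)
			v->release(v);
	}

	int main(void)
	{
		struct mydev *dev = calloc(1, sizeof(*dev));

		dev->vfd.release = mydev_release;
		dev->vfd.refs = 2;	/* driver + an open file handle */

		vdev_put(&dev->vfd);	/* remove(): state stays alive */
		printf("after remove, before last close\n");
		vdev_put(&dev->vfd);	/* last close: release fires */
		return 0;
	}
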
+diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
+index 0fbb7914098f..3aa62d7e3d0e 100644
+--- a/drivers/media/platform/vimc/vimc-core.c
++++ b/drivers/media/platform/vimc/vimc-core.c
+@@ -304,6 +304,8 @@ static int vimc_probe(struct platform_device *pdev)
+
+ dev_dbg(&pdev->dev, "probe");
+
++ memset(&vimc->mdev, 0, sizeof(vimc->mdev));
++
+ /* Create platform_device for each entity in the topology*/
+ vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
+ sizeof(*vimc->subdevs), GFP_KERNEL);
+diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
+index fcc897fb247b..392754c18046 100644
+--- a/drivers/media/platform/vimc/vimc-streamer.c
++++ b/drivers/media/platform/vimc/vimc-streamer.c
+@@ -120,7 +120,6 @@ static int vimc_streamer_thread(void *data)
+ int i;
+
+ set_freezable();
+- set_current_state(TASK_UNINTERRUPTIBLE);
+
+ for (;;) {
+ try_to_freeze();
+@@ -137,6 +136,7 @@ static int vimc_streamer_thread(void *data)
+ break;
+ }
+ //wait for 60hz
++ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 60);
+ }
+
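schedule_timeout() leaves the task in TASK_RUNNING when it returns, so setting TASK_UNINTERRUPTIBLE once before the loop only armed the first sleep; every later schedule_timeout(HZ / 60) returned immediately and the thread spun. Moving set_current_state() next to the sleep re-arms it on each pass. The corrected loop shape as a kernel-style fragment (produce_one_frame() is a hypothetical stand-in for the streamer's work):

	/* Each pass re-arms the sleep state immediately before
	 * schedule_timeout(); after it returns, the task is
	 * TASK_RUNNING again, so arming once outside the loop only
	 * ever worked for the first iteration. */
	for (;;) {
		try_to_freeze();

		if (kthread_should_stop())
			break;

		produce_one_frame();		/* hypothetical work item */

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 60);	/* ~60 Hz pacing */
	}
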
+diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
+index 52eeda624d7e..530ac8decb25 100644
+--- a/drivers/media/platform/vivid/vivid-vid-cap.c
++++ b/drivers/media/platform/vivid/vivid-vid-cap.c
+@@ -1007,7 +1007,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
+ v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
+ if (dev->bitmap_cap && (compose->width != s->r.width ||
+ compose->height != s->r.height)) {
+- kfree(dev->bitmap_cap);
++ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
+ }
+ *compose = s->r;
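
dev->bitmap_cap comes from vzalloc() elsewhere in vivid, so the old kfree() handed a vmalloc address to the slab allocator; frees must match their allocator. As a kernel-style reminder:

	/* Allocator and free must pair up:
	 *
	 *   kmalloc()/kzalloc()    ->  kfree()
	 *   vmalloc()/vzalloc()    ->  vfree()
	 *   kvmalloc()/kvzalloc()  ->  kvfree()
	 *
	 * vivid's capture bitmap comes from vzalloc(), hence: */
	vfree(dev->bitmap_cap);
	dev->bitmap_cap = NULL;	/* no dangling pointer on reuse */
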
+diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
+index 3c8987af3772..ac5706b4cab8 100644
+--- a/drivers/media/radio/wl128x/fmdrv_common.c
++++ b/drivers/media/radio/wl128x/fmdrv_common.c
+@@ -489,7 +489,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
+ return -EIO;
+ }
+ /* Send response data to caller */
+- if (response != NULL && response_len != NULL && evt_hdr->dlen) {
++ if (response != NULL && response_len != NULL && evt_hdr->dlen &&
++ evt_hdr->dlen <= payload_len) {
+ /* Skip header info and copy only response data */
+ skb_pull(skb, sizeof(struct fm_event_msg_hdr));
+ memcpy(response, skb->data, evt_hdr->dlen);
+@@ -583,6 +584,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
+ return;
+
+ fm_evt_hdr = (void *)skb->data;
++ if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
++ return;
+
+ /* Skip header info and copy only response data */
+ skb_pull(skb, sizeof(struct fm_event_msg_hdr));
+@@ -1308,7 +1311,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev)
+ static int fm_power_up(struct fmdev *fmdev, u8 mode)
+ {
+ u16 payload;
+- __be16 asic_id, asic_ver;
++ __be16 asic_id = 0, asic_ver = 0;
+ int resp_len, ret;
+ u8 fw_name[50];
+
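Both wl128x changes bound a device-reported length before trusting it: fmc_send_cmd() now refuses to copy more than the caller allowed for, and the flag handler drops events whose dlen exceeds the flag field, so firmware misbehaviour cannot overrun a small buffer through memcpy(). The shape of the check in a compilable miniature:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct evt_hdr { uint8_t dlen; };

	/* Copy the response only if the device-reported length fits
	 * the caller's buffer; a firmware bug must not become a
	 * stack overrun. */
	static int copy_response(const struct evt_hdr *hdr,
				 const uint8_t *data,
				 void *response, size_t response_len)
	{
		if (!response || hdr->dlen > response_len)
			return -1;
		memcpy(response, data, hdr->dlen);
		return hdr->dlen;
	}

	int main(void)
	{
		struct evt_hdr hdr = { .dlen = 200 };	/* lies about size */
		uint8_t data[255] = { 0 };
		uint8_t flag[2];

		printf("%d\n", copy_response(&hdr, data, flag, sizeof(flag)));
		return 0;	/* prints -1: copy refused */
	}
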
+diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
+index ffe2c672d105..3998ba29beb6 100644
+--- a/drivers/media/rc/serial_ir.c
++++ b/drivers/media/rc/serial_ir.c
+@@ -773,8 +773,6 @@ static void serial_ir_exit(void)
+
+ static int __init serial_ir_init_module(void)
+ {
+- int result;
+-
+ switch (type) {
+ case IR_HOMEBREW:
+ case IR_IRDEO:
+@@ -802,12 +800,7 @@ static int __init serial_ir_init_module(void)
+ if (sense != -1)
+ sense = !!sense;
+
+- result = serial_ir_init();
+- if (!result)
+- return 0;
+-
+- serial_ir_exit();
+- return result;
++ return serial_ir_init();
+ }
+
+ static void __exit serial_ir_exit_module(void)
+diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
+index 7876c897cc1d..222723d946e4 100644
+--- a/drivers/media/usb/au0828/au0828-video.c
++++ b/drivers/media/usb/au0828/au0828-video.c
+@@ -758,6 +758,9 @@ static int au0828_analog_stream_enable(struct au0828_dev *d)
+
+ dprintk(1, "au0828_analog_stream_enable called\n");
+
++ if (test_bit(DEV_DISCONNECTED, &d->dev_state))
++ return -ENODEV;
++
+ iface = usb_ifnum_to_if(d->usbdev, 0);
+ if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
+ dprintk(1, "Changing intf#0 to alt 5\n");
+@@ -839,9 +842,9 @@ int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
+ return rc;
+ }
+
++ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
++
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+- v4l2_device_call_all(&dev->v4l2_dev, 0, video,
+- s_stream, 1);
+ dev->vid_timeout_running = 1;
+ mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
+ } else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+@@ -861,10 +864,11 @@ static void au0828_stop_streaming(struct vb2_queue *vq)
+
+ dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users);
+
+- if (dev->streaming_users-- == 1)
++ if (dev->streaming_users-- == 1) {
+ au0828_uninit_isoc(dev);
++ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
++ }
+
+- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+ dev->vid_timeout_running = 0;
+ del_timer_sync(&dev->vid_timeout);
+
+@@ -893,8 +897,10 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
+ dprintk(1, "au0828_stop_vbi_streaming called %d\n",
+ dev->streaming_users);
+
+- if (dev->streaming_users-- == 1)
++ if (dev->streaming_users-- == 1) {
+ au0828_uninit_isoc(dev);
++ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
++ }
+
+ spin_lock_irqsave(&dev->slock, flags);
+ if (dev->isoc_ctl.vbi_buf != NULL) {
+diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
+index 95c0bd4a19dc..45caf78119c4 100644
+--- a/drivers/media/usb/cpia2/cpia2_v4l.c
++++ b/drivers/media/usb/cpia2/cpia2_v4l.c
+@@ -1240,8 +1240,7 @@ static int __init cpia2_init(void)
+ LOG("%s v%s\n",
+ ABOUT, CPIA_VERSION);
+ check_parameters();
+- cpia2_usb_init();
+- return 0;
++ return cpia2_usb_init();
+ }
+
+
+diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
+index e28bd8836751..ae0814dd202a 100644
+--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
++++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
+@@ -615,16 +615,18 @@ static int dvbsky_init(struct dvb_usb_device *d)
+ return 0;
+ }
+
+-static void dvbsky_exit(struct dvb_usb_device *d)
++static int dvbsky_frontend_detach(struct dvb_usb_adapter *adap)
+ {
++ struct dvb_usb_device *d = adap_to_d(adap);
+ struct dvbsky_state *state = d_to_priv(d);
+- struct dvb_usb_adapter *adap = &d->adapter[0];
++
++ dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
+
+ dvb_module_release(state->i2c_client_tuner);
+ dvb_module_release(state->i2c_client_demod);
+ dvb_module_release(state->i2c_client_ci);
+
+- adap->fe[0] = NULL;
++ return 0;
+ }
+
+ /* DVB USB Driver stuff */
+@@ -640,11 +642,11 @@ static struct dvb_usb_device_properties dvbsky_s960_props = {
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_s960_attach,
++ .frontend_detach = dvbsky_frontend_detach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+- .exit = dvbsky_exit,
+ .read_mac_address = dvbsky_read_mac_addr,
+
+ .num_adapters = 1,
+@@ -667,11 +669,11 @@ static struct dvb_usb_device_properties dvbsky_s960c_props = {
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_s960c_attach,
++ .frontend_detach = dvbsky_frontend_detach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+- .exit = dvbsky_exit,
+ .read_mac_address = dvbsky_read_mac_addr,
+
+ .num_adapters = 1,
+@@ -694,11 +696,11 @@ static struct dvb_usb_device_properties dvbsky_t680c_props = {
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_t680c_attach,
++ .frontend_detach = dvbsky_frontend_detach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+- .exit = dvbsky_exit,
+ .read_mac_address = dvbsky_read_mac_addr,
+
+ .num_adapters = 1,
+@@ -721,11 +723,11 @@ static struct dvb_usb_device_properties dvbsky_t330_props = {
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_t330_attach,
++ .frontend_detach = dvbsky_frontend_detach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+- .exit = dvbsky_exit,
+ .read_mac_address = dvbsky_read_mac_addr,
+
+ .num_adapters = 1,
+@@ -748,11 +750,11 @@ static struct dvb_usb_device_properties mygica_t230c_props = {
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_mygica_t230c_attach,
++ .frontend_detach = dvbsky_frontend_detach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+- .exit = dvbsky_exit,
+
+ .num_adapters = 1,
+ .adapter = {
+diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c
+index 24f5b615dc7a..dfa9f899d0c2 100644
+--- a/drivers/media/usb/go7007/go7007-fw.c
++++ b/drivers/media/usb/go7007/go7007-fw.c
+@@ -1499,8 +1499,8 @@ static int modet_to_package(struct go7007 *go, __le16 *code, int space)
+ return cnt;
+ }
+
+-static int do_special(struct go7007 *go, u16 type, __le16 *code, int space,
+- int *framelen)
++static noinline_for_stack int do_special(struct go7007 *go, u16 type,
++ __le16 *code, int space, int *framelen)
+ {
+ switch (type) {
+ case SPECIAL_FRM_HEAD:
+diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
+index ac70b36d67b7..4d7517411cc2 100644
+--- a/drivers/media/usb/gspca/gspca.c
++++ b/drivers/media/usb/gspca/gspca.c
+@@ -294,7 +294,7 @@ static void fill_frame(struct gspca_dev *gspca_dev,
+ /* check the packet status and length */
+ st = urb->iso_frame_desc[i].status;
+ if (st) {
+- pr_err("ISOC data error: [%d] len=%d, status=%d\n",
++ gspca_dbg(gspca_dev, D_PACK, "ISOC data error: [%d] len=%d, status=%d\n",
+ i, len, st);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ continue;
+@@ -314,6 +314,8 @@ static void fill_frame(struct gspca_dev *gspca_dev,
+ }
+
+ resubmit:
++ if (!gspca_dev->streaming)
++ return;
+ /* resubmit the URB */
+ st = usb_submit_urb(urb, GFP_ATOMIC);
+ if (st < 0)
+@@ -330,7 +332,7 @@ static void isoc_irq(struct urb *urb)
+ struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
+
+ gspca_dbg(gspca_dev, D_PACK, "isoc irq\n");
+- if (!vb2_start_streaming_called(&gspca_dev->queue))
++ if (!gspca_dev->streaming)
+ return;
+ fill_frame(gspca_dev, urb);
+ }
+@@ -344,7 +346,7 @@ static void bulk_irq(struct urb *urb)
+ int st;
+
+ gspca_dbg(gspca_dev, D_PACK, "bulk irq\n");
+- if (!vb2_start_streaming_called(&gspca_dev->queue))
++ if (!gspca_dev->streaming)
+ return;
+ switch (urb->status) {
+ case 0:
+@@ -367,6 +369,8 @@ static void bulk_irq(struct urb *urb)
+ urb->actual_length);
+
+ resubmit:
++ if (!gspca_dev->streaming)
++ return;
+ /* resubmit the URB */
+ if (gspca_dev->cam.bulk_nurbs != 0) {
+ st = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -1638,6 +1642,8 @@ void gspca_disconnect(struct usb_interface *intf)
+
+ mutex_lock(&gspca_dev->usb_lock);
+ gspca_dev->present = false;
++ destroy_urbs(gspca_dev);
++ gspca_input_destroy_urb(gspca_dev);
+
+ vb2_queue_error(&gspca_dev->queue);
+
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+index 446a999dd2ce..2bab4713bc5b 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+@@ -666,6 +666,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp)
+
+ static int ctrl_check_input(struct pvr2_ctrl *cptr,int v)
+ {
++ if (v < 0 || v > PVR2_CVAL_INPUT_MAX)
++ return 0;
+ return ((1 << v) & cptr->hdw->input_allowed_mask) != 0;
+ }
+
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
+index 25648add77e5..bd2b7a67b732 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
+@@ -50,6 +50,7 @@
+ #define PVR2_CVAL_INPUT_COMPOSITE 2
+ #define PVR2_CVAL_INPUT_SVIDEO 3
+ #define PVR2_CVAL_INPUT_RADIO 4
++#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO
+
+ enum pvr2_config {
+ pvr2_config_empty, /* No configuration */
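
ctrl_check_input() used a caller-supplied v directly as a shift count, and 1 << v is undefined behaviour for negative v or v at or beyond the type's width; the new range check rejects anything outside 0..PVR2_CVAL_INPUT_MAX before the shift. The guard in isolation:

	#include <stdio.h>

	#define INPUT_MAX 4	/* highest valid input id */

	/* Reject out-of-range ids before using them as a shift count:
	 * 1 << v is UB for v < 0 or v >= the width of the type. */
	static int input_allowed(int v, unsigned int allowed_mask)
	{
		if (v < 0 || v > INPUT_MAX)
			return 0;
		return ((1u << v) & allowed_mask) != 0;
	}

	int main(void)
	{
		printf("%d\n", input_allowed(3, 0x0e));		/* 1 */
		printf("%d\n", input_allowed(77, 0x0e));	/* 0, no UB */
		return 0;
	}
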
+diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
+index 20571846e636..7495f8323147 100644
+--- a/drivers/media/v4l2-core/v4l2-fwnode.c
++++ b/drivers/media/v4l2-core/v4l2-fwnode.c
+@@ -225,6 +225,10 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
+ if (bus_type == V4L2_MBUS_CSI2_DPHY ||
+ bus_type == V4L2_MBUS_CSI2_CPHY || lanes_used ||
+ have_clk_lane || (flags & ~V4L2_MBUS_CSI2_CONTINUOUS_CLOCK)) {
++ /* Only D-PHY has a clock lane. */
++ unsigned int dfl_data_lane_index =
++ bus_type == V4L2_MBUS_CSI2_DPHY;
++
+ bus->flags = flags;
+ if (bus_type == V4L2_MBUS_UNKNOWN)
+ vep->bus_type = V4L2_MBUS_CSI2_DPHY;
+@@ -233,7 +237,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
+ if (use_default_lane_mapping) {
+ bus->clock_lane = 0;
+ for (i = 0; i < num_data_lanes; i++)
+- bus->data_lanes[i] = 1 + i;
++ bus->data_lanes[i] = dfl_data_lane_index + i;
+ } else {
+ bus->clock_lane = clock_lane;
+ for (i = 0; i < num_data_lanes; i++)
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 36d0d5c9cfba..35be1cc11dd8 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -667,8 +667,16 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+ pages[i].size = roundup(len, PAGE_SIZE);
+
+ if (ctx->maps[i]) {
++ struct vm_area_struct *vma = NULL;
++
+ rpra[i].pv = (u64) ctx->args[i].ptr;
+ pages[i].addr = ctx->maps[i]->phys;
++
++ vma = find_vma(current->mm, ctx->args[i].ptr);
++ if (vma)
++ pages[i].addr += ctx->args[i].ptr -
++ vma->vm_start;
++
+ } else {
+ rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
+ args = ALIGN(args, FASTRPC_ALIGN);
+@@ -782,6 +790,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
+ if (err)
+ goto bail;
+ }
++
++ /* make sure that all CPU memory writes are seen by DSP */
++ dma_wmb();
+ /* Send invoke buffer to remote dsp */
+ err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
+ if (err)
+@@ -798,6 +809,8 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
+ goto bail;
+
+ if (ctx->nscalars) {
++ /* make sure that all memory writes by DSP are seen by CPU */
++ dma_rmb();
+ /* populate all the output buffers with results */
+ err = fastrpc_put_args(ctx, kernel);
+ if (err)
+@@ -843,12 +856,12 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
+
+ if (copy_from_user(&init, argp, sizeof(init))) {
+ err = -EFAULT;
+- goto bail;
++ goto err;
+ }
+
+ if (init.filelen > INIT_FILELEN_MAX) {
+ err = -EINVAL;
+- goto bail;
++ goto err;
+ }
+
+ inbuf.pgid = fl->tgid;
+@@ -862,17 +875,15 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
+ if (init.filelen && init.filefd) {
+ err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
+ if (err)
+- goto bail;
++ goto err;
+ }
+
+ memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
+ 1024 * 1024);
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
+ &imem);
+- if (err) {
+- fastrpc_map_put(map);
+- goto bail;
+- }
++ if (err)
++ goto err_alloc;
+
+ fl->init_mem = imem;
+ args[0].ptr = (u64)(uintptr_t)&inbuf;
+@@ -908,13 +919,24 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
+
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+ sc, args);
++ if (err)
++ goto err_invoke;
+
+- if (err) {
++ kfree(args);
++
++ return 0;
++
++err_invoke:
++ fl->init_mem = NULL;
++ fastrpc_buf_free(imem);
++err_alloc:
++ if (map) {
++ spin_lock(&fl->lock);
++ list_del(&map->node);
++ spin_unlock(&fl->lock);
+ fastrpc_map_put(map);
+- fastrpc_buf_free(imem);
+ }
+-
+-bail:
++err:
+ kfree(args);
+
+ return err;
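
The two fastrpc barriers bracket the round trip to the DSP: dma_wmb() orders the CPU's writes to the shared argument buffers ahead of the message that starts the remote call, and dma_rmb() orders the DSP's result writes ahead of the CPU reading them back. A placement sketch, where fill_args(), ring_doorbell() and read_results() are hypothetical stand-ins for the fastrpc internals:

	/* Barrier placement around a remote-processor invocation. */

	fill_args(ctx);		/* CPU writes argument buffers */

	dma_wmb();		/* args visible to device before the kick */
	ring_doorbell(ctx);	/* device starts reading the buffers */

	wait_for_completion(&ctx->work);

	dma_rmb();		/* device's result writes visible to CPU */
	read_results(ctx);	/* now safe to consume outputs */
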
+diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
+index 77d51be66c7e..652c8edb2164 100644
+--- a/drivers/misc/habanalabs/device.c
++++ b/drivers/misc/habanalabs/device.c
+@@ -498,11 +498,8 @@ disable_device:
+ return rc;
+ }
+
+-static void hl_device_hard_reset_pending(struct work_struct *work)
++static void device_kill_open_processes(struct hl_device *hdev)
+ {
+- struct hl_device_reset_work *device_reset_work =
+- container_of(work, struct hl_device_reset_work, reset_work);
+- struct hl_device *hdev = device_reset_work->hdev;
+ u16 pending_total, pending_cnt;
+ struct task_struct *task = NULL;
+
+@@ -537,6 +534,12 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
+ }
+ }
+
++ /* We killed the open users, but because the driver cleans up after the
++ * user contexts are closed (e.g. mmu mappings), we need to wait again
++ * to make sure the cleaning phase is finished before continuing with
++ * the reset
++ */
++
+ pending_cnt = pending_total;
+
+ while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+@@ -552,6 +555,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
+
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+
++}
++
++static void device_hard_reset_pending(struct work_struct *work)
++{
++ struct hl_device_reset_work *device_reset_work =
++ container_of(work, struct hl_device_reset_work, reset_work);
++ struct hl_device *hdev = device_reset_work->hdev;
++
++ device_kill_open_processes(hdev);
++
+ hl_device_reset(hdev, true, true);
+
+ kfree(device_reset_work);
+@@ -635,7 +648,7 @@ again:
+ * from a dedicated work
+ */
+ INIT_WORK(&device_reset_work->reset_work,
+- hl_device_hard_reset_pending);
++ device_hard_reset_pending);
+ device_reset_work->hdev = hdev;
+ schedule_work(&device_reset_work->reset_work);
+
+@@ -1035,6 +1048,15 @@ void hl_device_fini(struct hl_device *hdev)
+ /* Mark device as disabled */
+ hdev->disabled = true;
+
++ /*
++ * Flush anyone that is inside the critical section of enqueue
++ * jobs to the H/W
++ */
++ hdev->asic_funcs->hw_queues_lock(hdev);
++ hdev->asic_funcs->hw_queues_unlock(hdev);
++
++ device_kill_open_processes(hdev);
++
+ hl_hwmon_fini(hdev);
+
+ device_late_fini(hdev);
+diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
+index 3c509e19d69d..1533cb320540 100644
+--- a/drivers/misc/habanalabs/goya/goya.c
++++ b/drivers/misc/habanalabs/goya/goya.c
+@@ -4407,6 +4407,9 @@ static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
+ {
+ struct goya_device *goya = hdev->asic_specific;
+
++ if (hdev->hard_reset_pending)
++ return U64_MAX;
++
+ return readq(hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - goya->ddr_bar_cur_addr));
+ }
+@@ -4415,6 +4418,9 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
+ {
+ struct goya_device *goya = hdev->asic_specific;
+
++ if (hdev->hard_reset_pending)
++ return;
++
+ writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - goya->ddr_bar_cur_addr));
+ }
+diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
+index ce1fda40a8b8..fadaf557603f 100644
+--- a/drivers/misc/habanalabs/memory.c
++++ b/drivers/misc/habanalabs/memory.c
+@@ -1046,10 +1046,17 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+
+ mutex_lock(&ctx->mmu_lock);
+
+- for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
++ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+- "unmap failed for vaddr: 0x%llx\n", next_vaddr);
++ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
++
++ /* unmapping on Palladium can be really long, so avoid a CPU
++ * soft lockup bug by sleeping a little between unmapping pages
++ */
++ if (hdev->pldm)
++ usleep_range(500, 1000);
++ }
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+
+diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
+index efb8a7965dd4..154f4204d58c 100644
+--- a/drivers/mmc/core/pwrseq_emmc.c
++++ b/drivers/mmc/core/pwrseq_emmc.c
+@@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc {
+
+ #define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
+
+-static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
+-{
+- gpiod_set_value(pwrseq->reset_gpio, 1);
+- udelay(1);
+- gpiod_set_value(pwrseq->reset_gpio, 0);
+- udelay(200);
+-}
+-
+ static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
+ {
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
+
+- __mmc_pwrseq_emmc_reset(pwrseq);
++ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
++ udelay(1);
++ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
++ udelay(200);
+ }
+
+ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
+@@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
+ {
+ struct mmc_pwrseq_emmc *pwrseq = container_of(this,
+ struct mmc_pwrseq_emmc, reset_nb);
++ gpiod_set_value(pwrseq->reset_gpio, 1);
++ udelay(1);
++ gpiod_set_value(pwrseq->reset_gpio, 0);
++ udelay(200);
+
+- __mmc_pwrseq_emmc_reset(pwrseq);
+ return NOTIFY_DONE;
+ }
+
+@@ -72,14 +70,18 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
+
+- /*
+- * register reset handler to ensure emmc reset also from
+- * emergency_reboot(), priority 255 is the highest priority
+- * so it will be executed before any system reboot handler.
+- */
+- pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
+- pwrseq->reset_nb.priority = 255;
+- register_restart_handler(&pwrseq->reset_nb);
++ if (!gpiod_cansleep(pwrseq->reset_gpio)) {
++ /*
++ * register reset handler to ensure emmc reset also from
++ * emergency_reboot(), priority 255 is the highest priority
++ * so it will be executed before any system reboot handler.
++ */
++ pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
++ pwrseq->reset_nb.priority = 255;
++ register_restart_handler(&pwrseq->reset_nb);
++ } else {
++ dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
++ }
+
+ pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+ pwrseq->pwrseq.dev = dev;
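
A restart handler may run in atomic context, where a reset line behind an I2C/SPI expander (a GPIO whose driver can sleep) must not be toggled; the probe therefore registers the emergency-reboot hook only when gpiod_cansleep() says the line is atomic-safe, while the ordinary host reset path switches to gpiod_set_value_cansleep() so sleepy lines still work there. The registration decision as a kernel-style sketch (gpiod_cansleep() and register_restart_handler() are the stock APIs):

	#include <linux/device.h>
	#include <linux/gpio/consumer.h>
	#include <linux/reboot.h>

	/* Register the atomic-context hook only for lines that can be
	 * driven without sleeping; sleepy lines keep the normal
	 * (cansleep) reset path only. */
	static int setup_reset(struct device *dev, struct gpio_desc *gpio,
			       struct notifier_block *nb)
	{
		if (gpiod_cansleep(gpio)) {
			dev_notice(dev, "reset GPIO may sleep; skipping restart handler\n");
			return 0;
		}
		return register_restart_handler(nb);	/* gpiod_set_value() safe */
	}
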
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 265e1aeeb9d8..d3d32f9a2cb1 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -221,6 +221,14 @@ static int mmc_decode_scr(struct mmc_card *card)
+
+ if (scr->sda_spec3)
+ scr->cmds = UNSTUFF_BITS(resp, 32, 2);
++
++ /* SD Spec says: any SD Card shall set at least bits 0 and 2 */
++ if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
++ !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
++ pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index 1b1498805972..a3533935e282 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -819,6 +819,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ }
+
+ status = spi_sync_locked(spi, &host->m);
++ if (status < 0) {
++ dev_dbg(&spi->dev, "read error %d\n", status);
++ return status;
++ }
+
+ if (host->dma_dev) {
+ dma_sync_single_for_cpu(host->dma_dev,
+diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
+index 9d12c06c7fd6..2feb4ef32035 100644
+--- a/drivers/mmc/host/sdhci-iproc.c
++++ b/drivers/mmc/host/sdhci-iproc.c
+@@ -196,7 +196,8 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
+ };
+
+ static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
+- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
++ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
++ SDHCI_QUIRK_NO_HISPD_BIT,
+ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ .ops = &sdhci_iproc_32only_ops,
+ };
+@@ -219,7 +220,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
+
+ static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
++ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
++ SDHCI_QUIRK_NO_HISPD_BIT,
+ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
+ .ops = &sdhci_iproc_ops,
+ };
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 4e669b4edfc1..7e0eae8dafae 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -694,6 +694,9 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
++ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
++ mdelay(5);
++
+ if (mask & SDHCI_RESET_ALL) {
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_EN;
+@@ -1074,6 +1077,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ if (esdhc->vendor_ver > VENDOR_V_22)
+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+
++ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
++ host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
++ host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
++ }
++
+ if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
+ of_device_is_compatible(np, "fsl,p5020-esdhc") ||
+ of_device_is_compatible(np, "fsl,p4080-esdhc") ||
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index a6eacf2099c3..9b03d7e404f8 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -224,28 +224,23 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
+ if (!tx_ring->tx_buffer_info) {
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+- return -ENOMEM;
++ goto err_tx_buffer_info;
+ }
+
+ size = sizeof(u16) * tx_ring->ring_size;
+ tx_ring->free_tx_ids = vzalloc_node(size, node);
+ if (!tx_ring->free_tx_ids) {
+ tx_ring->free_tx_ids = vzalloc(size);
+- if (!tx_ring->free_tx_ids) {
+- vfree(tx_ring->tx_buffer_info);
+- return -ENOMEM;
+- }
++ if (!tx_ring->free_tx_ids)
++ goto err_free_tx_ids;
+ }
+
+ size = tx_ring->tx_max_header_size;
+ tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ tx_ring->push_buf_intermediate_buf = vzalloc(size);
+- if (!tx_ring->push_buf_intermediate_buf) {
+- vfree(tx_ring->tx_buffer_info);
+- vfree(tx_ring->free_tx_ids);
+- return -ENOMEM;
+- }
++ if (!tx_ring->push_buf_intermediate_buf)
++ goto err_push_buf_intermediate_buf;
+ }
+
+ /* Req id ring for TX out of order completions */
+@@ -259,6 +254,15 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
+ tx_ring->next_to_clean = 0;
+ tx_ring->cpu = ena_irq->cpu;
+ return 0;
++
++err_push_buf_intermediate_buf:
++ vfree(tx_ring->free_tx_ids);
++ tx_ring->free_tx_ids = NULL;
++err_free_tx_ids:
++ vfree(tx_ring->tx_buffer_info);
++ tx_ring->tx_buffer_info = NULL;
++err_tx_buffer_info:
++ return -ENOMEM;
+ }
+
+ /* ena_free_tx_resources - Free I/O Tx Resources per Queue
+@@ -378,6 +382,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
+ rx_ring->free_rx_ids = vzalloc(size);
+ if (!rx_ring->free_rx_ids) {
+ vfree(rx_ring->rx_buffer_info);
++ rx_ring->rx_buffer_info = NULL;
+ return -ENOMEM;
+ }
+ }
+@@ -2292,7 +2297,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
+ host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
+ host_info->os_type = ENA_ADMIN_OS_LINUX;
+ host_info->kernel_ver = LINUX_VERSION_CODE;
+- strncpy(host_info->kernel_ver_str, utsname()->version,
++ strlcpy(host_info->kernel_ver_str, utsname()->version,
+ sizeof(host_info->kernel_ver_str) - 1);
+ host_info->os_dist = 0;
+ strncpy(host_info->os_dist_str, utsname()->release,
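
The reworked ena_setup_tx_resources() above is the canonical goto unwind ladder: labels in reverse allocation order, each one freeing exactly what was live when its jump was taken, and freed pointers NULLed so a later teardown path cannot double-free. (The strlcpy() change is a separate fix: unlike strncpy(), it always NUL-terminates kernel_ver_str.) The ladder on its own, compilable:

	#include <stdio.h>
	#include <stdlib.h>

	struct ring { void *bufs, *ids, *push; };

	/* Canonical unwind ladder: jump to the label that frees
	 * everything allocated so far; labels appear in reverse
	 * allocation order. */
	static int ring_setup(struct ring *r, size_t n)
	{
		r->bufs = calloc(n, 64);
		if (!r->bufs)
			goto err_bufs;
		r->ids = calloc(n, sizeof(short));
		if (!r->ids)
			goto err_ids;
		r->push = calloc(1, 4096);
		if (!r->push)
			goto err_push;
		return 0;

	err_push:
		free(r->ids);
		r->ids = NULL;		/* NULL so teardown can't double-free */
	err_ids:
		free(r->bufs);
		r->bufs = NULL;
	err_bufs:
		return -1;
	}

	int main(void)
	{
		struct ring r = { 0 };

		printf("%d\n", ring_setup(&r, 128));
		free(r.push); free(r.ids); free(r.bufs);
		return 0;
	}
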
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+index c2fd323c4078..ea75f275023f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
++++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+@@ -75,8 +75,8 @@ struct l2t_data {
+ struct l2t_entry *rover; /* starting point for next allocation */
+ atomic_t nfree; /* number of free entries */
+ rwlock_t lock;
+- struct l2t_entry l2tab[0];
+ struct rcu_head rcu_head; /* to handle rcu cleanup */
++ struct l2t_entry l2tab[];
+ };
+
+ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
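
C permits a flexible array member only as the last field of a struct; with l2tab[0] declared before rcu_head, table entries allocated past the end of the struct overlapped the RCU bookkeeping. Moving rcu_head up and using the modern [] spelling fixes the layout, as a compilable illustration shows:

	#include <stddef.h>
	#include <stdio.h>

	struct entry { int state; };
	struct rcu_head_stub { void *next, *func; };

	/* Flexible array member must come last, or fields declared
	 * "after" it overlap the array's storage. */
	struct l2t_data {
		int nentries;
		struct rcu_head_stub rcu_head;	/* before the table */
		struct entry l2tab[];		/* flexible array, last */
	};

	int main(void)
	{
		/* l2tab begins after every fixed field, rcu_head included. */
		printf("tab offset %zu, struct size %zu\n",
		       offsetof(struct l2t_data, l2tab),
		       sizeof(struct l2t_data));
		return 0;
	}
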
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 89179e316687..4bc0c357cb8e 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -6161,15 +6161,24 @@ static int __init cxgb4_init_module(void)
+
+ ret = pci_register_driver(&cxgb4_driver);
+ if (ret < 0)
+- debugfs_remove(cxgb4_debugfs_root);
++ goto err_pci;
+
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (!inet6addr_registered) {
+- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+- inet6addr_registered = true;
++ ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
++ if (ret)
++ pci_unregister_driver(&cxgb4_driver);
++ else
++ inet6addr_registered = true;
+ }
+ #endif
+
++ if (ret == 0)
++ return ret;
++
++err_pci:
++ debugfs_remove(cxgb4_debugfs_root);
++
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index dc339dc1adb2..57cbaa38d247 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -2796,6 +2796,7 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
++ int err;
+
+ /* Check if we actually support Rx flow classification */
+ if (dpaa2_eth_has_legacy_dist(priv)) {
+@@ -2814,9 +2815,13 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+ return -EOPNOTSUPP;
+ }
+
++ err = dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
++ if (err)
++ return err;
++
+ priv->rx_cls_enabled = 1;
+
+- return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
++ return 0;
+ }
+
+ /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+index 299b277bc7ae..589b7ee32bff 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+@@ -107,7 +107,7 @@ struct hclgevf_mbx_arq_ring {
+ struct hclgevf_dev *hdev;
+ u32 head;
+ u32 tail;
+- u32 count;
++ atomic_t count;
+ u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+ };
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 162cb9afa0e7..c7d310903319 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2705,7 +2705,7 @@ int hns3_clean_rx_ring(
+ #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ int recv_pkts, recv_bds, clean_count, err;
+- int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
++ int unused_count = hns3_desc_unused(ring);
+ struct sk_buff *skb = ring->skb;
+ int num;
+
+@@ -2714,6 +2714,7 @@ int hns3_clean_rx_ring(
+
+ recv_pkts = 0, recv_bds = 0, clean_count = 0;
+ num -= unused_count;
++ unused_count -= ring->pending_buf;
+
+ while (recv_pkts < budget && recv_bds < num) {
+ /* Reuse or realloc buffers */
+@@ -3773,12 +3774,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
+ struct netdev_hw_addr *ha, *tmp;
+ int ret = 0;
+
++ netif_addr_lock_bh(ndev);
+ /* go through and sync uc_addr entries to the device */
+ list = &ndev->uc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ ret = hns3_nic_uc_sync(ndev, ha->addr);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
+ /* go through and sync mc_addr entries to the device */
+@@ -3786,9 +3788,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ ret = hns3_nic_mc_sync(ndev, ha->addr);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
++out:
++ netif_addr_unlock_bh(ndev);
+ return ret;
+ }
+
+@@ -3799,6 +3803,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
+
+ hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+
++ netif_addr_lock_bh(netdev);
+ /* go through and unsync uc_addr entries to the device */
+ list = &netdev->uc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+@@ -3809,6 +3814,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ if (ha->refcount > 1)
+ hns3_nic_mc_unsync(netdev, ha->addr);
++
++ netif_addr_unlock_bh(netdev);
+ }
+
+ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
+@@ -3850,6 +3857,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
++ /* Free the pending skb in rx ring */
++ if (ring->skb) {
++ dev_kfree_skb_any(ring->skb);
++ ring->skb = NULL;
++ ring->pending_buf = 0;
++ }
++
+ return 0;
+ }
+
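The hns3_recover_hw_addr()/hns3_remove_hw_addr() hunks above do two things at once: they wrap the uc/mc list walks in netif_addr_lock_bh(), and they turn the early returns into a goto so every path reaches the single unlock. A rough pthread analogue of that shape; the list and the sync callback are stand-ins, not driver code:

#include <pthread.h>

struct addr { struct addr *next; unsigned char mac[6]; };

static pthread_mutex_t addr_lock = PTHREAD_MUTEX_INITIALIZER;
static struct addr *uc_list, *mc_list;

static int sync_one(const unsigned char *mac)
{
	(void)mac;		/* would program the address into hardware */
	return 0;
}

/* Walk both lists under the lock; a failure takes the shared unlock
 * path instead of returning mid-walk.
 */
static int recover_hw_addrs(void)
{
	struct addr *ha;
	int ret = 0;

	pthread_mutex_lock(&addr_lock);
	for (ha = uc_list; ha; ha = ha->next) {
		ret = sync_one(ha->mac);
		if (ret)
			goto out;
	}
	for (ha = mc_list; ha; ha = ha->next) {
		ret = sync_one(ha->mac);
		if (ret)
			goto out;
	}
out:
	pthread_mutex_unlock(&addr_lock);
	return ret;
}

int main(void)
{
	return recover_hw_addrs();
}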
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index 359d4731fb2d..ea94b5152963 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -483,6 +483,11 @@ static void hns3_get_stats(struct net_device *netdev,
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u64 *p = data;
+
++ if (hns3_nic_resetting(netdev)) {
++ netdev_err(netdev, "dev resetting, could not get stats\n");
++ return;
++ }
++
+ if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
+ netdev_err(netdev, "could not get any statistics\n");
+ return;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+index 3a093a92eac5..d92e4af11b1f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+@@ -373,21 +373,26 @@ int hclge_cmd_init(struct hclge_dev *hdev)
+ * reset may happen when lower level reset is being processed.
+ */
+ if ((hclge_is_reset_pending(hdev))) {
+- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+- return -EBUSY;
++ ret = -EBUSY;
++ goto err_cmd_init;
+ }
+
+ ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "firmware version query failed %d\n", ret);
+- return ret;
++ goto err_cmd_init;
+ }
+ hdev->fw_version = version;
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+ return 0;
++
++err_cmd_init:
++ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
++
++ return ret;
+ }
+
+ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index aafc69f4bfdd..a7bbb6d3091a 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
+ ret = hclge_pfc_setup_hw(hdev);
+ if (init && ret == -EOPNOTSUPP)
+ dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
+- else
++ else if (ret) {
++ dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
++ ret);
+ return ret;
++ }
+
+ return hclge_tm_bp_setup(hdev);
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+index 9441b453d38d..382ecb15e743 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+@@ -327,7 +327,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+ hdev->arq.hdev = hdev;
+ hdev->arq.head = 0;
+ hdev->arq.tail = 0;
+- hdev->arq.count = 0;
++ atomic_set(&hdev->arq.count, 0);
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+@@ -344,8 +344,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+ * reset may happen when lower level reset is being processed.
+ */
+ if (hclgevf_is_reset_pending(hdev)) {
+- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+- return -EBUSY;
++ ret = -EBUSY;
++ goto err_cmd_init;
+ }
+
+ /* get firmware version */
+@@ -353,13 +353,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to query firmware version\n", ret);
+- return ret;
++ goto err_cmd_init;
+ }
+ hdev->fw_version = version;
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+ return 0;
++
++err_cmd_init:
++ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
++
++ return ret;
+ }
+
+ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 8bc28e6f465f..8dd7fef863f6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2007,9 +2007,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
+ static int hclgevf_client_start(struct hnae3_handle *handle)
+ {
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++ int ret;
++
++ ret = hclgevf_set_alive(handle, true);
++ if (ret)
++ return ret;
+
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+- return hclgevf_set_alive(handle, true);
++
++ return 0;
+ }
+
+ static void hclgevf_client_stop(struct hnae3_handle *handle)
+@@ -2051,6 +2057,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+ {
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
++ if (hdev->keep_alive_timer.function)
++ del_timer_sync(&hdev->keep_alive_timer);
++ if (hdev->keep_alive_task.func)
++ cancel_work_sync(&hdev->keep_alive_task);
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+index 7dc3c9f79169..4f2c77283cb4 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+ /* we will drop the async msg if we find ARQ as full
+ * and continue with next message
+ */
+- if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
++ if (atomic_read(&hdev->arq.count) >=
++ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+ dev_warn(&hdev->pdev->dev,
+ "Async Q full, dropping msg(%d)\n",
+ req->msg[1]);
+@@ -220,7 +221,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+ memcpy(&msg_q[0], req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
+ hclge_mbx_tail_ptr_move_arq(hdev->arq);
+- hdev->arq.count++;
++ atomic_inc(&hdev->arq.count);
+
+ hclgevf_mbx_task_schedule(hdev);
+
+@@ -308,7 +309,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
+ }
+
+ hclge_mbx_head_ptr_move_arq(hdev->arq);
+- hdev->arq.count--;
++ atomic_dec(&hdev->arq.count);
+ msg_q = hdev->arq.msg_q[hdev->arq.head];
+ }
+ }
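The arq.count change (here and in hclge_mbx.h above) exists because the counter is incremented from the mailbox interrupt path and decremented from task context; a plain u32 ++/-- is a non-atomic read-modify-write, so concurrent updates can be lost. A userspace analogue of the pattern using C11 atomics; the kernel side uses atomic_set/atomic_inc/atomic_dec/atomic_read, and the message ring itself is elided here:

#include <stdatomic.h>
#include <stdio.h>

#define ARQ_MAX 8		/* ~ HCLGE_MBX_MAX_ARQ_MSG_NUM */

static atomic_int arq_count;	/* ~ atomic_t, zeroed like atomic_set(, 0) */

/* producer (interrupt path): drop the message when the queue is full */
static int arq_enqueue(void)
{
	if (atomic_load(&arq_count) >= ARQ_MAX)
		return -1;			/* ~ dev_warn + drop */
	/* ... copy the message into the ring ... */
	atomic_fetch_add(&arq_count, 1);	/* ~ atomic_inc() */
	return 0;
}

/* consumer (task context) */
static void arq_dequeue(void)
{
	/* ... consume one message ... */
	atomic_fetch_sub(&arq_count, 1);	/* ~ atomic_dec() */
}

int main(void)
{
	arq_enqueue();
	arq_dequeue();
	printf("count = %d\n", atomic_load(&arq_count));
	return 0;
}

Note that the check-then-increment pair is still only advisory; that matches the driver, where the bound decides when to start dropping messages rather than enforcing a hard capacity invariant.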
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 7acc61e4f645..c10c9d7eadaa 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -7350,7 +7350,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
+- if (pci_dev_run_wake(pdev))
++ if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index b1c265012c8a..ac9fcb097689 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2654,6 +2654,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
++ /* Don't modify stripping options if a port VLAN is active */
++ if (vsi->info.pvid)
++ return;
++
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
+@@ -2684,6 +2688,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
++ /* Don't modify stripping options if a port VLAN is active */
++ if (vsi->info.pvid)
++ return;
++
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 831d52bc3c9a..2b0362c827e9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -181,7 +181,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
+ * check for the valid queue id
+ **/
+ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
+- u8 qid)
++ u16 qid)
+ {
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
+@@ -2454,8 +2454,10 @@ error_param:
+ (u8 *)&stats, sizeof(stats));
+ }
+
+-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
+-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
++/* If the VF is not trusted restrict the number of MAC/VLAN it can program
++ * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
++ */
++#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
+ #define I40E_VC_MAX_VLAN_PER_VF 8
+
+ /**
+@@ -3374,7 +3376,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+- goto err;
++ goto err_out;
+ }
+
+ if (!vf->adq_enabled) {
+@@ -3382,7 +3384,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ "VF %d: ADq is not enabled, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+- goto err;
++ goto err_out;
+ }
+
+ if (i40e_validate_cloud_filter(vf, vcf)) {
+@@ -3390,7 +3392,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ "VF %d: Invalid input/s, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+- goto err;
++ goto err_out;
+ }
+
+ cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+@@ -3451,13 +3453,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+- goto err;
++ goto err_free;
+ }
+
+ INIT_HLIST_NODE(&cfilter->cloud_node);
+ hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
++ /* release the pointer passing it to the collection */
++ cfilter = NULL;
+ vf->num_cloud_filters++;
+-err:
++err_free:
++ kfree(cfilter);
++err_out:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
+ aq_ret);
+ }
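The cloud-filter fix above is an ownership-transfer idiom: once hlist_add_head() has run, the list owns cfilter, so the local pointer is NULLed and one unconditional kfree() at the unwind label covers both outcomes, because kfree(NULL) is a no-op. A compact userspace sketch of the same shape (free(NULL) is likewise a no-op; names are invented):

#include <stdlib.h>
#include <string.h>

struct filter { struct filter *next; char key[16]; };

static struct filter *filter_list;

static int add_filter(const char *key)
{
	struct filter *f = calloc(1, sizeof(*f));
	int err = 0;

	if (!f)
		return -1;

	if (strlen(key) >= sizeof(f->key)) {
		err = -2;
		goto out;		/* still ours: freed below */
	}
	strcpy(f->key, key);

	f->next = filter_list;		/* ownership moves to the list... */
	filter_list = f;
	f = NULL;			/* ...so drop the local reference */
out:
	free(f);			/* no-op on the success path */
	return err;
}

int main(void)
{
	return add_filter("vxlan-42") ? 1 : 0;
}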
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 89440775aea1..6af5bd5883ca 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -277,6 +277,7 @@ struct ice_q_vector {
+ * value to the device
+ */
+ u8 intrl;
++ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
+ } ____cacheline_internodealigned_in_smp;
+
+ enum ice_pf_flags {
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index fa61203bee26..b710545cf7d1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1848,6 +1848,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+ */
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+
++ /* Preserve existing VLAN strip setting */
++ ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
++ ICE_AQ_VSI_VLAN_EMOD_M);
++
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 47cc3f905b7f..6ec73864019c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -342,6 +342,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
+ {
+ struct ice_hw *hw = &pf->hw;
+
++ /* already prepared for reset */
++ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
++ return;
++
+ /* Notify VFs of impending reset */
+ if (ice_check_sq_alive(hw, &hw->mailboxq))
+ ice_vc_notify_reset(pf);
+@@ -416,10 +420,15 @@ static void ice_reset_subtask(struct ice_pf *pf)
+ * for the reset now), poll for reset done, rebuild and return.
+ */
+ if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
+- clear_bit(__ICE_GLOBR_RECV, pf->state);
+- clear_bit(__ICE_CORER_RECV, pf->state);
+- if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+- ice_prepare_for_reset(pf);
++ /* Perform the largest reset requested */
++ if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
++ reset_type = ICE_RESET_CORER;
++ if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
++ reset_type = ICE_RESET_GLOBR;
++ /* return if no valid reset type requested */
++ if (reset_type == ICE_RESET_INVAL)
++ return;
++ ice_prepare_for_reset(pf);
+
+ /* make sure we are ready to rebuild */
+ if (ice_check_reset(&pf->hw)) {
+@@ -2545,6 +2554,9 @@ static int ice_set_features(struct net_device *netdev,
+ struct ice_vsi *vsi = np->vsi;
+ int ret = 0;
+
++ /* Multiple features can be changed in one call so keep features in
++ * separate if/else statements to guarantee each feature is checked
++ */
+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+ ret = ice_vsi_manage_rss_lut(vsi, true);
+ else if (!(features & NETIF_F_RXHASH) &&
+@@ -2557,8 +2569,9 @@ static int ice_set_features(struct net_device *netdev,
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
+ ret = ice_vsi_manage_vlan_stripping(vsi, false);
+- else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
+- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
++
++ if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
++ !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+ ret = ice_vsi_manage_vlan_insertion(vsi);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index c289d97f477d..851030ad5016 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -1048,18 +1048,257 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+ return failure ? budget : (int)total_rx_pkts;
+ }
+
++static unsigned int ice_itr_divisor(struct ice_port_info *pi)
++{
++ switch (pi->phy.link_info.link_speed) {
++ case ICE_AQ_LINK_SPEED_40GB:
++ return ICE_ITR_ADAPTIVE_MIN_INC * 1024;
++ case ICE_AQ_LINK_SPEED_25GB:
++ case ICE_AQ_LINK_SPEED_20GB:
++ return ICE_ITR_ADAPTIVE_MIN_INC * 512;
++ case ICE_AQ_LINK_SPEED_100MB:
++ return ICE_ITR_ADAPTIVE_MIN_INC * 32;
++ default:
++ return ICE_ITR_ADAPTIVE_MIN_INC * 256;
++ }
++}
++
++/**
++ * ice_update_itr - update the adaptive ITR value based on statistics
++ * @q_vector: structure containing interrupt and ring information
++ * @rc: structure containing ring performance data
++ *
++ * Stores a new ITR value based on packets and byte
++ * counts during the last interrupt. The advantage of per interrupt
++ * computation is faster updates and more accurate ITR for the current
++ * traffic pattern. Constants in this function were computed
++ * based on theoretical maximum wire speed and thresholds were set based
++ * on testing data as well as attempting to minimize response time
++ * while increasing bulk throughput.
++ */
++static void
++ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
++{
++ unsigned int avg_wire_size, packets, bytes, itr;
++ unsigned long next_update = jiffies;
++ bool container_is_rx;
++
++ if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
++ return;
++
++ /* If itr_countdown is set it means we programmed an ITR within
++ * the last 4 interrupt cycles. This has a side effect of us
++ * potentially firing an early interrupt. In order to work around
++ * this we need to throw out any data received for a few
++ * interrupts following the update.
++ */
++ if (q_vector->itr_countdown) {
++ itr = rc->target_itr;
++ goto clear_counts;
++ }
++
++ container_is_rx = (&q_vector->rx == rc);
++ /* For Rx we want to push the delay up and default to low latency.
++ * for Tx we want to pull the delay down and default to high latency.
++ */
++ itr = container_is_rx ?
++ ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
++ ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
++
++ /* If we didn't update within up to 1 - 2 jiffies we can assume
++ * that either packets are coming in so slow there hasn't been
++ * any work, or that there is so much work that NAPI is dealing
++ * with interrupt moderation and we don't need to do anything.
++ */
++ if (time_after(next_update, rc->next_update))
++ goto clear_counts;
++
++ packets = rc->total_pkts;
++ bytes = rc->total_bytes;
++
++ if (container_is_rx) {
++ /* If Rx there are 1 to 4 packets and bytes are less than
++ * 9000 assume insufficient data to use bulk rate limiting
++ * approach unless Tx is already in bulk rate limiting. We
++ * are likely latency driven.
++ */
++ if (packets && packets < 4 && bytes < 9000 &&
++ (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
++ itr = ICE_ITR_ADAPTIVE_LATENCY;
++ goto adjust_by_size;
++ }
++ } else if (packets < 4) {
++ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
++ * bulk mode and we are receiving 4 or fewer packets just
++ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
++ * that the Rx can relax.
++ */
++ if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
++ (q_vector->rx.target_itr & ICE_ITR_MASK) ==
++ ICE_ITR_ADAPTIVE_MAX_USECS)
++ goto clear_counts;
++ } else if (packets > 32) {
++ /* If we have processed over 32 packets in a single interrupt
++ * for Tx assume we need to switch over to "bulk" mode.
++ */
++ rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
++ }
++
++ /* We have no packets to actually measure against. This means
++ * either one of the other queues on this vector is active or
++ * we are a Tx queue doing TSO with too high of an interrupt rate.
++ *
++ * Between 4 and 56 we can assume that our current interrupt delay
++ * is only slightly too low. As such we should increase it by a small
++ * fixed amount.
++ */
++ if (packets < 56) {
++ itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
++ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
++ itr &= ICE_ITR_ADAPTIVE_LATENCY;
++ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
++ }
++ goto clear_counts;
++ }
++
++ if (packets <= 256) {
++ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
++ itr &= ICE_ITR_MASK;
++
++ /* Between 56 and 112 is our "goldilocks" zone where we are
++ * working out "just right". Just report that our current
++ * ITR is good for us.
++ */
++ if (packets <= 112)
++ goto clear_counts;
++
++ /* If packet count is 128 or greater we are likely looking
++ * at a slight overrun of the delay we want. Try halving
++ * our delay to see if that will cut the number of packets
++ * in half per interrupt.
++ */
++ itr >>= 1;
++ itr &= ICE_ITR_MASK;
++ if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
++ itr = ICE_ITR_ADAPTIVE_MIN_USECS;
++
++ goto clear_counts;
++ }
++
++ /* The paths below assume we are dealing with a bulk ITR since
++ * number of packets is greater than 256. We are just going to have
++ * to compute a value and try to bring the count under control,
++ * though for smaller packet sizes there isn't much we can do as
++ * NAPI polling will likely be kicking in sooner rather than later.
++ */
++ itr = ICE_ITR_ADAPTIVE_BULK;
++
++adjust_by_size:
++ /* If packet counts are 256 or greater we can assume we have a gross
++ * overestimation of what the rate should be. Instead of trying to fine
++ * tune it just use the formula below to try and dial in an exact value
++ * given the current packet size of the frame.
++ */
++ avg_wire_size = bytes / packets;
++
++ /* The following is a crude approximation of:
++ * wmem_default / (size + overhead) = desired_pkts_per_int
++ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
++ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
++ *
++ * Assuming wmem_default is 212992 and overhead is 640 bytes per
++ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
++ * formula down to
++ *
++ * (170 * (size + 24)) / (size + 640) = ITR
++ *
++ * We first do some math on the packet size and then finally bitshift
++ * by 8 after rounding up. We also have to account for PCIe link speed
++ * difference as ITR scales based on this.
++ */
++ if (avg_wire_size <= 60) {
++ /* Start at 250k ints/sec */
++ avg_wire_size = 4096;
++ } else if (avg_wire_size <= 380) {
++ /* 250K ints/sec to 60K ints/sec */
++ avg_wire_size *= 40;
++ avg_wire_size += 1696;
++ } else if (avg_wire_size <= 1084) {
++ /* 60K ints/sec to 36K ints/sec */
++ avg_wire_size *= 15;
++ avg_wire_size += 11452;
++ } else if (avg_wire_size <= 1980) {
++ /* 36K ints/sec to 30K ints/sec */
++ avg_wire_size *= 5;
++ avg_wire_size += 22420;
++ } else {
++ /* plateau at a limit of 30K ints/sec */
++ avg_wire_size = 32256;
++ }
++
++ /* If we are in low latency mode halve our delay which doubles the
++ * rate to somewhere between 100K to 16K ints/sec
++ */
++ if (itr & ICE_ITR_ADAPTIVE_LATENCY)
++ avg_wire_size >>= 1;
++
++ /* Resultant value is 256 times larger than it needs to be. This
++ * gives us room to adjust the value as needed to either increase
++ * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
++ *
++ * Use addition as we have already recorded the new latency flag
++ * for the ITR value.
++ */
++ itr += DIV_ROUND_UP(avg_wire_size,
++ ice_itr_divisor(q_vector->vsi->port_info)) *
++ ICE_ITR_ADAPTIVE_MIN_INC;
++
++ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
++ itr &= ICE_ITR_ADAPTIVE_LATENCY;
++ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
++ }
++
++clear_counts:
++ /* write back value */
++ rc->target_itr = itr;
++
++ /* next update should occur within next jiffy */
++ rc->next_update = next_update + 1;
++
++ rc->total_bytes = 0;
++ rc->total_pkts = 0;
++}
++
+ /**
+ * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
+ * @itr_idx: interrupt throttling index
+- * @reg_itr: interrupt throttling value adjusted based on ITR granularity
++ * @itr: interrupt throttling value in usecs
+ */
+-static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
++static u32 ice_buildreg_itr(int itr_idx, u16 itr)
+ {
++ /* The itr value is reported in microseconds, and the register value is
++ * recorded in 2 microsecond units. For this reason we only need to
++ * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
++ * granularity as a shift instead of division. The mask makes sure the
++ * ITR value is never odd so we don't accidentally write into the field
++ * prior to the ITR field.
++ */
++ itr &= ICE_ITR_MASK;
++
+ return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
+- (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
++ (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
+ }
+
++/* The act of updating the ITR will cause it to immediately trigger. In order
++ * to prevent this from throwing off adaptive update statistics we defer the
++ * update so that it can only happen so often. So after either Tx or Rx are
++ * updated we make the adaptive scheme wait until either the ITR completely
++ * expires via the next_update expiration or we have been through at least
++ * 3 interrupts.
++ */
++#define ITR_COUNTDOWN_START 3
++
+ /**
+ * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
+ * @vsi: the VSI associated with the q_vector
+@@ -1068,10 +1307,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
+ static void
+ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+ {
+- struct ice_hw *hw = &vsi->back->hw;
+- struct ice_ring_container *rc;
++ struct ice_ring_container *tx = &q_vector->tx;
++ struct ice_ring_container *rx = &q_vector->rx;
+ u32 itr_val;
+
++ /* This will do nothing if dynamic updates are not enabled */
++ ice_update_itr(q_vector, tx);
++ ice_update_itr(q_vector, rx);
++
+ /* This block of logic allows us to get away with only updating
+ * one ITR value with each interrupt. The idea is to perform a
+ * pseudo-lazy update with the following criteria.
+@@ -1080,35 +1323,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+ * 2. If we must reduce an ITR that is given highest priority.
+ * 3. We then give priority to increasing ITR based on amount.
+ */
+- if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+- rc = &q_vector->rx;
++ if (rx->target_itr < rx->current_itr) {
+ /* Rx ITR needs to be reduced, this is highest priority */
+- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+- rc->current_itr = rc->target_itr;
+- } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+- ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
+- (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
+- rc = &q_vector->tx;
++ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
++ rx->current_itr = rx->target_itr;
++ q_vector->itr_countdown = ITR_COUNTDOWN_START;
++ } else if ((tx->target_itr < tx->current_itr) ||
++ ((rx->target_itr - rx->current_itr) <
++ (tx->target_itr - tx->current_itr))) {
+ /* Tx ITR needs to be reduced, this is second priority
+ * Tx ITR needs to be increased more than Rx, fourth priority
+ */
+- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+- rc->current_itr = rc->target_itr;
+- } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+- rc = &q_vector->rx;
++ itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
++ tx->current_itr = tx->target_itr;
++ q_vector->itr_countdown = ITR_COUNTDOWN_START;
++ } else if (rx->current_itr != rx->target_itr) {
+ /* Rx ITR needs to be increased, third priority */
+- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+- rc->current_itr = rc->target_itr;
++ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
++ rx->current_itr = rx->target_itr;
++ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else {
+ /* Still have to re-enable the interrupts */
+ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
++ if (q_vector->itr_countdown)
++ q_vector->itr_countdown--;
+ }
+
+- if (!test_bit(__ICE_DOWN, vsi->state)) {
+- int vector = vsi->hw_base_vector + q_vector->v_idx;
+-
+- wr32(hw, GLINT_DYN_CTL(vector), itr_val);
+- }
++ if (!test_bit(__ICE_DOWN, vsi->state))
++ wr32(&vsi->back->hw,
++ GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx),
++ itr_val);
+ }
+
+ /**
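The adjust_by_size ladder above is a piecewise-linear approximation of (170 * (size + 24)) / (size + 640), kept roughly 256 times larger than the final value so ice_itr_divisor() can scale it per link speed. The toy program below reproduces the ladder so the breakpoints can be checked numerically; the microsecond conversion assumes the 40 Gb divisor (ICE_ITR_ADAPTIVE_MIN_INC * 1024), which is the speed the ints/sec labels in the comments appear to be written for:

#include <stdio.h>

/* Mirror of the avg_wire_size mapping in ice_update_itr() above. */
static unsigned int itr_scaled(unsigned int avg_wire_size)
{
	if (avg_wire_size <= 60)
		return 4096;				/* ~250k ints/sec */
	if (avg_wire_size <= 380)
		return avg_wire_size * 40 + 1696;	/* 250k..60k */
	if (avg_wire_size <= 1084)
		return avg_wire_size * 15 + 11452;	/* 60k..36k */
	if (avg_wire_size <= 1980)
		return avg_wire_size * 5 + 22420;	/* 36k..30k */
	return 32256;					/* 30k plateau */
}

int main(void)
{
	const unsigned int sizes[] = { 60, 380, 1084, 1500, 1980, 4096 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int v = itr_scaled(sizes[i]);
		unsigned int usecs = v / 1024;	/* 40Gb: v / (2 * 1024) * 2 */

		printf("avg %4u B -> %5u scaled, ~%2u us, ~%uk ints/sec\n",
		       sizes[i], v, usecs, usecs ? 1000 / usecs : 0);
	}
	return 0;
}

At 60 bytes this yields 4096, about 4 us, i.e. the 250k interrupts/sec label, and the 1980-byte breakpoint lands near 32 us, the 30k plateau, matching the comments in the hunk.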
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index fc358ea81816..74a031fbd732 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -128,6 +128,12 @@ enum ice_rx_dtype {
+ #define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
+ #define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
+
++#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
++#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
++#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
++#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
++#define ICE_ITR_ADAPTIVE_BULK 0x0000
++
+ #define ICE_DFLT_INTRL 0
+
+ /* Legacy or Advanced Mode Queue */
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index 57155b4a59dc..8b1ee9f3a39d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -764,6 +764,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+ {
+ struct ice_hw *hw = &pf->hw;
++ struct ice_vf *vf;
+ int v, i;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+@@ -778,12 +779,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_trigger_vf_reset(&pf->vf[v], is_vflr);
+
+- /* Call Disable LAN Tx queue AQ call with VFR bit set and 0
+- * queues to inform Firmware about VF reset.
+- */
+- for (v = 0; v < pf->num_alloc_vfs; v++)
+- ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
+- ICE_VF_RESET, v, NULL);
++ for (v = 0; v < pf->num_alloc_vfs; v++) {
++ struct ice_vsi *vsi;
++
++ vf = &pf->vf[v];
++ vsi = pf->vsi[vf->lan_vsi_idx];
++ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
++ ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
++ ice_vsi_stop_rx_rings(vsi);
++ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
++ }
++ }
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+@@ -796,9 +802,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+
+ /* Check each VF in sequence */
+ while (v < pf->num_alloc_vfs) {
+- struct ice_vf *vf = &pf->vf[v];
+ u32 reg;
+
++ vf = &pf->vf[v];
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+ break;
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 3269d8e94744..580d14b49fda 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3452,6 +3452,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ break;
+ }
+ }
++
++ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
++
+ pm_runtime_put_noidle(&pdev->dev);
+ return 0;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 3f3cd32ae60a..e0ba59b5296f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -431,6 +431,9 @@ static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
+ return index;
+ }
+
++/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
++void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
++
+ #else /* CONFIG_MLX5_ESWITCH */
+ /* eswitch API stubs */
+ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 9b2d78ee22b8..a97ffd0dbf01 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -363,7 +363,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
+ esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
+ for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
+ rep = &esw->offloads.vport_reps[vf_vport];
+- if (rep->rep_if[REP_ETH].state != REP_LOADED)
++ if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
+ continue;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
+@@ -1306,7 +1306,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+ ether_addr_copy(rep->hw_id, hw_id);
+
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+- rep->rep_if[rep_type].state = REP_UNREGISTERED;
++ atomic_set(&rep->rep_if[rep_type].state,
++ REP_UNREGISTERED);
+ }
+
+ return 0;
+@@ -1315,11 +1316,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+ {
+- if (rep->rep_if[rep_type].state != REP_LOADED)
+- return;
+-
+- rep->rep_if[rep_type].unload(rep);
+- rep->rep_if[rep_type].state = REP_REGISTERED;
++ if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
++ REP_LOADED, REP_REGISTERED) == REP_LOADED)
++ rep->rep_if[rep_type].unload(rep);
+ }
+
+ static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
+@@ -1380,16 +1379,15 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+ {
+ int err = 0;
+
+- if (rep->rep_if[rep_type].state != REP_REGISTERED)
+- return 0;
+-
+- err = rep->rep_if[rep_type].load(esw->dev, rep);
+- if (err)
+- return err;
+-
+- rep->rep_if[rep_type].state = REP_LOADED;
++ if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
++ REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
++ err = rep->rep_if[rep_type].load(esw->dev, rep);
++ if (err)
++ atomic_set(&rep->rep_if[rep_type].state,
++ REP_REGISTERED);
++ }
+
+- return 0;
++ return err;
+ }
+
+ static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
+@@ -1523,8 +1521,6 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
+ return 0;
+ }
+
+-void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+-
+ static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
+ {
+ mlx5e_tc_clean_fdb_peer_flows(esw);
+@@ -2076,7 +2072,7 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
+ rep_if->get_proto_dev = __rep_if->get_proto_dev;
+ rep_if->priv = __rep_if->priv;
+
+- rep_if->state = REP_REGISTERED;
++ atomic_set(&rep_if->state, REP_REGISTERED);
+ }
+ }
+ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
+@@ -2091,7 +2087,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
+ __unload_reps_all_vport(esw, max_vf, rep_type);
+
+ mlx5_esw_for_all_reps(esw, i, rep)
+- rep->rep_if[rep_type].state = REP_UNREGISTERED;
++ atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
+ }
+ EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
+
+@@ -2111,7 +2107,7 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
+
+ rep = mlx5_eswitch_get_rep(esw, vport);
+
+- if (rep->rep_if[rep_type].state == REP_LOADED &&
++ if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
+ rep->rep_if[rep_type].get_proto_dev)
+ return rep->rep_if[rep_type].get_proto_dev(rep);
+ return NULL;
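The rep_if state handling above moves from "test the state, act, then store the new state" to a single compare-and-swap, so two contexts racing to load or unload a representor cannot both pass the test. A userspace sketch of the same state machine with C11 atomics; kernel atomic_cmpxchg() returns the old value, while C11 atomic_compare_exchange_strong() returns a success flag, but the effect is the same:

#include <stdatomic.h>
#include <stdio.h>

enum { REP_UNREGISTERED, REP_REGISTERED, REP_LOADED };

static _Atomic int rep_state = REP_REGISTERED;

static int rep_load(void)
{
	int expected = REP_REGISTERED;

	/* Only the caller that wins REGISTERED -> LOADED runs load(). */
	if (!atomic_compare_exchange_strong(&rep_state, &expected, REP_LOADED))
		return -1;	/* already loaded, or never registered */

	/* ... rep_if->load(...) goes here; on failure, roll back with
	 * atomic_store(&rep_state, REP_REGISTERED) as the hunk does ...
	 */
	return 0;
}

static void rep_unload(void)
{
	int expected = REP_LOADED;

	/* Mirror image: only the winner of LOADED -> REGISTERED unloads. */
	if (!atomic_compare_exchange_strong(&rep_state, &expected,
					    REP_REGISTERED))
		return;
	/* ... rep_if->unload(...) runs exactly once here ... */
}

int main(void)
{
	printf("first load: %d, second load: %d\n", rep_load(), rep_load());
	rep_unload();
	return 0;
}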
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index a591583d120e..dd12b73a8853 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -800,12 +800,17 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+
+ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+ {
+- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ struct cpsw_common *cpsw = priv->cpsw;
++ int slave_port = -1;
++
++ if (cpsw->data.dual_emac)
++ slave_port = priv->emac_port + 1;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ cpsw_set_promiscious(ndev, true);
+- cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
++ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
+ return;
+ } else {
+ /* Disable promiscuous mode */
+@@ -813,7 +818,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+ }
+
+ /* Restore allmulti on vlans if necessary */
+- cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
++ cpsw_ale_set_allmulti(cpsw->ale,
++ ndev->flags & IFF_ALLMULTI, slave_port);
+
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 798c989d5d93..b3d9591b4824 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -482,24 +482,25 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+ }
+ EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
+
+-void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
++void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
+ {
+ u32 ale_entry[ALE_ENTRY_WORDS];
+- int type, idx;
+ int unreg_mcast = 0;
+-
+- /* Only bother doing the work if the setting is actually changing */
+- if (ale->allmulti == allmulti)
+- return;
+-
+- /* Remember the new setting to check against next time */
+- ale->allmulti = allmulti;
++ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
++ int vlan_members;
++
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
++ vlan_members =
++ cpsw_ale_get_vlan_member_list(ale_entry,
++ ale->vlan_field_bits);
++
++ if (port != -1 && !(vlan_members & BIT(port)))
++ continue;
+
+ unreg_mcast =
+ cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
+index cd07a3e96d57..1fe196d8a5e4 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.h
++++ b/drivers/net/ethernet/ti/cpsw_ale.h
+@@ -37,7 +37,6 @@ struct cpsw_ale {
+ struct cpsw_ale_params params;
+ struct timer_list timer;
+ unsigned long ageout;
+- int allmulti;
+ u32 version;
+ /* These bits are different on NetCP NU Switch ALE */
+ u32 port_mask_bits;
+@@ -116,7 +115,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
+ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast);
+ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
+-void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
++void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
+
+ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
+ int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index e0dce373cdd9..3d4a166a49d5 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -875,12 +875,6 @@ static inline int netvsc_send_pkt(
+ } else if (ret == -EAGAIN) {
+ netif_tx_stop_queue(txq);
+ ndev_ctx->eth_stats.stop_queue++;
+- if (atomic_read(&nvchan->queue_sends) < 1 &&
+- !net_device->tx_disable) {
+- netif_tx_wake_queue(txq);
+- ndev_ctx->eth_stats.wake_queue++;
+- ret = -ENOSPC;
+- }
+ } else {
+ netdev_err(ndev,
+ "Unable to send packet pages %u len %u, ret %d\n",
+@@ -888,6 +882,15 @@ static inline int netvsc_send_pkt(
+ ret);
+ }
+
++ if (netif_tx_queue_stopped(txq) &&
++ atomic_read(&nvchan->queue_sends) < 1 &&
++ !net_device->tx_disable) {
++ netif_tx_wake_queue(txq);
++ ndev_ctx->eth_stats.wake_queue++;
++ if (ret == -EAGAIN)
++ ret = -ENOSPC;
++ }
++
+ return ret;
+ }
+
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index cd5966b0db57..f6a6cc5bf118 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1829,13 +1829,25 @@ EXPORT_SYMBOL(genphy_read_status);
+ */
+ int genphy_soft_reset(struct phy_device *phydev)
+ {
++ u16 res = BMCR_RESET;
+ int ret;
+
+- ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
++ if (phydev->autoneg == AUTONEG_ENABLE)
++ res |= BMCR_ANRESTART;
++
++ ret = phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, res);
+ if (ret < 0)
+ return ret;
+
+- return phy_poll_reset(phydev);
++ ret = phy_poll_reset(phydev);
++ if (ret)
++ return ret;
++
++ /* BMCR may be reset to defaults */
++ if (phydev->autoneg == AUTONEG_DISABLE)
++ ret = genphy_setup_forced(phydev);
++
++ return ret;
+ }
+ EXPORT_SYMBOL(genphy_soft_reset);
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 366217263d70..d9a6699abe59 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -63,6 +63,7 @@ enum qmi_wwan_flags {
+
+ enum qmi_wwan_quirks {
+ QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
++ QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
+ };
+
+ struct qmimux_hdr {
+@@ -845,6 +846,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
+ .data = QMI_WWAN_QUIRK_DTR,
+ };
+
++static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
++ .description = "WWAN/QMI device",
++ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
++ .bind = qmi_wwan_bind,
++ .unbind = qmi_wwan_unbind,
++ .manage_power = qmi_wwan_manage_power,
++ .rx_fixup = qmi_wwan_rx_fixup,
++ .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
++};
++
+ #define HUAWEI_VENDOR_ID 0x12D1
+
+ /* map QMI/wwan function by a fixed interface number */
+@@ -865,6 +876,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
+ #define QMI_GOBI_DEVICE(vend, prod) \
+ QMI_FIXED_INTF(vend, prod, 0)
+
++/* Quectel does not use fixed interface numbers on at least some of their
++ * devices. We need to check the number of endpoints to ensure that we bind to
++ * the correct interface.
++ */
++#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
++ USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
++ USB_SUBCLASS_VENDOR_SPEC, 0xff), \
++ .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
++
+ static const struct usb_device_id products[] = {
+ /* 1. CDC ECM like devices match on the control interface */
+ { /* Huawei E392, E398 and possibly others sharing both device id and more... */
+@@ -969,20 +989,9 @@ static const struct usb_device_id products[] = {
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+- { /* Quectel EP06/EG06/EM06 */
+- USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
+- USB_CLASS_VENDOR_SPEC,
+- USB_SUBCLASS_VENDOR_SPEC,
+- 0xff),
+- .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+- },
+- { /* Quectel EG12/EM12 */
+- USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
+- USB_CLASS_VENDOR_SPEC,
+- USB_SUBCLASS_VENDOR_SPEC,
+- 0xff),
+- .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+- },
++ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
++ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
++ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+
+ /* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
+@@ -1283,7 +1292,6 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
+ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
+ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
+- {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
+@@ -1363,27 +1371,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
+ return false;
+ }
+
+-static bool quectel_diag_detected(struct usb_interface *intf)
+-{
+- struct usb_device *dev = interface_to_usbdev(intf);
+- struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+- u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
+- u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
+-
+- if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
+- return false;
+-
+- if (id_product == 0x0306 || id_product == 0x0512)
+- return true;
+- else
+- return false;
+-}
+-
+ static int qmi_wwan_probe(struct usb_interface *intf,
+ const struct usb_device_id *prod)
+ {
+ struct usb_device_id *id = (struct usb_device_id *)prod;
+ struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
++ const struct driver_info *info;
+
+ /* Workaround to enable dynamic IDs. This disables usbnet
+ * blacklisting functionality. Which, if required, can be
+@@ -1417,10 +1410,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ * we need to match on class/subclass/protocol. These values are
+ * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+ * different. Ignore the current interface if the number of endpoints
+- * the number for the diag interface (two).
++ * equals the number for the diag interface (two).
+ */
+- if (quectel_diag_detected(intf))
+- return -ENODEV;
++ info = (void *)&id->driver_info;
++
++ if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
++ if (desc->bNumEndpoints == 2)
++ return -ENODEV;
++ }
+
+ return usbnet_probe(intf, id);
+ }
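The DYNCFG rework above replaces a hardcoded VID/PID helper with a quirk bit carried in driver_info: entries flagged QMI_WWAN_QUIRK_QUECTEL_DYNCFG match on class/subclass/protocol, and probe then rejects any such interface with exactly two endpoints, the diagnostic function's signature. A schematic of the probe-side filter, with the types reduced to a bare minimum rather than the real usbnet structures:

#include <stdio.h>

enum { QUIRK_DTR = 1 << 0, QUIRK_QUECTEL_DYNCFG = 1 << 1 };

struct usb_id { unsigned short vid, pid; unsigned long driver_info; };

static const struct usb_id table[] = {
	{ 0x2c7c, 0x0125, QUIRK_DTR | QUIRK_QUECTEL_DYNCFG },	/* EC25 */
	{ 0x2c7c, 0x0306, QUIRK_DTR | QUIRK_QUECTEL_DYNCFG },	/* EP06 */
	{ 0x1e0e, 0x9001, QUIRK_DTR },	/* fixed-interface device */
};

/* QMI and diag functions on DYNCFG modems share class/subclass/
 * protocol; only the endpoint count (two for diag) tells them apart.
 * Returns 0 to reject, as the probe hook does with -ENODEV.
 */
static int accept_interface(const struct usb_id *id, int num_endpoints)
{
	if ((id->driver_info & QUIRK_QUECTEL_DYNCFG) && num_endpoints == 2)
		return 0;
	return 1;
}

int main(void)
{
	printf("EP06 diag (2 eps): %d\n", accept_interface(&table[1], 2));
	printf("EP06 QMI  (3 eps): %d\n", accept_interface(&table[1], 3));
	return 0;
}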
+diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
+index a1e226652b4a..692730415d78 100644
+--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
++++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
+@@ -1274,7 +1274,12 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ params->wait);
+
+ out:
++ /* when the sent packet was not acked by receiver (ACK=0), rc will
++ * be -EAGAIN. In this case this function needs to return success,
++ * the ACK=0 will be reflected in tx_status.
++ */
+ tx_status = (rc == 0);
++ rc = (rc == -EAGAIN) ? 0 : rc;
+ cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
+ tx_status, GFP_KERNEL);
+
+diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
+index bda4a9712f91..63116f4b62c7 100644
+--- a/drivers/net/wireless/ath/wil6210/wmi.c
++++ b/drivers/net/wireless/ath/wil6210/wmi.c
+@@ -3502,8 +3502,9 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
+- wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status);
+- rc = -EINVAL;
++ wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
++ evt.evt.status);
++ rc = -EAGAIN;
+ }
+
+ kfree(cmd);
+@@ -3555,9 +3556,9 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
+- wil_err(wil, "mgmt_tx_ext failed with status %d\n",
+- evt.evt.status);
+- rc = -EINVAL;
++ wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
++ evt.evt.status);
++ rc = -EAGAIN;
+ }
+
+ kfree(cmd);
+diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
+index e99e766a3028..1cabae424839 100644
+--- a/drivers/net/wireless/atmel/at76c50x-usb.c
++++ b/drivers/net/wireless/atmel/at76c50x-usb.c
+@@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void)
+ if (result < 0)
+ printk(KERN_ERR DRIVER_NAME
+ ": usb_register failed (status %d)\n", result);
+-
+- led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
++ else
++ led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
+ return result;
+ }
+
+diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
+index 46408a560814..aedee026c5e2 100644
+--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
++++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
+@@ -1835,7 +1835,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
+ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
+ {
+ struct b43_phy_lp *lpphy = dev->phy.lp;
+- struct lpphy_tx_gains gains, oldgains;
++ struct lpphy_tx_gains oldgains;
+ int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
+
+ lpphy_read_tx_pctl_mode_from_hardware(dev);
+@@ -1849,9 +1849,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
+ lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
+
+ if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
+- lpphy_papd_cal(dev, gains, 0, 1, 30);
++ lpphy_papd_cal(dev, oldgains, 0, 1, 30);
+ else
+- lpphy_papd_cal(dev, gains, 0, 1, 65);
++ lpphy_papd_cal(dev, oldgains, 0, 1, 65);
+
+ if (old_afe_ovr)
+ lpphy_set_tx_gains(dev, oldgains);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index e92f6351bd22..8ee8af4e7ec4 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -5464,6 +5464,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+ conn_info->req_ie =
+ kmemdup(cfg->extra_buf, conn_info->req_ie_len,
+ GFP_KERNEL);
++ if (!conn_info->req_ie)
++ conn_info->req_ie_len = 0;
+ } else {
+ conn_info->req_ie_len = 0;
+ conn_info->req_ie = NULL;
+@@ -5480,6 +5482,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+ conn_info->resp_ie =
+ kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
+ GFP_KERNEL);
++ if (!conn_info->resp_ie)
++ conn_info->resp_ie_len = 0;
+ } else {
+ conn_info->resp_ie_len = 0;
+ conn_info->resp_ie = NULL;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index 4fbe8791f674..24ed19ed116e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -841,17 +841,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
+ bool rtnl_locked)
+ {
+ struct brcmf_if *ifp;
++ int ifidx;
+
+ ifp = drvr->iflist[bsscfgidx];
+- drvr->iflist[bsscfgidx] = NULL;
+ if (!ifp) {
+ bphy_err(drvr, "Null interface, bsscfgidx=%d\n", bsscfgidx);
+ return;
+ }
+ brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
+ ifp->ifidx);
+- if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
+- drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
++ ifidx = ifp->ifidx;
++
+ if (ifp->ndev) {
+ if (bsscfgidx == 0) {
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+@@ -879,6 +879,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
+ brcmf_p2p_ifp_removed(ifp, rtnl_locked);
+ kfree(ifp);
+ }
++
++ drvr->iflist[bsscfgidx] = NULL;
++ if (drvr->if2bss[ifidx] == bsscfgidx)
++ drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
+ }
+
+ void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+index abeb305492e0..d48b8b2d946f 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+@@ -580,24 +580,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
+ return ifidx == *(int *)arg;
+ }
+
+-static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
+- int ifidx)
+-{
+- bool (*matchfn)(struct sk_buff *, void *) = NULL;
+- struct sk_buff *skb;
+- int prec;
+-
+- if (ifidx != -1)
+- matchfn = brcmf_fws_ifidx_match;
+- for (prec = 0; prec < q->num_prec; prec++) {
+- skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+- while (skb) {
+- brcmu_pkt_buf_free_skb(skb);
+- skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+- }
+- }
+-}
+-
+ static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
+ {
+ int i;
+@@ -669,6 +651,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
+ return 0;
+ }
+
++static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
++ int ifidx)
++{
++ bool (*matchfn)(struct sk_buff *, void *) = NULL;
++ struct sk_buff *skb;
++ int prec;
++ u32 hslot;
++
++ if (ifidx != -1)
++ matchfn = brcmf_fws_ifidx_match;
++ for (prec = 0; prec < q->num_prec; prec++) {
++ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
++ while (skb) {
++ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
++ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
++ true);
++ brcmu_pkt_buf_free_skb(skb);
++ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
++ }
++ }
++}
++
+ static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
+ u32 slot_id)
+ {
+@@ -2200,6 +2204,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
+ brcmf_fws_lock(fws);
+ ifp->fws_desc = NULL;
+ brcmf_dbg(TRACE, "deleting %s\n", entry->name);
++ brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx],
++ ifp->ifidx);
+ brcmf_fws_macdesc_deinit(entry);
+ brcmf_fws_cleanup(fws, ifp->ifidx);
+ brcmf_fws_unlock(fws);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+index e9cbfd077710..81e1842f1d8c 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+@@ -160,7 +160,7 @@ struct brcmf_usbdev_info {
+
+ struct usb_device *usbdev;
+ struct device *dev;
+- struct mutex dev_init_lock;
++ struct completion dev_init_done;
+
+ int ctl_in_pipe, ctl_out_pipe;
+ struct urb *ctl_urb; /* URB for control endpoint */
+@@ -682,12 +682,18 @@ static int brcmf_usb_up(struct device *dev)
+
+ static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
+ {
++ int i;
++
+ if (devinfo->ctl_urb)
+ usb_kill_urb(devinfo->ctl_urb);
+ if (devinfo->bulk_urb)
+ usb_kill_urb(devinfo->bulk_urb);
+- brcmf_usb_free_q(&devinfo->tx_postq, true);
+- brcmf_usb_free_q(&devinfo->rx_postq, true);
++ if (devinfo->tx_reqs)
++ for (i = 0; i < devinfo->bus_pub.ntxq; i++)
++ usb_kill_urb(devinfo->tx_reqs[i].urb);
++ if (devinfo->rx_reqs)
++ for (i = 0; i < devinfo->bus_pub.nrxq; i++)
++ usb_kill_urb(devinfo->rx_reqs[i].urb);
+ }
+
+ static void brcmf_usb_down(struct device *dev)
+@@ -1193,11 +1199,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
+ if (ret)
+ goto error;
+
+- mutex_unlock(&devinfo->dev_init_lock);
++ complete(&devinfo->dev_init_done);
+ return;
+ error:
+ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
+- mutex_unlock(&devinfo->dev_init_lock);
++ complete(&devinfo->dev_init_done);
+ device_release_driver(dev);
+ }
+
+@@ -1265,7 +1271,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
+ if (ret)
+ goto fail;
+ /* we are done */
+- mutex_unlock(&devinfo->dev_init_lock);
++ complete(&devinfo->dev_init_done);
+ return 0;
+ }
+ bus->chip = bus_pub->devid;
+@@ -1325,11 +1331,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+
+ devinfo->usbdev = usb;
+ devinfo->dev = &usb->dev;
+- /* Take an init lock, to protect for disconnect while still loading.
++ /* Init completion, to protect for disconnect while still loading.
+ * Necessary because of the asynchronous firmware load construction
+ */
+- mutex_init(&devinfo->dev_init_lock);
+- mutex_lock(&devinfo->dev_init_lock);
++ init_completion(&devinfo->dev_init_done);
+
+ usb_set_intfdata(intf, devinfo);
+
+@@ -1407,7 +1412,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ return 0;
+
+ fail:
+- mutex_unlock(&devinfo->dev_init_lock);
++ complete(&devinfo->dev_init_done);
+ kfree(devinfo);
+ usb_set_intfdata(intf, NULL);
+ return ret;
+@@ -1422,7 +1427,7 @@ brcmf_usb_disconnect(struct usb_interface *intf)
+ devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
+
+ if (devinfo) {
+- mutex_lock(&devinfo->dev_init_lock);
++ wait_for_completion(&devinfo->dev_init_done);
+ /* Make sure that devinfo still exists. Firmware probe routines
+ * may have released the device and cleared the intfdata.
+ */
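The usb.c change swaps a mutex held across the asynchronous firmware load for a completion. A mutex must be released by the task that locked it, but here probe takes the lock and the firmware callback or disconnect path would release it from another context; a completion is a one-shot event that any context may signal, which is exactly what this synchronization needs. A self-contained POSIX analogue, with a hand-rolled completion built from a mutex/condvar pair and illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* a one-shot "completion": any thread may signal it */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
};

static struct completion dev_init_done = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = true;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static void *probe_phase2(void *arg)
{
        /* ... asynchronous firmware load finishes here ... */
        complete(&dev_init_done);
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, probe_phase2, NULL);
        wait_for_completion(&dev_init_done);  /* disconnect path */
        pthread_join(t, NULL);
        puts("init done, safe to tear down");
        return 0;
}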
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+index 8eff2753abad..d493021f6031 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+@@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
+ struct brcmf_if *ifp;
+ const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
+ struct sk_buff *reply;
+- int ret, payload, ret_len;
++ unsigned int payload, ret_len;
+ void *dcmd_buf = NULL, *wr_pointer;
+ u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
++ int ret;
+
+ if (len < sizeof(*cmdhdr)) {
+ brcmf_err("vendor command too short: %d\n", len);
+@@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
+ brcmf_err("oversize return buffer %d\n", ret_len);
+ ret_len = BRCMF_DCMD_MAXLEN;
+ }
+- payload = max(ret_len, len) + 1;
++ payload = max_t(unsigned int, ret_len, len) + 1;
+ dcmd_buf = vzalloc(payload);
+ if (NULL == dcmd_buf)
+ return -ENOMEM;
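The vendor.c fix makes payload/ret_len unsigned and switches max() to max_t(unsigned int, ...), since comparing a signed length against an unsigned one invites the usual-arithmetic-conversion trap. A short demonstration of the pitfall, with illustrative values:

#include <stdio.h>

int main(void)
{
        int len = -1;             /* signed length, e.g. an error value */
        unsigned int ret_len = 16;

        /* mixed signed/unsigned comparison: -1 converts to UINT_MAX */
        if (len > ret_len)
                puts("-1 compared as a huge unsigned value");

        /* max_t(unsigned int, a, b) pins both operands to one type,
         * making the conversion explicit at the call site */
        unsigned int payload =
                (ret_len > (unsigned int)8 ? ret_len : 8) + 1;
        printf("payload = %u\n", payload);
        return 0;
}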
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 98d123dd7177..eb452e9dce05 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -2277,7 +2277,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+ const u8 *maddr = _maddr;
+ struct iwl_trans_txq_scd_cfg cfg = {
+- .fifo = IWL_MVM_TX_FIFO_MCAST,
++ .fifo = vif->type == NL80211_IFTYPE_AP ?
++ IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
+ .sta_id = msta->sta_id,
+ .tid = 0,
+ .aggregate = false,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 8d4f0628622b..12f02aaf923e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1434,10 +1434,15 @@ out_err:
+ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+- struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
++ struct iwl_rxq *rxq;
+ u32 r, i, count = 0;
+ bool emergency = false;
+
++ if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
++ return;
++
++ rxq = &trans_pcie->rxq[queue];
++
+ restart:
+ spin_lock(&rxq->lock);
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index c46f0a54a0c7..e582d9b3e50c 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -4082,16 +4082,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+
+ if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
+ dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
++ kfree(hostcmd);
+ return -EFAULT;
+ }
+
+ /* process hostcmd response*/
+ skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
+- if (!skb)
++ if (!skb) {
++ kfree(hostcmd);
+ return -ENOMEM;
++ }
+ err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
+ hostcmd->len, hostcmd->cmd);
+ if (err) {
++ kfree(hostcmd);
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
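The mwifiex fix plugs a hostcmd memory leak by adding kfree() to each early return. Kernel code often centralizes such cleanup behind goto labels instead of repeating the free at every exit; a runnable sketch of that alternative shape, hypothetical function and buffer names:

#include <stdlib.h>
#include <string.h>

static int handle_cmd(const void *data, size_t len)
{
        int err = -1;
        char *hostcmd = malloc(len);
        char *reply;

        if (!hostcmd)
                return -1;
        memcpy(hostcmd, data, len);

        reply = malloc(len);
        if (!reply)
                goto free_cmd;          /* one exit frees hostcmd */

        /* ... build and send the reply ... */
        err = 0;

        free(reply);
free_cmd:
        free(hostcmd);
        return err;
}

int main(void)
{
        char buf[8] = "hello";
        return handle_cmd(buf, sizeof(buf));
}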
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
+index bfe84e55df77..f1522fb1c1e8 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
+@@ -531,5 +531,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
+ rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
+ rx_rate - 1 : rx_rate;
+
++ if (rate_index >= MWIFIEX_MAX_AC_RX_RATES)
++ rate_index = MWIFIEX_MAX_AC_RX_RATES - 1;
++
+ return rate_index;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 76629b98c78d..8c7ee8302fb8 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -271,10 +271,11 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+ return 0;
+ }
+
+-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
++int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+ {
++ struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue_entry e;
+ struct mt76_txwi_cache *t;
+ struct mt76_queue_buf buf[32];
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index bcbfd3c4a44b..eb882b2cbc0e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -156,7 +156,7 @@ struct mt76_queue_ops {
+ struct mt76_queue_buf *buf, int nbufs, u32 info,
+ struct sk_buff *skb, void *txwi);
+
+- int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
++ int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
+@@ -645,7 +645,7 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
+ return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
+ }
+
+-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
++int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+index 4dcb465095d1..99c0a3ba37cb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+@@ -23,7 +23,7 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ if (!skb)
+ return;
+
+- mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb,
++ mt76_dma_tx_queue_skb(&dev->mt76, MT_TXQ_BEACON, skb,
+ &mvif->sta.wcid, NULL);
+
+ spin_lock_bh(&dev->ps_lock);
+@@ -118,8 +118,8 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+
+- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid,
+- NULL);
++ mt76_dma_tx_queue_skb(&dev->mt76, MT_TXQ_CAB, skb,
++ &mvif->sta.wcid, NULL);
+ }
+ mt76_queue_kick(dev, q);
+ spin_unlock_bh(&q->lock);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+index daaed1220147..952fe19cba9b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+@@ -146,8 +146,8 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+
+- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
+- NULL);
++ mt76_dma_tx_queue_skb(&dev->mt76, MT_TXQ_PSD, skb,
++ &mvif->group_wcid, NULL);
+ }
+ spin_unlock_bh(&q->lock);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index 2585df512335..0c1036da9a92 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -286,7 +286,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ q = &dev->q_tx[qid];
+
+ spin_lock_bh(&q->lock);
+- dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
++ dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
+ dev->queue_ops->kick(dev, q);
+
+ if (q->queued > q->ndesc - 8 && !q->stopped) {
+@@ -327,7 +327,6 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ {
+ struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+
+ info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
+ if (last)
+@@ -335,7 +334,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ mt76_skb_set_moredata(skb, !last);
+- dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
++ dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
+ }
+
+ void
+@@ -390,6 +389,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+ struct mt76_txq *mtxq, bool *empty)
+ {
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
++ enum mt76_txq_id qid = mt76_txq_get_qid(txq);
+ struct ieee80211_tx_info *info;
+ struct mt76_wcid *wcid = mtxq->wcid;
+ struct sk_buff *skb;
+@@ -423,7 +423,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+ if (ampdu)
+ mt76_check_agg_ssn(mtxq, skb);
+
+- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
++ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
+
+ if (idx < 0)
+ return idx;
+@@ -458,7 +458,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+ if (cur_ampdu)
+ mt76_check_agg_ssn(mtxq, skb);
+
+- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
++ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
+ txq->sta);
+ if (idx < 0)
+ return idx;
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
+index 4c1abd492405..b1551419338f 100644
+--- a/drivers/net/wireless/mediatek/mt76/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/usb.c
+@@ -726,10 +726,11 @@ mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
+ }
+
+ static int
+-mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
++mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+ {
++ struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76u_buf *buf;
+ u16 idx = q->tail;
+ int err;
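The mt76 series changes tx_queue_skb() to take an enum mt76_txq_id instead of a struct mt76_queue pointer, so both the DMA and USB back ends derive the ring from dev->q_tx[qid] themselves and callers (beacon, power-save, and burst paths) name the logical queue. A toy illustration of the indexed-API shape, names invented for the sketch:

#include <stdio.h>

struct queue { int queued; };

enum txq_id { TXQ_BE, TXQ_PSD, TXQ_BEACON, TXQ_MAX };

struct dev { struct queue q_tx[TXQ_MAX]; };

static int tx_queue_skb(struct dev *dev, enum txq_id qid)
{
        /* the callee resolves the ring from the logical id itself */
        struct queue *q = &dev->q_tx[qid];
        return ++q->queued;
}

int main(void)
{
        static struct dev dev;
        tx_queue_skb(&dev, TXQ_PSD);
        printf("PSD queued: %d\n", dev.q_tx[TXQ_PSD].queued);
        return 0;
}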
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
+index 217d2a7a43c7..ac746c322554 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.c
++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
+@@ -448,6 +448,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+ /* <2> work queue */
+ rtlpriv->works.hw = hw;
+ rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
++ if (unlikely(!rtlpriv->works.rtl_wq)) {
++ pr_err("Failed to allocate work queue\n");
++ return;
++ }
++
+ INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
+ (void *)rtl_watchdog_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+index 203e7b574e84..e2e0bfbc24fe 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+@@ -600,6 +600,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ u1rsvdpageloc, 3);
+
+ skb = dev_alloc_skb(totalpacketlen);
++ if (!skb)
++ return;
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
+
+ rtstatus = rtl_cmd_send_packet(hw, skb);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+index 18c76990a089..86b1b88cc4ed 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+@@ -623,6 +623,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+ u1rsvdpageloc, 3);
+
+ skb = dev_alloc_skb(totalpacketlen);
++ if (!skb)
++ return;
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
+
+ if (cmd_send_packet)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+index 7c5b54b71a92..67305ce915ec 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+@@ -744,6 +744,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ u1rsvdpageloc, 3);
+
+ skb = dev_alloc_skb(totalpacketlen);
++ if (!skb)
++ return;
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
+
+ rtstatus = rtl_cmd_send_packet(hw, skb);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+index be451a6f7dbe..33481232fad0 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+@@ -448,6 +448,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ u1rsvdpageloc, 3);
+
+ skb = dev_alloc_skb(totalpacketlen);
++ if (!skb)
++ return;
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
+
+ rtstatus = rtl_cmd_send_packet(hw, skb);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+index 4d7fa27f55ca..aa56058af56e 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+@@ -562,6 +562,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+ u1rsvdpageloc, sizeof(u1rsvdpageloc));
+
+ skb = dev_alloc_skb(totalpacketlen);
++ if (!skb)
++ return;
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
+
+ rtstatus = rtl_cmd_send_packet(hw, skb);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+index dc0eb692088f..fe32d397d287 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+@@ -1623,6 +1623,8 @@ out:
+ &reserved_page_packet_8812[0], totalpacketlen);
+
+ skb = dev_alloc_skb(totalpacketlen);
++ if (!skb)
++ return;
+ skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
+
+ rtstatus = rtl_cmd_send_packet(hw, skb);
+@@ -1759,6 +1761,8 @@ out:
+ &reserved_page_packet_8821[0], totalpacketlen);
+
+ skb = dev_alloc_skb(totalpacketlen);
++ if (!skb)
++ return;
+ skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
+
+ rtstatus = rtl_cmd_send_packet(hw, skb);
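Six rtlwifi variants above get the same one-line hardening: dev_alloc_skb() can return NULL under memory pressure, and skb_put_data() on a NULL skb would oops, so each set_fw_rsvdpagepkt() now bails out on allocation failure. The shape of the guard, in a trivial userspace stand-in:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void send_rsvd_page(const void *pkt, size_t len)
{
        void *skb = malloc(len);        /* stand-in for dev_alloc_skb() */
        if (!skb)                       /* allocation can fail: bail out */
                return;
        memcpy(skb, pkt, len);          /* stand-in for skb_put_data() */
        /* ... hand the buffer to the hardware ... */
        free(skb);
}

int main(void)
{
        char page[64] = { 0 };
        send_rsvd_page(page, sizeof(page));
        puts("ok");
        return 0;
}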
+diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+index 831046e760f8..49df3bb08d41 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+@@ -188,27 +188,27 @@ bool rsi_is_cipher_wep(struct rsi_common *common)
+ * @adapter: Pointer to the adapter structure.
+ * @band: Operating band to be set.
+ *
+- * Return: None.
++ * Return: int - 0 on success, negative error on failure.
+ */
+-static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
++static int rsi_register_rates_channels(struct rsi_hw *adapter, int band)
+ {
+ struct ieee80211_supported_band *sbands = &adapter->sbands[band];
+ void *channels = NULL;
+
+ if (band == NL80211_BAND_2GHZ) {
+- channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
+- memcpy(channels,
+- rsi_2ghz_channels,
+- sizeof(rsi_2ghz_channels));
++ channels = kmemdup(rsi_2ghz_channels, sizeof(rsi_2ghz_channels),
++ GFP_KERNEL);
++ if (!channels)
++ return -ENOMEM;
+ sbands->band = NL80211_BAND_2GHZ;
+ sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
+ sbands->bitrates = rsi_rates;
+ sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
+ } else {
+- channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
+- memcpy(channels,
+- rsi_5ghz_channels,
+- sizeof(rsi_5ghz_channels));
++ channels = kmemdup(rsi_5ghz_channels, sizeof(rsi_5ghz_channels),
++ GFP_KERNEL);
++ if (!channels)
++ return -ENOMEM;
+ sbands->band = NL80211_BAND_5GHZ;
+ sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
+ sbands->bitrates = &rsi_rates[4];
+@@ -227,6 +227,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
+ sbands->ht_cap.mcs.rx_mask[0] = 0xff;
+ sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ /* sbands->ht_cap.mcs.rx_highest = 0x82; */
++ return 0;
+ }
+
+ static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
+@@ -2064,11 +2065,16 @@ int rsi_mac80211_attach(struct rsi_common *common)
+ wiphy->available_antennas_rx = 1;
+ wiphy->available_antennas_tx = 1;
+
+- rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
++ status = rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
++ if (status)
++ return status;
+ wiphy->bands[NL80211_BAND_2GHZ] =
+ &adapter->sbands[NL80211_BAND_2GHZ];
+ if (common->num_supp_bands > 1) {
+- rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
++ status = rsi_register_rates_channels(adapter,
++ NL80211_BAND_5GHZ);
++ if (status)
++ return status;
+ wiphy->bands[NL80211_BAND_5GHZ] =
+ &adapter->sbands[NL80211_BAND_5GHZ];
+ }
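The rsi change replaces an unchecked kmalloc()+memcpy() with kmemdup() and lets rsi_register_rates_channels() return an error that the attach path propagates; previously an allocation failure went on to memcpy() into a NULL pointer. A userspace sketch of the same pattern, with a local memdup() standing in for kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userspace stand-in for kmemdup(): allocate and copy in one step */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);
        if (p)
                memcpy(p, src, len);
        return p;
}

static const int channels_2ghz[] = { 1, 2, 3, 4 };

static int register_band(const int **out)
{
        int *channels = memdup(channels_2ghz, sizeof(channels_2ghz));
        if (!channels)
                return -1;      /* was silently ignored before the fix */
        *out = channels;
        return 0;
}

int main(void)
{
        const int *ch;
        if (register_band(&ch))
                return 1;
        printf("first channel: %d\n", ch[0]);
        return 0;
}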
+diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
+index 90dc979f260b..c1608f0bf6d0 100644
+--- a/drivers/net/wireless/st/cw1200/main.c
++++ b/drivers/net/wireless/st/cw1200/main.c
+@@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
+ mutex_init(&priv->wsm_cmd_mux);
+ mutex_init(&priv->conf_mutex);
+ priv->workqueue = create_singlethread_workqueue("cw1200_wq");
++ if (!priv->workqueue) {
++ ieee80211_free_hw(hw);
++ return NULL;
++ }
++
+ sema_init(&priv->scan.lock, 1);
+ INIT_WORK(&priv->scan.work, cw1200_scan_work);
+ INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 0279eb1da3ef..d9d845077b8b 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -281,20 +281,27 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
+ return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
+ }
+
++/*
++ * Use the 'no check' versions of copy_from_iter_flushcache() and
++ * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
++ * checking, both file offset and device offset, is handled by
++ * dax_iomap_actor()
++ */
+ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+ {
+- return copy_from_iter_flushcache(addr, bytes, i);
++ return _copy_from_iter_flushcache(addr, bytes, i);
+ }
+
+ static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+ {
+- return copy_to_iter_mcsafe(addr, bytes, i);
++ return _copy_to_iter_mcsafe(addr, bytes, i);
+ }
+
+ static const struct dax_operations pmem_dax_ops = {
+ .direct_access = pmem_dax_direct_access,
++ .dax_supported = generic_fsdax_supported,
+ .copy_from_iter = pmem_copy_from_iter,
+ .copy_to_iter = pmem_copy_to_iter,
+ };
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 2c43e12b70af..8782d86a8ca3 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1591,6 +1591,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
+ sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ unsigned short bs = 1 << ns->lba_shift;
+
++ if (ns->lba_shift > PAGE_SHIFT) {
++ /* unsupported block size, set capacity to 0 later */
++ bs = (1 << 9);
++ }
+ blk_mq_freeze_queue(disk->queue);
+ blk_integrity_unregister(disk);
+
+@@ -1601,7 +1605,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
+ if (ns->ms && !ns->ext &&
+ (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+ nvme_init_integrity(disk, ns->ms, ns->pi_type);
+- if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
++ if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
++ ns->lba_shift > PAGE_SHIFT)
+ capacity = 0;
+
+ set_capacity(disk, capacity);
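The nvme fix handles namespaces formatted with an LBA size larger than the kernel page size, which the block layer cannot serve: the block size falls back to 512 and the capacity is forced to 0, so the disk appears but refuses I/O instead of misbehaving. (Note also that 1 << lba_shift would truncate in the unsigned short bs once lba_shift reaches 16.) A small numeric sketch, illustrative values:

#include <stdio.h>

int main(void)
{
        unsigned long long nsze = 1000;  /* namespace size in LBAs */
        int lba_shift = 16;              /* 64 KiB formatted LBA */
        int page_shift = 12;             /* 4 KiB kernel pages */

        unsigned short bs = 1 << 9;
        unsigned long long capacity = nsze << (lba_shift - 9);

        if (lba_shift <= page_shift)
                bs = 1 << lba_shift;     /* supported block size */
        else
                capacity = 0;            /* unsupported: hide the disk */

        printf("bs=%u capacity=%llu sectors\n", bs, capacity);
        return 0;
}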
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 11a5ecae78c8..e1824c2e0a1c 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -914,8 +914,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ {
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
+- &ctrl->ctrl);
++ if (ctrl->ctrl.admin_tagset)
++ blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
++ nvme_cancel_request, &ctrl->ctrl);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_destroy_admin_queue(ctrl, remove);
+ }
+@@ -926,8 +927,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+- blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
+- &ctrl->ctrl);
++ if (ctrl->ctrl.tagset)
++ blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
++ nvme_cancel_request, &ctrl->ctrl);
+ if (remove)
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_rdma_destroy_io_queues(ctrl, remove);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 68c49dd67210..aae5374d2b93 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1710,7 +1710,9 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
+ {
+ blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_tcp_stop_queue(ctrl, 0);
+- blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
++ if (ctrl->admin_tagset)
++ blk_mq_tagset_busy_iter(ctrl->admin_tagset,
++ nvme_cancel_request, ctrl);
+ blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_tcp_destroy_admin_queue(ctrl, remove);
+ }
+@@ -1722,7 +1724,9 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+ return;
+ nvme_stop_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+- blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
++ if (ctrl->tagset)
++ blk_mq_tagset_busy_iter(ctrl->tagset,
++ nvme_cancel_request, ctrl);
+ if (remove)
+ nvme_start_queues(ctrl);
+ nvme_tcp_destroy_io_queues(ctrl, remove);
+diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
+index bfd03e023308..8f8606b9bc9e 100644
+--- a/drivers/perf/arm-cci.c
++++ b/drivers/perf/arm-cci.c
+@@ -1684,21 +1684,24 @@ static int cci_pmu_probe(struct platform_device *pdev)
+ raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
+ mutex_init(&cci_pmu->reserve_mutex);
+ atomic_set(&cci_pmu->active_events, 0);
+- cci_pmu->cpu = get_cpu();
+-
+- ret = cci_pmu_init(cci_pmu, pdev);
+- if (ret) {
+- put_cpu();
+- return ret;
+- }
+
++ cci_pmu->cpu = raw_smp_processor_id();
++ g_cci_pmu = cci_pmu;
+ cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+ "perf/arm/cci:online", NULL,
+ cci_pmu_offline_cpu);
+- put_cpu();
+- g_cci_pmu = cci_pmu;
++
++ ret = cci_pmu_init(cci_pmu, pdev);
++ if (ret)
++ goto error_pmu_init;
++
+ pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
+ return 0;
++
++error_pmu_init:
++ cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
++ g_cci_pmu = NULL;
++ return ret;
+ }
+
+ static int cci_pmu_remove(struct platform_device *pdev)
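The arm-cci probe reordering registers the CPU-hotplug callback before cci_pmu_init() and unwinds it (and g_cci_pmu) if init fails; it also drops get_cpu()/put_cpu() in favour of raw_smp_processor_id(), since nothing here depends on preemption staying disabled. The register-then-unwind shape, reduced to stub steps:

#include <stdio.h>

static int register_hotplug_callback(void) { return 0; }
static void unregister_hotplug_callback(void) { puts("unwound step 1"); }
static int pmu_init(void) { return -1; }   /* simulate failure */

static int probe(void)
{
        int ret;

        ret = register_hotplug_callback();   /* step 1 */
        if (ret)
                return ret;

        ret = pmu_init();                    /* step 2 */
        if (ret)
                goto err_unregister;         /* undo step 1 */

        return 0;

err_unregister:
        unregister_hotplug_callback();
        return ret;
}

int main(void)
{
        printf("probe -> %d\n", probe());
        return 0;
}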
+diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
+index 4bbd9ede38c8..cc5af961778d 100644
+--- a/drivers/phy/allwinner/phy-sun4i-usb.c
++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
+@@ -554,6 +554,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
+ struct sun4i_usb_phy_data *data =
+ container_of(work, struct sun4i_usb_phy_data, detect.work);
+ struct phy *phy0 = data->phys[0].phy;
++ struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
+ bool force_session_end, id_notify = false, vbus_notify = false;
+ int id_det, vbus_det;
+
+@@ -610,6 +611,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
+ mutex_unlock(&phy0->mutex);
+ }
+
++ /* Enable PHY0 passby for host mode only. */
++ sun4i_usb_phy_passby(phy, !id_det);
++
+ /* Re-route PHY0 if necessary */
+ if (data->cfg->phy0_dual_route)
+ sun4i_usb_phy0_reroute(data, id_det);
+diff --git a/drivers/phy/motorola/Kconfig b/drivers/phy/motorola/Kconfig
+index 82651524ffb9..718f8729701d 100644
+--- a/drivers/phy/motorola/Kconfig
++++ b/drivers/phy/motorola/Kconfig
+@@ -13,7 +13,7 @@ config PHY_CPCAP_USB
+
+ config PHY_MAPPHONE_MDM6600
+ tristate "Motorola Mapphone MDM6600 modem USB PHY driver"
+- depends on OF && USB_SUPPORT
++ depends on OF && USB_SUPPORT && GPIOLIB
+ select GENERIC_PHY
+ help
+ Enable this for MDM6600 USB modem to work on Motorola phones
+diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
+index 103efc456a12..022ac16f626c 100644
+--- a/drivers/phy/ti/Kconfig
++++ b/drivers/phy/ti/Kconfig
+@@ -37,7 +37,7 @@ config OMAP_USB2
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+- select OMAP_CONTROL_PHY if ARCH_OMAP2PLUS
++ select OMAP_CONTROL_PHY if ARCH_OMAP2PLUS || COMPILE_TEST
+ help
+ Enable this to support the transceiver that is part of SOC. This
+ driver takes care of all the PHY functionality apart from comparator.
+diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
+index aa5f949ef219..5b0678f310e5 100644
+--- a/drivers/pinctrl/pinctrl-pistachio.c
++++ b/drivers/pinctrl/pinctrl-pistachio.c
+@@ -1367,6 +1367,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
+ if (!of_find_property(child, "gpio-controller", NULL)) {
+ dev_err(pctl->dev,
+ "No gpio-controller property for bank %u\n", i);
++ of_node_put(child);
+ ret = -ENODEV;
+ goto err;
+ }
+@@ -1374,6 +1375,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
+ irq = irq_of_parse_and_map(child, 0);
+ if (irq < 0) {
+ dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
++ of_node_put(child);
+ ret = irq;
+ goto err;
+ }
+diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
+index e66af93f2cbf..195b442a2343 100644
+--- a/drivers/pinctrl/pinctrl-st.c
++++ b/drivers/pinctrl/pinctrl-st.c
+@@ -1170,7 +1170,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
+ struct property *pp;
+ struct st_pinconf *conf;
+ struct device_node *pins;
+- int i = 0, npins = 0, nr_props;
++ int i = 0, npins = 0, nr_props, ret = 0;
+
+ pins = of_get_child_by_name(np, "st,pins");
+ if (!pins)
+@@ -1185,7 +1185,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
+ npins++;
+ } else {
+ pr_warn("Invalid st,pins in %pOFn node\n", np);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out_put_node;
+ }
+ }
+
+@@ -1195,8 +1196,10 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
+ grp->pin_conf = devm_kcalloc(info->dev,
+ npins, sizeof(*conf), GFP_KERNEL);
+
+- if (!grp->pins || !grp->pin_conf)
+- return -ENOMEM;
++ if (!grp->pins || !grp->pin_conf) {
++ ret = -ENOMEM;
++ goto out_put_node;
++ }
+
+ /* <bank offset mux direction rt_type rt_delay rt_clk> */
+ for_each_property_of_node(pins, pp) {
+@@ -1229,9 +1232,11 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
+ }
+ i++;
+ }
++
++out_put_node:
+ of_node_put(pins);
+
+- return 0;
++ return ret;
+ }
+
+ static int st_pctl_parse_functions(struct device_node *np,
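Both pinctrl fixes above address device-node reference leaks: nodes handed out by for_each_child_of_node() and of_get_child_by_name() carry a reference that early error returns must drop with of_node_put(), which pinctrl-st routes through a single out_put_node label. A refcount-only sketch of the rule, toy types:

#include <stdio.h>

struct node { int refcount; };

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n) { n->refcount--; }

static int parse(struct node *child, int bad)
{
        node_get(child);                /* iterator-style reference */
        if (bad) {
                node_put(child);        /* the fix: drop it on error too */
                return -1;
        }
        /* ... normal parsing ... */
        node_put(child);
        return 0;
}

int main(void)
{
        struct node n = { 0 };
        parse(&n, 1);
        printf("refcount after error path: %d\n", n.refcount);
        return 0;
}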
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+index 44c6b753f692..85ddf49a5188 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+@@ -71,6 +71,7 @@ s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
+ }
+
+ clk_base = of_iomap(np, 0);
++ of_node_put(np);
+ if (!clk_base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
+ return ERR_PTR(-EINVAL);
+diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
+index caa44dd2880a..3cb69309912b 100644
+--- a/drivers/pinctrl/zte/pinctrl-zx.c
++++ b/drivers/pinctrl/zte/pinctrl-zx.c
+@@ -411,6 +411,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
+ }
+
+ zpctl->aux_base = of_iomap(np, 0);
++ of_node_put(np);
+ if (!zpctl->aux_base)
+ return -ENOMEM;
+
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 968dcd9d7a07..35a7d020afec 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -2256,6 +2256,7 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
+ if (pin->gpiod == rdev->ena_pin->gpiod) {
+ if (pin->request_count <= 1) {
+ pin->request_count = 0;
++ gpiod_put(pin->gpiod);
+ list_del(&pin->list);
+ kfree(pin);
+ rdev->ena_pin = NULL;
+@@ -5061,10 +5062,11 @@ void regulator_unregister(struct regulator_dev *rdev)
+ regulator_put(rdev->supply);
+ }
+
++ flush_work(&rdev->disable_work.work);
++
+ mutex_lock(&regulator_list_mutex);
+
+ debugfs_remove_recursive(rdev->debugfs);
+- flush_work(&rdev->disable_work.work);
+ WARN_ON(rdev->open_count);
+ regulator_remove_coupling(rdev);
+ unset_regulator_supplies(rdev);
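Two regulator-core fixes: regulator_ena_gpio_free() now gpiod_put()s the enable GPIO it is discarding, and regulator_unregister() flushes the deferred disable work before taking regulator_list_mutex, since a still-pending work item could otherwise race with, or deadlock against, the teardown done under that lock. A pthread analogue of the flush-before-lock ordering, names invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void *disable_work(void *arg)
{
        pthread_mutex_lock(&list_lock);   /* the work needs the lock */
        /* ... deferred disable ... */
        pthread_mutex_unlock(&list_lock);
        return NULL;
}

int main(void)
{
        pthread_t work;
        pthread_create(&work, NULL, disable_work, NULL);

        /* flush (join) the pending work *before* taking the lock;
         * doing it the other way round can deadlock */
        pthread_join(work, NULL);

        pthread_mutex_lock(&list_lock);
        /* ... teardown with no work item left in flight ... */
        pthread_mutex_unlock(&list_lock);
        puts("unregistered cleanly");
        return 0;
}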
+diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
+index 3c6fac793658..3ade4b8d204e 100644
+--- a/drivers/regulator/da9055-regulator.c
++++ b/drivers/regulator/da9055-regulator.c
+@@ -487,8 +487,10 @@ static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data)
+ {
+ struct da9055_regulator *regulator = data;
+
++ regulator_lock(regulator->rdev);
+ regulator_notifier_call_chain(regulator->rdev,
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
++ regulator_unlock(regulator->rdev);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
+index b064d8a19d4c..bab88ddfc509 100644
+--- a/drivers/regulator/da9062-regulator.c
++++ b/drivers/regulator/da9062-regulator.c
+@@ -974,8 +974,10 @@ static irqreturn_t da9062_ldo_lim_event(int irq, void *data)
+ continue;
+
+ if (BIT(regl->info->oc_event.lsb) & bits) {
++ regulator_lock(regl->rdev);
+ regulator_notifier_call_chain(regl->rdev,
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
++ regulator_unlock(regl->rdev);
+ handled = IRQ_HANDLED;
+ }
+ }
+diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
+index 2b0c7a85306a..d7bdb95b7602 100644
+--- a/drivers/regulator/da9063-regulator.c
++++ b/drivers/regulator/da9063-regulator.c
+@@ -615,9 +615,12 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
+ if (regl->info->oc_event.reg != DA9063_REG_STATUS_D)
+ continue;
+
+- if (BIT(regl->info->oc_event.lsb) & bits)
++ if (BIT(regl->info->oc_event.lsb) & bits) {
++ regulator_lock(regl->rdev);
+ regulator_notifier_call_chain(regl->rdev,
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
++ regulator_unlock(regl->rdev);
++ }
+ }
+
+ return IRQ_HANDLED;
+diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
+index 109ee12d4362..4d7fe4819c1c 100644
+--- a/drivers/regulator/da9211-regulator.c
++++ b/drivers/regulator/da9211-regulator.c
+@@ -322,8 +322,10 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
+ goto error_i2c;
+
+ if (reg_val & DA9211_E_OV_CURR_A) {
++ regulator_lock(chip->rdev[0]);
+ regulator_notifier_call_chain(chip->rdev[0],
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
++ regulator_unlock(chip->rdev[0]);
+
+ err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
+ DA9211_E_OV_CURR_A);
+@@ -334,8 +336,10 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
+ }
+
+ if (reg_val & DA9211_E_OV_CURR_B) {
++ regulator_lock(chip->rdev[1]);
+ regulator_notifier_call_chain(chip->rdev[1],
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
++ regulator_unlock(chip->rdev[1]);
+
+ err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
+ DA9211_E_OV_CURR_B);
+diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
+index 14fd38807134..2e16a6ab491d 100644
+--- a/drivers/regulator/lp8755.c
++++ b/drivers/regulator/lp8755.c
+@@ -372,10 +372,13 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+ if ((flag0 & (0x4 << icnt))
+ && (pchip->irqmask & (0x04 << icnt))
+- && (pchip->rdev[icnt] != NULL))
++ && (pchip->rdev[icnt] != NULL)) {
++ regulator_lock(pchip->rdev[icnt]);
+ regulator_notifier_call_chain(pchip->rdev[icnt],
+ LP8755_EVENT_PWR_FAULT,
+ NULL);
++ regulator_unlock(pchip->rdev[icnt]);
++ }
+
+ /* read flag1 register */
+ ret = lp8755_read(pchip, 0x0E, &flag1);
+@@ -389,18 +392,24 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
+ /* send OCP event to all regulator devices */
+ if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+- if (pchip->rdev[icnt] != NULL)
++ if (pchip->rdev[icnt] != NULL) {
++ regulator_lock(pchip->rdev[icnt]);
+ regulator_notifier_call_chain(pchip->rdev[icnt],
+ LP8755_EVENT_OCP,
+ NULL);
++ regulator_unlock(pchip->rdev[icnt]);
++ }
+
+ /* send OVP event to all regulator devices */
+ if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+- if (pchip->rdev[icnt] != NULL)
++ if (pchip->rdev[icnt] != NULL) {
++ regulator_lock(pchip->rdev[icnt]);
+ regulator_notifier_call_chain(pchip->rdev[icnt],
+ LP8755_EVENT_OVP,
+ NULL);
++ regulator_unlock(pchip->rdev[icnt]);
++ }
+ return IRQ_HANDLED;
+
+ err_i2c:
+diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
+index 63f724f260ef..75089b037b72 100644
+--- a/drivers/regulator/ltc3589.c
++++ b/drivers/regulator/ltc3589.c
+@@ -419,16 +419,22 @@ static irqreturn_t ltc3589_isr(int irq, void *dev_id)
+
+ if (irqstat & LTC3589_IRQSTAT_THERMAL_WARN) {
+ event = REGULATOR_EVENT_OVER_TEMP;
+- for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
++ for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
++ regulator_lock(ltc3589->regulators[i]);
+ regulator_notifier_call_chain(ltc3589->regulators[i],
+ event, NULL);
++ regulator_unlock(ltc3589->regulators[i]);
++ }
+ }
+
+ if (irqstat & LTC3589_IRQSTAT_UNDERVOLT_WARN) {
+ event = REGULATOR_EVENT_UNDER_VOLTAGE;
+- for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
++ for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
++ regulator_lock(ltc3589->regulators[i]);
+ regulator_notifier_call_chain(ltc3589->regulators[i],
+ event, NULL);
++ regulator_unlock(ltc3589->regulators[i]);
++ }
+ }
+
+ /* Clear warning condition */
+diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
+index e6d66e492b85..4be90c78c720 100644
+--- a/drivers/regulator/ltc3676.c
++++ b/drivers/regulator/ltc3676.c
+@@ -285,17 +285,23 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
+ if (irqstat & LTC3676_IRQSTAT_THERMAL_WARN) {
+ dev_warn(dev, "Over-temperature Warning\n");
+ event = REGULATOR_EVENT_OVER_TEMP;
+- for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
++ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
++ regulator_lock(ltc3676->regulators[i]);
+ regulator_notifier_call_chain(ltc3676->regulators[i],
+ event, NULL);
++ regulator_unlock(ltc3676->regulators[i]);
++ }
+ }
+
+ if (irqstat & LTC3676_IRQSTAT_UNDERVOLT_WARN) {
+ dev_info(dev, "Undervoltage Warning\n");
+ event = REGULATOR_EVENT_UNDER_VOLTAGE;
+- for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
++ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
++ regulator_lock(ltc3676->regulators[i]);
+ regulator_notifier_call_chain(ltc3676->regulators[i],
+ event, NULL);
++ regulator_unlock(ltc3676->regulators[i]);
++ }
+ }
+
+ /* Clear warning condition */
+diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
+index 1600f9821891..810816e9df5d 100644
+--- a/drivers/regulator/pv88060-regulator.c
++++ b/drivers/regulator/pv88060-regulator.c
+@@ -244,9 +244,11 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
+ if (reg_val & PV88060_E_VDD_FLT) {
+ for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
++ regulator_lock(chip->rdev[i]);
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
++ regulator_unlock(chip->rdev[i]);
+ }
+ }
+
+@@ -261,9 +263,11 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
+ if (reg_val & PV88060_E_OVER_TEMP) {
+ for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
++ regulator_lock(chip->rdev[i]);
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_OVER_TEMP,
+ NULL);
++ regulator_unlock(chip->rdev[i]);
+ }
+ }
+
+diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
+index bdddacdbeb99..6279216fb254 100644
+--- a/drivers/regulator/pv88080-regulator.c
++++ b/drivers/regulator/pv88080-regulator.c
+@@ -345,9 +345,11 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
+ if (reg_val & PV88080_E_VDD_FLT) {
+ for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
++ regulator_lock(chip->rdev[i]);
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
++ regulator_unlock(chip->rdev[i]);
+ }
+ }
+
+@@ -362,9 +364,11 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
+ if (reg_val & PV88080_E_OVER_TEMP) {
+ for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
++ regulator_lock(chip->rdev[i]);
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_OVER_TEMP,
+ NULL);
++ regulator_unlock(chip->rdev[i]);
+ }
+ }
+
+diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
+index 6e97cc6df2ee..90f4f907fb3f 100644
+--- a/drivers/regulator/pv88090-regulator.c
++++ b/drivers/regulator/pv88090-regulator.c
+@@ -237,9 +237,11 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
+ if (reg_val & PV88090_E_VDD_FLT) {
+ for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
++ regulator_lock(chip->rdev[i]);
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
++ regulator_unlock(chip->rdev[i]);
+ }
+ }
+
+@@ -254,9 +256,11 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
+ if (reg_val & PV88090_E_OVER_TEMP) {
+ for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
++ regulator_lock(chip->rdev[i]);
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_OVER_TEMP,
+ NULL);
++ regulator_unlock(chip->rdev[i]);
+ }
+ }
+
+diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
+index 12b422373580..d1873f94bca7 100644
+--- a/drivers/regulator/wm831x-dcdc.c
++++ b/drivers/regulator/wm831x-dcdc.c
+@@ -183,9 +183,11 @@ static irqreturn_t wm831x_dcdc_uv_irq(int irq, void *data)
+ {
+ struct wm831x_dcdc *dcdc = data;
+
++ regulator_lock(dcdc->regulator);
+ regulator_notifier_call_chain(dcdc->regulator,
+ REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
++ regulator_unlock(dcdc->regulator);
+
+ return IRQ_HANDLED;
+ }
+@@ -194,9 +196,11 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data)
+ {
+ struct wm831x_dcdc *dcdc = data;
+
++ regulator_lock(dcdc->regulator);
+ regulator_notifier_call_chain(dcdc->regulator,
+ REGULATOR_EVENT_OVER_CURRENT,
+ NULL);
++ regulator_unlock(dcdc->regulator);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
+index 6dd891d7eee3..11f351191dba 100644
+--- a/drivers/regulator/wm831x-isink.c
++++ b/drivers/regulator/wm831x-isink.c
+@@ -140,9 +140,11 @@ static irqreturn_t wm831x_isink_irq(int irq, void *data)
+ {
+ struct wm831x_isink *isink = data;
+
++ regulator_lock(isink->regulator);
+ regulator_notifier_call_chain(isink->regulator,
+ REGULATOR_EVENT_OVER_CURRENT,
+ NULL);
++ regulator_unlock(isink->regulator);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
+index e4a6f888484e..fcd038e7cd80 100644
+--- a/drivers/regulator/wm831x-ldo.c
++++ b/drivers/regulator/wm831x-ldo.c
+@@ -51,9 +51,11 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
+ {
+ struct wm831x_ldo *ldo = data;
+
++ regulator_lock(ldo->regulator);
+ regulator_notifier_call_chain(ldo->regulator,
+ REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
++ regulator_unlock(ldo->regulator);
+
+ return IRQ_HANDLED;
+ }
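The long run of regulator driver fixes above is one pattern applied everywhere: regulator_notifier_call_chain() must be called with the rdev lock held, so every IRQ handler that forwards over-current, over-temperature, or under-voltage events now wraps the call in regulator_lock()/regulator_unlock(). The shape of the handler, as a compact sketch with a pthread mutex standing in for the rdev lock and a made-up event value:

#include <pthread.h>
#include <stdio.h>

struct rdev { pthread_mutex_t mutex; };

static void notifier_call_chain(struct rdev *r, int event)
{
        /* consumers assume r->mutex is held while they run */
        printf("event %#x delivered under lock for rdev %p\n",
               event, (void *)r);
}

static void oc_irq_handler(struct rdev *r)
{
        pthread_mutex_lock(&r->mutex);          /* regulator_lock() */
        notifier_call_chain(r, 0x8 /* illustrative event value */);
        pthread_mutex_unlock(&r->mutex);        /* regulator_unlock() */
}

int main(void)
{
        struct rdev r = { PTHREAD_MUTEX_INITIALIZER };
        oc_irq_handler(&r);
        return 0;
}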
+diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
+index d25282b4a7dd..73697e4b18a9 100644
+--- a/drivers/rtc/rtc-88pm860x.c
++++ b/drivers/rtc/rtc-88pm860x.c
+@@ -421,7 +421,7 @@ static int pm860x_rtc_remove(struct platform_device *pdev)
+ struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
+
+ #ifdef VRTC_CALIBRATION
+- flush_scheduled_work();
++ cancel_delayed_work_sync(&info->calib_work);
+ /* disable measurement */
+ pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
+ #endif /* VRTC_CALIBRATION */
+diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
+index c5908cfea234..8e6c9b3bcc29 100644
+--- a/drivers/rtc/rtc-stm32.c
++++ b/drivers/rtc/rtc-stm32.c
+@@ -788,11 +788,14 @@ static int stm32_rtc_probe(struct platform_device *pdev)
+ ret = device_init_wakeup(&pdev->dev, true);
+ if (rtc->data->has_wakeirq) {
+ rtc->wakeirq_alarm = platform_get_irq(pdev, 1);
+- if (rtc->wakeirq_alarm <= 0)
+- ret = rtc->wakeirq_alarm;
+- else
++ if (rtc->wakeirq_alarm > 0) {
+ ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+ rtc->wakeirq_alarm);
++ } else {
++ ret = rtc->wakeirq_alarm;
++ if (rtc->wakeirq_alarm == -EPROBE_DEFER)
++ goto err;
++ }
+ }
+ if (ret)
+ dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret);
+diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
+index 153820876a82..2f741f455c30 100644
+--- a/drivers/rtc/rtc-xgene.c
++++ b/drivers/rtc/rtc-xgene.c
+@@ -168,6 +168,10 @@ static int xgene_rtc_probe(struct platform_device *pdev)
+ if (IS_ERR(pdata->csr_base))
+ return PTR_ERR(pdata->csr_base);
+
++ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
++ if (IS_ERR(pdata->rtc))
++ return PTR_ERR(pdata->rtc);
++
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+@@ -198,15 +202,15 @@ static int xgene_rtc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+- &xgene_rtc_ops, THIS_MODULE);
+- if (IS_ERR(pdata->rtc)) {
+- clk_disable_unprepare(pdata->clk);
+- return PTR_ERR(pdata->rtc);
+- }
+-
+ /* HW does not support update faster than 1 seconds */
+ pdata->rtc->uie_unsupported = 1;
++ pdata->rtc->ops = &xgene_rtc_ops;
++
++ ret = rtc_register_device(pdata->rtc);
++ if (ret) {
++ clk_disable_unprepare(pdata->clk);
++ return ret;
++ }
+
+ return 0;
+ }
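The rtc-xgene conversion moves from one-shot devm_rtc_device_register() to the allocate/configure/register split: devm_rtc_allocate_device() runs early (so probe can fail before the clock is enabled), fields such as uie_unsupported and ops are set while the device is still private, and rtc_register_device() publishes it last. A sketch of the three-step shape, toy structures only:

#include <stdio.h>
#include <stdlib.h>

struct rtc_device {
        int uie_unsupported;
        int registered;
};

static struct rtc_device *rtc_allocate(void)
{
        return calloc(1, sizeof(struct rtc_device));
}

static int rtc_register(struct rtc_device *r)
{
        r->registered = 1;      /* device becomes visible only now */
        return 0;
}

int main(void)
{
        struct rtc_device *rtc = rtc_allocate();   /* step 1: allocate */
        if (!rtc)
                return 1;
        rtc->uie_unsupported = 1;                  /* step 2: configure */
        if (rtc_register(rtc))                     /* step 3: publish */
                return 1;
        printf("registered=%d uie_unsupported=%d\n",
               rtc->registered, rtc->uie_unsupported);
        free(rtc);
        return 0;
}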
+diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
+index 4e8aedd50cb0..d04d4378ca50 100644
+--- a/drivers/s390/block/dcssblk.c
++++ b/drivers/s390/block/dcssblk.c
+@@ -59,6 +59,7 @@ static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
+
+ static const struct dax_operations dcssblk_dax_ops = {
+ .direct_access = dcssblk_dax_direct_access,
++ .dax_supported = generic_fsdax_supported,
+ .copy_from_iter = dcssblk_dax_copy_from_iter,
+ .copy_to_iter = dcssblk_dax_copy_to_iter,
+ };
+diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
+index 9811fd8a0c73..92eabbb5f18d 100644
+--- a/drivers/s390/cio/cio.h
++++ b/drivers/s390/cio/cio.h
+@@ -115,7 +115,7 @@ struct subchannel {
+ struct schib_config config;
+ } __attribute__ ((aligned(8)));
+
+-DECLARE_PER_CPU(struct irb, cio_irb);
++DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
+
+ #define to_subchannel(n) container_of(n, struct subchannel, dev)
+
+diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
+index 0b3b9de45c60..9e84d8a971ad 100644
+--- a/drivers/s390/cio/vfio_ccw_drv.c
++++ b/drivers/s390/cio/vfio_ccw_drv.c
+@@ -40,26 +40,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
+ if (ret != -EBUSY)
+ goto out_unlock;
+
++ iretry = 255;
+ do {
+- iretry = 255;
+
+ ret = cio_cancel_halt_clear(sch, &iretry);
+- while (ret == -EBUSY) {
+- /*
+- * Flush all I/O and wait for
+- * cancel/halt/clear completion.
+- */
+- private->completion = &completion;
+- spin_unlock_irq(sch->lock);
+
+- wait_for_completion_timeout(&completion, 3*HZ);
++ if (ret == -EIO) {
++ pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
++ sch->schid.ssid, sch->schid.sch_no);
++ break;
++ }
++
++ /*
++ * Flush all I/O and wait for
++ * cancel/halt/clear completion.
++ */
++ private->completion = &completion;
++ spin_unlock_irq(sch->lock);
+
+- spin_lock_irq(sch->lock);
+- private->completion = NULL;
+- flush_workqueue(vfio_ccw_work_q);
+- ret = cio_cancel_halt_clear(sch, &iretry);
+- };
++ if (ret == -EBUSY)
++ wait_for_completion_timeout(&completion, 3*HZ);
+
++ private->completion = NULL;
++ flush_workqueue(vfio_ccw_work_q);
++ spin_lock_irq(sch->lock);
+ ret = cio_disable_subchannel(sch);
+ } while (ret == -EBUSY);
+ out_unlock:
+diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
+index f673e106c041..dc5ff47de3fe 100644
+--- a/drivers/s390/cio/vfio_ccw_ops.c
++++ b/drivers/s390/cio/vfio_ccw_ops.c
+@@ -130,11 +130,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
+
+ if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
+ (private->state != VFIO_CCW_STATE_STANDBY)) {
+- if (!vfio_ccw_mdev_reset(mdev))
++ if (!vfio_ccw_sch_quiesce(private->sch))
+ private->state = VFIO_CCW_STATE_STANDBY;
+ /* The state will be NOT_OPER on error. */
+ }
+
++ cp_free(&private->cp);
+ private->mdev = NULL;
+ atomic_inc(&private->avail);
+
+@@ -158,6 +159,14 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev)
+ struct vfio_ccw_private *private =
+ dev_get_drvdata(mdev_parent_dev(mdev));
+
++ if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
++ (private->state != VFIO_CCW_STATE_STANDBY)) {
++ if (!vfio_ccw_mdev_reset(mdev))
++ private->state = VFIO_CCW_STATE_STANDBY;
++ /* The state will be NOT_OPER on error. */
++ }
++
++ cp_free(&private->cp);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &private->nb);
+ }
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 689c2af7026a..c31b2d31cd83 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -659,6 +659,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
+ trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
+
+ if (mex->outputdatalength < mex->inputdatalength) {
++ func_code = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -742,6 +743,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
+ trace_s390_zcrypt_req(crt, TP_ICARSACRT);
+
+ if (crt->outputdatalength < crt->inputdatalength) {
++ func_code = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -951,6 +953,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
+
+ targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
+ if (!targets) {
++ func_code = 0;
+ rc = -ENOMEM;
+ goto out;
+ }
+@@ -958,6 +961,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
+ uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
+ if (copy_from_user(targets, uptr,
+ target_num * sizeof(*targets))) {
++ func_code = 0;
+ rc = -EFAULT;
+ goto out_free;
+ }
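The zcrypt fixes all serve the trace call at the shared out label, which logs func_code on every exit: the early-validation and allocation-failure paths previously jumped there with func_code uninitialized. Zeroing it on those paths keeps the trace well defined. A runnable illustration, invented values:

#include <stdio.h>

static int do_request(int inlen, int outlen)
{
        int func_code;          /* logged at "out" on every path */
        int rc;

        if (outlen < inlen) {
                func_code = 0;  /* would be uninitialized otherwise */
                rc = -1;
                goto out;
        }

        func_code = 0x42;       /* set by the normal path */
        rc = 0;
out:
        printf("rc=%d func_code=%#x\n", rc, func_code);
        return rc;
}

int main(void)
{
        do_request(8, 4);       /* early validation failure */
        do_request(4, 8);       /* normal path */
        return 0;
}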
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index c851cf6e01c4..d603dfea97ab 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -163,6 +163,12 @@ struct qeth_vnicc_info {
+ bool rx_bcast_enabled;
+ };
+
++static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
++ enum qeth_ipa_setadp_cmd func)
++{
++ return (ipa->supported_funcs & func);
++}
++
+ static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
+ enum qeth_ipa_funcs func)
+ {
+@@ -176,9 +182,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
+ }
+
+ #define qeth_adp_supported(c, f) \
+- qeth_is_ipa_supported(&c->options.adp, f)
+-#define qeth_adp_enabled(c, f) \
+- qeth_is_ipa_enabled(&c->options.adp, f)
++ qeth_is_adp_supported(&c->options.adp, f)
+ #define qeth_is_supported(c, f) \
+ qeth_is_ipa_supported(&c->options.ipa4, f)
+ #define qeth_is_enabled(c, f) \
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 44bd6f04c145..8c73a99daff3 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -1308,7 +1308,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card)
+ card->qdio.no_out_queues = 4;
+ }
+
+-static void qeth_update_from_chp_desc(struct qeth_card *card)
++static int qeth_update_from_chp_desc(struct qeth_card *card)
+ {
+ struct ccw_device *ccwdev;
+ struct channel_path_desc_fmt0 *chp_dsc;
+@@ -1318,7 +1318,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
+ ccwdev = card->data.ccwdev;
+ chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
+ if (!chp_dsc)
+- goto out;
++ return -ENOMEM;
+
+ card->info.func_level = 0x4100 + chp_dsc->desc;
+ if (card->info.type == QETH_CARD_TYPE_IQD)
+@@ -1333,6 +1333,7 @@ out:
+ kfree(chp_dsc);
+ QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
+ QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
++ return 0;
+ }
+
+ static void qeth_init_qdio_info(struct qeth_card *card)
+@@ -4986,7 +4987,9 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
+
+ QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
+ atomic_set(&card->force_alloc_skb, 0);
+- qeth_update_from_chp_desc(card);
++ rc = qeth_update_from_chp_desc(card);
++ if (rc)
++ return rc;
+ retry:
+ if (retries < 3)
+ QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
+@@ -5641,7 +5644,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
+ }
+
+ qeth_setup_card(card);
+- qeth_update_from_chp_desc(card);
++ rc = qeth_update_from_chp_desc(card);
++ if (rc)
++ goto err_chp_desc;
+
+ card->dev = qeth_alloc_netdev(card);
+ if (!card->dev) {
+@@ -5676,6 +5681,7 @@ err_disc:
+ qeth_core_free_discipline(card);
+ err_load:
+ free_netdev(card->dev);
++err_chp_desc:
+ err_card:
+ qeth_core_free_card(card);
+ err_dev:
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 17b45a0c7bc3..3611a4ef0d15 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -2052,6 +2052,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
+ if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
+ phy->phy_state = PHY_EMPTY;
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
++ /*
++ * Even though the PHY is empty, for convenience we discover
++ * the PHY to update the PHY info, like negotiated linkrate.
++ */
++ sas_ex_phy_discover(dev, phy_id);
+ return res;
+ } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
+ dev_type_flutter(type, phy->attached_dev_type)) {
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index 2e3949c6cd07..25553e7ba85c 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -2005,8 +2005,11 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, 256);
+
++ /* This string MUST be consistent with other FC platforms
++ * supported by Broadcom.
++ */
+ strncpy(ae->un.AttrString,
+- "Broadcom Inc.",
++ "Emulex Corporation",
+ sizeof(ae->un.AttrString));
+ len = strnlen(ae->un.AttrString,
+ sizeof(ae->un.AttrString));
+@@ -2360,10 +2363,11 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, 32);
+
+- ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
+- ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
+- ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
+- ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
++ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
++ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
++ if (vport->nvmei_support || vport->phba->nvmet_support)
++ ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
++ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ size = FOURBYTES + 32;
+ ad->AttrLen = cpu_to_be16(size);
+ ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
+@@ -2673,9 +2677,11 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, 32);
+
+- ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
+- ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
+- ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
++ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
++ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
++ if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
++ ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
++ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ size = FOURBYTES + 32;
+ ad->AttrLen = cpu_to_be16(size);
+ ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index aa4961a2caf8..75e9d46d44d4 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -932,7 +932,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+- /* Clean up any firmware default rpi's */
++
++ /* Clean up any SLI3 firmware default rpi's */
++ if (phba->sli_rev > LPFC_SLI_REV3)
++ goto skip_unreg_did;
++
+ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mb) {
+ lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
+@@ -944,6 +948,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
+ }
+ }
+
++ skip_unreg_did:
+ /* Setup myDID for link up if we are in pt2pt mode */
+ if (phba->pport->fc_flag & FC_PT2PT) {
+ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+@@ -4868,6 +4873,10 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ * accept PLOGIs after unreg_rpi_cmpl
+ */
+ acc_plogi = 0;
++ } else if (vport->load_flag & FC_UNLOADING) {
++ mbox->ctx_ndlp = NULL;
++ mbox->mbox_cmpl =
++ lpfc_sli_def_mbox_cmpl;
+ } else {
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl =
+@@ -4979,6 +4988,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
++ /* Unreg DID is an SLI3 operation. */
++ if (phba->sli_rev > LPFC_SLI_REV3)
++ return;
++
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 7fcdaed3fa94..46e155d1fa15 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -3245,6 +3245,13 @@ void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_destroy_expedite_pool(phba);
+
++ if (!(phba->pport->load_flag & FC_UNLOADING)) {
++ lpfc_sli_flush_fcp_rings(phba);
++
++ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
++ lpfc_sli_flush_nvme_rings(phba);
++ }
++
+ hwq_count = phba->cfg_hdw_queue;
+
+ for (i = 0; i < hwq_count; i++) {
+@@ -3611,8 +3618,6 @@ lpfc_io_free(struct lpfc_hba *phba)
+ struct lpfc_sli4_hdw_queue *qp;
+ int idx;
+
+- spin_lock_irq(&phba->hbalock);
+-
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ qp = &phba->sli4_hba.hdwq[idx];
+ /* Release all the lpfc_nvme_bufs maintained by this host. */
+@@ -3642,8 +3647,6 @@ lpfc_io_free(struct lpfc_hba *phba)
+ }
+ spin_unlock(&qp->io_buf_list_get_lock);
+ }
+-
+- spin_unlock_irq(&phba->hbalock);
+ }
+
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
+index 1aa00d2c3f74..9defff711884 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -2080,15 +2080,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
+ lpfc_nvme_template.max_hw_queues =
+ phba->sli4_hba.num_present_cpu;
+
++ if (!IS_ENABLED(CONFIG_NVME_FC))
++ return ret;
++
+ /* localport is allocated from the stack, but the registration
+ * call allocates heap memory as well as the private area.
+ */
+-#if (IS_ENABLED(CONFIG_NVME_FC))
++
+ ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
+ &vport->phba->pcidev->dev, &localport);
+-#else
+- ret = -ENOMEM;
+-#endif
+ if (!ret) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
+ "6005 Successfully registered local "
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index a497b2c0cb79..25501d4605ff 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3670,7 +3670,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ cpu = smp_processor_id();
+- if (cpu < LPFC_CHECK_CPU_CNT)
++ if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
+ phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
+ }
+ #endif
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 57b4a463b589..dc933b6d7800 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -2502,8 +2502,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ } else {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ }
++ pmb->ctx_ndlp = NULL;
+ }
+- pmb->ctx_ndlp = NULL;
+ }
+
+ /* Check security permission status on INIT_LINK mailbox command */
+@@ -7652,12 +7652,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+ phba->cfg_xri_rebalancing = 0;
+ }
+
+- /* Arm the CQs and then EQs on device */
+- lpfc_sli4_arm_cqeq_intr(phba);
+-
+- /* Indicate device interrupt mode */
+- phba->sli4_hba.intr_enable = 1;
+-
+ /* Allow asynchronous mailbox command to go through */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+@@ -7726,6 +7720,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+ phba->trunk_link.link3.state = LPFC_LINK_DOWN;
+ spin_unlock_irq(&phba->hbalock);
+
++ /* Arm the CQs and then EQs on device */
++ lpfc_sli4_arm_cqeq_intr(phba);
++
++ /* Indicate device interrupt mode */
++ phba->sli4_hba.intr_enable = 1;
++
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->hba_flag & LINK_DISABLED)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
+index 6ca583bdde23..29b51c466721 100644
+--- a/drivers/scsi/qedf/qedf_io.c
++++ b/drivers/scsi/qedf/qedf_io.c
+@@ -902,6 +902,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+ kref_put(&io_req->refcount, qedf_release_cmd);
++ return -EINVAL;
+ }
+
+ /* Obtain free SQE */
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
+index 6d6d6013e35b..bf371e7b957d 100644
+--- a/drivers/scsi/qedi/qedi_iscsi.c
++++ b/drivers/scsi/qedi/qedi_iscsi.c
+@@ -1000,6 +1000,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+ qedi_ep = ep->dd_data;
+ qedi = qedi_ep->qedi;
+
++ if (qedi_ep->state == EP_STATE_OFLDCONN_START)
++ goto ep_exit_recover;
++
+ flush_work(&qedi_ep->offload_work);
+
+ if (qedi_ep->conn) {
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 69bbea9239cc..add17843148d 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3475,7 +3475,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ ql_log(ql_log_fatal, vha, 0x00c8,
+ "Failed to allocate memory for ha->msix_entries.\n");
+ ret = -ENOMEM;
+- goto msix_out;
++ goto free_irqs;
+ }
+ ha->flags.msix_enabled = 1;
+
+@@ -3558,6 +3558,10 @@ msix_register_fail:
+
+ msix_out:
+ return ret;
++
++free_irqs:
++ pci_free_irq_vectors(ha->pdev);
++ goto msix_out;
+ }
+
+ int
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 697eee1d8847..b210a8296c27 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -680,7 +680,6 @@ done:
+ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
+ {
+ fc_port_t *t;
+- unsigned long flags;
+
+ switch (e->u.nack.type) {
+ case SRB_NACK_PRLI:
+@@ -693,10 +692,8 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
+ if (t) {
+ ql_log(ql_log_info, vha, 0xd034,
+ "%s create sess success %p", __func__, t);
+- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ /* create sess has an extra kref */
+ vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
+- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ }
+ break;
+ }
+@@ -708,9 +705,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
+ {
+ fc_port_t *fcport = container_of(work, struct fc_port, del_work);
+ struct qla_hw_data *ha = fcport->vha->hw;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+
+ if (fcport->se_sess) {
+ ha->tgt.tgt_ops->shutdown_sess(fcport);
+@@ -718,7 +712,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
+ } else {
+ qlt_unreg_sess(fcport);
+ }
+- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ }
+
+ /*
+@@ -787,8 +780,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+ fcport->port_name, sess->loop_id);
+ sess->local = 0;
+ }
+- ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++
++ ha->tgt.tgt_ops->put_sess(sess);
+ }
+
+ /*
+@@ -4242,9 +4236,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
+ /*
+ * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
+ */
+- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+
+ out_term:
+@@ -4261,9 +4253,7 @@ out_term:
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ }
+
+ static void qlt_do_work(struct work_struct *work)
+@@ -4472,9 +4462,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ if (!cmd) {
+ ql_dbg(ql_dbg_io, vha, 0x3062,
+ "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return -EBUSY;
+ }
+
+@@ -6318,17 +6306,19 @@ static void qlt_abort_work(struct qla_tgt *tgt,
+ }
+
+ rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+- ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
++ ha->tgt.tgt_ops->put_sess(sess);
++
+ if (rc != 0)
+ goto out_term;
+ return;
+
+ out_term2:
++ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
++
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
+ out_term:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+@@ -6386,9 +6376,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
+ scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
+
+ rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+- ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
++ ha->tgt.tgt_ops->put_sess(sess);
++
+ if (rc != 0)
+ goto out_term;
+ return;
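The qla2xxx hunks above consistently move ha->tgt.tgt_ops->put_sess() from inside tgt.sess_lock to after the unlock (and the tcm_qla2xxx change below drops the matching assert_spin_locked() from put_sess). The likely rationale is that dropping the final session reference can trigger teardown work that must not run under the spinlock. A minimal sketch of the pattern, with hypothetical demo_* names:

	#include <linux/kref.h>
	#include <linux/spinlock.h>

	struct demo_sess {
		struct kref kref;
	};

	static void demo_release(struct kref *kref)
	{
		/* teardown that may sleep or re-take the lock */
	}

	static void demo_finish(struct demo_sess *s, spinlock_t *lock)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		/* ... updates that actually need the lock ... */
		spin_unlock_irqrestore(lock, flags);

		kref_put(&s->kref, demo_release);	/* lock no longer held */
	}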
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index 8a3075d17c63..e58becb790fa 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -359,7 +359,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
+ if (!sess)
+ return;
+
+- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+ kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
+ }
+
+@@ -374,8 +373,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ target_sess_cmd_list_set_waiting(se_sess);
+- tcm_qla2xxx_put_sess(sess);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
++
++ tcm_qla2xxx_put_sess(sess);
+ }
+
+ static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+@@ -399,6 +399,8 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+ cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
++ transport_generic_request_failure(&cmd->se_cmd,
++ TCM_CHECK_CONDITION_ABORT_CMD);
+ return 0;
+ }
+ cmd->trc_flags |= TRC_XFR_RDY;
+@@ -829,7 +831,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
+
+ static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
+ {
+- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+ target_sess_cmd_list_set_waiting(sess->se_sess);
+ }
+
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 6e4f4931ae17..8c674eca09f1 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -5930,7 +5930,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
+ val = rd_nvram_byte(ha, sec_addr);
+ if (val & BIT_7)
+ ddb_index[1] = (val & 0x7f);
+-
++ goto exit_boot_info;
+ } else if (is_qla80XX(ha)) {
+ buf = dma_alloc_coherent(&ha->pdev->dev, size,
+ &buf_dma, GFP_KERNEL);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 2b2bc4b49d78..b894786df6c2 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2603,7 +2603,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
+ int res;
+ struct scsi_device *sdp = sdkp->device;
+ struct scsi_mode_data data;
+- int disk_ro = get_disk_ro(sdkp->disk);
+ int old_wp = sdkp->write_prot;
+
+ set_disk_ro(sdkp->disk, 0);
+@@ -2644,7 +2643,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
+ "Test WP failed, assume Write Enabled\n");
+ } else {
+ sdkp->write_prot = ((data.device_specific & 0x80) != 0);
+- set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
++ set_disk_ro(sdkp->disk, sdkp->write_prot);
+ if (sdkp->first_scan || old_wp != sdkp->write_prot) {
+ sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
+ sdkp->write_prot ? "on" : "off");
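This sd.c hunk stops feeding the current get_disk_ro() state back into set_disk_ro(). With the OR in place, a disk that had ever been marked read-only stayed read-only across revalidation, even after write protection was removed from the medium. A toy illustration (not driver code):

	/* old: the flag latches and can never clear */
	static bool old_next_ro(bool write_prot, bool cur_ro)
	{
		return write_prot || cur_ro;
	}

	/* new: the flag simply tracks the medium's WP bit */
	static bool new_next_ro(bool write_prot)
	{
		return write_prot;
	}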
+diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
+index 0e855b5afe82..2f592df921d9 100644
+--- a/drivers/scsi/ufs/ufs-hisi.c
++++ b/drivers/scsi/ufs/ufs-hisi.c
+@@ -587,6 +587,10 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
+ ufshcd_set_variant(hba, host);
+
+ host->rst = devm_reset_control_get(dev, "rst");
++ if (IS_ERR(host->rst)) {
++ dev_err(dev, "%s: failed to get reset control\n", __func__);
++ return PTR_ERR(host->rst);
++ }
+
+ ufs_hisi_set_pm_lvl(hba);
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index e040f9dd9ff3..5ba49c8cd2a3 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -6294,19 +6294,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+ goto out;
+ }
+
+- if (hba->vreg_info.vcc)
++ if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vcc->max_uA,
+ POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
+
+- if (hba->vreg_info.vccq)
++ if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
+
+- if (hba->vreg_info.vccq2)
++ if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq2->max_uA,
+ icc_level,
+@@ -7004,6 +7004,15 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
+ if (!vreg)
+ return 0;
+
++ /*
++	 * The "set_load" operation is only required for regulators that
++	 * explicitly configure a current limit. Otherwise a zero max_uA
++	 * may cause unexpected behavior when the regulator is enabled or
++	 * set to high-power mode.
++ */
++ if (!vreg->max_uA)
++ return 0;
++
+ ret = regulator_set_load(vreg->reg, ua);
+ if (ret < 0) {
+ dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
+@@ -7039,12 +7048,15 @@ static int ufshcd_config_vreg(struct device *dev,
+ name = vreg->name;
+
+ if (regulator_count_voltages(reg) > 0) {
+- min_uV = on ? vreg->min_uV : 0;
+- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+- if (ret) {
+- dev_err(dev, "%s: %s set voltage failed, err=%d\n",
++ if (vreg->min_uV && vreg->max_uV) {
++ min_uV = on ? vreg->min_uV : 0;
++ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
++ if (ret) {
++ dev_err(dev,
++ "%s: %s set voltage failed, err=%d\n",
+ __func__, name, ret);
+- goto out;
++ goto out;
++ }
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
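Taken together, the ufshcd hunks make regulator programming conditional on the constraints actually being declared: regulator_set_voltage() runs only when both min_uV and max_uV are set, regulator_set_load() only when max_uA is non-zero, and the icc-level scan skips supplies without a current limit. A condensed sketch of the guard pattern, assuming a ufs_vreg-like descriptor:

	#include <linux/regulator/consumer.h>

	struct demo_vreg {			/* stand-in for struct ufs_vreg */
		struct regulator *reg;
		int min_uV, max_uV, max_uA;
	};

	static int demo_config_vreg(struct demo_vreg *v, bool on)
	{
		int ret = 0;

		if (v->min_uV && v->max_uV) {	/* voltage range declared */
			ret = regulator_set_voltage(v->reg,
						    on ? v->min_uV : 0,
						    v->max_uV);
			if (ret)
				return ret;
		}
		if (v->max_uA)			/* current limit declared */
			ret = regulator_set_load(v->reg, on ? v->max_uA : 0);
		return ret;
	}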
+diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
+index 71f094c9ec68..f3585777324c 100644
+--- a/drivers/slimbus/qcom-ngd-ctrl.c
++++ b/drivers/slimbus/qcom-ngd-ctrl.c
+@@ -1342,6 +1342,10 @@ static int of_qcom_slim_ngd_register(struct device *parent,
+ return -ENOMEM;
+
+ ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
++ if (!ngd->pdev) {
++ kfree(ngd);
++ return -ENOMEM;
++ }
+ ngd->id = id;
+ ngd->pdev->dev.parent = parent;
+ ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index fffc21cd5f79..b3173ebddade 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -570,7 +570,8 @@ static int atmel_qspi_remove(struct platform_device *pdev)
+
+ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
+ {
+- struct atmel_qspi *aq = dev_get_drvdata(dev);
++ struct spi_controller *ctrl = dev_get_drvdata(dev);
++ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+
+ clk_disable_unprepare(aq->qspick);
+ clk_disable_unprepare(aq->pclk);
+@@ -580,7 +581,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
+
+ static int __maybe_unused atmel_qspi_resume(struct device *dev)
+ {
+- struct atmel_qspi *aq = dev_get_drvdata(dev);
++ struct spi_controller *ctrl = dev_get_drvdata(dev);
++ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+
+ clk_prepare_enable(aq->pclk);
+ clk_prepare_enable(aq->qspick);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 6ec647bbba77..a81ae29aa68a 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1494,7 +1494,7 @@ static int spi_imx_transfer(struct spi_device *spi,
+
+ /* flush rxfifo before transfer */
+ while (spi_imx->devtype_data->rx_available(spi_imx))
+- spi_imx->rx(spi_imx);
++ readl(spi_imx->base + MXC_CSPIRXDATA);
+
+ if (spi_imx->slave_mode)
+ return spi_imx_pio_transfer_slave(spi, transfer);
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index b6ddba833d02..d2076f2f468f 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -884,10 +884,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
+
+ rate = min_t(int, ssp_clk, rate);
+
++ /*
++	 * Calculate the divisor for the SCR (Serial Clock Rate) so that
++	 * the SSP transmission rate cannot be greater than the device rate
++ */
+ if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
+- return (ssp_clk / (2 * rate) - 1) & 0xff;
++ return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
+ else
+- return (ssp_clk / rate - 1) & 0xfff;
++ return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
+ }
+
+ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
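Rounding the divisor up instead of down guarantees the programmed bit rate never exceeds the requested one. A worked example with illustrative numbers (ssp_clk = 100 MHz, requested rate = 24 MHz, non-PXA25x branch):

	/* old: 100000000 / 24000000 - 1              = 3 -> 100 MHz / (3 + 1) = 25 MHz, too fast
	 * new: DIV_ROUND_UP(100000000, 24000000) - 1 = 4 -> 100 MHz / (4 + 1) = 20 MHz, within spec
	 */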
+diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
+index 556870dcdf79..5d35a82945cd 100644
+--- a/drivers/spi/spi-rspi.c
++++ b/drivers/spi/spi-rspi.c
+@@ -271,7 +271,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+ /* Sets parity, interrupt mask */
+ rspi_write8(rspi, 0x00, RSPI_SPCR2);
+
+- /* Sets SPCMD */
++ /* Resets sequencer */
++ rspi_write8(rspi, 0, RSPI_SPSCR);
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+@@ -315,7 +316,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+- /* Sets SPCMD */
++ /* Resets sequencer */
++ rspi_write8(rspi, 0, RSPI_SPSCR);
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+@@ -366,7 +368,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
+ /* Sets buffer to allow normal operation */
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+
+- /* Sets SPCMD */
++ /* Resets sequencer */
++ rspi_write8(rspi, 0, RSPI_SPSCR);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index 3b2a9a6b990d..0b9a8bddb939 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -93,6 +93,7 @@ struct stm32_qspi_flash {
+
+ struct stm32_qspi {
+ struct device *dev;
++ struct spi_controller *ctrl;
+ void __iomem *io_base;
+ void __iomem *mm_base;
+ resource_size_t mm_size;
+@@ -397,6 +398,7 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
+ writel_relaxed(0, qspi->io_base + QSPI_CR);
+ mutex_destroy(&qspi->lock);
+ clk_disable_unprepare(qspi->clk);
++ spi_master_put(qspi->ctrl);
+ }
+
+ static int stm32_qspi_probe(struct platform_device *pdev)
+@@ -413,43 +415,54 @@ static int stm32_qspi_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ qspi = spi_controller_get_devdata(ctrl);
++ qspi->ctrl = ctrl;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
+ qspi->io_base = devm_ioremap_resource(dev, res);
+- if (IS_ERR(qspi->io_base))
+- return PTR_ERR(qspi->io_base);
++ if (IS_ERR(qspi->io_base)) {
++ ret = PTR_ERR(qspi->io_base);
++ goto err;
++ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
+ qspi->mm_base = devm_ioremap_resource(dev, res);
+- if (IS_ERR(qspi->mm_base))
+- return PTR_ERR(qspi->mm_base);
++ if (IS_ERR(qspi->mm_base)) {
++ ret = PTR_ERR(qspi->mm_base);
++ goto err;
++ }
+
+ qspi->mm_size = resource_size(res);
+- if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
+- return -EINVAL;
++ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
++ ret = -EINVAL;
++ goto err;
++ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
+ dev_name(dev), qspi);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+- return ret;
++ goto err;
+ }
+
+ init_completion(&qspi->data_completion);
+
+ qspi->clk = devm_clk_get(dev, NULL);
+- if (IS_ERR(qspi->clk))
+- return PTR_ERR(qspi->clk);
++ if (IS_ERR(qspi->clk)) {
++ ret = PTR_ERR(qspi->clk);
++ goto err;
++ }
+
+ qspi->clk_rate = clk_get_rate(qspi->clk);
+- if (!qspi->clk_rate)
+- return -EINVAL;
++ if (!qspi->clk_rate) {
++ ret = -EINVAL;
++ goto err;
++ }
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+- return ret;
++ goto err;
+ }
+
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+@@ -472,14 +485,11 @@ static int stm32_qspi_probe(struct platform_device *pdev)
+ ctrl->dev.of_node = dev->of_node;
+
+ ret = devm_spi_register_master(dev, ctrl);
+- if (ret)
+- goto err_spi_register;
+-
+- return 0;
++ if (!ret)
++ return 0;
+
+-err_spi_register:
++err:
+ stm32_qspi_release(qspi);
+-
+ return ret;
+ }
+
+diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
+index a76acedd7e2f..a1888dc6a938 100644
+--- a/drivers/spi/spi-tegra114.c
++++ b/drivers/spi/spi-tegra114.c
+@@ -1067,27 +1067,19 @@ static int tegra_spi_probe(struct platform_device *pdev)
+
+ spi_irq = platform_get_irq(pdev, 0);
+ tspi->irq = spi_irq;
+- ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+- tegra_spi_isr_thread, IRQF_ONESHOT,
+- dev_name(&pdev->dev), tspi);
+- if (ret < 0) {
+- dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+- tspi->irq);
+- goto exit_free_master;
+- }
+
+ tspi->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+- goto exit_free_irq;
++ goto exit_free_master;
+ }
+
+ tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+- goto exit_free_irq;
++ goto exit_free_master;
+ }
+
+ tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
+@@ -1095,7 +1087,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
+
+ ret = tegra_spi_init_dma_param(tspi, true);
+ if (ret < 0)
+- goto exit_free_irq;
++ goto exit_free_master;
+ ret = tegra_spi_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+@@ -1117,18 +1109,32 @@ static int tegra_spi_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
++
++ reset_control_assert(tspi->rst);
++ udelay(2);
++ reset_control_deassert(tspi->rst);
+ tspi->def_command1_reg = SPI_M_S;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ pm_runtime_put(&pdev->dev);
++ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
++ tegra_spi_isr_thread, IRQF_ONESHOT,
++ dev_name(&pdev->dev), tspi);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
++ tspi->irq);
++ goto exit_pm_disable;
++ }
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+- goto exit_pm_disable;
++ goto exit_free_irq;
+ }
+ return ret;
+
++exit_free_irq:
++ free_irq(spi_irq, tspi);
+ exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+@@ -1136,8 +1142,6 @@ exit_pm_disable:
+ tegra_spi_deinit_dma_param(tspi, false);
+ exit_rx_dma_free:
+ tegra_spi_deinit_dma_param(tspi, true);
+-exit_free_irq:
+- free_irq(spi_irq, tspi);
+ exit_free_master:
+ spi_master_put(master);
+ return ret;
+diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
+index fba3f180f233..8a5966963834 100644
+--- a/drivers/spi/spi-topcliff-pch.c
++++ b/drivers/spi/spi-topcliff-pch.c
+@@ -1299,18 +1299,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
+ dma->rx_buf_virt, dma->rx_buf_dma);
+ }
+
+-static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
++static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+ {
+ struct pch_spi_dma_ctrl *dma;
++ int ret;
+
+ dma = &data->dma;
++ ret = 0;
+ /* Get Consistent memory for Tx DMA */
+ dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
++ if (!dma->tx_buf_virt)
++ ret = -ENOMEM;
++
+ /* Get Consistent memory for Rx DMA */
+ dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
++ if (!dma->rx_buf_virt)
++ ret = -ENOMEM;
++
++ return ret;
+ }
+
+ static int pch_spi_pd_probe(struct platform_device *plat_dev)
+@@ -1387,7 +1396,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
+
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+- pch_alloc_dma_buf(board_dat, data);
++ ret = pch_alloc_dma_buf(board_dat, data);
++ if (ret)
++ goto err_spi_register_master;
+ }
+
+ ret = spi_register_master(master);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 93986f879b09..a83fcddf1dad 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -36,6 +36,8 @@
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/spi.h>
++EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
++EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
+
+ #include "internals.h"
+
+@@ -1039,6 +1041,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
++ if (!xfer->len)
++ continue;
+ if (!xfer->tx_buf)
+ xfer->tx_buf = ctlr->dummy_tx;
+ if (!xfer->rx_buf)
+@@ -2195,6 +2199,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
+ */
+ cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
+ GPIOD_OUT_LOW);
++ if (IS_ERR(cs[i]))
++ return PTR_ERR(cs[i]);
+
+ if (cs[i]) {
+ /*
+@@ -2275,24 +2281,6 @@ int spi_register_controller(struct spi_controller *ctlr)
+ if (status)
+ return status;
+
+- if (!spi_controller_is_slave(ctlr)) {
+- if (ctlr->use_gpio_descriptors) {
+- status = spi_get_gpio_descs(ctlr);
+- if (status)
+- return status;
+- /*
+- * A controller using GPIO descriptors always
+- * supports SPI_CS_HIGH if need be.
+- */
+- ctlr->mode_bits |= SPI_CS_HIGH;
+- } else {
+- /* Legacy code path for GPIOs from DT */
+- status = of_spi_register_master(ctlr);
+- if (status)
+- return status;
+- }
+- }
+-
+ /* even if it's just one always-selected device, there must
+ * be at least one chipselect
+ */
+@@ -2349,6 +2337,25 @@ int spi_register_controller(struct spi_controller *ctlr)
+ * registration fails if the bus ID is in use.
+ */
+ dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
++
++ if (!spi_controller_is_slave(ctlr)) {
++ if (ctlr->use_gpio_descriptors) {
++ status = spi_get_gpio_descs(ctlr);
++ if (status)
++ return status;
++ /*
++ * A controller using GPIO descriptors always
++ * supports SPI_CS_HIGH if need be.
++ */
++ ctlr->mode_bits |= SPI_CS_HIGH;
++ } else {
++ /* Legacy code path for GPIOs from DT */
++ status = of_spi_register_master(ctlr);
++ if (status)
++ return status;
++ }
++ }
++
+ status = device_add(&ctlr->dev);
+ if (status < 0) {
+ /* free bus id */
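Two things are going on in the spi.c hunks: spi_map_msg() no longer points zero-length transfers at the dummy buffers, and the GPIO chip-select setup is moved after dev_set_name(), presumably so gpiod lookups that match on the consumer device name see the final "spiN" name. The added IS_ERR() check matters because the "optional" gpiod getters return NULL when no GPIO is wired up but an ERR_PTR() on real failures:

	/* sketch of the distinction, as in spi_get_gpio_descs() above */
	struct gpio_desc *cs;

	cs = devm_gpiod_get_index_optional(dev, "cs", i, GPIOD_OUT_LOW);
	if (IS_ERR(cs))
		return PTR_ERR(cs);	/* real error, possibly -EPROBE_DEFER */
	if (!cs) {
		/* no GPIO for this chip select: use the native CS */
	}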
+diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
+index f51f150307df..ffa379efff83 100644
+--- a/drivers/ssb/bridge_pcmcia_80211.c
++++ b/drivers/ssb/bridge_pcmcia_80211.c
+@@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = {
+ .resume = ssb_host_pcmcia_resume,
+ };
+
++static int pcmcia_init_failed;
++
+ /*
+ * These are not module init/exit functions!
+ * The module_pcmcia_driver() helper cannot be used here.
+ */
+ int ssb_host_pcmcia_init(void)
+ {
+- return pcmcia_register_driver(&ssb_host_pcmcia_driver);
++ pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver);
++
++ return pcmcia_init_failed;
+ }
+
+ void ssb_host_pcmcia_exit(void)
+ {
+- pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
++ if (!pcmcia_init_failed)
++ pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
+ }
+diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
+index aea449a8dbf8..76818cc48ddc 100644
+--- a/drivers/staging/media/davinci_vpfe/Kconfig
++++ b/drivers/staging/media/davinci_vpfe/Kconfig
+@@ -1,7 +1,7 @@
+ config VIDEO_DM365_VPFE
+ tristate "DM365 VPFE Media Controller Capture Driver"
+ depends on VIDEO_V4L2
+- depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || COMPILE_TEST
++ depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || (COMPILE_TEST && !ARCH_OMAP1)
+ depends on VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_DAVINCI_VPBE_DISPLAY
+ select VIDEOBUF2_DMA_CONTIG
+diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
+index 8a9af4688fd4..8cdd3daa6c5f 100644
+--- a/drivers/staging/media/imx/imx-media-vdic.c
++++ b/drivers/staging/media/imx/imx-media-vdic.c
+@@ -231,6 +231,12 @@ static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
+ curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
+ next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
+ break;
++ default:
++ /*
++ * can't get here, priv->fieldtype can only be one of
++		 * We can't get here: priv->fieldtype can only be one of
++		 * the values handled above. This case only quiets smatch warnings.
++ return;
+ }
+
+ ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
+diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
+index d575ac78c8f0..d00d26264c37 100644
+--- a/drivers/staging/media/ipu3/ipu3.c
++++ b/drivers/staging/media/ipu3/ipu3.c
+@@ -791,7 +791,7 @@ out:
+ * PCI rpm framework checks the existence of driver rpm callbacks.
+ * Place a dummy callback here to avoid rpm going into error state.
+ */
+-static int imgu_rpm_dummy_cb(struct device *dev)
++static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
+ {
+ return 0;
+ }
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
+index 4aedd24a9848..c57c04b41d2e 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
+@@ -28,6 +28,8 @@
+
+ #define CEDRUS_CAPABILITY_UNTILED BIT(0)
+
++#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
++
+ enum cedrus_codec {
+ CEDRUS_CODEC_MPEG2,
+
+@@ -91,6 +93,7 @@ struct cedrus_dec_ops {
+
+ struct cedrus_variant {
+ unsigned int capabilities;
++ unsigned int quirks;
+ };
+
+ struct cedrus_dev {
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+index 0acf219a8c91..fbfff7c1c771 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+@@ -177,7 +177,8 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
+ */
+
+ #ifdef PHYS_PFN_OFFSET
+- dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
++ if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET))
++ dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+ #endif
+
+ ret = of_reserved_mem_device_init(dev->dev);
+diff --git a/drivers/staging/mt7621-mmc/sd.c b/drivers/staging/mt7621-mmc/sd.c
+index 4b26ec896a96..38f9ea02ee3a 100644
+--- a/drivers/staging/mt7621-mmc/sd.c
++++ b/drivers/staging/mt7621-mmc/sd.c
+@@ -468,7 +468,11 @@ static unsigned int msdc_command_start(struct msdc_host *host,
+ host->cmd = cmd;
+ host->cmd_rsp = resp;
+
+- init_completion(&host->cmd_done);
++ // The completion should have been consumed by the previous command
++ // response handler, because the mmc requests should be serialized
++ if (completion_done(&host->cmd_done))
++ dev_err(mmc_dev(host->mmc),
++ "previous command was not handled\n");
+
+ sdr_set_bits(host->base + MSDC_INTEN, wints);
+ sdc_send_cmd(rawcmd, cmd->arg);
+@@ -490,7 +494,6 @@ static unsigned int msdc_command_resp(struct msdc_host *host,
+ MSDC_INT_ACMD19_DONE;
+
+ BUG_ON(in_interrupt());
+- //init_completion(&host->cmd_done);
+ //sdr_set_bits(host->base + MSDC_INTEN, wints);
+
+ spin_unlock(&host->lock);
+@@ -593,8 +596,6 @@ static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
+ struct bd *bd;
+ u32 j;
+
+- BUG_ON(sglen > MAX_BD_NUM); /* not support currently */
+-
+ gpd = dma->gpd;
+ bd = dma->bd;
+
+@@ -674,7 +675,13 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ //msdc_clr_fifo(host); /* no need */
+
+ msdc_dma_on(); /* enable DMA mode first!! */
+- init_completion(&host->xfer_done);
++
++ // The completion should have been consumed by the previous
++ // xfer response handler, because the mmc requests should be
++ // serialized
++		if (completion_done(&host->xfer_done))
++ dev_err(mmc_dev(host->mmc),
++ "previous transfer was not handled\n");
+
+ /* start the command first*/
+ if (msdc_command_start(host, cmd, CMD_TIMEOUT) != 0)
+@@ -683,6 +690,13 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg,
+ data->sg_len,
+ mmc_get_dma_dir(data));
++
++ if (data->sg_count == 0) {
++ dev_err(mmc_dev(host->mmc), "failed to map DMA for transfer\n");
++ data->error = -ENOMEM;
++ goto done;
++ }
++
+ msdc_dma_setup(host, &host->dma, data->sg,
+ data->sg_count);
+
+@@ -693,7 +707,6 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ /* for read, the data coming too fast, then CRC error
+ * start DMA no business with CRC.
+ */
+- //init_completion(&host->xfer_done);
+ msdc_dma_start(host);
+
+ spin_unlock(&host->lock);
+@@ -1688,6 +1701,8 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ }
+ msdc_init_gpd_bd(host, &host->dma);
+
++ init_completion(&host->cmd_done);
++ init_completion(&host->xfer_done);
+ INIT_DELAYED_WORK(&host->card_delaywork, msdc_tasklet_card);
+ spin_lock_init(&host->lock);
+ msdc_init_hw(host);
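The mt7621-mmc changes move init_completion() out of the request paths and into probe. Since init_completion() resets the done counter, re-initializing a live completion from msdc_do_request() could discard a complete() arriving concurrently from the interrupt handler; initializing once and checking completion_done() instead flags any request that was never consumed. A minimal sketch of the lifecycle, with hypothetical demo_* names:

	#include <linux/completion.h>
	#include <linux/printk.h>

	struct demo_host {
		struct completion cmd_done;
	};

	static void demo_probe(struct demo_host *host)
	{
		init_completion(&host->cmd_done);	/* once, before any I/O */
	}

	static void demo_start_cmd(struct demo_host *host)
	{
		if (completion_done(&host->cmd_done))	/* leftover completion? */
			pr_err("previous command was not handled\n");
		/* ... kick the hardware; the IRQ handler calls complete() ... */
	}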
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+index dd4898861b83..eb1e5dcb0d52 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+@@ -209,6 +209,9 @@ vchiq_platform_init_state(struct vchiq_state *state)
+ struct vchiq_2835_state *platform_state;
+
+ state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
++ if (!state->platform_state)
++ return VCHIQ_ERROR;
++
+ platform_state = (struct vchiq_2835_state *)state->platform_state;
+
+ platform_state->inited = 1;
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+index 53f5a1cb4636..819813e742d8 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+@@ -2239,6 +2239,8 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
+ local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
+
+ status = vchiq_platform_init_state(state);
++ if (status != VCHIQ_SUCCESS)
++ return VCHIQ_ERROR;
+
+ /*
+ bring up slot handler thread
+diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
+index e3fc920af682..8b7f9131e9d1 100644
+--- a/drivers/thunderbolt/icm.c
++++ b/drivers/thunderbolt/icm.c
+@@ -473,6 +473,11 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
+ goto out;
+
+ sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
++ if (!sw->uuid) {
++ tb_sw_warn(sw, "cannot allocate memory for switch\n");
++ tb_switch_put(sw);
++ goto out;
++ }
+ sw->connection_id = connection_id;
+ sw->connection_key = connection_key;
+ sw->link = link;
+diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
+index b2f0d6386cee..8c077c4f3b5b 100644
+--- a/drivers/thunderbolt/property.c
++++ b/drivers/thunderbolt/property.c
+@@ -548,6 +548,11 @@ int tb_property_add_data(struct tb_property_dir *parent, const char *key,
+
+ property->length = size / 4;
+ property->value.data = kzalloc(size, GFP_KERNEL);
++ if (!property->value.data) {
++ kfree(property);
++ return -ENOMEM;
++ }
++
+ memcpy(property->value.data, buf, buflen);
+
+ list_add_tail(&property->list, &parent->properties);
+@@ -578,7 +583,12 @@ int tb_property_add_text(struct tb_property_dir *parent, const char *key,
+ return -ENOMEM;
+
+ property->length = size / 4;
+- property->value.data = kzalloc(size, GFP_KERNEL);
++ property->value.text = kzalloc(size, GFP_KERNEL);
++ if (!property->value.text) {
++ kfree(property);
++ return -ENOMEM;
++ }
++
+ strcpy(property->value.text, text);
+
+ list_add_tail(&property->list, &parent->properties);
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index cd96994dc094..f569a2673742 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -10,15 +10,13 @@
+ #include <linux/idr.h>
+ #include <linux/nvmem-provider.h>
+ #include <linux/pm_runtime.h>
++#include <linux/sched/signal.h>
+ #include <linux/sizes.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+
+ #include "tb.h"
+
+-/* Switch authorization from userspace is serialized by this lock */
+-static DEFINE_MUTEX(switch_lock);
+-
+ /* Switch NVM support */
+
+ #define NVM_DEVID 0x05
+@@ -254,8 +252,8 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ struct tb_switch *sw = priv;
+ int ret = 0;
+
+- if (mutex_lock_interruptible(&switch_lock))
+- return -ERESTARTSYS;
++ if (!mutex_trylock(&sw->tb->lock))
++ return restart_syscall();
+
+ /*
+ * Since writing the NVM image might require some special steps,
+@@ -275,7 +273,7 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ memcpy(sw->nvm->buf + offset, val, bytes);
+
+ unlock:
+- mutex_unlock(&switch_lock);
++ mutex_unlock(&sw->tb->lock);
+
+ return ret;
+ }
+@@ -364,10 +362,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
+ }
+ nvm->non_active = nvm_dev;
+
+- mutex_lock(&switch_lock);
+ sw->nvm = nvm;
+- mutex_unlock(&switch_lock);
+-
+ return 0;
+
+ err_nvm_active:
+@@ -384,10 +379,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
+ {
+ struct tb_switch_nvm *nvm;
+
+- mutex_lock(&switch_lock);
+ nvm = sw->nvm;
+ sw->nvm = NULL;
+- mutex_unlock(&switch_lock);
+
+ if (!nvm)
+ return;
+@@ -716,8 +709,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
+ {
+ int ret = -EINVAL;
+
+- if (mutex_lock_interruptible(&switch_lock))
+- return -ERESTARTSYS;
++ if (!mutex_trylock(&sw->tb->lock))
++ return restart_syscall();
+
+ if (sw->authorized)
+ goto unlock;
+@@ -760,7 +753,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
+ }
+
+ unlock:
+- mutex_unlock(&switch_lock);
++ mutex_unlock(&sw->tb->lock);
+ return ret;
+ }
+
+@@ -817,15 +810,15 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+ struct tb_switch *sw = tb_to_switch(dev);
+ ssize_t ret;
+
+- if (mutex_lock_interruptible(&switch_lock))
+- return -ERESTARTSYS;
++ if (!mutex_trylock(&sw->tb->lock))
++ return restart_syscall();
+
+ if (sw->key)
+ ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
+ else
+ ret = sprintf(buf, "\n");
+
+- mutex_unlock(&switch_lock);
++ mutex_unlock(&sw->tb->lock);
+ return ret;
+ }
+
+@@ -842,8 +835,8 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+ else if (hex2bin(key, buf, sizeof(key)))
+ return -EINVAL;
+
+- if (mutex_lock_interruptible(&switch_lock))
+- return -ERESTARTSYS;
++ if (!mutex_trylock(&sw->tb->lock))
++ return restart_syscall();
+
+ if (sw->authorized) {
+ ret = -EBUSY;
+@@ -858,7 +851,7 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+ }
+ }
+
+- mutex_unlock(&switch_lock);
++ mutex_unlock(&sw->tb->lock);
+ return ret;
+ }
+ static DEVICE_ATTR(key, 0600, key_show, key_store);
+@@ -904,8 +897,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
+ bool val;
+ int ret;
+
+- if (mutex_lock_interruptible(&switch_lock))
+- return -ERESTARTSYS;
++ if (!mutex_trylock(&sw->tb->lock))
++ return restart_syscall();
+
+ /* If NVMem devices are not yet added */
+ if (!sw->nvm) {
+@@ -953,7 +946,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
+ }
+
+ exit_unlock:
+- mutex_unlock(&switch_lock);
++ mutex_unlock(&sw->tb->lock);
+
+ if (ret)
+ return ret;
+@@ -967,8 +960,8 @@ static ssize_t nvm_version_show(struct device *dev,
+ struct tb_switch *sw = tb_to_switch(dev);
+ int ret;
+
+- if (mutex_lock_interruptible(&switch_lock))
+- return -ERESTARTSYS;
++ if (!mutex_trylock(&sw->tb->lock))
++ return restart_syscall();
+
+ if (sw->safe_mode)
+ ret = -ENODATA;
+@@ -977,7 +970,7 @@ static ssize_t nvm_version_show(struct device *dev,
+ else
+ ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+- mutex_unlock(&switch_lock);
++ mutex_unlock(&sw->tb->lock);
+
+ return ret;
+ }
+@@ -1294,13 +1287,14 @@ int tb_switch_configure(struct tb_switch *sw)
+ return tb_plug_events_active(sw, true);
+ }
+
+-static void tb_switch_set_uuid(struct tb_switch *sw)
++static int tb_switch_set_uuid(struct tb_switch *sw)
+ {
+ u32 uuid[4];
+- int cap;
++ int cap, ret;
+
++ ret = 0;
+ if (sw->uuid)
+- return;
++ return ret;
+
+ /*
+ * The newer controllers include fused UUID as part of link
+@@ -1308,7 +1302,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
+ */
+ cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
+ if (cap > 0) {
+- tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
++ ret = tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
++ if (ret)
++ return ret;
+ } else {
+ /*
+ * ICM generates UUID based on UID and fills the upper
+@@ -1323,6 +1319,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
+ }
+
+ sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
++ if (!sw->uuid)
++ ret = -ENOMEM;
++ return ret;
+ }
+
+ static int tb_switch_add_dma_port(struct tb_switch *sw)
+@@ -1372,7 +1371,9 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
+
+ if (status) {
+ tb_sw_info(sw, "switch flash authentication failed\n");
+- tb_switch_set_uuid(sw);
++ ret = tb_switch_set_uuid(sw);
++ if (ret)
++ return ret;
+ nvm_set_auth_status(sw, status);
+ }
+
+@@ -1422,7 +1423,9 @@ int tb_switch_add(struct tb_switch *sw)
+ }
+ tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
+
+- tb_switch_set_uuid(sw);
++ ret = tb_switch_set_uuid(sw);
++ if (ret)
++ return ret;
+
+ for (i = 0; i <= sw->config.max_port_number; i++) {
+ if (sw->ports[i].disabled) {
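The switch.c hunks replace the private switch_lock with the domain-wide sw->tb->lock, taken via mutex_trylock(); on contention the sysfs handlers return restart_syscall() (hence the new <linux/sched/signal.h> include), so the read or write is transparently restarted rather than blocking while another path holds the domain lock. A sketch of the pattern:

	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/sched/signal.h>

	static ssize_t demo_show(struct mutex *lock, char *buf)
	{
		ssize_t ret;

		if (!mutex_trylock(lock))
			return restart_syscall();	/* syscall is retried */

		ret = sprintf(buf, "demo\n");
		mutex_unlock(lock);
		return ret;
	}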
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index 52584c4003e3..f5e0282225d1 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -80,8 +80,7 @@ struct tb_switch_nvm {
+ * @depth: Depth in the chain this switch is connected (ICM only)
+ *
+ * When the switch is being added or removed to the domain (other
+- * switches) you need to have domain lock held. For switch authorization
+- * internal switch_lock is enough.
++ * switches) you need to have domain lock held.
+ */
+ struct tb_switch {
+ struct device dev;
+diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
+index e27dd8beb94b..e0642dcb8b9b 100644
+--- a/drivers/thunderbolt/xdomain.c
++++ b/drivers/thunderbolt/xdomain.c
+@@ -740,6 +740,7 @@ static void enumerate_services(struct tb_xdomain *xd)
+ struct tb_service *svc;
+ struct tb_property *p;
+ struct device *dev;
++ int id;
+
+ /*
+ * First remove all services that are not available anymore in
+@@ -768,7 +769,12 @@ static void enumerate_services(struct tb_xdomain *xd)
+ break;
+ }
+
+- svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
++ id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
++ if (id < 0) {
++ kfree(svc);
++ break;
++ }
++ svc->id = id;
+ svc->dev.bus = &tb_bus_type;
+ svc->dev.type = &tb_service_type;
+ svc->dev.parent = &xd->dev;
+diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
+index 3475e841ef5c..4c18bbfe1a92 100644
+--- a/drivers/tty/ipwireless/main.c
++++ b/drivers/tty/ipwireless/main.c
+@@ -114,6 +114,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
+
+ ipw->common_memory = ioremap(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
++ if (!ipw->common_memory) {
++ ret = -ENOMEM;
++ goto exit1;
++ }
+ if (!request_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]),
+ IPWIRELESS_PCCARD_NAME)) {
+@@ -134,6 +138,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
+
+ ipw->attr_memory = ioremap(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]));
++ if (!ipw->attr_memory) {
++ ret = -ENOMEM;
++ goto exit3;
++ }
+ if (!request_mem_region(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]),
+ IPWIRELESS_PCCARD_NAME)) {
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 975d7c1288e3..e9f740484001 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -3020,6 +3020,9 @@ usb_hcd_platform_shutdown(struct platform_device *dev)
+ {
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
++ /* No need for pm_runtime_put(), we're shutting down */
++ pm_runtime_get_sync(&dev->dev);
++
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 8d4631c81b9f..310eef451db8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5902,7 +5902,10 @@ int usb_reset_device(struct usb_device *udev)
+ cintf->needs_binding = 1;
+ }
+ }
+- usb_unbind_and_rebind_marked_interfaces(udev);
++
++ /* If the reset failed, hub_wq will unbind drivers later */
++ if (ret == 0)
++ usb_unbind_and_rebind_marked_interfaces(udev);
+ }
+
+ usb_autosuspend_device(udev);
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 6812a8a3a98b..a749de7604c6 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -714,13 +714,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+ unsigned int maxsize;
+
+ if (is_isoc)
+- maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+- DEV_DMA_ISOC_RX_NBYTES_LIMIT;
++ maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
++ DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
++ MAX_DMA_DESC_NUM_HS_ISOC;
+ else
+- maxsize = DEV_DMA_NBYTES_LIMIT;
+-
+- /* Above size of one descriptor was chosen, multiple it */
+- maxsize *= MAX_DMA_DESC_NUM_GENERIC;
++ maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
+
+ return maxsize;
+ }
+@@ -932,7 +930,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
+
+ /* Update index of last configured entry in the chain */
+ hs_ep->next_desc++;
+- if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC)
++ if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
+ hs_ep->next_desc = 0;
+
+ return 0;
+@@ -964,7 +962,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+ }
+
+ /* Initialize descriptor chain by Host Busy status */
+- for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) {
++ for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
+ desc = &hs_ep->desc_list[i];
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY
+@@ -2162,7 +2160,7 @@ static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+
+ hs_ep->compl_desc++;
+- if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1))
++ if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
+ hs_ep->compl_desc = 0;
+ desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
+ }
+@@ -3899,6 +3897,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
+ unsigned int i, val, size;
+ int ret = 0;
+ unsigned char ep_type;
++ int desc_num;
+
+ dev_dbg(hsotg->dev,
+ "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
+@@ -3945,11 +3944,15 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
+ dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
+ __func__, epctrl, epctrl_reg);
+
++ if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
++ desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
++ else
++ desc_num = MAX_DMA_DESC_NUM_GENERIC;
++
+ /* Allocate DMA descriptor chain for non-ctrl endpoints */
+ if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
+ hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
+- MAX_DMA_DESC_NUM_GENERIC *
+- sizeof(struct dwc2_dma_desc),
++ desc_num * sizeof(struct dwc2_dma_desc),
+ &hs_ep->desc_list_dma, GFP_ATOMIC);
+ if (!hs_ep->desc_list) {
+ ret = -ENOMEM;
+@@ -4092,7 +4095,7 @@ error1:
+
+ error2:
+ if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
+- dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
++ dmam_free_coherent(hsotg->dev, desc_num *
+ sizeof(struct dwc2_dma_desc),
+ hs_ep->desc_list, hs_ep->desc_list_dma);
+ hs_ep->desc_list = NULL;
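The dwc2 hunks size the DMA descriptor chain per endpoint type: a high-speed isochronous endpoint gets MAX_DMA_DESC_NUM_HS_ISOC descriptors (each bounded by DEV_DMA_ISOC_TX/RX_NBYTES_LIMIT), everything else MAX_DMA_DESC_NUM_GENERIC * DEV_DMA_NBYTES_LIMIT, and the allocation in ep_enable and the free in its error path now use the same count. A condensed sketch using the macro names from the hunks (their values are hardware-defined):

	static int demo_desc_num(bool using_desc_dma, bool is_isoc)
	{
		return (using_desc_dma && is_isoc) ? MAX_DMA_DESC_NUM_HS_ISOC
						   : MAX_DMA_DESC_NUM_GENERIC;
	}

	/* alloc and free must agree on the size:
	 *   size = desc_num * sizeof(struct dwc2_dma_desc);
	 *   desc_list = dmam_alloc_coherent(dev, size, &dma, GFP_ATOMIC);
	 *   dmam_free_coherent(dev, size, desc_list, dma);
	 */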
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index f944cea4056b..72110a8c49d6 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1600,6 +1600,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_suspend(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
++ synchronize_irq(dwc->irq_gadget);
+ dwc3_core_exit(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
+@@ -1632,6 +1633,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_suspend(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
++ synchronize_irq(dwc->irq_gadget);
+ }
+
+ dwc3_otg_exit(dwc);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index e293400cc6e9..2bb0ff9608d3 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -3384,8 +3384,6 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
+ dwc3_disconnect_gadget(dwc);
+ __dwc3_gadget_stop(dwc);
+
+- synchronize_irq(dwc->irq_gadget);
+-
+ return 0;
+ }
+
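dwc3_gadget_suspend() runs with dwc->lock held, and the gadget interrupt handler takes the same lock, so calling synchronize_irq() from inside it risks deadlocking on a handler that is itself waiting for the lock. The core.c and gadget.c hunks therefore move the synchronize_irq(dwc->irq_gadget) call into dwc3_suspend_common(), after the lock is dropped:

	spin_lock_irqsave(&dwc->lock, flags);
	dwc3_gadget_suspend(dwc);	/* must not synchronize_irq() in here */
	spin_unlock_irqrestore(&dwc->lock, flags);
	synchronize_irq(dwc->irq_gadget);	/* safe: lock released */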
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 20413c276c61..47be961f1bf3 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1133,7 +1133,8 @@ error_lock:
+ error_mutex:
+ mutex_unlock(&epfile->mutex);
+ error:
+- ffs_free_buffer(io_data);
++ if (ret != -EIOCBQUEUED) /* don't free if there is iocb queued */
++ ffs_free_buffer(io_data);
+ return ret;
+ }
+
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index 68a113594808..2811c4afde01 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
+ int size = len * sizeof(u16);
+ int ret = -ENOMEM;
+
++ flags |= __GFP_NOWARN;
++
+ if (cmap->len != len) {
+ fb_dealloc_cmap(cmap);
+ if (!len)
+diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
+index 283d9307df21..ac049871704d 100644
+--- a/drivers/video/fbdev/core/modedb.c
++++ b/drivers/video/fbdev/core/modedb.c
+@@ -935,6 +935,9 @@ void fb_var_to_videomode(struct fb_videomode *mode,
+ if (var->vmode & FB_VMODE_DOUBLE)
+ vtotal *= 2;
+
++ if (!htotal || !vtotal)
++ return;
++
+ hfreq = pixclock/htotal;
+ mode->refresh = hfreq/vtotal;
+ }
+diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
+index fd02e8a4841d..9f39f0c360e0 100644
+--- a/drivers/video/fbdev/efifb.c
++++ b/drivers/video/fbdev/efifb.c
+@@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev)
+ info->apertures->ranges[0].base = efifb_fix.smem_start;
+ info->apertures->ranges[0].size = size_remap;
+
+- if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
++ if (efi_enabled(EFI_BOOT) &&
++ !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+ if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+ (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+ pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
+diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
+index 0364d3329c52..3516ce6718d9 100644
+--- a/drivers/w1/w1_io.c
++++ b/drivers/w1/w1_io.c
+@@ -432,8 +432,7 @@ int w1_reset_resume_command(struct w1_master *dev)
+ if (w1_reset_bus(dev))
+ return -1;
+
+- /* This will make only the last matched slave perform a skip ROM. */
+- w1_write_8(dev, W1_RESUME_CMD);
++ w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(w1_reset_resume_command);
+diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
+index f3fbb700f569..05a286d24f14 100644
+--- a/drivers/xen/biomerge.c
++++ b/drivers/xen/biomerge.c
+@@ -4,12 +4,13 @@
+ #include <xen/xen.h>
+ #include <xen/page.h>
+
++/* check if @page can be merged with 'vec1' */
+ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+- const struct bio_vec *vec2)
++ const struct page *page)
+ {
+ #if XEN_PAGE_SIZE == PAGE_SIZE
+ unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+- unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
++ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
+
+ return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
+ #else
+diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
+index a2cdf25573e2..706801c6c4c4 100644
+--- a/fs/afs/xattr.c
++++ b/fs/afs/xattr.c
+@@ -69,11 +69,20 @@ static int afs_xattr_get_fid(const struct xattr_handler *handler,
+ void *buffer, size_t size)
+ {
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+- char text[8 + 1 + 8 + 1 + 8 + 1];
++ char text[16 + 1 + 24 + 1 + 8 + 1];
+ size_t len;
+
+- len = sprintf(text, "%llx:%llx:%x",
+- vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
++ /* The volume ID is 64-bit, the vnode ID is 96-bit and the
++ * uniquifier is 32-bit.
++ */
++ len = sprintf(text, "%llx:", vnode->fid.vid);
++ if (vnode->fid.vnode_hi)
++ len += sprintf(text + len, "%x%016llx",
++ vnode->fid.vnode_hi, vnode->fid.vnode);
++ else
++ len += sprintf(text + len, "%llx", vnode->fid.vnode);
++ len += sprintf(text + len, ":%x", vnode->fid.unique);
++
+ if (size == 0)
+ return len;
+ if (len > size)
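The enlarged buffer matches the worst case described in the new comment: a 64-bit volume ID prints as up to 16 hex digits, the 96-bit vnode ID as up to 8 (vnode_hi) plus 16 zero-padded digits (the low 64 bits), and the 32-bit uniquifier as up to 8, joined by two ':' separators and a trailing NUL:

	char text[16 + 1 + 24 + 1 + 8 + 1];	/* 51 bytes */
	/* "%llx" (<=16) ":" "%x%016llx" (<=24) ":" "%x" (<=8) "\0" */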
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 4f2a8ae0aa42..716656d502a9 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -1009,6 +1009,7 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
+ struct list_head *workspace;
+ int ret;
+
++ level = btrfs_compress_op[type]->set_level(level);
+ workspace = get_workspace(type, level);
+ ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
+ start, pages,
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d789542edc5a..5e40c8f1e97a 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3981,8 +3981,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
+ info->space_info_kobj, "%s",
+ alloc_name(space_info->flags));
+ if (ret) {
+- percpu_counter_destroy(&space_info->total_bytes_pinned);
+- kfree(space_info);
++ kobject_put(&space_info->kobj);
+ return ret;
+ }
+
+@@ -11315,9 +11314,9 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
+ * held back allocations.
+ */
+ static int btrfs_trim_free_extents(struct btrfs_device *device,
+- struct fstrim_range *range, u64 *trimmed)
++ u64 minlen, u64 *trimmed)
+ {
+- u64 start = range->start, len = 0;
++ u64 start = 0, len = 0;
+ int ret;
+
+ *trimmed = 0;
+@@ -11360,8 +11359,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
+ if (!trans)
+ up_read(&fs_info->commit_root_sem);
+
+- ret = find_free_dev_extent_start(trans, device, range->minlen,
+- start, &start, &len);
++ ret = find_free_dev_extent_start(trans, device, minlen, start,
++ &start, &len);
+ if (trans) {
+ up_read(&fs_info->commit_root_sem);
+ btrfs_put_transaction(trans);
+@@ -11374,16 +11373,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
+ break;
+ }
+
+- /* If we are out of the passed range break */
+- if (start > range->start + range->len - 1) {
+- mutex_unlock(&fs_info->chunk_mutex);
+- ret = 0;
+- break;
+- }
+-
+- start = max(range->start, start);
+- len = min(range->len, len);
+-
+ ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
+ mutex_unlock(&fs_info->chunk_mutex);
+
+@@ -11393,10 +11382,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
+ start += len;
+ *trimmed += bytes;
+
+- /* We've trimmed enough */
+- if (*trimmed >= range->len)
+- break;
+-
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+@@ -11480,7 +11465,8 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ devices = &fs_info->fs_devices->devices;
+ list_for_each_entry(device, devices, dev_list) {
+- ret = btrfs_trim_free_extents(device, range, &group_trimmed);
++ ret = btrfs_trim_free_extents(device, range->minlen,
++ &group_trimmed);
+ if (ret) {
+ dev_failed++;
+ dev_ret = ret;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 34fe8a58b0e9..ef11808b592b 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2058,6 +2058,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ int ret = 0, err;
+ u64 len;
+
++ /*
++ * If the inode needs a full sync, make sure we use a full range to
++ * avoid log tree corruption, due to hole detection racing with ordered
++ * extent completion for adjacent ranges, and assertion failures during
++ * hole detection.
++ */
++ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
++ &BTRFS_I(inode)->runtime_flags)) {
++ start = 0;
++ end = LLONG_MAX;
++ }
++
+ /*
+ * The range length can be represented by u64, we have to do the typecasts
+ * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
+@@ -2546,10 +2558,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+
+ ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
+- if (ret) {
+- inode_unlock(inode);
++ if (ret)
+ goto out_only_mutex;
+- }
+
+ path = btrfs_alloc_path();
+ if (!path) {
+@@ -3132,6 +3142,7 @@ static long btrfs_fallocate(struct file *file, int mode,
+ ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
+ cur_offset, last_byte - cur_offset);
+ if (ret < 0) {
++ cur_offset = last_byte;
+ free_extent_map(em);
+ break;
+ }
+@@ -3181,7 +3192,7 @@ out:
+ /* Let go of our reservation. */
+ if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
+ btrfs_free_reserved_data_space(inode, data_reserved,
+- alloc_start, alloc_end - cur_offset);
++ cur_offset, alloc_end - cur_offset);
+ extent_changeset_free(data_reserved);
+ return ret;
+ }
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 351fa506dc9b..1d82ee4883eb 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -4330,27 +4330,36 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
+ mutex_lock(&fs_info->cleaner_mutex);
+ ret = relocate_block_group(rc);
+ mutex_unlock(&fs_info->cleaner_mutex);
+- if (ret < 0) {
++ if (ret < 0)
+ err = ret;
+- goto out;
+- }
+-
+- if (rc->extents_found == 0)
+- break;
+-
+- btrfs_info(fs_info, "found %llu extents", rc->extents_found);
+
++ /*
++ * We may have gotten ENOSPC after we already dirtied some
++ * extents. If writeout happens while we're relocating a
++ * different block group we could end up hitting the
++ * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
++ * btrfs_reloc_cow_block. Make sure we write everything out
++ * properly so we don't trip over this problem, and then break
++ * out of the loop if we hit an error.
++ */
+ if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
+ ret = btrfs_wait_ordered_range(rc->data_inode, 0,
+ (u64)-1);
+- if (ret) {
++ if (ret)
+ err = ret;
+- goto out;
+- }
+ invalidate_mapping_pages(rc->data_inode->i_mapping,
+ 0, -1);
+ rc->stage = UPDATE_DATA_PTRS;
+ }
++
++ if (err < 0)
++ goto out;
++
++ if (rc->extents_found == 0)
++ break;
++
++ btrfs_info(fs_info, "found %llu extents", rc->extents_found);
++
+ }
+
+ WARN_ON(rc->block_group->pinned > 0);
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index 893d12fbfda0..22124122728c 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -132,16 +132,17 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
+ return -ENOMEM;
+
+ ret = btrfs_search_slot(trans, root, key, path, 0, 1);
+- if (ret < 0) {
+- btrfs_abort_transaction(trans, ret);
++ if (ret < 0)
+ goto out;
+- }
+
+- if (ret != 0) {
+- btrfs_print_leaf(path->nodes[0]);
+- btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
+- key->objectid, key->type, key->offset);
+- BUG_ON(1);
++ if (ret > 0) {
++ btrfs_crit(fs_info,
++ "unable to find root key (%llu %u %llu) in tree %llu",
++ key->objectid, key->type, key->offset,
++ root->root_key.objectid);
++ ret = -EUCLEAN;
++ btrfs_abort_transaction(trans, ret);
++ goto out;
+ }
+
+ l = path->nodes[0];
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index 5a5930e3d32b..2f078b77fe14 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -825,7 +825,12 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
+ fs_devs->fsid_kobj.kset = btrfs_kset;
+ error = kobject_init_and_add(&fs_devs->fsid_kobj,
+ &btrfs_ktype, parent, "%pU", fs_devs->fsid);
+- return error;
++ if (error) {
++ kobject_put(&fs_devs->fsid_kobj);
++ return error;
++ }
++
++ return 0;
+ }
+
+ int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 561884f60d35..60aac95be54b 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4169,6 +4169,7 @@ fill_holes:
+ *last_extent, 0,
+ 0, len, 0, len,
+ 0, 0, 0);
++ *last_extent += len;
+ }
+ }
+ }
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+index a279c58fe360..8a63cfa29005 100644
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -159,6 +159,12 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
+ ret = -EBUSY;
+ goto out;
+ }
++
++ if (new_min < old_min && new_max > old_max) {
++ ret = -EBUSY;
++ goto out;
++ }
++
+ }
+
+ cd->next = *cp;
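Editor's sketch: the pre-existing checks in __register_chrdev_region() rejected a new minor range whose start or end lands inside an old one, but missed the case where the new range strictly encloses the old one; the added test closes that gap. The complete predicate for two inclusive ranges is just the usual interval-overlap test (illustrative code, not the kernel's):

#include <stdbool.h>

/* two inclusive minor ranges collide iff they share any value */
static bool ranges_overlap(unsigned int new_min, unsigned int new_max,
			   unsigned int old_min, unsigned int old_max)
{
	return new_min <= old_max && old_min <= new_max;
}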
+diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
+index 4dc788e3bc96..fe38b5306045 100644
+--- a/fs/crypto/crypto.c
++++ b/fs/crypto/crypto.c
+@@ -334,7 +334,7 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+ spin_lock(&dentry->d_lock);
+ cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+- dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
++ dir_has_key = fscrypt_has_encryption_key(d_inode(dir));
+ dput(dir);
+
+ /*
+diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
+index 7ff40a73dbec..050384c79f40 100644
+--- a/fs/crypto/fname.c
++++ b/fs/crypto/fname.c
+@@ -269,7 +269,7 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
+ if (iname->len < FS_CRYPTO_BLOCK_SIZE)
+ return -EUCLEAN;
+
+- if (inode->i_crypt_info)
++ if (fscrypt_has_encryption_key(inode))
+ return fname_decrypt(inode, iname, oname);
+
+ if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) {
+@@ -336,7 +336,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
+ if (ret)
+ return ret;
+
+- if (dir->i_crypt_info) {
++ if (fscrypt_has_encryption_key(dir)) {
+ if (!fscrypt_fname_encrypted_size(dir, iname->len,
+ dir->i_sb->s_cop->max_namelen,
+ &fname->crypto_buf.len))
+diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
+index 322ce9686bdb..bf291c10c682 100644
+--- a/fs/crypto/keyinfo.c
++++ b/fs/crypto/keyinfo.c
+@@ -509,7 +509,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
+ u8 *raw_key = NULL;
+ int res;
+
+- if (inode->i_crypt_info)
++ if (fscrypt_has_encryption_key(inode))
+ return 0;
+
+ res = fscrypt_initialize(inode->i_sb->s_cop->flags);
+@@ -573,7 +573,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
+ if (res)
+ goto out;
+
+- if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
++ if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL)
+ crypt_info = NULL;
+ out:
+ if (res == -ENOKEY)
+diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
+index bd7eaf9b3f00..d536889ac31b 100644
+--- a/fs/crypto/policy.c
++++ b/fs/crypto/policy.c
+@@ -194,8 +194,8 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
+ res = fscrypt_get_encryption_info(child);
+ if (res)
+ return 0;
+- parent_ci = parent->i_crypt_info;
+- child_ci = child->i_crypt_info;
++ parent_ci = READ_ONCE(parent->i_crypt_info);
++ child_ci = READ_ONCE(child->i_crypt_info);
+
+ if (parent_ci && child_ci) {
+ return memcmp(parent_ci->ci_master_key_descriptor,
+@@ -246,7 +246,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child,
+ if (res < 0)
+ return res;
+
+- ci = parent->i_crypt_info;
++ ci = READ_ONCE(parent->i_crypt_info);
+ if (ci == NULL)
+ return -ENOKEY;
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index b32a57bc5d5d..7fd2d14dc27c 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5619,25 +5619,22 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ up_write(&EXT4_I(inode)->i_data_sem);
+ ext4_journal_stop(handle);
+ if (error) {
+- if (orphan)
++ if (orphan && inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+ goto err_out;
+ }
+ }
+- if (!shrink)
++ if (!shrink) {
+ pagecache_isize_extended(inode, oldsize, inode->i_size);
+-
+- /*
+- * Blocks are going to be removed from the inode. Wait
+- * for dio in flight. Temporarily disable
+- * dioread_nolock to prevent livelock.
+- */
+- if (orphan) {
+- if (!ext4_should_journal_data(inode)) {
+- inode_dio_wait(inode);
+- } else
+- ext4_wait_for_tail_page_commit(inode);
++ } else {
++ /*
++ * Blocks are going to be removed from the inode. Wait
++ * for dio in flight.
++ */
++ inode_dio_wait(inode);
+ }
++ if (orphan && ext4_should_journal_data(inode))
++ ext4_wait_for_tail_page_commit(inode);
+ down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ rc = ext4_break_layouts(inode);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index d32964cd1117..71c28ff98b56 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -140,6 +140,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
+ {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
++ BUG_ON(atomic_read(&gl->gl_revokes));
+ rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
+ smp_mb();
+ wake_up_glock(gl);
+@@ -183,15 +184,19 @@ static int demote_ok(const struct gfs2_glock *gl)
+
+ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
+ {
++ if (!(gl->gl_ops->go_flags & GLOF_LRU))
++ return;
++
+ spin_lock(&lru_lock);
+
+- if (!list_empty(&gl->gl_lru))
+- list_del_init(&gl->gl_lru);
+- else
++ list_del(&gl->gl_lru);
++ list_add_tail(&gl->gl_lru, &lru_list);
++
++ if (!test_bit(GLF_LRU, &gl->gl_flags)) {
++ set_bit(GLF_LRU, &gl->gl_flags);
+ atomic_inc(&lru_count);
++ }
+
+- list_add_tail(&gl->gl_lru, &lru_list);
+- set_bit(GLF_LRU, &gl->gl_flags);
+ spin_unlock(&lru_lock);
+ }
+
+@@ -201,7 +206,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+ return;
+
+ spin_lock(&lru_lock);
+- if (!list_empty(&gl->gl_lru)) {
++ if (test_bit(GLF_LRU, &gl->gl_flags)) {
+ list_del_init(&gl->gl_lru);
+ atomic_dec(&lru_count);
+ clear_bit(GLF_LRU, &gl->gl_flags);
+@@ -1159,8 +1164,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
+ !test_bit(GLF_DEMOTE, &gl->gl_flags))
+ fast_path = 1;
+ }
+- if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
+- (glops->go_flags & GLOF_LRU))
++ if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
+ gfs2_glock_add_to_lru(gl);
+
+ trace_gfs2_glock_queue(gh, 0);
+@@ -1456,6 +1460,7 @@ __acquires(&lru_lock)
+ if (!spin_trylock(&gl->gl_lockref.lock)) {
+ add_back_to_lru:
+ list_add(&gl->gl_lru, &lru_list);
++ set_bit(GLF_LRU, &gl->gl_flags);
+ atomic_inc(&lru_count);
+ continue;
+ }
+@@ -1463,7 +1468,6 @@ add_back_to_lru:
+ spin_unlock(&gl->gl_lockref.lock);
+ goto add_back_to_lru;
+ }
+- clear_bit(GLF_LRU, &gl->gl_flags);
+ gl->gl_lockref.count++;
+ if (demote_ok(gl))
+ handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+@@ -1498,6 +1502,7 @@ static long gfs2_scan_glock_lru(int nr)
+ if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
+ list_move(&gl->gl_lru, &dispose);
+ atomic_dec(&lru_count);
++ clear_bit(GLF_LRU, &gl->gl_flags);
+ freed++;
+ continue;
+ }
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index cdf07b408f54..539e8dc5a3f6 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -621,6 +621,7 @@ enum {
+ SDF_SKIP_DLM_UNLOCK = 8,
+ SDF_FORCE_AIL_FLUSH = 9,
+ SDF_AIL1_IO_ERROR = 10,
++ SDF_FS_FROZEN = 11,
+ };
+
+ enum gfs2_freeze_state {
+diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
+index 31df26ed7854..69bd1597bacf 100644
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -31,9 +31,10 @@
+ * @delta is the difference between the current rtt sample and the
+ * running average srtt. We add 1/8 of that to the srtt in order to
+ * update the current srtt estimate. The variance estimate is a bit
+- * more complicated. We subtract the abs value of the @delta from
+- * the current variance estimate and add 1/4 of that to the running
+- * total.
++ * more complicated. We subtract the current variance estimate from
++ * the abs value of the @delta and add 1/4 of that to the running
++ * total. That's equivalent to 3/4 of the current variance
++ * estimate plus 1/4 of the abs of @delta.
+ *
+ * Note that the index points at the array entry containing the smoothed
+ * mean value, and the variance is always in the following entry
+@@ -49,7 +50,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
+ s64 delta = sample - s->stats[index];
+ s->stats[index] += (delta >> 3);
+ index++;
+- s->stats[index] += ((abs(delta) - s->stats[index]) >> 2);
++ s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
+ }
+
+ /**
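Editor's sketch: the corrected comment is easiest to check with the algebra written out. With delta = sample - srtt, the mean update srtt += delta/8 gives srtt = 7/8 * srtt + 1/8 * sample, and the variance update svar += (|delta| - svar)/4 gives svar = 3/4 * svar + 1/4 * |delta|. A standalone version of the arithmetic (plain C, not the kernel helper):

#include <stdint.h>
#include <stdlib.h>

static void update_stats(int64_t *srtt, int64_t *svar, int64_t sample)
{
	int64_t delta = sample - *srtt;

	*srtt += delta >> 3;	/* srtt = 7/8*srtt + 1/8*sample */
	/* cast first, as in the fix above: |delta| - svar may be negative */
	*svar += (int64_t)(llabs(delta) - *svar) >> 2;
}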
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index b8830fda51e8..0e04f87a7ddd 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -606,7 +606,8 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
+ gfs2_remove_from_ail(bd); /* drops ref on bh */
+ bd->bd_bh = NULL;
+ sdp->sd_log_num_revoke++;
+- atomic_inc(&gl->gl_revokes);
++ if (atomic_inc_return(&gl->gl_revokes) == 1)
++ gfs2_glock_hold(gl);
+ set_bit(GLF_LFLUSH, &gl->gl_flags);
+ list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
+ }
+diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
+index 8722c60b11fe..4b280611246d 100644
+--- a/fs/gfs2/lops.c
++++ b/fs/gfs2/lops.c
+@@ -669,8 +669,10 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+ bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
+ list_del_init(&bd->bd_list);
+ gl = bd->bd_gl;
+- atomic_dec(&gl->gl_revokes);
+- clear_bit(GLF_LFLUSH, &gl->gl_flags);
++ if (atomic_dec_return(&gl->gl_revokes) == 0) {
++ clear_bit(GLF_LFLUSH, &gl->gl_flags);
++ gfs2_glock_queue_put(gl);
++ }
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
+ }
+ }
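Editor's sketch: the gfs2_add_revoke() and revoke_lo_after_commit() hunks pair up: the glock now holds one reference for as long as its revoke count is non-zero, taken on the 0 -> 1 transition and dropped on the 1 -> 0 transition. The generic shape of that idiom, with illustrative names:

#include <linux/atomic.h>
#include <linux/kref.h>

struct pinned {
	atomic_t outstanding;
	struct kref ref;
};

static void pinned_release(struct kref *ref)
{
	/* final teardown once nothing is outstanding and nothing pins us */
}

static void pinned_add_work(struct pinned *p)
{
	if (atomic_inc_return(&p->outstanding) == 1)
		kref_get(&p->ref);		/* 0 -> 1: pin the object */
}

static void pinned_work_done(struct pinned *p)
{
	if (atomic_dec_return(&p->outstanding) == 0)
		kref_put(&p->ref, pinned_release);	/* 1 -> 0: unpin */
}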
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index ca71163ff7cf..360206704a14 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -973,8 +973,7 @@ void gfs2_freeze_func(struct work_struct *work)
+ if (error) {
+ printk(KERN_INFO "GFS2: couldn't get freeze lock : %d\n", error);
+ gfs2_assert_withdraw(sdp, 0);
+- }
+- else {
++ } else {
+ atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
+ error = thaw_super(sb);
+ if (error) {
+@@ -987,6 +986,8 @@ void gfs2_freeze_func(struct work_struct *work)
+ gfs2_glock_dq_uninit(&freeze_gh);
+ }
+ deactivate_super(sb);
++ clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
++ wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
+ return;
+ }
+
+@@ -1029,6 +1030,7 @@ static int gfs2_freeze(struct super_block *sb)
+ msleep(1000);
+ }
+ error = 0;
++ set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
+ out:
+ mutex_unlock(&sdp->sd_freeze_mutex);
+ return error;
+@@ -1053,7 +1055,7 @@ static int gfs2_unfreeze(struct super_block *sb)
+
+ gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+ mutex_unlock(&sdp->sd_freeze_mutex);
+- return 0;
++ return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
+ }
+
+ /**
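Editor's sketch: the new SDF_FS_FROZEN bit is a standard bit-wait handshake: gfs2_freeze() sets it, the freeze worker clears it with release semantics and wakes waiters, and gfs2_unfreeze() now blocks until the worker is done instead of returning early. The same handshake on a demo flags word (names are illustrative):

#include <linux/wait_bit.h>
#include <linux/sched.h>

#define DEMO_FROZEN 0

static unsigned long demo_flags;

static void demo_freeze(void)
{
	set_bit(DEMO_FROZEN, &demo_flags);
}

static void demo_worker_done(void)
{
	clear_bit_unlock(DEMO_FROZEN, &demo_flags);
	wake_up_bit(&demo_flags, DEMO_FROZEN);	/* wake wait_on_bit() callers */
}

static int demo_unfreeze_wait(void)
{
	/* returns nonzero if interrupted, 0 once the bit is clear */
	return wait_on_bit(&demo_flags, DEMO_FROZEN, TASK_INTERRUPTIBLE);
}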
+diff --git a/fs/internal.h b/fs/internal.h
+index 6a8b71643af4..2e7362837a6e 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -89,9 +89,7 @@ extern int sb_prepare_remount_readonly(struct super_block *);
+
+ extern void __init mnt_init(void);
+
+-extern int __mnt_want_write(struct vfsmount *);
+ extern int __mnt_want_write_file(struct file *);
+-extern void __mnt_drop_write(struct vfsmount *);
+ extern void __mnt_drop_write_file(struct file *);
+
+ /*
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 84efb8956734..30a5687a17b6 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2334,7 +2334,7 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
+ nr_cpu_ids);
+
+ ret = -EINVAL;
+- if (!cpu_possible(cpu))
++ if (!cpu_online(cpu))
+ goto err;
+
+ ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 90d71fda65ce..dfb796eab912 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -284,6 +284,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
+ struct nfs_client *clp;
+ const struct sockaddr *sap = data->addr;
+ struct nfs_net *nn = net_generic(data->net, nfs_net_id);
++ int error;
+
+ again:
+ list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+@@ -296,9 +297,11 @@ again:
+ if (clp->cl_cons_state > NFS_CS_READY) {
+ refcount_inc(&clp->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+- nfs_wait_client_init_complete(clp);
++ error = nfs_wait_client_init_complete(clp);
+ nfs_put_client(clp);
+ spin_lock(&nn->nfs_client_lock);
++ if (error < 0)
++ return ERR_PTR(error);
+ goto again;
+ }
+
+@@ -407,6 +410,8 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
+ clp = nfs_match_client(cl_init);
+ if (clp) {
+ spin_unlock(&nn->nfs_client_lock);
++ if (IS_ERR(clp))
++ return clp;
+ if (new)
+ new->rpc_ops->free_client(new);
+ return nfs_found_client(cl_init, clp);
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 00d17198ee12..f10b660805fc 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -187,7 +187,7 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
+ bool same_inode = false;
+ int ret;
+
+- if (remap_flags & ~REMAP_FILE_ADVISORY)
++ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+ return -EINVAL;
+
+ /* check alignment w.r.t. clone_blksize */
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 82c129bfe58d..93872bb50230 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -260,7 +260,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
+ * hashed directory inode aliases.
+ */
+ inode = ovl_get_inode(dentry->d_sb, &oip);
+- if (WARN_ON(IS_ERR(inode)))
++ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ } else {
+ WARN_ON(ovl_inode_real(inode) != d_inode(newdentry));
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 3b7ed5d2279c..b48273e846ad 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -832,7 +832,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
+ int fsid = bylower ? oip->lowerpath->layer->fsid : 0;
+ bool is_dir, metacopy = false;
+ unsigned long ino = 0;
+- int err = -ENOMEM;
++ int err = oip->newinode ? -EEXIST : -ENOMEM;
+
+ if (!realinode)
+ realinode = d_inode(lowerdentry);
+@@ -917,6 +917,7 @@ out:
+ return inode;
+
+ out_err:
++ pr_warn_ratelimited("overlayfs: failed to get inode (%i)\n", err);
+ inode = ERR_PTR(err);
+ goto out;
+ }
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index 3b31c1b349ae..bc143b410359 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -152,7 +152,13 @@ struct shash_desc {
+ };
+
+ #define HASH_MAX_DIGESTSIZE 64
+-#define HASH_MAX_DESCSIZE 360
++
++/*
++ * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc'
++ * containing a 'struct sha3_state'.
++ */
++#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
++
+ #define HASH_MAX_STATESIZE 512
+
+ #define SHASH_DESC_ON_STACK(shash, ctx) \
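Editor's sketch: HASH_MAX_DESCSIZE bounds the request context that SHASH_DESC_ON_STACK() places on the stack; the fix makes the constant count the outer struct shash_desc too, so the worst case named in the new comment, a nested hmac(sha3-224-generic) descriptor, fits. Consumer-side usage looks roughly like this (sha256 chosen arbitrarily; a hedged sketch, not code from this patch):

#include <crypto/hash.h>

static int demo_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm); /* sized by HASH_MAX_DESCSIZE */

		desc->tfm = tfm;
		desc->flags = 0;	/* flags field still exists in this kernel generation */
		ret = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}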
+diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
+index f4ec2834bc22..7dfa67a15a04 100644
+--- a/include/drm/tinydrm/mipi-dbi.h
++++ b/include/drm/tinydrm/mipi-dbi.h
+@@ -43,7 +43,7 @@ struct mipi_dbi {
+ struct spi_device *spi;
+ bool enabled;
+ struct mutex cmdlock;
+- int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num);
++ int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num);
+ const u8 *read_commands;
+ struct gpio_desc *dc;
+ u16 *tx_buf;
+@@ -82,6 +82,7 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
+
+ int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
+ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
++int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
+ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swap);
+ /**
+@@ -99,7 +100,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+ #define mipi_dbi_command(mipi, cmd, seq...) \
+ ({ \
+ u8 d[] = { seq }; \
+- mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \
++ mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \
+ })
+
+ #ifdef CONFIG_DEBUG_FS
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index e584673c1881..5becbafb84e8 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -224,7 +224,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
+ {
+ if (count != 1) {
+ bio->bi_flags |= (1 << BIO_REFFED);
+- smp_mb__before_atomic();
++ smp_mb();
+ }
+ atomic_set(&bio->__bi_cnt, count);
+ }
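Editor's sketch: this hunk (and the matching lib/sbitmap.c change further down) rest on one rule: smp_mb__before_atomic() only provides ordering in front of read-modify-write atomics such as atomic_inc(); atomic_set() is a plain store, so a full smp_mb() is required. Illustration with made-up variables:

#include <linux/atomic.h>

static atomic_t demo_flag;
static int demo_data;

static void demo_publish(void)
{
	demo_data = 1;
	/*
	 * smp_mb__before_atomic() would pair with nothing here, since
	 * atomic_set() is not an RMW operation; a full barrier is needed
	 * to order the demo_data store before the flag store.
	 */
	smp_mb();
	atomic_set(&demo_flag, 1);
}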
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 1c70803e9f77..7d57890cec67 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -349,6 +349,11 @@ struct cgroup {
+ * Dying cgroups are cgroups which were deleted by a user,
+ * but are still existing because someone else is holding a reference.
+ * max_descendants is a maximum allowed number of descent cgroups.
++ *
++ * nr_descendants and nr_dying_descendants are protected
++ * by cgroup_mutex and css_set_lock. It's fine to read them holding
++ * either cgroup_mutex or css_set_lock; for writing, both locks
++ * should be held.
+ */
+ int nr_descendants;
+ int nr_dying_descendants;
+diff --git a/include/linux/dax.h b/include/linux/dax.h
+index 0dd316a74a29..becaea5f4488 100644
+--- a/include/linux/dax.h
++++ b/include/linux/dax.h
+@@ -19,6 +19,12 @@ struct dax_operations {
+ */
+ long (*direct_access)(struct dax_device *, pgoff_t, long,
+ void **, pfn_t *);
++ /*
++ * Validate whether this device is usable as an fsdax backing
++ * device.
++ */
++ bool (*dax_supported)(struct dax_device *, struct block_device *, int,
++ sector_t, sector_t);
+ /* copy_from_iter: required operation for fs-dax direct-i/o */
+ size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
+ struct iov_iter *);
+@@ -75,6 +81,17 @@ static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
+ return __bdev_dax_supported(bdev, blocksize);
+ }
+
++bool __generic_fsdax_supported(struct dax_device *dax_dev,
++ struct block_device *bdev, int blocksize, sector_t start,
++ sector_t sectors);
++static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
++ struct block_device *bdev, int blocksize, sector_t start,
++ sector_t sectors)
++{
++ return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
++ sectors);
++}
++
+ static inline struct dax_device *fs_dax_get_by_host(const char *host)
+ {
+ return dax_get_by_host(host);
+@@ -99,6 +116,13 @@ static inline bool bdev_dax_supported(struct block_device *bdev,
+ return false;
+ }
+
++static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
++ struct block_device *bdev, int blocksize, sector_t start,
++ sector_t sectors)
++{
++ return false;
++}
++
+ static inline struct dax_device *fs_dax_get_by_host(const char *host)
+ {
+ return NULL;
+@@ -142,6 +166,8 @@ bool dax_alive(struct dax_device *dax_dev);
+ void *dax_get_private(struct dax_device *dax_dev);
+ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ void **kaddr, pfn_t *pfn);
++bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
++ int blocksize, sector_t start, sector_t len);
+ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i);
+ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 6074aa064b54..14ec3bdad9a9 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -746,6 +746,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+ static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+ {
+ set_memory_ro((unsigned long)hdr, hdr->pages);
++ set_memory_x((unsigned long)hdr, hdr->pages);
+ }
+
+ static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
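Editor's sketch: together with the kernel/module.c hunk later in this patch, this appears to track the change that stops read-only kernel mappings from being implicitly executable: JIT output and module text must now be flipped to executable explicitly. Generic shape for a page-aligned region of 'pages' pages (set_memory_ro()/set_memory_x() are the real arch helpers; the wrapper name is made up):

#include <asm/set_memory.h>

static void demo_seal_code(unsigned long addr, int pages)
{
	set_memory_ro(addr, pages);	/* drop write permission */
	set_memory_x(addr, pages);	/* grant execute explicitly */
}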
+diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
+index e5194fc3983e..08246f068fd8 100644
+--- a/include/linux/fscrypt.h
++++ b/include/linux/fscrypt.h
+@@ -79,7 +79,8 @@ struct fscrypt_ctx {
+
+ static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+ {
+- return (inode->i_crypt_info != NULL);
++ /* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */
++ return READ_ONCE(inode->i_crypt_info) != NULL;
+ }
+
+ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
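Editor's sketch: the fscrypt hunks above and in fs/crypto/ convert raw i_crypt_info accesses into a publication pattern: setup fully initializes the fscrypt_info, publishes the pointer with cmpxchg_release(), and readers load it with READ_ONCE(), relying on address-dependency ordering for the pointed-to fields. A compressed version with illustrative names:

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_info {
	int key_state;
};

static struct demo_info *demo_published;

static int demo_setup(void)
{
	struct demo_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;
	info->key_state = 1;	/* fully initialize before publishing */

	/* release ordering: the init above cannot sink past this store */
	if (cmpxchg_release(&demo_published, NULL, info) != NULL)
		kfree(info);	/* lost the race; another setup won */
	return 0;
}

static bool demo_has_key(void)
{
	/* pairs with cmpxchg_release() in demo_setup() */
	return READ_ONCE(demo_published) != NULL;
}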
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index 06c0fd594097..69db1affedb0 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -610,6 +610,7 @@ struct unixware_disklabel {
+
+ extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
+ extern void blk_free_devt(dev_t devt);
++extern void blk_invalidate_devt(dev_t devt);
+ extern dev_t blk_lookup_devt(const char *name, int partno);
+ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
+
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index f9707d1dcb58..ac0c70b4ce10 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -417,6 +417,7 @@ struct hid_global {
+
+ struct hid_local {
+ unsigned usage[HID_MAX_USAGES]; /* usage array */
++ u8 usage_size[HID_MAX_USAGES]; /* usage size array */
+ unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
+ unsigned usage_index;
+ unsigned usage_minimum;
+diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
+index 7e84351fa2c0..6e9fb1932dde 100644
+--- a/include/linux/iio/adc/ad_sigma_delta.h
++++ b/include/linux/iio/adc/ad_sigma_delta.h
+@@ -69,6 +69,7 @@ struct ad_sigma_delta {
+ bool irq_dis;
+
+ bool bus_locked;
++ bool keep_cs_asserted;
+
+ uint8_t comm;
+
+diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
+index 96d8435421de..0ca77dd1429c 100644
+--- a/include/linux/mlx5/eswitch.h
++++ b/include/linux/mlx5/eswitch.h
+@@ -35,7 +35,7 @@ struct mlx5_eswitch_rep_if {
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ void *priv;
+- u8 state;
++ atomic_t state;
+ };
+
+ struct mlx5_eswitch_rep {
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index 9197ddbf35fb..bf8cc4108b8f 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -87,6 +87,8 @@ extern bool mnt_may_suid(struct vfsmount *mnt);
+
+ struct path;
+ extern struct vfsmount *clone_private_mount(const struct path *path);
++extern int __mnt_want_write(struct vfsmount *);
++extern void __mnt_drop_write(struct vfsmount *);
+
+ struct file_system_type;
+ extern struct vfsmount *fc_mount(struct fs_context *fc);
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h
+index 40b48e2133cb..15eb85de9226 100644
+--- a/include/linux/overflow.h
++++ b/include/linux/overflow.h
+@@ -36,6 +36,12 @@
+ #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+ #define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
++/*
++ * Avoids triggering -Wtype-limits compilation warning,
++ * while using unsigned data types to check a < 0.
++ */
++#define is_non_negative(a) ((a) > 0 || (a) == 0)
++#define is_negative(a) (!(is_non_negative(a)))
+
+ #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+ /*
+@@ -227,10 +233,10 @@
+ typeof(d) _d = d; \
+ u64 _a_full = _a; \
+ unsigned int _to_shift = \
+- _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
++ is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
+ *_d = (_a_full << _to_shift); \
+- (_to_shift != _s || *_d < 0 || _a < 0 || \
+- (*_d >> _to_shift) != _a); \
++ (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
++ (*_d >> _to_shift) != _a); \
+ })
+
+ /**
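Editor's sketch: the macro being repaired is used like the other check_*_overflow() helpers: it stores a << s into *d and evaluates true when bits were lost or the shift count was out of range, and the new is_non_negative()/is_negative() wrappers keep that check free of -Wtype-limits warnings for unsigned operands. A hypothetical caller:

#include <linux/errno.h>
#include <linux/overflow.h>

static int demo_scale(u32 value, unsigned int shift, u32 *out)
{
	if (check_shl_overflow(value, shift, out))
		return -ERANGE;	/* *out is not a faithful value << shift */
	return 0;
}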
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 6cdb1db776cf..922bb6848813 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -878,9 +878,11 @@ static inline void rcu_head_init(struct rcu_head *rhp)
+ static inline bool
+ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+ {
+- if (READ_ONCE(rhp->func) == f)
++ rcu_callback_t func = READ_ONCE(rhp->func);
++
++ if (func == f)
+ return true;
+- WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
++ WARN_ON_ONCE(func != (rcu_callback_t)~0L);
+ return false;
+ }
+
+diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
+index f3f76051e8b0..aaf3cee70439 100644
+--- a/include/linux/regulator/consumer.h
++++ b/include/linux/regulator/consumer.h
+@@ -478,6 +478,11 @@ static inline int regulator_is_supported_voltage(struct regulator *regulator,
+ return 0;
+ }
+
++static inline unsigned int regulator_get_linear_step(struct regulator *regulator)
++{
++ return 0;
++}
++
+ static inline int regulator_set_current_limit(struct regulator *regulator,
+ int min_uA, int max_uA)
+ {
+diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
+index d0884b525001..9d1bc65d226c 100644
+--- a/include/linux/smpboot.h
++++ b/include/linux/smpboot.h
+@@ -29,7 +29,7 @@ struct smpboot_thread_data;
+ * @thread_comm: The base name of the thread
+ */
+ struct smp_hotplug_thread {
+- struct task_struct __percpu **store;
++ struct task_struct * __percpu *store;
+ struct list_head list;
+ int (*thread_should_run)(unsigned int cpu);
+ void (*thread_fn)(unsigned int cpu);
+diff --git a/include/linux/time64.h b/include/linux/time64.h
+index f38d382ffec1..a620ee610b9f 100644
+--- a/include/linux/time64.h
++++ b/include/linux/time64.h
+@@ -33,6 +33,17 @@ struct itimerspec64 {
+ #define KTIME_MAX ((s64)~((u64)1 << 63))
+ #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+
++/*
++ * Limits for settimeofday():
++ *
++ * To prevent setting the time close to the wraparound point, time setting
++ * is limited so a reasonable uptime can be accommodated. Uptime of 30 years
++ * should be really sufficient, which means the cutoff is 2232. At that
++ * point the cutoff is just a small part of the larger problem.
++ */
++#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 * 3600)
++#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)
++
+ static inline int timespec64_equal(const struct timespec64 *a,
+ const struct timespec64 *b)
+ {
+@@ -100,6 +111,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts)
+ return true;
+ }
+
++static inline bool timespec64_valid_settod(const struct timespec64 *ts)
++{
++ if (!timespec64_valid(ts))
++ return false;
++ /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
++ if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
++ return false;
++ return true;
++}
++
+ /**
+ * timespec64_to_ns - Convert timespec64 to nanoseconds
+ * @ts: pointer to the timespec64 variable to be converted
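Editor's note: the 2232 figure in the new comment follows from the constants: KTIME_SEC_MAX = (2^63 - 1) / 10^9 s, roughly 9.2e9 s, is about 292 years past 1970 (the year 2262), and reserving 30 years of uptime pulls the settimeofday() cutoff back to roughly 2232. A throwaway userspace check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int64_t ktime_sec_max = INT64_MAX / 1000000000LL;
	const int64_t uptime_sec_max = 30LL * 365 * 24 * 3600;
	const int64_t settod_sec_max = ktime_sec_max - uptime_sec_max;

	/* prints roughly 2262 and 2232 */
	printf("wrap year:   %lld\n",
	       1970 + (long long)(ktime_sec_max / (365LL * 24 * 3600)));
	printf("cutoff year: %lld\n",
	       1970 + (long long)(settod_sec_max / (365LL * 24 * 3600)));
	return 0;
}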
+diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
+index 910f3d469005..65108819de5a 100644
+--- a/include/media/videobuf2-core.h
++++ b/include/media/videobuf2-core.h
+@@ -595,6 +595,7 @@ struct vb2_queue {
+ unsigned int start_streaming_called:1;
+ unsigned int error:1;
+ unsigned int waiting_for_buffers:1;
++ unsigned int waiting_in_dqbuf:1;
+ unsigned int is_multiplanar:1;
+ unsigned int is_output:1;
+ unsigned int copy_timestamp:1;
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index fbba43e9bef5..9a5330eed794 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -282,6 +282,7 @@ enum {
+ HCI_FORCE_BREDR_SMP,
+ HCI_FORCE_STATIC_ADDR,
+ HCI_LL_RPA_RESOLUTION,
++ HCI_CMD_PENDING,
+
+ __HCI_NUM_FLAGS,
+ };
+diff --git a/include/xen/xen.h b/include/xen/xen.h
+index 19d032373de5..19a72f591e2b 100644
+--- a/include/xen/xen.h
++++ b/include/xen/xen.h
+@@ -43,8 +43,10 @@ extern struct hvm_start_info pvh_start_info;
+ #endif /* CONFIG_XEN_DOM0 */
+
+ struct bio_vec;
++struct page;
++
+ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+- const struct bio_vec *vec2);
++ const struct page *page);
+
+ #if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
+ extern u64 xen_saved_max_mem_size;
+diff --git a/kernel/acct.c b/kernel/acct.c
+index addf7732fb56..81f9831a7859 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -227,7 +227,7 @@ static int acct_on(struct filename *pathname)
+ filp_close(file, NULL);
+ return PTR_ERR(internal);
+ }
+- err = mnt_want_write(internal);
++ err = __mnt_want_write(internal);
+ if (err) {
+ mntput(internal);
+ kfree(acct);
+@@ -252,7 +252,7 @@ static int acct_on(struct filename *pathname)
+ old = xchg(&ns->bacct, &acct->pin);
+ mutex_unlock(&acct->lock);
+ pin_kill(old);
+- mnt_drop_write(mnt);
++ __mnt_drop_write(mnt);
+ mntput(mnt);
+ return 0;
+ }
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index 63f8b3f26fab..3ac71c4fda49 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -1114,22 +1114,24 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz)
+ int err = 0;
+ struct audit_entry *entry;
+
+- entry = audit_data_to_entry(data, datasz);
+- if (IS_ERR(entry))
+- return PTR_ERR(entry);
+-
+ switch (type) {
+ case AUDIT_ADD_RULE:
++ entry = audit_data_to_entry(data, datasz);
++ if (IS_ERR(entry))
++ return PTR_ERR(entry);
+ err = audit_add_rule(entry);
+ audit_log_rule_change("add_rule", &entry->rule, !err);
+ break;
+ case AUDIT_DEL_RULE:
++ entry = audit_data_to_entry(data, datasz);
++ if (IS_ERR(entry))
++ return PTR_ERR(entry);
+ err = audit_del_rule(entry);
+ audit_log_rule_change("remove_rule", &entry->rule, !err);
+ break;
+ default:
+- err = -EINVAL;
+ WARN_ON(1);
++ return -EINVAL;
+ }
+
+ if (err || type == AUDIT_DEL_RULE) {
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index d1eab1d4a930..fa7b8047aab8 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -840,6 +840,13 @@ static inline void audit_proctitle_free(struct audit_context *context)
+ context->proctitle.len = 0;
+ }
+
++static inline void audit_free_module(struct audit_context *context)
++{
++ if (context->type == AUDIT_KERN_MODULE) {
++ kfree(context->module.name);
++ context->module.name = NULL;
++ }
++}
+ static inline void audit_free_names(struct audit_context *context)
+ {
+ struct audit_names *n, *next;
+@@ -923,6 +930,7 @@ int audit_alloc(struct task_struct *tsk)
+
+ static inline void audit_free_context(struct audit_context *context)
+ {
++ audit_free_module(context);
+ audit_free_names(context);
+ unroll_tree_refs(context, NULL, 0);
+ free_tree_refs(context);
+@@ -1266,7 +1274,6 @@ static void show_special(struct audit_context *context, int *call_panic)
+ audit_log_format(ab, "name=");
+ if (context->module.name) {
+ audit_log_untrustedstring(ab, context->module.name);
+- kfree(context->module.name);
+ } else
+ audit_log_format(ab, "(null)");
+
+@@ -1697,6 +1704,7 @@ void __audit_syscall_exit(int success, long return_code)
+ context->in_syscall = 0;
+ context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
+
++ audit_free_module(context);
+ audit_free_names(context);
+ unroll_tree_refs(context, NULL, 0);
+ audit_free_aux(context);
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 191b79948424..1e525d70f833 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -164,6 +164,9 @@ static void dev_map_free(struct bpf_map *map)
+ bpf_clear_redirect_map(map);
+ synchronize_rcu();
+
++ /* Make sure prior __dev_map_entry_free() have completed. */
++ rcu_barrier();
++
+ /* To ensure all pending flush operations have completed wait for flush
+ * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
+ * Because the above synchronize_rcu() ensures the map is disconnected
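Editor's sketch: the added rcu_barrier() is not redundant with synchronize_rcu(): synchronize_rcu() waits for readers' RCU read-side critical sections to end, while rcu_barrier() waits for callbacks already queued with call_rcu() to finish running; freeing state those callbacks still touch needs both. Generic teardown shape (names are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {
	struct rcu_head rcu;
};

static void demo_entry_free(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_entry, rcu));
}

static void demo_retire(struct demo_entry *e)
{
	call_rcu(&e->rcu, demo_entry_free);
}

static void demo_teardown(void)
{
	synchronize_rcu();	/* no reader can still see retired entries */
	rcu_barrier();		/* every queued demo_entry_free() has run */
	/* only now is it safe to free state the callbacks relied on */
}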
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 3f2b4bde0f9c..9fcf6338ea5f 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -4781,9 +4781,11 @@ static void css_release_work_fn(struct work_struct *work)
+ if (cgroup_on_dfl(cgrp))
+ cgroup_rstat_flush(cgrp);
+
++ spin_lock_irq(&css_set_lock);
+ for (tcgrp = cgroup_parent(cgrp); tcgrp;
+ tcgrp = cgroup_parent(tcgrp))
+ tcgrp->nr_dying_descendants--;
++ spin_unlock_irq(&css_set_lock);
+
+ cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+ cgrp->id = -1;
+@@ -5001,12 +5003,14 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
+ if (ret)
+ goto out_psi_free;
+
++ spin_lock_irq(&css_set_lock);
+ for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
+ cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
+
+ if (tcgrp != cgrp)
+ tcgrp->nr_descendants++;
+ }
++ spin_unlock_irq(&css_set_lock);
+
+ if (notify_on_release(parent))
+ set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+@@ -5291,10 +5295,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
+ if (parent && cgroup_is_threaded(cgrp))
+ parent->nr_threaded_children--;
+
++ spin_lock_irq(&css_set_lock);
+ for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
+ tcgrp->nr_descendants--;
+ tcgrp->nr_dying_descendants++;
+ }
++ spin_unlock_irq(&css_set_lock);
+
+ cgroup1_check_for_release(parent);
+
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index 6b7cdf17ccf8..73288914ed5e 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
+@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
+ */
+ }
+
+-/*
+- * Enqueue the irq_work @work on @cpu unless it's already pending
+- * somewhere.
+- *
+- * Can be re-enqueued while the callback is still in progress.
+- */
+-bool irq_work_queue_on(struct irq_work *work, int cpu)
++/* Enqueue on current CPU, work must already be claimed and preempt disabled */
++static void __irq_work_queue_local(struct irq_work *work)
+ {
+- /* All work should have been flushed before going offline */
+- WARN_ON_ONCE(cpu_is_offline(cpu));
+-
+-#ifdef CONFIG_SMP
+-
+- /* Arch remote IPI send/receive backend aren't NMI safe */
+- WARN_ON_ONCE(in_nmi());
++ /* If the work is "lazy", handle it from next tick if any */
++ if (work->flags & IRQ_WORK_LAZY) {
++ if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
++ tick_nohz_tick_stopped())
++ arch_irq_work_raise();
++ } else {
++ if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
++ arch_irq_work_raise();
++ }
++}
+
++/* Enqueue the irq work @work on the current CPU */
++bool irq_work_queue(struct irq_work *work)
++{
+ /* Only queue if not already pending */
+ if (!irq_work_claim(work))
+ return false;
+
+- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+- arch_send_call_function_single_ipi(cpu);
+-
+-#else /* #ifdef CONFIG_SMP */
+- irq_work_queue(work);
+-#endif /* #else #ifdef CONFIG_SMP */
++ /* Queue the entry and raise the IPI if needed. */
++ preempt_disable();
++ __irq_work_queue_local(work);
++ preempt_enable();
+
+ return true;
+ }
++EXPORT_SYMBOL_GPL(irq_work_queue);
+
+-/* Enqueue the irq work @work on the current CPU */
+-bool irq_work_queue(struct irq_work *work)
++/*
++ * Enqueue the irq_work @work on @cpu unless it's already pending
++ * somewhere.
++ *
++ * Can be re-enqueued while the callback is still in progress.
++ */
++bool irq_work_queue_on(struct irq_work *work, int cpu)
+ {
++#ifndef CONFIG_SMP
++ return irq_work_queue(work);
++
++#else /* CONFIG_SMP: */
++ /* All work should have been flushed before going offline */
++ WARN_ON_ONCE(cpu_is_offline(cpu));
++
+ /* Only queue if not already pending */
+ if (!irq_work_claim(work))
+ return false;
+
+- /* Queue the entry and raise the IPI if needed. */
+ preempt_disable();
+-
+- /* If the work is "lazy", handle it from next tick if any */
+- if (work->flags & IRQ_WORK_LAZY) {
+- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+- tick_nohz_tick_stopped())
+- arch_irq_work_raise();
++ if (cpu != smp_processor_id()) {
++ /* Arch remote IPI send/receive backend aren't NMI safe */
++ WARN_ON_ONCE(in_nmi());
++ if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
++ arch_send_call_function_single_ipi(cpu);
+ } else {
+- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+- arch_irq_work_raise();
++ __irq_work_queue_local(work);
+ }
+-
+ preempt_enable();
+
+ return true;
++#endif /* CONFIG_SMP */
+ }
+-EXPORT_SYMBOL_GPL(irq_work_queue);
++
+
+ bool irq_work_needs_cpu(void)
+ {
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index bad96b476eb6..a799b1ac6b2f 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -206,6 +206,8 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
+ unsigned long rate_limit,
+ struct delayed_work *work)
+ {
++ int val;
++
+ lockdep_assert_cpus_held();
+
+ /*
+@@ -215,17 +217,20 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
+ * returns is unbalanced, because all other static_key_slow_inc()
+ * instances block while the update is in progress.
+ */
+- if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
+- WARN(atomic_read(&key->enabled) < 0,
+- "jump label: negative count!\n");
++ val = atomic_fetch_add_unless(&key->enabled, -1, 1);
++ if (val != 1) {
++ WARN(val < 0, "jump label: negative count!\n");
+ return;
+ }
+
+- if (rate_limit) {
+- atomic_inc(&key->enabled);
+- schedule_delayed_work(work, rate_limit);
+- } else {
+- jump_label_update(key);
++ jump_label_lock();
++ if (atomic_dec_and_test(&key->enabled)) {
++ if (rate_limit) {
++ atomic_inc(&key->enabled);
++ schedule_delayed_work(work, rate_limit);
++ } else {
++ jump_label_update(key);
++ }
+ }
+ jump_label_unlock();
+ }
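Editor's sketch: atomic_fetch_add_unless(v, a, u) atomically adds a to *v unless the value equals u, and returns the old value either way. The rewrite uses it so the common enabled > 1 decrement never takes jump_label_mutex; only the final 1 -> 0 transition falls through to the locked path. A toy version of the split (demo names, not from the patch):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t demo_enabled = ATOMIC_INIT(2);

/* returns true when the caller must finish the job under the lock */
static bool demo_dec_needs_slowpath(void)
{
	int old = atomic_fetch_add_unless(&demo_enabled, -1, 1);

	/* old != 1: the decrement already happened, fast path is done  */
	/* old == 1: nothing was changed; take the lock and retest there */
	return old == 1;
}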
+diff --git a/kernel/module.c b/kernel/module.c
+index 0b9aa8ab89f0..2b2845ae983e 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -1950,8 +1950,13 @@ void module_enable_ro(const struct module *mod, bool after_init)
+ return;
+
+ frob_text(&mod->core_layout, set_memory_ro);
++ frob_text(&mod->core_layout, set_memory_x);
++
+ frob_rodata(&mod->core_layout, set_memory_ro);
++
+ frob_text(&mod->init_layout, set_memory_ro);
++ frob_text(&mod->init_layout, set_memory_x);
++
+ frob_rodata(&mod->init_layout, set_memory_ro);
+
+ if (after_init)
+diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
+index c29761152874..7a6890b23c5f 100644
+--- a/kernel/rcu/rcuperf.c
++++ b/kernel/rcu/rcuperf.c
+@@ -494,6 +494,10 @@ rcu_perf_cleanup(void)
+
+ if (torture_cleanup_begin())
+ return;
++ if (!cur_ops) {
++ torture_cleanup_end();
++ return;
++ }
+
+ if (reader_tasks) {
+ for (i = 0; i < nrealreaders; i++)
+@@ -614,6 +618,7 @@ rcu_perf_init(void)
+ pr_cont("\n");
+ WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
+ firsterr = -EINVAL;
++ cur_ops = NULL;
+ goto unwind;
+ }
+ if (cur_ops->init)
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index f14d1b18a74f..a2efe27317be 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -2094,6 +2094,10 @@ rcu_torture_cleanup(void)
+ cur_ops->cb_barrier();
+ return;
+ }
++ if (!cur_ops) {
++ torture_cleanup_end();
++ return;
++ }
+
+ rcu_torture_barrier_cleanup();
+ torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
+@@ -2267,6 +2271,7 @@ rcu_torture_init(void)
+ pr_cont("\n");
+ WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
+ firsterr = -EINVAL;
++ cur_ops = NULL;
+ goto unwind;
+ }
+ if (cur_ops->fqs == NULL && fqs_duration != 0) {
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4778c48a7fda..a75ad50b5e2f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6559,6 +6559,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
+ static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 shareval)
+ {
++ if (shareval > scale_load_down(ULONG_MAX))
++ shareval = MAX_SHARES;
+ return sched_group_set_shares(css_tg(css), scale_load(shareval));
+ }
+
+@@ -6661,8 +6663,10 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
+ period = ktime_to_ns(tg->cfs_bandwidth.period);
+ if (cfs_quota_us < 0)
+ quota = RUNTIME_INF;
+- else
++ else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
+ quota = (u64)cfs_quota_us * NSEC_PER_USEC;
++ else
++ return -EINVAL;
+
+ return tg_set_cfs_bandwidth(tg, period, quota);
+ }
+@@ -6684,6 +6688,9 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
+ {
+ u64 quota, period;
+
++ if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
++ return -EINVAL;
++
+ period = (u64)cfs_period_us * NSEC_PER_USEC;
+ quota = tg->cfs_bandwidth.quota;
+
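Editor's sketch: the guards added here and in kernel/sched/rt.c share one shape: a user-supplied microseconds value is validated against U64_MAX / NSEC_PER_USEC before the nanosecond conversion, because the multiply itself is done in u64 and would otherwise wrap silently. Helper form of the check (illustrative, not from the patch):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/time64.h>

static int demo_us_to_ns(u64 us, u64 *ns)
{
	if (us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;		/* product would overflow u64 */
	*ns = us * NSEC_PER_USEC;
	return 0;
}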
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 35f3ea375084..232491e3ed0d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -9551,22 +9551,26 @@ static inline int on_null_domain(struct rq *rq)
+ * - When one of the busy CPUs notice that there may be an idle rebalancing
+ * needed, they will kick the idle load balancer, which then does idle
+ * load balancing for all the idle CPUs.
++ * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not
++ *   set anywhere yet.
+ */
+
+ static inline int find_new_ilb(void)
+ {
+- int ilb = cpumask_first(nohz.idle_cpus_mask);
++ int ilb;
+
+- if (ilb < nr_cpu_ids && idle_cpu(ilb))
+- return ilb;
++ for_each_cpu_and(ilb, nohz.idle_cpus_mask,
++ housekeeping_cpumask(HK_FLAG_MISC)) {
++ if (idle_cpu(ilb))
++ return ilb;
++ }
+
+ return nr_cpu_ids;
+ }
+
+ /*
+- * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
+- * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
+- * CPU (if there is one).
++ * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
++ * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
+ */
+ static void kick_ilb(unsigned int flags)
+ {
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 90fa23d36565..1e6b909dca36 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2555,6 +2555,8 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
+ rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
+ if (rt_runtime_us < 0)
+ rt_runtime = RUNTIME_INF;
++ else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
++ return -EINVAL;
+
+ return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
+ }
+@@ -2575,6 +2577,9 @@ int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
+ {
+ u64 rt_runtime, rt_period;
+
++ if (rt_period_us > U64_MAX / NSEC_PER_USEC)
++ return -EINVAL;
++
+ rt_period = rt_period_us * NSEC_PER_USEC;
+ rt_runtime = tg->rt_bandwidth.rt_runtime;
+
+diff --git a/kernel/time/time.c b/kernel/time/time.c
+index c3f756f8534b..86656bbac232 100644
+--- a/kernel/time/time.c
++++ b/kernel/time/time.c
+@@ -171,7 +171,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
+ static int firsttime = 1;
+ int error = 0;
+
+- if (tv && !timespec64_valid(tv))
++ if (tv && !timespec64_valid_settod(tv))
+ return -EINVAL;
+
+ error = security_settime64(tv, tz);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index f986e1918d12..f136c56c2805 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1221,7 +1221,7 @@ int do_settimeofday64(const struct timespec64 *ts)
+ unsigned long flags;
+ int ret = 0;
+
+- if (!timespec64_valid_strict(ts))
++ if (!timespec64_valid_settod(ts))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&timekeeper_lock, flags);
+@@ -1278,7 +1278,7 @@ static int timekeeping_inject_offset(const struct timespec64 *ts)
+ /* Make sure the proposed value is valid */
+ tmp = timespec64_add(tk_xtime(tk), *ts);
+ if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
+- !timespec64_valid_strict(&tmp)) {
++ !timespec64_valid_settod(&tmp)) {
+ ret = -EINVAL;
+ goto error;
+ }
+@@ -1527,7 +1527,7 @@ void __init timekeeping_init(void)
+ unsigned long flags;
+
+ read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
+- if (timespec64_valid_strict(&wall_time) &&
++ if (timespec64_valid_settod(&wall_time) &&
+ timespec64_to_ns(&wall_time) > 0) {
+ persistent_clock_exists = true;
+ } else if (timespec64_to_ns(&wall_time) != 0) {
+diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
+index 4ad967453b6f..3ea65cdff30d 100644
+--- a/kernel/trace/trace_branch.c
++++ b/kernel/trace/trace_branch.c
+@@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ int expect, int is_constant)
+ {
++ unsigned long flags = user_access_save();
++
+ /* A constant is always correct */
+ if (is_constant) {
+ f->constant++;
+@@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ f->data.correct++;
+ else
+ f->data.incorrect++;
++
++ user_access_restore(flags);
+ }
+ EXPORT_SYMBOL(ftrace_likely_update);
+
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 795aa2038377..0a200d42fa96 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -3543,14 +3543,20 @@ static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
+ struct track_data *track_data = tr->cond_snapshot->cond_data;
+ struct hist_elt_data *elt_data, *track_elt_data;
+ struct snapshot_context *context = cond_data;
++ struct action_data *action;
+ u64 track_val;
+
+ if (!track_data)
+ return false;
+
++ action = track_data->action_data;
++
+ track_val = get_track_val(track_data->hist_data, context->elt,
+ track_data->action_data);
+
++ if (!action->track_data.check_val(track_data->track_val, track_val))
++ return false;
++
+ track_data->track_val = track_val;
+ memcpy(track_data->key, context->key, track_data->key_len);
+
+diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
+index f05802687ba4..7998affa45d4 100644
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -466,6 +466,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+ int i = 0;
+ int retval = 0;
+
++ /*
++ * Mark the "remove" event done regardless of result, since some
++ * subsystems do not want to re-trigger "remove" via automatic cleanup.
++ */
++ if (action == KOBJ_REMOVE)
++ kobj->state_remove_uevent_sent = 1;
++
+ pr_debug("kobject: '%s' (%p): %s\n",
+ kobject_name(kobj), kobj, __func__);
+
+@@ -567,10 +574,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+ kobj->state_add_uevent_sent = 1;
+ break;
+
+- case KOBJ_REMOVE:
+- kobj->state_remove_uevent_sent = 1;
+- break;
+-
+ case KOBJ_UNBIND:
+ zap_modalias_env(env);
+ break;
+diff --git a/lib/sbitmap.c b/lib/sbitmap.c
+index 155fe38756ec..4a7fc4915dfc 100644
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -435,7 +435,7 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+ * to ensure that the batch size is updated before the wait
+ * counts.
+ */
+- smp_mb__before_atomic();
++ smp_mb();
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++)
+ atomic_set(&sbq->ws[i].wait_cnt, 1);
+ }
+diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
+index 58eacd41526c..023ba9f3b99f 100644
+--- a/lib/strncpy_from_user.c
++++ b/lib/strncpy_from_user.c
+@@ -23,10 +23,11 @@
+ * hit it), 'max' is the address space maximum (and we return
+ * -EFAULT if we hit it).
+ */
+-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
++static inline long do_strncpy_from_user(char *dst, const char __user *src,
++ unsigned long count, unsigned long max)
+ {
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+- long res = 0;
++ unsigned long res = 0;
+
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
+index 1c1a1b0e38a5..7f2db3fe311f 100644
+--- a/lib/strnlen_user.c
++++ b/lib/strnlen_user.c
+@@ -28,7 +28,7 @@
+ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+ {
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+- long align, res = 0;
++ unsigned long align, res = 0;
+ unsigned long c;
+
+ /*
+@@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
+ * Do everything aligned. But that means that we
+ * need to also expand the maximum..
+ */
+- align = (sizeof(long) - 1) & (unsigned long)src;
++ align = (sizeof(unsigned long) - 1) & (unsigned long)src;
+ src -= align;
+ max += align;
+
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
+index 310a4f353008..8d290da0d596 100644
+--- a/net/batman-adv/distributed-arp-table.c
++++ b/net/batman-adv/distributed-arp-table.c
+@@ -1444,7 +1444,6 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
+ hw_src, &ip_src, hw_dst, &ip_dst,
+ dat_entry->mac_addr, &dat_entry->ip);
+ dropped = true;
+- goto out;
+ }
+
+ /* Update our internal cache with both the IP addresses the node got
+@@ -1453,6 +1452,9 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
+
++ if (dropped)
++ goto out;
++
+ /* If BLA is enabled, only forward ARP replies if we have claimed the
+ * source of the ARP reply or if no one else of the same backbone has
+ * already claimed that client. This prevents that different gateways
+diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
+index 75750870cf04..f8725786b596 100644
+--- a/net/batman-adv/main.c
++++ b/net/batman-adv/main.c
+@@ -161,6 +161,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
+ spin_lock_init(&bat_priv->tt.commit_lock);
+ spin_lock_init(&bat_priv->gw.list_lock);
+ #ifdef CONFIG_BATMAN_ADV_MCAST
++ spin_lock_init(&bat_priv->mcast.mla_lock);
+ spin_lock_init(&bat_priv->mcast.want_lists_lock);
+ #endif
+ spin_lock_init(&bat_priv->tvlv.container_list_lock);
+diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
+index f91b1b6265cf..1b985ab89c08 100644
+--- a/net/batman-adv/multicast.c
++++ b/net/batman-adv/multicast.c
+@@ -325,8 +325,6 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
+ * translation table except the ones listed in the given mcast_list.
+ *
+ * If mcast_list is NULL then all are retracted.
+- *
+- * Do not call outside of the mcast worker! (or cancel mcast worker first)
+ */
+ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
+ struct hlist_head *mcast_list)
+@@ -334,8 +332,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
+ struct batadv_hw_addr *mcast_entry;
+ struct hlist_node *tmp;
+
+- WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
+-
+ hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
+ list) {
+ if (mcast_list &&
+@@ -359,8 +355,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
+ *
+ * Adds multicast listener announcements from the given mcast_list to the
+ * translation table if they have not been added yet.
+- *
+- * Do not call outside of the mcast worker! (or cancel mcast worker first)
+ */
+ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
+ struct hlist_head *mcast_list)
+@@ -368,8 +362,6 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
+ struct batadv_hw_addr *mcast_entry;
+ struct hlist_node *tmp;
+
+- WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
+-
+ if (!mcast_list)
+ return;
+
+@@ -658,7 +650,10 @@ static void batadv_mcast_mla_update(struct work_struct *work)
+ priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
+ bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
+
++ spin_lock(&bat_priv->mcast.mla_lock);
+ __batadv_mcast_mla_update(bat_priv);
++ spin_unlock(&bat_priv->mcast.mla_lock);
++
+ batadv_mcast_start_timer(bat_priv);
+ }
+
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index a21b34ed6548..ed0f6a519de5 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -1223,6 +1223,11 @@ struct batadv_priv_mcast {
+ /** @bridged: whether the soft interface has a bridge on top */
+ unsigned char bridged:1;
+
++ /**
++ * @mla_lock: a lock protecting mla_list and mla_flags
++ */
++ spinlock_t mla_lock;
++
+ /**
+ * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
+ * traffic
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index d6b2540ba7f8..f275c9905650 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -4383,6 +4383,9 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ return;
+ }
+
++ /* If we reach this point this event matches the last command sent */
++ hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
++
+ /* If the command succeeded and there's still more commands in
+ * this request the request is not yet complete.
+ */
+@@ -4493,6 +4496,8 @@ static void hci_cmd_work(struct work_struct *work)
+
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+ if (hdev->sent_cmd) {
++ if (hci_req_status_pend(hdev))
++ hci_dev_set_flag(hdev, HCI_CMD_PENDING);
+ atomic_dec(&hdev->cmd_cnt);
+ hci_send_frame(hdev, skb);
+ if (test_bit(HCI_RESET, &hdev->flags))
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 609fd6871c5a..8b893baf9bbe 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3404,6 +3404,12 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
+ hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
+ req_complete_skb);
+
++ if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
++ bt_dev_err(hdev,
++ "unexpected event for opcode 0x%4.4x", *opcode);
++ return;
++ }
++
+ if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ }
+@@ -3511,6 +3517,12 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
+ hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
+ req_complete_skb);
+
++ if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
++ bt_dev_err(hdev,
++ "unexpected event for opcode 0x%4.4x", *opcode);
++ return;
++ }
++
+ if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ }
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index ca73d36cc149..e9a95ed65491 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -46,6 +46,11 @@ void hci_req_purge(struct hci_request *req)
+ skb_queue_purge(&req->cmd_q);
+ }
+
++bool hci_req_status_pend(struct hci_dev *hdev)
++{
++ return hdev->req_status == HCI_REQ_PEND;
++}
++
+ static int req_run(struct hci_request *req, hci_req_complete_t complete,
+ hci_req_complete_skb_t complete_skb)
+ {
+diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
+index 692cc8b13368..55b2050cc9ff 100644
+--- a/net/bluetooth/hci_request.h
++++ b/net/bluetooth/hci_request.h
+@@ -37,6 +37,7 @@ struct hci_request {
+
+ void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
+ void hci_req_purge(struct hci_request *req);
++bool hci_req_status_pend(struct hci_dev *hdev);
+ int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
+ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
+ void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
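The four Bluetooth hunks above cooperate: hci_cmd_work() sets HCI_CMD_PENDING when it sends a command while a request is in flight, hci_req_cmd_complete() clears it once an event matches the last command sent, and the two event handlers drop stray events, and in particular do not restart the command queue, while the flag is still set. The flag definition itself lands in a header hunk outside this excerpt. A compilable toy model of that control flow, with all names hypothetical:

        #include <stdbool.h>
        #include <stdio.h>

        static bool cmd_pending;
        static unsigned int last_opcode;

        static void send_cmd(unsigned int opcode)
        {
                last_opcode = opcode;
                cmd_pending = true;     /* mirrors hci_dev_set_flag(HCI_CMD_PENDING) */
        }

        static void handle_event(unsigned int opcode)
        {
                if (opcode == last_opcode)
                        cmd_pending = false;    /* hci_req_cmd_complete() path */

                if (cmd_pending) {
                        printf("unexpected event for opcode 0x%4.4x\n", opcode);
                        return;         /* must not queue more command work */
                }
                /* queue_work(hdev->workqueue, &hdev->cmd_work) would go here */
        }

        int main(void)
        {
                send_cmd(0x0c03);
                handle_event(0x0c14);   /* stray event: rejected */
                handle_event(0x0c03);   /* matching event: flag cleared */
                return 0;
        }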
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 2dbcf5d5512e..b7a9fe3d5fcb 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1188,9 +1188,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
+ goto out;
+ }
+
+- /* XXX: shouldn't really modify cfg80211-owned data! */
+- ifmgd->associated->channel = sdata->csa_chandef.chan;
+-
+ ifmgd->csa_waiting_bcn = true;
+
+ ieee80211_sta_reset_beacon_monitor(sdata);
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index d7f61b0547c6..d2715b4d2e72 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -1254,7 +1254,7 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
+ struct nf_conntrack_tuple tuple;
+ struct nf_conn *ct;
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+- u_int8_t u3 = nfmsg->nfgen_family;
++ u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
+ struct nf_conntrack_zone zone;
+ int err;
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 47e30a58566c..d2a7459a5da4 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -15727,6 +15727,11 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
+
+ wdev->chandef = *chandef;
+ wdev->preset_chandef = *chandef;
++
++ if (wdev->iftype == NL80211_IFTYPE_STATION &&
++ !WARN_ON(!wdev->current_bss))
++ wdev->current_bss->pub.channel = chandef->chan;
++
+ nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
+ NL80211_CMD_CH_SWITCH_NOTIFY, 0);
+ }
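Paired with the mac80211 hunk above that stops writing ifmgd->associated->channel directly ("shouldn't really modify cfg80211-owned data"), this moves ownership of the current BSS channel into cfg80211: for station interfaces the update now happens inside cfg80211_ch_switch_notify() itself. Callers keep the existing 5.1 entry point; a sketch of the caller side, assuming a channel switch has just completed:

        /* Sketch: the notifier now updates current_bss->pub.channel itself,
         * so callers only report the new chandef. */
        static void drv_report_channel_switch(struct net_device *dev,
                                              struct cfg80211_chan_def *chandef)
        {
                cfg80211_ch_switch_notify(dev, chandef);
        }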
+diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
+index 5cd7c1d1a5d5..7409722727ca 100644
+--- a/samples/bpf/asm_goto_workaround.h
++++ b/samples/bpf/asm_goto_workaround.h
+@@ -13,4 +13,5 @@
+ #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
+ #endif
+
++#define volatile(x...) volatile("")
+ #endif
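This header already stubs out asm_volatile_goto; the added define extends the workaround to plain `asm volatile` statements. Because `volatile(x...)` is a function-like macro, it only fires where `volatile` is followed by a parenthesized list, i.e. inside asm statements, and leaves the `volatile` type qualifier untouched. A small standalone file showing the effect:

        #define volatile(x...) volatile("")

        volatile int counter;           /* qualifier use: macro not invoked */

        int main(void)
        {
                /* Expands to asm volatile(""): the whole colon-separated
                 * operand list is swallowed as one macro argument. */
                asm volatile("nop" ::: "memory");
                return counter;
        }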
+diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
+index 186e727b737b..6fd9954e1c08 100644
+--- a/security/selinux/netlabel.c
++++ b/security/selinux/netlabel.c
+@@ -288,11 +288,8 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
+ int rc;
+ struct netlbl_lsm_secattr secattr;
+ struct sk_security_struct *sksec = ep->base.sk->sk_security;
+- struct sockaddr *addr;
+ struct sockaddr_in addr4;
+-#if IS_ENABLED(CONFIG_IPV6)
+ struct sockaddr_in6 addr6;
+-#endif
+
+ if (ep->base.sk->sk_family != PF_INET &&
+ ep->base.sk->sk_family != PF_INET6)
+@@ -310,16 +307,15 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
+ if (ip_hdr(skb)->version == 4) {
+ addr4.sin_family = AF_INET;
+ addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
+- addr = (struct sockaddr *)&addr4;
+-#if IS_ENABLED(CONFIG_IPV6)
+- } else {
++ rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr4, &secattr);
++ } else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) {
+ addr6.sin6_family = AF_INET6;
+ addr6.sin6_addr = ipv6_hdr(skb)->saddr;
+- addr = (struct sockaddr *)&addr6;
+-#endif
++ rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr6, &secattr);
++ } else {
++ rc = -EAFNOSUPPORT;
+ }
+
+- rc = netlbl_conn_setattr(ep->base.sk, addr, &secattr);
+ if (rc == 0)
+ sksec->nlbl_state = NLBL_LABELED;
+
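The rewrite above removes the #ifdef dance and the sockaddr pointer that could otherwise be used uninitialized when CONFIG_IPV6 is off, and makes the unsupported case fail explicitly with -EAFNOSUPPORT. IS_ENABLED() keeps the IPv6 branch visible to the compiler in every configuration, letting dead-code elimination drop it rather than the preprocessor. The shape of the pattern, in a self-contained sketch with a stand-in config macro:

        #include <errno.h>

        #define IS_ENABLED(cfg) (cfg)   /* stand-in for the kernel macro */
        #define CONFIG_IPV6 1           /* assumed enabled for this sketch */

        static int setattr_for_version(int ip_version)
        {
                if (ip_version == 4)
                        return 0;                       /* AF_INET path */
                else if (IS_ENABLED(CONFIG_IPV6) && ip_version == 6)
                        return 0;                       /* AF_INET6 path */
                return -EAFNOSUPPORT;                   /* explicit failure */
        }

        int main(void)
        {
                return setattr_for_version(6) ? 1 : 0;
        }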
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 701a69d856f5..b20eb7fc83eb 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -832,7 +832,13 @@ static int snd_hda_codec_dev_free(struct snd_device *device)
+ struct hda_codec *codec = device->device_data;
+
+ codec->in_freeing = 1;
+- snd_hdac_device_unregister(&codec->core);
++ /*
++ * snd_hda_codec_device_new() is used by legacy HDA and ASoC driver.
++ * We can't unregister ASoC device since it will be unregistered in
++ * snd_hdac_ext_bus_device_remove().
++ */
++ if (codec->core.type == HDA_DEV_LEGACY)
++ snd_hdac_device_unregister(&codec->core);
+ codec_display_power(codec, false);
+ put_device(hda_codec_dev(codec));
+ return 0;
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 35df73e42cbc..fb2f0ac1f16f 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -439,8 +439,12 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
+ if (!ret) {
+ ret = snd_pcm_hw_constraint_eld(substream->runtime,
+ hcp->eld);
+- if (ret)
++ if (ret) {
++ mutex_lock(&hcp->current_stream_lock);
++ hcp->current_stream = NULL;
++ mutex_unlock(&hcp->current_stream_lock);
+ return ret;
++ }
+ }
+ /* Select chmap supported */
+ hdmi_codec_eld_chmap(hcp);
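Earlier in hdmi_codec_startup(), above this hunk, the codec claims exclusivity by setting hcp->current_stream under current_stream_lock; the fix releases that claim when the ELD constraint setup fails, so a later open does not see a phantom active stream. A userspace analogue of the claim/undo pattern, all names hypothetical:

        #include <pthread.h>
        #include <stddef.h>

        static pthread_mutex_t stream_lock = PTHREAD_MUTEX_INITIALIZER;
        static void *current_stream;

        static int constraint_setup(void)
        {
                return -1;      /* pretend the constraint call failed */
        }

        static int startup(void *substream)
        {
                int err;

                pthread_mutex_lock(&stream_lock);
                current_stream = substream;             /* claim */
                pthread_mutex_unlock(&stream_lock);

                err = constraint_setup();
                if (err) {
                        pthread_mutex_lock(&stream_lock);
                        current_stream = NULL;          /* the fix: undo the claim */
                        pthread_mutex_unlock(&stream_lock);
                        return err;
                }
                return 0;
        }

        int main(void)
        {
                int dummy;

                return startup(&dummy) ? 0 : 1;
        }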
+diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
+index 981f88a5f615..a04a7cedd99d 100644
+--- a/sound/soc/codecs/wcd9335.c
++++ b/sound/soc/codecs/wcd9335.c
+@@ -5188,6 +5188,7 @@ static int wcd9335_slim_status(struct slim_device *sdev,
+
+ wcd->slim = sdev;
+ wcd->slim_ifc_dev = of_slim_get_device(sdev->ctrl, ifc_dev_np);
++ of_node_put(ifc_dev_np);
+ if (!wcd->slim_ifc_dev) {
+ dev_err(dev, "Unable to get SLIM Interface device\n");
+ return -EINVAL;
+diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
+index 7b1d9970be8b..1f65cf555ebe 100644
+--- a/sound/soc/fsl/Kconfig
++++ b/sound/soc/fsl/Kconfig
+@@ -182,16 +182,17 @@ config SND_MPC52xx_SOC_EFIKA
+
+ endif # SND_POWERPC_SOC
+
++config SND_SOC_IMX_PCM_FIQ
++ tristate
++ default y if SND_SOC_IMX_SSI=y && (SND_SOC_FSL_SSI=m || SND_SOC_FSL_SPDIF=m) && (MXC_TZIC || MXC_AVIC)
++ select FIQ
++
+ if SND_IMX_SOC
+
+ config SND_SOC_IMX_SSI
+ tristate
+ select SND_SOC_FSL_UTILS
+
+-config SND_SOC_IMX_PCM_FIQ
+- tristate
+- select FIQ
+-
+ comment "SoC Audio support for Freescale i.MX boards:"
+
+ config SND_MXC_SOC_WM1133_EV1
+diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
+index 191426a6d9ad..30a3d68b5c03 100644
+--- a/sound/soc/fsl/eukrea-tlv320.c
++++ b/sound/soc/fsl/eukrea-tlv320.c
+@@ -118,13 +118,13 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fsl,mux-int-port node missing or invalid.\n");
+- return ret;
++ goto err;
+ }
+ ret = of_property_read_u32(np, "fsl,mux-ext-port", &ext_port);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fsl,mux-ext-port node missing or invalid.\n");
+- return ret;
++ goto err;
+ }
+
+ /*
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index db9e0872f73d..7549b74e464e 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -268,12 +268,14 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
+ case SND_SOC_DAIFMT_CBS_CFS:
+ val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
+ val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
++ sai->is_slave_mode = false;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ sai->is_slave_mode = true;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
++ sai->is_slave_mode = false;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
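set_dai_fmt can be called more than once over the life of a link, so a flag derived from the format must be assigned in every branch; before this fix, switching from CBM_CFM back to a bit-clock-master mode left is_slave_mode stuck at true. A minimal illustration:

        #include <stdbool.h>

        enum dai_fmt { CBS_CFS, CBM_CFM, CBS_CFM };

        static bool is_slave_mode;

        static void set_fmt(enum dai_fmt fmt)
        {
                switch (fmt) {
                case CBS_CFS:
                case CBS_CFM:
                        is_slave_mode = false;  /* reset, not "leave as-is" */
                        break;
                case CBM_CFM:
                        is_slave_mode = true;
                        break;
                }
        }

        int main(void)
        {
                set_fmt(CBM_CFM);
                set_fmt(CBS_CFS);
                return is_slave_mode;   /* 0 after the fix */
        }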
+diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
+index 9981668ab590..040d06b89f00 100644
+--- a/sound/soc/fsl/fsl_utils.c
++++ b/sound/soc/fsl/fsl_utils.c
+@@ -71,6 +71,7 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
+ iprop = of_get_property(dma_np, "cell-index", NULL);
+ if (!iprop) {
+ of_node_put(dma_np);
++ of_node_put(dma_channel_np);
+ return -EINVAL;
+ }
+ *dma_id = be32_to_cpup(iprop);
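Same leak class as the wcd9335 hunk above: every reference obtained from an of_* lookup must be dropped on every exit path, and here the early -EINVAL return released dma_np but not dma_channel_np. The invariant, sketched with a hypothetical property name:

        /* Sketch: both node references are dropped on the error path. */
        static int get_dma_id(struct device_node *ssi_np, u32 *dma_id)
        {
                struct device_node *chan_np, *dma_np;
                const __be32 *iprop;

                chan_np = of_parse_phandle(ssi_np, "fsl,playback-dma", 0);
                if (!chan_np)
                        return -EINVAL;

                dma_np = of_get_parent(chan_np);
                if (!dma_np) {
                        of_node_put(chan_np);
                        return -EINVAL;
                }

                iprop = of_get_property(dma_np, "cell-index", NULL);
                if (!iprop) {
                        of_node_put(dma_np);
                        of_node_put(chan_np);   /* the reference this fix adds */
                        return -EINVAL;
                }
                *dma_id = be32_to_cpup(iprop);

                of_node_put(dma_np);
                of_node_put(chan_np);
                return 0;
        }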
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+index 38f6ab74709d..07491a0f8fb8 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+@@ -188,7 +188,7 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
+
+ jack = &ctx->kabylake_headset;
+
+- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
++ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 46e3ab0fced4..fe99b02bbf17 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -2828,10 +2828,21 @@ EXPORT_SYMBOL_GPL(snd_soc_register_card);
+
+ static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
+ {
++ struct snd_soc_pcm_runtime *rtd;
++ int order;
++
+ if (card->instantiated) {
+ card->instantiated = false;
+ snd_soc_dapm_shutdown(card);
+ snd_soc_flush_all_delayed_work(card);
++
++ /* remove all components used by DAI links on this card */
++ for_each_comp_order(order) {
++ for_each_card_rtds(card, rtd) {
++ soc_remove_link_components(card, rtd, order);
++ }
++ }
++
+ soc_cleanup_card_resources(card);
+ if (!unregister)
+ list_add(&card->list, &unbind_card_list);
+diff --git a/sound/soc/ti/Kconfig b/sound/soc/ti/Kconfig
+index 4bf3c15d4e51..ee7c202c69b7 100644
+--- a/sound/soc/ti/Kconfig
++++ b/sound/soc/ti/Kconfig
+@@ -21,8 +21,8 @@ config SND_SOC_DAVINCI_ASP
+
+ config SND_SOC_DAVINCI_MCASP
+ tristate "Multichannel Audio Serial Port (McASP) support"
+- select SND_SOC_TI_EDMA_PCM if TI_EDMA
+- select SND_SOC_TI_SDMA_PCM if DMA_OMAP
++ select SND_SOC_TI_EDMA_PCM
++ select SND_SOC_TI_SDMA_PCM
+ help
+ Say Y or M here if you want to have support for McASP IP found in
+ various Texas Instruments SoCs like:
+diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
+index a3a67a8f0f54..9fbc759fdefe 100644
+--- a/sound/soc/ti/davinci-mcasp.c
++++ b/sound/soc/ti/davinci-mcasp.c
+@@ -45,6 +45,7 @@
+
+ #define MCASP_MAX_AFIFO_DEPTH 64
+
++#ifdef CONFIG_PM
+ static u32 context_regs[] = {
+ DAVINCI_MCASP_TXFMCTL_REG,
+ DAVINCI_MCASP_RXFMCTL_REG,
+@@ -68,6 +69,7 @@ struct davinci_mcasp_context {
+ u32 *xrsr_regs; /* for serializer configuration */
+ bool pm_state;
+ };
++#endif
+
+ struct davinci_mcasp_ruledata {
+ struct davinci_mcasp *mcasp;
+diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
+index 67167e44b726..8248b8dd89d4 100644
+--- a/tools/bpf/bpftool/.gitignore
++++ b/tools/bpf/bpftool/.gitignore
+@@ -1,5 +1,5 @@
+ *.d
+-bpftool
++/bpftool
+ bpftool*.8
+ bpf-helpers.*
+ FEATURE-DUMP.bpftool
+diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
+index 9cd015574e83..d82edadf7589 100644
+--- a/tools/lib/bpf/bpf.c
++++ b/tools/lib/bpf/bpf.c
+@@ -46,6 +46,8 @@
+ # define __NR_bpf 349
+ # elif defined(__s390__)
+ # define __NR_bpf 351
++# elif defined(__arc__)
++# define __NR_bpf 280
+ # else
+ # error __NR_bpf not defined. libbpf does not support your arch.
+ # endif
+diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
+index 6ffdd79bea89..6dc1f418034f 100644
+--- a/tools/lib/bpf/bpf.h
++++ b/tools/lib/bpf/bpf.h
+@@ -26,6 +26,7 @@
+ #include <linux/bpf.h>
+ #include <stdbool.h>
+ #include <stddef.h>
++#include <stdint.h>
+
+ #ifdef __cplusplus
+ extern "C" {
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index 8d0078b65486..af5f310ecca1 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -248,8 +248,7 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
+ return 0;
+
+ out_mmap:
+- munmap(umem->fill,
+- off.fr.desc + umem->config.fill_size * sizeof(__u64));
++ munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
+ out_socket:
+ close(umem->fd);
+ out_umem_alloc:
+@@ -523,11 +522,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *usr_config)
+ {
++ void *rx_map = NULL, *tx_map = NULL;
+ struct sockaddr_xdp sxdp = {};
+ struct xdp_mmap_offsets off;
+ struct xsk_socket *xsk;
+ socklen_t optlen;
+- void *map;
+ int err;
+
+ if (!umem || !xsk_ptr || !rx || !tx)
+@@ -593,40 +592,40 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ }
+
+ if (rx) {
+- map = xsk_mmap(NULL, off.rx.desc +
+- xsk->config.rx_size * sizeof(struct xdp_desc),
+- PROT_READ | PROT_WRITE,
+- MAP_SHARED | MAP_POPULATE,
+- xsk->fd, XDP_PGOFF_RX_RING);
+- if (map == MAP_FAILED) {
++ rx_map = xsk_mmap(NULL, off.rx.desc +
++ xsk->config.rx_size * sizeof(struct xdp_desc),
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED | MAP_POPULATE,
++ xsk->fd, XDP_PGOFF_RX_RING);
++ if (rx_map == MAP_FAILED) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ rx->mask = xsk->config.rx_size - 1;
+ rx->size = xsk->config.rx_size;
+- rx->producer = map + off.rx.producer;
+- rx->consumer = map + off.rx.consumer;
+- rx->ring = map + off.rx.desc;
++ rx->producer = rx_map + off.rx.producer;
++ rx->consumer = rx_map + off.rx.consumer;
++ rx->ring = rx_map + off.rx.desc;
+ }
+ xsk->rx = rx;
+
+ if (tx) {
+- map = xsk_mmap(NULL, off.tx.desc +
+- xsk->config.tx_size * sizeof(struct xdp_desc),
+- PROT_READ | PROT_WRITE,
+- MAP_SHARED | MAP_POPULATE,
+- xsk->fd, XDP_PGOFF_TX_RING);
+- if (map == MAP_FAILED) {
++ tx_map = xsk_mmap(NULL, off.tx.desc +
++ xsk->config.tx_size * sizeof(struct xdp_desc),
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED | MAP_POPULATE,
++ xsk->fd, XDP_PGOFF_TX_RING);
++ if (tx_map == MAP_FAILED) {
+ err = -errno;
+ goto out_mmap_rx;
+ }
+
+ tx->mask = xsk->config.tx_size - 1;
+ tx->size = xsk->config.tx_size;
+- tx->producer = map + off.tx.producer;
+- tx->consumer = map + off.tx.consumer;
+- tx->ring = map + off.tx.desc;
++ tx->producer = tx_map + off.tx.producer;
++ tx->consumer = tx_map + off.tx.consumer;
++ tx->ring = tx_map + off.tx.desc;
+ tx->cached_cons = xsk->config.tx_size;
+ }
+ xsk->tx = tx;
+@@ -653,13 +652,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+
+ out_mmap_tx:
+ if (tx)
+- munmap(xsk->tx,
+- off.tx.desc +
++ munmap(tx_map, off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc));
+ out_mmap_rx:
+ if (rx)
+- munmap(xsk->rx,
+- off.rx.desc +
++ munmap(rx_map, off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc));
+ out_socket:
+ if (--umem->refcount)
+@@ -684,10 +681,12 @@ int xsk_umem__delete(struct xsk_umem *umem)
+ optlen = sizeof(off);
+ err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (!err) {
+- munmap(umem->fill->ring,
+- off.fr.desc + umem->config.fill_size * sizeof(__u64));
+- munmap(umem->comp->ring,
+- off.cr.desc + umem->config.comp_size * sizeof(__u64));
++ (void)munmap(umem->fill->ring - off.fr.desc,
++ off.fr.desc +
++ umem->config.fill_size * sizeof(__u64));
++ (void)munmap(umem->comp->ring - off.cr.desc,
++ off.cr.desc +
++ umem->config.comp_size * sizeof(__u64));
+ }
+
+ close(umem->fd);
+@@ -698,6 +697,7 @@ int xsk_umem__delete(struct xsk_umem *umem)
+
+ void xsk_socket__delete(struct xsk_socket *xsk)
+ {
++ size_t desc_sz = sizeof(struct xdp_desc);
+ struct xdp_mmap_offsets off;
+ socklen_t optlen;
+ int err;
+@@ -710,14 +710,17 @@ void xsk_socket__delete(struct xsk_socket *xsk)
+ optlen = sizeof(off);
+ err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (!err) {
+- if (xsk->rx)
+- munmap(xsk->rx->ring,
+- off.rx.desc +
+- xsk->config.rx_size * sizeof(struct xdp_desc));
+- if (xsk->tx)
+- munmap(xsk->tx->ring,
+- off.tx.desc +
+- xsk->config.tx_size * sizeof(struct xdp_desc));
++ if (xsk->rx) {
++ (void)munmap(xsk->rx->ring - off.rx.desc,
++ off.rx.desc +
++ xsk->config.rx_size * desc_sz);
++ }
++ if (xsk->tx) {
++ (void)munmap(xsk->tx->ring - off.tx.desc,
++ off.tx.desc +
++ xsk->config.tx_size * desc_sz);
++ }
++
+ }
+
+ xsk->umem->refcount--;
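The common thread in the xsk.c hunks: munmap() must be handed the exact base address (and length) that mmap() returned. The old error and teardown paths passed interior ring pointers (base plus off.*.desc and friends), which are not the mapping base and are typically not even page-aligned. The create paths now keep the bases (rx_map, tx_map, and map for the fill ring), and the delete paths recover them by subtracting the descriptor offset back out. A runnable demonstration of the rule:

        #include <stdio.h>
        #include <sys/mman.h>

        int main(void)
        {
                size_t off = 64, len = 4096;
                char *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (base == MAP_FAILED)
                        return 1;

                char *ring = base + off;        /* interior pointer, like rx->ring */

                /* Wrong: munmap(ring, len) fails with EINVAL, since ring is
                 * not page-aligned. Right: recover the base first, exactly
                 * as xsk_socket__delete() now does with ring - off. */
                if (munmap(ring - off, len))
                        perror("munmap");
                return 0;
        }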
+diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
+index 65cbd30704b5..9e9db202d218 100644
+--- a/tools/testing/selftests/bpf/test_libbpf_open.c
++++ b/tools/testing/selftests/bpf/test_libbpf_open.c
+@@ -11,6 +11,8 @@ static const char *__doc__ =
+ #include <bpf/libbpf.h>
+ #include <getopt.h>
+
++#include "bpf_rlimit.h"
++
+ static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h' },
+ {"debug", no_argument, NULL, 'D' },
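bpf_rlimit.h is the selftests' shared helper that lifts RLIMIT_MEMLOCK before main() runs, so BPF map and program creation does not fail with EPERM under the default locked-memory limit; this test was the odd one out in not including it. The idea, reduced to a sketch:

        #include <sys/resource.h>

        /* Constructor runs before main(), mirroring what bpf_rlimit.h does. */
        static void __attribute__((constructor)) raise_memlock_rlimit(void)
        {
                struct rlimit r = {
                        .rlim_cur = RLIM_INFINITY,
                        .rlim_max = RLIM_INFINITY,
                };

                setrlimit(RLIMIT_MEMLOCK, &r);
        }

        int main(void)
        {
                return 0;
        }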
+diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
+index 4cdb63bf0521..9a9fc6c9b70b 100644
+--- a/tools/testing/selftests/bpf/trace_helpers.c
++++ b/tools/testing/selftests/bpf/trace_helpers.c
+@@ -52,6 +52,10 @@ struct ksym *ksym_search(long key)
+ int start = 0, end = sym_cnt;
+ int result;
+
++ /* kallsyms not loaded. return NULL */
++ if (sym_cnt <= 0)
++ return NULL;
++
+ while (start < end) {
+ size_t mid = start + (end - start) / 2;
+
+diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
+index 28d321ba311b..6f339882a6ca 100644
+--- a/tools/testing/selftests/cgroup/test_memcontrol.c
++++ b/tools/testing/selftests/cgroup/test_memcontrol.c
+@@ -26,7 +26,7 @@
+ */
+ static int test_memcg_subtree_control(const char *root)
+ {
+- char *parent, *child, *parent2, *child2;
++ char *parent, *child, *parent2 = NULL, *child2 = NULL;
+ int ret = KSFT_FAIL;
+ char buf[PAGE_SIZE];
+
+@@ -34,50 +34,54 @@ static int test_memcg_subtree_control(const char *root)
+ parent = cg_name(root, "memcg_test_0");
+ child = cg_name(root, "memcg_test_0/memcg_test_1");
+ if (!parent || !child)
+- goto cleanup;
++ goto cleanup_free;
+
+ if (cg_create(parent))
+- goto cleanup;
++ goto cleanup_free;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+memory"))
+- goto cleanup;
++ goto cleanup_parent;
+
+ if (cg_create(child))
+- goto cleanup;
++ goto cleanup_parent;
+
+ if (cg_read_strstr(child, "cgroup.controllers", "memory"))
+- goto cleanup;
++ goto cleanup_child;
+
+ /* Create two nested cgroups without enabling memory controller */
+ parent2 = cg_name(root, "memcg_test_1");
+ child2 = cg_name(root, "memcg_test_1/memcg_test_1");
+ if (!parent2 || !child2)
+- goto cleanup;
++ goto cleanup_free2;
+
+ if (cg_create(parent2))
+- goto cleanup;
++ goto cleanup_free2;
+
+ if (cg_create(child2))
+- goto cleanup;
++ goto cleanup_parent2;
+
+ if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf)))
+- goto cleanup;
++ goto cleanup_all;
+
+ if (!cg_read_strstr(child2, "cgroup.controllers", "memory"))
+- goto cleanup;
++ goto cleanup_all;
+
+ ret = KSFT_PASS;
+
+-cleanup:
+- cg_destroy(child);
+- cg_destroy(parent);
+- free(parent);
+- free(child);
+-
++cleanup_all:
+ cg_destroy(child2);
++cleanup_parent2:
+ cg_destroy(parent2);
++cleanup_free2:
+ free(parent2);
+ free(child2);
++cleanup_child:
++ cg_destroy(child);
++cleanup_parent:
++ cg_destroy(parent);
++cleanup_free:
++ free(parent);
++ free(child);
+
+ return ret;
+ }
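The selftest rewrite above converts a single catch-all cleanup label, which destroyed cgroups that were never created and freed pointers that were uninitialized on early exits (hence the new parent2/child2 = NULL), into the usual C ladder: labels ordered inverse to acquisition, each failure jumping to exactly the teardown already owed. The shape of the idiom, self-contained:

        #include <stdlib.h>

        static int do_test(void)
        {
                int ret = -1;
                char *a = NULL, *b = NULL;

                a = malloc(16);
                if (!a)
                        goto out_free;
                b = malloc(16);
                if (!b)
                        goto out_free;  /* free(NULL) below is a no-op */

                ret = 0;                /* ... the actual test body ... */

        out_free:
                free(b);
                free(a);
                return ret;
        }

        int main(void)
        {
                return do_test();
        }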
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 001aeda4c154..3972a9564c76 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -44,6 +44,12 @@
+
+ static struct workqueue_struct *irqfd_cleanup_wq;
+
++bool __attribute__((weak))
++kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
++{
++ return true;
++}
++
+ static void
+ irqfd_inject(struct work_struct *work)
+ {
+@@ -297,6 +303,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+ if (!kvm_arch_intc_initialized(kvm))
+ return -EAGAIN;
+
++ if (!kvm_arch_irqfd_allowed(kvm, args))
++ return -EINVAL;
++
+ irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
+ if (!irqfd)
+ return -ENOMEM;
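The weak definition gives every architecture a permissive default for kvm_arch_irqfd_allowed(); an arch that needs to veto irqfd assignment (presumably the arch-side hunk elsewhere in this patch) simply links a strong definition of the same symbol. How the override mechanism behaves, in a two-file sketch with hypothetical names:

        #include <stdbool.h>
        #include <stdio.h>

        /* core.c: permissive default, overridable at link time. */
        bool __attribute__((weak)) arch_irqfd_allowed(int gsi)
        {
                return true;
        }

        int main(void)
        {
                printf("irqfd allowed: %d\n", arch_irqfd_allowed(5));
                return 0;
        }

        /*
         * arch.c (hypothetical second translation unit): a strong
         * definition here wins over the weak one when both are linked:
         *
         *      bool arch_irqfd_allowed(int gsi)
         *      {
         *              return gsi != 0;
         *      }
         */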