author     Mike Pagano <mpagano@gentoo.org>  2020-03-11 14:19:00 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2020-03-11 14:19:00 -0400
commit     24804fd723e7cf65363c473ffe5fa05c8416f588
tree       fbb60c463f3bddc3f9b77926f186d3b02bbddf0f
parent     Linux patch 4.14.172
Linux patch 4.14.173
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |    4
-rw-r--r--  1172_linux-4.14.173.patch | 4643
2 files changed, 4647 insertions(+), 0 deletions(-)
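Patches listed in 0000_README are normally applied in sequence by the Gentoo kernel eclass when building gentoo-sources, but the same step can be done by hand. A minimal sketch, assuming a 4.14.172 source tree unpacked at linux-4.14/ (the -p1 strip level matches the a/ and b/ path prefixes used throughout the patch below):

    cd linux-4.14
    patch -p1 < ../1172_linux-4.14.173.patch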
diff --git a/0000_README b/0000_README index 699eb35f..fad3b75d 100644 --- a/0000_README +++ b/0000_README @@ -731,6 +731,10 @@ Patch: 1171_linux-4.14.172.patch From: https://www.kernel.org Desc: Linux 4.14.172 +Patch: 1172_linux-4.14.173.patch +From: https://www.kernel.org +Desc: Linux 4.14.173 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1172_linux-4.14.173.patch b/1172_linux-4.14.173.patch new file mode 100644 index 00000000..eab400d2 --- /dev/null +++ b/1172_linux-4.14.173.patch @@ -0,0 +1,4643 @@ +diff --git a/Makefile b/Makefile +index 6d3cecad7f1e..9a524b5c1d55 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 172 ++SUBLEVEL = 173 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi +index 1c88c581ff18..78d58b8af67e 100644 +--- a/arch/arm/boot/dts/dra76x.dtsi ++++ b/arch/arm/boot/dts/dra76x.dtsi +@@ -17,3 +17,8 @@ + &crossbar_mpu { + ti,irqs-skip = <10 67 68 133 139 140>; + }; ++ ++&mmc3 { ++ /* dra76x is not affected by i887 */ ++ max-frequency = <96000000>; ++}; +diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi +index 1343c86988c5..68f4482c35e2 100644 +--- a/arch/arm/boot/dts/ls1021a.dtsi ++++ b/arch/arm/boot/dts/ls1021a.dtsi +@@ -562,7 +562,7 @@ + }; + + mdio0: mdio@2d24000 { +- compatible = "fsl,etsec2-mdio"; ++ compatible = "gianfar"; + device_type = "mdio"; + #address-cells = <1>; + #size-cells = <0>; +@@ -570,7 +570,7 @@ + }; + + mdio1: mdio@2d64000 { +- compatible = "fsl,etsec2-mdio"; ++ compatible = "gianfar"; + device_type = "mdio"; + #address-cells = <1>; + #size-cells = <0>; +diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile +index 8ff71058207d..8cf1a98785a5 100644 +--- a/arch/arm/mach-imx/Makefile ++++ b/arch/arm/mach-imx/Makefile +@@ -87,6 +87,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a + obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o + obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o + endif ++AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a ++obj-$(CONFIG_SOC_IMX6) += resume-imx6.o + obj-$(CONFIG_SOC_IMX6) += pm-imx6.o + + obj-$(CONFIG_SOC_IMX1) += mach-imx1.o +diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h +index b09a2ec19267..4b318c864446 100644 +--- a/arch/arm/mach-imx/common.h ++++ b/arch/arm/mach-imx/common.h +@@ -111,17 +111,17 @@ void imx_cpu_die(unsigned int cpu); + int imx_cpu_kill(unsigned int cpu); + + #ifdef CONFIG_SUSPEND +-void v7_cpu_resume(void); + void imx53_suspend(void __iomem *ocram_vbase); + extern const u32 imx53_suspend_sz; + void imx6_suspend(void __iomem *ocram_vbase); + #else +-static inline void v7_cpu_resume(void) {} + static inline void imx53_suspend(void __iomem *ocram_vbase) {} + static const u32 imx53_suspend_sz; + static inline void imx6_suspend(void __iomem *ocram_vbase) {} + #endif + ++void v7_cpu_resume(void); ++ + void imx6_pm_ccm_init(const char *ccm_compat); + void imx6q_pm_init(void); + void imx6dl_pm_init(void); +diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S +new file mode 100644 +index 000000000000..5bd1ba7ef15b +--- /dev/null ++++ b/arch/arm/mach-imx/resume-imx6.S +@@ -0,0 +1,24 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright 2014 Freescale Semiconductor, Inc. 
++ */ ++ ++#include <linux/linkage.h> ++#include <asm/assembler.h> ++#include <asm/asm-offsets.h> ++#include <asm/hardware/cache-l2x0.h> ++#include "hardware.h" ++ ++/* ++ * The following code must assume it is running from physical address ++ * where absolute virtual addresses to the data section have to be ++ * turned into relative ones. ++ */ ++ ++ENTRY(v7_cpu_resume) ++ bl v7_invalidate_l1 ++#ifdef CONFIG_CACHE_L2X0 ++ bl l2c310_early_resume ++#endif ++ b cpu_resume ++ENDPROC(v7_cpu_resume) +diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S +index 76ee2ceec8d5..7d84b617af48 100644 +--- a/arch/arm/mach-imx/suspend-imx6.S ++++ b/arch/arm/mach-imx/suspend-imx6.S +@@ -333,17 +333,3 @@ resume: + + ret lr + ENDPROC(imx6_suspend) +- +-/* +- * The following code must assume it is running from physical address +- * where absolute virtual addresses to the data section have to be +- * turned into relative ones. +- */ +- +-ENTRY(v7_cpu_resume) +- bl v7_invalidate_l1 +-#ifdef CONFIG_CACHE_L2X0 +- bl l2c310_early_resume +-#endif +- b cpu_resume +-ENDPROC(v7_cpu_resume) +diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c +index 544ea21bfef9..b2683aca401f 100644 +--- a/arch/mips/kernel/vpe.c ++++ b/arch/mips/kernel/vpe.c +@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v) + { + list_del(&v->list); + if (v->load_addr) +- release_progmem(v); ++ release_progmem(v->load_addr); + kfree(v); + } + +diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c +index da4b0e379238..6ef41e823013 100644 +--- a/arch/powerpc/kernel/cputable.c ++++ b/arch/powerpc/kernel/cputable.c +@@ -2232,11 +2232,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, + * oprofile_cpu_type already has a value, then we are + * possibly overriding a real PVR with a logical one, + * and, in that case, keep the current value for +- * oprofile_cpu_type. ++ * oprofile_cpu_type. Futhermore, let's ensure that the ++ * fix for the PMAO bug is enabled on compatibility mode. + */ + if (old.oprofile_cpu_type != NULL) { + t->oprofile_cpu_type = old.oprofile_cpu_type; + t->oprofile_type = old.oprofile_type; ++ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG; + } + } + +diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c +index e691ff734cb5..46573842d8c3 100644 +--- a/arch/x86/boot/compressed/pagetable.c ++++ b/arch/x86/boot/compressed/pagetable.c +@@ -36,9 +36,6 @@ + #define __PAGE_OFFSET __PAGE_OFFSET_BASE + #include "../../mm/ident_map.c" + +-/* Used by pgtable.h asm code to force instruction serialization. */ +-unsigned long __force_order; +- + /* Used to track our page table allocation area. */ + struct alloc_pgt_data { + unsigned char *pgt_buf; +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 3d805e8b3739..7b4141889919 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -360,7 +360,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c) + * cpuid bit to be set. We need to ensure that we + * update that bit in this CPU's "cpu_info". 
+ */ +- get_cpu_cap(c); ++ set_cpu_cap(c, X86_FEATURE_OSPKE); + } + + #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c +index f12141ba9a76..e57b59762f9f 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c ++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c +@@ -46,8 +46,6 @@ + static struct mce i_mce; + static struct dentry *dfs_inj; + +-static u8 n_banks; +- + #define MAX_FLAG_OPT_SIZE 4 + #define NBCFG 0x44 + +@@ -570,9 +568,15 @@ err: + static int inj_bank_set(void *data, u64 val) + { + struct mce *m = (struct mce *)data; ++ u8 n_banks; ++ u64 cap; ++ ++ /* Get bank count on target CPU so we can handle non-uniform values. */ ++ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap); ++ n_banks = cap & MCG_BANKCNT_MASK; + + if (val >= n_banks) { +- pr_err("Non-existent MCE bank: %llu\n", val); ++ pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu); + return -EINVAL; + } + +@@ -665,10 +669,6 @@ static struct dfs_node { + static int __init debugfs_init(void) + { + unsigned int i; +- u64 cap; +- +- rdmsrl(MSR_IA32_MCG_CAP, cap); +- n_banks = cap & MCG_BANKCNT_MASK; + + dfs_inj = debugfs_create_dir("mce-inject", NULL); + if (!dfs_inj) +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c +index 0b0e44f85393..95c09db1bba2 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -1499,13 +1499,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq); + static int __mcheck_cpu_mce_banks_init(void) + { + int i; +- u8 num_banks = mca_cfg.banks; + +- mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL); ++ mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL); + if (!mce_banks) + return -ENOMEM; + +- for (i = 0; i < num_banks; i++) { ++ for (i = 0; i < MAX_NR_BANKS; i++) { + struct mce_bank *b = &mce_banks[i]; + + b->ctl = -1ULL; +@@ -1519,28 +1518,19 @@ static int __mcheck_cpu_mce_banks_init(void) + */ + static int __mcheck_cpu_cap_init(void) + { +- unsigned b; + u64 cap; ++ u8 b; + + rdmsrl(MSR_IA32_MCG_CAP, cap); + + b = cap & MCG_BANKCNT_MASK; +- if (!mca_cfg.banks) +- pr_info("CPU supports %d MCE banks\n", b); +- +- if (b > MAX_NR_BANKS) { +- pr_warn("Using only %u machine check banks out of %u\n", +- MAX_NR_BANKS, b); ++ if (WARN_ON_ONCE(b > MAX_NR_BANKS)) + b = MAX_NR_BANKS; +- } + +- /* Don't support asymmetric configurations today */ +- WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks); +- mca_cfg.banks = b; ++ mca_cfg.banks = max(mca_cfg.banks, b); + + if (!mce_banks) { + int err = __mcheck_cpu_mce_banks_init(); +- + if (err) + return err; + } +@@ -2470,6 +2460,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key); + + static int __init mcheck_late_init(void) + { ++ pr_info("Using %d MCE banks\n", mca_cfg.banks); ++ + if (mca_cfg.recovery) + static_branch_inc(&mcsafe_key); + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 8e65a9b40c18..d63621386418 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -1088,6 +1088,47 @@ static int avic_ga_log_notifier(u32 ga_tag) + return 0; + } + ++/* ++ * The default MMIO mask is a single bit (excluding the present bit), ++ * which could conflict with the memory encryption bit. Check for ++ * memory encryption support and override the default MMIO mask if ++ * memory encryption is enabled. 
++ */ ++static __init void svm_adjust_mmio_mask(void) ++{ ++ unsigned int enc_bit, mask_bit; ++ u64 msr, mask; ++ ++ /* If there is no memory encryption support, use existing mask */ ++ if (cpuid_eax(0x80000000) < 0x8000001f) ++ return; ++ ++ /* If memory encryption is not enabled, use existing mask */ ++ rdmsrl(MSR_K8_SYSCFG, msr); ++ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT)) ++ return; ++ ++ enc_bit = cpuid_ebx(0x8000001f) & 0x3f; ++ mask_bit = boot_cpu_data.x86_phys_bits; ++ ++ /* Increment the mask bit if it is the same as the encryption bit */ ++ if (enc_bit == mask_bit) ++ mask_bit++; ++ ++ /* ++ * If the mask bit location is below 52, then some bits above the ++ * physical addressing limit will always be reserved, so use the ++ * rsvd_bits() function to generate the mask. This mask, along with ++ * the present bit, will be used to generate a page fault with ++ * PFER.RSV = 1. ++ * ++ * If the mask bit location is 52 (or above), then clear the mask. ++ */ ++ mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; ++ ++ kvm_mmu_set_mmio_spte_mask(mask, mask); ++} ++ + static __init int svm_hardware_setup(void) + { + int cpu; +@@ -1123,6 +1164,8 @@ static __init int svm_hardware_setup(void) + kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); + } + ++ svm_adjust_mmio_mask(); ++ + for_each_possible_cpu(cpu) { + r = svm_cpu_init(cpu); + if (r) +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index acf72da288f9..f85680b86524 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -12370,6 +12370,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, + else + intercept = nested_vmx_check_io_bitmaps(vcpu, port, size); + ++ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ + return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; + } + +@@ -12399,6 +12400,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu, + case x86_intercept_outs: + return vmx_check_intercept_io(vcpu, info); + ++ case x86_intercept_lgdt: ++ case x86_intercept_lidt: ++ case x86_intercept_lldt: ++ case x86_intercept_ltr: ++ case x86_intercept_sgdt: ++ case x86_intercept_sidt: ++ case x86_intercept_sldt: ++ case x86_intercept_str: ++ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC)) ++ return X86EMUL_CONTINUE; ++ ++ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ ++ break; ++ + /* TODO: check more intercepts... 
*/ + default: + break; +diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c +index f79a0cdc6b4e..1f8175bf2a5e 100644 +--- a/arch/x86/xen/enlighten_pv.c ++++ b/arch/x86/xen/enlighten_pv.c +@@ -909,14 +909,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) + static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) + { + int ret; ++#ifdef CONFIG_X86_64 ++ unsigned int which; ++ u64 base; ++#endif + + ret = 0; + + switch (msr) { + #ifdef CONFIG_X86_64 +- unsigned which; +- u64 base; +- + case MSR_FS_BASE: which = SEGBASE_FS; goto set; + case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set; + case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set; +diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c +index 95600309ce42..23cde3d8e8fb 100644 +--- a/drivers/acpi/acpi_watchdog.c ++++ b/drivers/acpi/acpi_watchdog.c +@@ -129,12 +129,11 @@ void __init acpi_watchdog_init(void) + gas = &entries[i].register_region; + + res.start = gas->address; ++ res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + res.flags = IORESOURCE_MEM; +- res.end = res.start + ALIGN(gas->access_width, 4) - 1; + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + res.flags = IORESOURCE_IO; +- res.end = res.start + gas->access_width - 1; + } else { + pr_warn("Unsupported address space: %u\n", + gas->space_id); +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c +index 941bffd9b49c..0146bc3252c5 100644 +--- a/drivers/char/ipmi/ipmi_ssif.c ++++ b/drivers/char/ipmi/ipmi_ssif.c +@@ -750,10 +750,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + msg = ssif_info->curr_msg; + if (msg) { ++ if (data) { ++ if (len > IPMI_MAX_MSG_LENGTH) ++ len = IPMI_MAX_MSG_LENGTH; ++ memcpy(msg->rsp, data, len); ++ } else { ++ len = 0; ++ } + msg->rsp_size = len; +- if (msg->rsp_size > IPMI_MAX_MSG_LENGTH) +- msg->rsp_size = IPMI_MAX_MSG_LENGTH; +- memcpy(msg->rsp, data, msg->rsp_size); + ssif_info->curr_msg = NULL; + } + +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 50d5846acf48..e6efa07e9f9e 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -1598,9 +1598,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, + print_once = true; + #endif + if (__ratelimit(&unseeded_warning)) +- printk_deferred(KERN_NOTICE "random: %s called from %pS " +- "with crng_init=%d\n", func_name, caller, +- crng_init); ++ pr_notice("random: %s called from %pS with crng_init=%d\n", ++ func_name, caller, crng_init); + } + + /* +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c +index 58ec3abfd321..b05e6a15221c 100644 +--- a/drivers/devfreq/devfreq.c ++++ b/drivers/devfreq/devfreq.c +@@ -513,7 +513,6 @@ struct devfreq *devfreq_add_device(struct device *dev, + { + struct devfreq *devfreq; + struct devfreq_governor *governor; +- static atomic_t devfreq_no = ATOMIC_INIT(-1); + int err = 0; + + if (!dev || !profile || !governor_name) { +@@ -556,8 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev, + mutex_lock(&devfreq->lock); + } + +- dev_set_name(&devfreq->dev, "devfreq%d", +- atomic_inc_return(&devfreq_no)); ++ dev_set_name(&devfreq->dev, "%s", dev_name(dev)); + err = device_register(&devfreq->dev); + if (err) { + mutex_unlock(&devfreq->lock); +diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c +index 
6d7d2d54eacf..f0932f25a9b1 100644 +--- a/drivers/dma/coh901318.c ++++ b/drivers/dma/coh901318.c +@@ -1944,8 +1944,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc) + return; + } + +- spin_lock(&cohc->lock); +- + /* + * When we reach this point, at least one queue item + * should have been moved over from cohc->queue to +@@ -1966,8 +1964,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc) + if (coh901318_queue_start(cohc) == NULL) + cohc->busy = 0; + +- spin_unlock(&cohc->lock); +- + /* + * This tasklet will remove items from cohc->active + * and thus terminates them. +diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c +index 7db2766b5fe9..3402494cadf9 100644 +--- a/drivers/dma/tegra20-apb-dma.c ++++ b/drivers/dma/tegra20-apb-dma.c +@@ -288,7 +288,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get( + + /* Do not allocate if desc are waiting for ack */ + list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { +- if (async_tx_test_ack(&dma_desc->txd)) { ++ if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) { + list_del(&dma_desc->node); + spin_unlock_irqrestore(&tdc->lock, flags); + dma_desc->txd.flags = 0; +@@ -755,10 +755,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc) + bool was_busy; + + spin_lock_irqsave(&tdc->lock, flags); +- if (list_empty(&tdc->pending_sg_req)) { +- spin_unlock_irqrestore(&tdc->lock, flags); +- return 0; +- } + + if (!tdc->busy) + goto skip_dma_stop; +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c +index 40fb0e7ff8fd..b36abd253786 100644 +--- a/drivers/edac/amd64_edac.c ++++ b/drivers/edac/amd64_edac.c +@@ -2863,6 +2863,7 @@ static int init_csrows(struct mem_ctl_info *mci) + dimm = csrow->channels[j]->dimm; + dimm->mtype = pvt->dram_type; + dimm->edac_mode = edac_mode; ++ dimm->grain = 64; + } + } + +diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c +index 02c61a1ad56a..e9f9063dbf63 100644 +--- a/drivers/gpu/drm/i915/gvt/vgpu.c ++++ b/drivers/gpu/drm/i915/gvt/vgpu.c +@@ -513,9 +513,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + + intel_vgpu_reset_mmio(vgpu, dmlr); + populate_pvinfo_page(vgpu); +- intel_vgpu_reset_display(vgpu); + + if (dmlr) { ++ intel_vgpu_reset_display(vgpu); + intel_vgpu_reset_cfg_space(vgpu); + /* only reset the failsafe mode when dmlr reset */ + vgpu->failsafe = false; +diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c +index 855248132b2b..9fbfa9f94e6c 100644 +--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c ++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c +@@ -400,7 +400,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) + return num; + } + +-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + int id = dsi_mgr_connector_get_id(connector); +@@ -543,6 +543,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) + struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); + struct mipi_dsi_host *host = msm_dsi->host; + struct drm_panel *panel = msm_dsi->panel; ++ struct msm_dsi_pll *src_pll; + bool is_dual_dsi = IS_DUAL_DSI(); + int ret; + +@@ -583,6 +584,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) + id, ret); + } + ++ /* Save PLL status if it is a clock source */ ++ src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); ++ msm_dsi_pll_save_state(src_pll); ++ + ret = 
msm_dsi_host_power_off(host); + if (ret) + pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +index 7c9bf91bc22b..c0a7fa56d9a7 100644 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +@@ -613,10 +613,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) + if (!phy || !phy->cfg->ops.disable) + return; + +- /* Save PLL status if it is a clock source */ +- if (phy->usecase != MSM_DSI_PHY_SLAVE) +- msm_dsi_pll_save_state(phy->pll); +- + phy->cfg->ops.disable(phy); + + dsi_phy_regulator_disable(phy); +diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +index 440977677001..99d356b6e915 100644 +--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c ++++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +@@ -1004,8 +1004,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) + ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, + msecs_to_jiffies(50)); + if (ret == 0) +- dev_warn(dev->dev, "pp done time out, lm=%d\n", +- mdp5_cstate->pipeline.mixer->lm); ++ dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n", ++ mdp5_cstate->pipeline.mixer->lm); + } + + static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c +index 77c45a2ebd83..d9c0687435a0 100644 +--- a/drivers/gpu/drm/msm/msm_drv.c ++++ b/drivers/gpu/drm/msm/msm_drv.c +@@ -420,6 +420,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) + if (ret) + goto fail; + ++ if (!dev->dma_parms) { ++ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), ++ GFP_KERNEL); ++ if (!dev->dma_parms) ++ return -ENOMEM; ++ } ++ dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); ++ + msm_gem_shrinker_init(ddev); + + switch (get_mdp_ver(pdev)) { +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 2d089d3954e3..75b0a337114d 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1567,7 +1567,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, + + rsize = ((report->size - 1) >> 3) + 1; + +- if (rsize > HID_MAX_BUFFER_SIZE) ++ if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) ++ rsize = HID_MAX_BUFFER_SIZE - 1; ++ else if (rsize > HID_MAX_BUFFER_SIZE) + rsize = HID_MAX_BUFFER_SIZE; + + if (csize < rsize) { +diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c +index 2ce1eb0c9212..f2e23f81601e 100644 +--- a/drivers/hid/hid-ite.c ++++ b/drivers/hid/hid-ite.c +@@ -44,8 +44,9 @@ static const struct hid_device_id ite_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, + { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, + /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. 
*/ +- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, +- USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, ++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, ++ USB_VENDOR_ID_SYNAPTICS, ++ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, + { } + }; + MODULE_DEVICE_TABLE(hid, ite_devices); +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c +index bccd97cdc53f..d9602f3a359e 100644 +--- a/drivers/hid/usbhid/hiddev.c ++++ b/drivers/hid/usbhid/hiddev.c +@@ -954,9 +954,9 @@ void hiddev_disconnect(struct hid_device *hid) + hiddev->exist = 0; + + if (hiddev->open) { +- mutex_unlock(&hiddev->existancelock); + hid_hw_close(hiddev->hid); + wake_up_interruptible(&hiddev->wait); ++ mutex_unlock(&hiddev->existancelock); + } else { + mutex_unlock(&hiddev->existancelock); + kfree(hiddev); +diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c +index 19f2a6d48bac..bdd7679fd298 100644 +--- a/drivers/hwmon/adt7462.c ++++ b/drivers/hwmon/adt7462.c +@@ -426,7 +426,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which) + return 0x95; + break; + } +- return -ENODEV; ++ return 0; + } + + /* Provide labels for sysfs */ +diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c +index f5e1941e65b5..a1cdcfc74acf 100644 +--- a/drivers/i2c/busses/i2c-altera.c ++++ b/drivers/i2c/busses/i2c-altera.c +@@ -182,7 +182,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev) + /* SCL Low Time */ + writel(t_low, idev->base + ALTR_I2C_SCL_LOW); + /* SDA Hold Time, 300ns */ +- writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD); ++ writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD); + + /* Mask all master interrupt bits */ + altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); +diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c +index 30132c3957cd..41ca9ff7b5da 100644 +--- a/drivers/i2c/busses/i2c-jz4780.c ++++ b/drivers/i2c/busses/i2c-jz4780.c +@@ -82,25 +82,6 @@ + #define JZ4780_I2C_STA_TFNF BIT(1) + #define JZ4780_I2C_STA_ACT BIT(0) + +-static const char * const jz4780_i2c_abrt_src[] = { +- "ABRT_7B_ADDR_NOACK", +- "ABRT_10ADDR1_NOACK", +- "ABRT_10ADDR2_NOACK", +- "ABRT_XDATA_NOACK", +- "ABRT_GCALL_NOACK", +- "ABRT_GCALL_READ", +- "ABRT_HS_ACKD", +- "SBYTE_ACKDET", +- "ABRT_HS_NORSTRT", +- "SBYTE_NORSTRT", +- "ABRT_10B_RD_NORSTRT", +- "ABRT_MASTER_DIS", +- "ARB_LOST", +- "SLVFLUSH_TXFIFO", +- "SLV_ARBLOST", +- "SLVRD_INTX", +-}; +- + #define JZ4780_I2C_INTST_IGC BIT(11) + #define JZ4780_I2C_INTST_ISTT BIT(10) + #define JZ4780_I2C_INTST_ISTP BIT(9) +@@ -538,21 +519,8 @@ done: + + static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src) + { +- int i; +- +- dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src); +- dev_err(&i2c->adap.dev, "device addr=%x\n", +- jz4780_i2c_readw(i2c, JZ4780_I2C_TAR)); +- dev_err(&i2c->adap.dev, "send cmd count:%d %d\n", +- i2c->cmd, i2c->cmd_buf[i2c->cmd]); +- dev_err(&i2c->adap.dev, "receive data count:%d %d\n", +- i2c->cmd, i2c->data_buf[i2c->cmd]); +- +- for (i = 0; i < 16; i++) { +- if (src & BIT(i)) +- dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n", +- i, jz4780_i2c_abrt_src[i]); +- } ++ dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n", ++ src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]); + } + + static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c, +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index 2af79e4f3235..80a8eb7e5d6e 100644 +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -1143,6 
+1143,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, + /* Sharing an ib_cm_id with different handlers is not + * supported */ + spin_unlock_irqrestore(&cm.lock, flags); ++ ib_destroy_cm_id(cm_id); + return ERR_PTR(-EINVAL); + } + atomic_inc(&cm_id_priv->refcount); +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c +index 30d7277249b8..16b0c10348e8 100644 +--- a/drivers/infiniband/core/iwcm.c ++++ b/drivers/infiniband/core/iwcm.c +@@ -158,8 +158,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv) + { + struct list_head *e, *tmp; + +- list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) ++ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) { ++ list_del(e); + kfree(list_entry(e, struct iwcm_work, free_list)); ++ } + } + + static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count) +diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c +index 9b8276691329..ce8e3009344a 100644 +--- a/drivers/infiniband/core/security.c ++++ b/drivers/infiniband/core/security.c +@@ -339,15 +339,19 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, + return NULL; + + if (qp_attr_mask & IB_QP_PORT) +- new_pps->main.port_num = +- (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num; ++ new_pps->main.port_num = qp_attr->port_num; ++ else if (qp_pps) ++ new_pps->main.port_num = qp_pps->main.port_num; ++ + if (qp_attr_mask & IB_QP_PKEY_INDEX) +- new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index : +- qp_attr->pkey_index; ++ new_pps->main.pkey_index = qp_attr->pkey_index; ++ else if (qp_pps) ++ new_pps->main.pkey_index = qp_pps->main.pkey_index; ++ + if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT)) + new_pps->main.state = IB_PORT_PKEY_VALID; + +- if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) { ++ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) { + new_pps->main.port_num = qp_pps->main.port_num; + new_pps->main.pkey_index = qp_pps->main.pkey_index; + if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) +diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c +index ad78b471c112..b962dbcfe9a7 100644 +--- a/drivers/infiniband/hw/hfi1/verbs.c ++++ b/drivers/infiniband/hw/hfi1/verbs.c +@@ -593,10 +593,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet, + opa_get_lid(packet->dlid, 9B)); + if (!mcast) + goto drop; ++ rcu_read_lock(); + list_for_each_entry_rcu(p, &mcast->qp_list, list) { + packet->qp = p->qp; + if (hfi1_do_pkey_check(packet)) +- goto drop; ++ goto unlock_drop; + spin_lock_irqsave(&packet->qp->r_lock, flags); + packet_handler = qp_ok(packet); + if (likely(packet_handler)) +@@ -605,6 +606,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet, + ibp->rvp.n_pkt_drops++; + spin_unlock_irqrestore(&packet->qp->r_lock, flags); + } ++ rcu_read_unlock(); + /* + * Notify rvt_multicast_detach() if it is waiting for us + * to finish. 
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c +index 350bc29a066f..b473df8eea1a 100644 +--- a/drivers/infiniband/hw/qib/qib_verbs.c ++++ b/drivers/infiniband/hw/qib/qib_verbs.c +@@ -360,8 +360,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) + if (mcast == NULL) + goto drop; + this_cpu_inc(ibp->pmastats->n_multicast_rcv); ++ rcu_read_lock(); + list_for_each_entry_rcu(p, &mcast->qp_list, list) + qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); ++ rcu_read_unlock(); + /* + * Notify rvt_multicast_detach() if it is waiting for us + * to finish. +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c +index b5f541112fca..69cdb29ef6be 100644 +--- a/drivers/md/dm-cache-target.c ++++ b/drivers/md/dm-cache-target.c +@@ -2971,8 +2971,8 @@ static void cache_postsuspend(struct dm_target *ti) + prevent_background_work(cache); + BUG_ON(atomic_read(&cache->nr_io_migrations)); + +- cancel_delayed_work(&cache->waker); +- flush_workqueue(cache->wq); ++ cancel_delayed_work_sync(&cache->waker); ++ drain_workqueue(cache->wq); + WARN_ON(cache->tracker.in_flight); + + /* +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c +index 23f0f4eaaa2e..b6ca5b1100db 100644 +--- a/drivers/md/dm-integrity.c ++++ b/drivers/md/dm-integrity.c +@@ -187,6 +187,7 @@ struct dm_integrity_c { + struct rb_root in_progress; + wait_queue_head_t endio_wait; + struct workqueue_struct *wait_wq; ++ struct workqueue_struct *offload_wq; + + unsigned char commit_seq; + commit_id_t commit_ids[N_COMMIT_IDS]; +@@ -1157,7 +1158,7 @@ static void dec_in_flight(struct dm_integrity_io *dio) + dio->range.logical_sector += dio->range.n_sectors; + bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); + INIT_WORK(&dio->work, integrity_bio_wait); +- queue_work(ic->wait_wq, &dio->work); ++ queue_work(ic->offload_wq, &dio->work); + return; + } + do_endio_flush(ic, dio); +@@ -1577,7 +1578,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map + + if (need_sync_io && from_map) { + INIT_WORK(&dio->work, integrity_bio_wait); +- queue_work(ic->metadata_wq, &dio->work); ++ queue_work(ic->offload_wq, &dio->work); + return; + } + +@@ -3005,6 +3006,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) + goto bad; + } + ++ ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM, ++ METADATA_WORKQUEUE_MAX_ACTIVE); ++ if (!ic->offload_wq) { ++ ti->error = "Cannot allocate workqueue"; ++ r = -ENOMEM; ++ goto bad; ++ } ++ + ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); + if (!ic->commit_wq) { + ti->error = "Cannot allocate workqueue"; +@@ -3189,6 +3198,8 @@ static void dm_integrity_dtr(struct dm_target *ti) + destroy_workqueue(ic->metadata_wq); + if (ic->wait_wq) + destroy_workqueue(ic->wait_wq); ++ if (ic->offload_wq) ++ destroy_workqueue(ic->offload_wq); + if (ic->commit_wq) + destroy_workqueue(ic->commit_wq); + if (ic->writer_wq) +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index 747062f04bb5..6bca42e34a53 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ b/drivers/net/dsa/bcm_sf2.c +@@ -138,8 +138,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) + /* Force link status for IMP port */ + reg = core_readl(priv, offset); + reg |= (MII_SW_OR | LINK_STS); +- if (priv->type == BCM7278_DEVICE_ID) +- reg |= GMII_SPEED_UP_2G; ++ reg &= ~GMII_SPEED_UP_2G; + core_writel(priv, reg, offset); + + /* Enable Broadcast, Multicast, 
Unicast forwarding to IMP port */ +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c +index 10e6053f6671..dc9149a32f41 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_com.c ++++ b/drivers/net/ethernet/amazon/ena/ena_com.c +@@ -199,6 +199,11 @@ static inline void comp_ctxt_release(struct ena_com_admin_queue *queue, + static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, + u16 command_id, bool capture) + { ++ if (unlikely(!queue->comp_ctx)) { ++ pr_err("Completion context is NULL\n"); ++ return NULL; ++ } ++ + if (unlikely(command_id >= queue->q_depth)) { + pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n", + command_id, queue->q_depth); +@@ -843,6 +848,24 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev, + 0); + } + ++static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) ++{ ++ struct ena_admin_feature_rss_flow_hash_control *hash_key = ++ (ena_dev->rss).hash_key; ++ ++ netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key)); ++ /* The key is stored in the device in u32 array ++ * as well as the API requires the key to be passed in this ++ * format. Thus the size of our array should be divided by 4 ++ */ ++ hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32); ++} ++ ++int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev) ++{ ++ return ena_dev->rss.hash_func; ++} ++ + static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) + { + struct ena_rss *rss = &ena_dev->rss; +@@ -2069,15 +2092,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + + switch (func) { + case ENA_ADMIN_TOEPLITZ: +- if (key_len > sizeof(hash_key->key)) { +- pr_err("key len (%hu) is bigger than the max supported (%zu)\n", +- key_len, sizeof(hash_key->key)); +- return -EINVAL; ++ if (key) { ++ if (key_len != sizeof(hash_key->key)) { ++ pr_err("key len (%hu) doesn't equal the supported size (%zu)\n", ++ key_len, sizeof(hash_key->key)); ++ return -EINVAL; ++ } ++ memcpy(hash_key->key, key, key_len); ++ rss->hash_init_val = init_val; ++ hash_key->keys_num = key_len >> 2; + } +- +- memcpy(hash_key->key, key, key_len); +- rss->hash_init_val = init_val; +- hash_key->keys_num = key_len >> 2; + break; + case ENA_ADMIN_CRC32: + rss->hash_init_val = init_val; +@@ -2114,7 +2138,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + if (unlikely(rc)) + return rc; + +- rss->hash_func = get_resp.u.flow_hash_func.selected_func; ++ /* ffs() returns 1 in case the lsb is set */ ++ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func); ++ if (rss->hash_func) ++ rss->hash_func--; ++ + if (func) + *func = rss->hash_func; + +@@ -2402,6 +2430,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) + if (unlikely(rc)) + goto err_hash_key; + ++ ena_com_hash_key_fill_default_key(ena_dev); ++ + rc = ena_com_hash_ctrl_init(ena_dev); + if (unlikely(rc)) + goto err_hash_ctrl; +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h +index 7b784f8a06a6..7272fb0d858d 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_com.h ++++ b/drivers/net/ethernet/amazon/ena/ena_com.h +@@ -42,6 +42,7 @@ + #include <linux/spinlock.h> + #include <linux/types.h> + #include <linux/wait.h> ++#include <linux/netdevice.h> + + #include "ena_common_defs.h" + #include "ena_admin_defs.h" +@@ -631,6 +632,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size); + */ + void ena_com_rss_destroy(struct ena_com_dev 
*ena_dev); + ++/* ena_com_get_current_hash_function - Get RSS hash function ++ * @ena_dev: ENA communication layer struct ++ * ++ * Return the current hash function. ++ * @return: 0 or one of the ena_admin_hash_functions values. ++ */ ++int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev); ++ + /* ena_com_fill_hash_function - Fill RSS hash function + * @ena_dev: ENA communication layer struct + * @func: The hash function (Toeplitz or crc) +diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c +index a2f02c23fe14..d29e256bf610 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c ++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c +@@ -648,6 +648,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev) + return ENA_HASH_KEY_SIZE; + } + ++static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir) ++{ ++ struct ena_com_dev *ena_dev = adapter->ena_dev; ++ int i, rc; ++ ++ if (!indir) ++ return 0; ++ ++ rc = ena_com_indirect_table_get(ena_dev, indir); ++ if (rc) ++ return rc; ++ ++ /* Our internal representation of the indices is: even indices ++ * for Tx and uneven indices for Rx. We need to convert the Rx ++ * indices to be consecutive ++ */ ++ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) ++ indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]); ++ ++ return rc; ++} ++ + static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) + { +@@ -656,11 +678,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 func; + int rc; + +- rc = ena_com_indirect_table_get(adapter->ena_dev, indir); ++ rc = ena_indirection_table_get(adapter, indir); + if (rc) + return rc; + ++ /* We call this function in order to check if the device ++ * supports getting/setting the hash function. 
++ */ + rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key); ++ ++ if (rc) { ++ if (rc == -EOPNOTSUPP) { ++ key = NULL; ++ hfunc = NULL; ++ rc = 0; ++ } ++ ++ return rc; ++ } ++ + if (rc) + return rc; + +@@ -669,7 +705,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + func = ETH_RSS_HASH_TOP; + break; + case ENA_ADMIN_CRC32: +- func = ETH_RSS_HASH_XOR; ++ func = ETH_RSS_HASH_CRC32; + break; + default: + netif_err(adapter, drv, netdev, +@@ -712,10 +748,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, + } + + switch (hfunc) { ++ case ETH_RSS_HASH_NO_CHANGE: ++ func = ena_com_get_current_hash_function(ena_dev); ++ break; + case ETH_RSS_HASH_TOP: + func = ENA_ADMIN_TOEPLITZ; + break; +- case ETH_RSS_HASH_XOR: ++ case ETH_RSS_HASH_CRC32: + func = ENA_ADMIN_CRC32; + break; + default: +@@ -816,6 +855,7 @@ static const struct ethtool_ops ena_ethtool_ops = { + .get_channels = ena_get_channels, + .get_tunable = ena_get_tunable, + .set_tunable = ena_set_tunable, ++ .get_ts_info = ethtool_op_get_ts_info, + }; + + void ena_set_ethtool_ops(struct net_device *netdev) +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c +index 518ff393a026..d9ece9ac6f53 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c +@@ -2803,8 +2803,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) + if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) + return; + +- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies + +- adapter->keep_alive_timeout); ++ keep_alive_expired = adapter->last_keep_alive_jiffies + ++ adapter->keep_alive_timeout; + if (unlikely(time_is_before_jiffies(keep_alive_expired))) { + netif_err(adapter, drv, adapter->netdev, + "Keep alive watchdog timeout.\n"); +@@ -2906,7 +2906,7 @@ static void ena_timer_service(unsigned long data) + } + + /* Reset the timer */ +- mod_timer(&adapter->timer_service, jiffies + HZ); ++ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); + } + + static int ena_calc_io_queue_num(struct pci_dev *pdev, +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h +index 3404376c28ca..5a72267b858b 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h +@@ -113,6 +113,8 @@ + + #define ENA_IO_TXQ_IDX(q) (2 * (q)) + #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) ++#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2) ++#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2) + + #define ENA_MGMNT_IRQ_IDX 0 + #define ENA_IO_IRQ_FIRST_IDX 1 +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +index 50dd6bf176d0..3a489b2b99c9 100644 +--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +@@ -2034,7 +2034,7 @@ static int xgene_enet_probe(struct platform_device *pdev) + int ret; + + ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata), +- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING); ++ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING); + if (!ndev) + return -ENOMEM; + +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +index a69f5f1ad32a..7a900f76c9ac 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +@@ -519,8 +519,10 @@ static unsigned 
int aq_nic_map_skb(struct aq_nic_s *self, + dx_buff->len, + DMA_TO_DEVICE); + +- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) ++ if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) { ++ ret = 0; + goto exit; ++ } + + first = dx_buff; + dx_buff->len_pkt = skb->len; +diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +index 586e35593310..d678f088925c 100644 +--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c ++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +@@ -234,10 +234,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) + lmac = &bgx->lmac[lmacid]; + + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); +- if (enable) ++ if (enable) { + cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; +- else ++ ++ /* enable TX FIFO Underflow interrupt */ ++ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S, ++ GMI_TXX_INT_UNDFLW); ++ } else { + cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); ++ ++ /* Disable TX FIFO Underflow interrupt */ ++ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C, ++ GMI_TXX_INT_UNDFLW); ++ } + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + if (bgx->is_rgx) +@@ -1340,6 +1349,48 @@ static int bgx_init_phy(struct bgx *bgx) + return bgx_init_of_phy(bgx); + } + ++static irqreturn_t bgx_intr_handler(int irq, void *data) ++{ ++ struct bgx *bgx = (struct bgx *)data; ++ u64 status, val; ++ int lmac; ++ ++ for (lmac = 0; lmac < bgx->lmac_count; lmac++) { ++ status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT); ++ if (status & GMI_TXX_INT_UNDFLW) { ++ pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n", ++ bgx->bgx_id, lmac); ++ val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG); ++ val &= ~CMR_EN; ++ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val); ++ val |= CMR_EN; ++ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val); ++ } ++ /* clear interrupts */ ++ bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static void bgx_register_intr(struct pci_dev *pdev) ++{ ++ struct bgx *bgx = pci_get_drvdata(pdev); ++ int ret; ++ ++ ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET, ++ BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES); ++ if (ret < 0) { ++ pci_err(pdev, "Req for #%d msix vectors failed\n", ++ BGX_LMAC_VEC_OFFSET); ++ return; ++ } ++ ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL, ++ bgx, "BGX%d", bgx->bgx_id); ++ if (ret) ++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); ++} ++ + static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + { + int err; +@@ -1355,7 +1406,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + + pci_set_drvdata(pdev, bgx); + +- err = pci_enable_device(pdev); ++ err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); +@@ -1409,6 +1460,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + + bgx_init_hw(bgx); + ++ bgx_register_intr(pdev); ++ + /* Enable all LMACs */ + for (lmac = 0; lmac < bgx->lmac_count; lmac++) { + err = bgx_lmac_enable(bgx, lmac); +@@ -1425,6 +1478,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + + err_enable: + bgx_vnic[bgx->bgx_id] = NULL; ++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); + err_release_regions: + pci_release_regions(pdev); + err_disable_device: +@@ -1442,6 +1496,8 @@ static void bgx_remove(struct pci_dev *pdev) + for (lmac = 0; lmac < bgx->lmac_count; lmac++) + bgx_lmac_disable(bgx, 
lmac); + ++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); ++ + bgx_vnic[bgx->bgx_id] = NULL; + pci_release_regions(pdev); + pci_disable_device(pdev); +diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +index 23acdc5ab896..adaa3bfa5f6c 100644 +--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h ++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +@@ -179,6 +179,15 @@ + #define BGX_GMP_GMI_TXX_BURST 0x38228 + #define BGX_GMP_GMI_TXX_MIN_PKT 0x38240 + #define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300 ++#define BGX_GMP_GMI_TXX_INT 0x38500 ++#define BGX_GMP_GMI_TXX_INT_W1S 0x38508 ++#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510 ++#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518 ++#define GMI_TXX_INT_PTP_LOST BIT_ULL(4) ++#define GMI_TXX_INT_LATE_COL BIT_ULL(3) ++#define GMI_TXX_INT_XSDEF BIT_ULL(2) ++#define GMI_TXX_INT_XSCOL BIT_ULL(1) ++#define GMI_TXX_INT_UNDFLW BIT_ULL(0) + + #define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */ + #define BGX_MSIX_VEC_0_29_CTL 0x400008 +diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c +index adbe0a6fe0db..799154d7c047 100644 +--- a/drivers/net/ethernet/micrel/ks8851_mll.c ++++ b/drivers/net/ethernet/micrel/ks8851_mll.c +@@ -474,24 +474,6 @@ static int msg_enable; + * chip is busy transferring packet data (RX/TX FIFO accesses). + */ + +-/** +- * ks_rdreg8 - read 8 bit register from device +- * @ks : The chip information +- * @offset: The register address +- * +- * Read a 8bit register from the chip, returning the result +- */ +-static u8 ks_rdreg8(struct ks_net *ks, int offset) +-{ +- u16 data; +- u8 shift_bit = offset & 0x03; +- u8 shift_data = (offset & 1) << 3; +- ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit); +- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); +- data = ioread16(ks->hw_addr); +- return (u8)(data >> shift_data); +-} +- + /** + * ks_rdreg16 - read 16 bit register from device + * @ks : The chip information +@@ -502,27 +484,11 @@ static u8 ks_rdreg8(struct ks_net *ks, int offset) + + static u16 ks_rdreg16(struct ks_net *ks, int offset) + { +- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); ++ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02)); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + return ioread16(ks->hw_addr); + } + +-/** +- * ks_wrreg8 - write 8bit register value to chip +- * @ks: The chip information +- * @offset: The register address +- * @value: The value to write +- * +- */ +-static void ks_wrreg8(struct ks_net *ks, int offset, u8 value) +-{ +- u8 shift_bit = (offset & 0x03); +- u16 value_write = (u16)(value << ((offset & 1) << 3)); +- ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit); +- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); +- iowrite16(value_write, ks->hw_addr); +-} +- + /** + * ks_wrreg16 - write 16bit register value to chip + * @ks: The chip information +@@ -533,7 +499,7 @@ static void ks_wrreg8(struct ks_net *ks, int offset, u8 value) + + static void ks_wrreg16(struct ks_net *ks, int offset, u16 value) + { +- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); ++ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02)); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + iowrite16(value, ks->hw_addr); + } +@@ -549,7 +515,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len) + { + len >>= 1; + while (len--) +- *wptr++ = (u16)ioread16(ks->hw_addr); ++ *wptr++ = be16_to_cpu(ioread16(ks->hw_addr)); + } + + /** +@@ 
-563,7 +529,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len) + { + len >>= 1; + while (len--) +- iowrite16(*wptr++, ks->hw_addr); ++ iowrite16(cpu_to_be16(*wptr++), ks->hw_addr); + } + + static void ks_disable_int(struct ks_net *ks) +@@ -642,8 +608,7 @@ static void ks_read_config(struct ks_net *ks) + u16 reg_data = 0; + + /* Regardless of bus width, 8 bit read should always work.*/ +- reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF; +- reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8; ++ reg_data = ks_rdreg16(ks, KS_CCR); + + /* addr/data bus are multiplexed */ + ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED; +@@ -747,7 +712,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) + + /* 1. set sudo DMA mode */ + ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); +- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); ++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); + + /* 2. read prepend data */ + /** +@@ -764,7 +729,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) + ks_inblk(ks, buf, ALIGN(len, 4)); + + /* 4. reset sudo DMA Mode */ +- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); ++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + } + + /** +@@ -997,13 +962,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) + ks->txh.txw[1] = cpu_to_le16(len); + + /* 1. set sudo-DMA mode */ +- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); ++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); + /* 2. write status/lenth info */ + ks_outblk(ks, ks->txh.txw, 4); + /* 3. write pkt data */ + ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4)); + /* 4. reset sudo-DMA mode */ +- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); ++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */ + ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE); + /* 6. 
wait until TXQCR_METFE is auto-cleared */ +diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h +index adb700512baa..a80531b5aecc 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede.h ++++ b/drivers/net/ethernet/qlogic/qede/qede.h +@@ -156,6 +156,8 @@ struct qede_rdma_dev { + struct list_head entry; + struct list_head rdma_event_list; + struct workqueue_struct *rdma_wq; ++ struct kref refcnt; ++ struct completion event_comp; + }; + + struct qede_ptp; +diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c +index 1900bf7e67d1..cd12fb919ad5 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c ++++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c +@@ -57,6 +57,9 @@ static void _qede_rdma_dev_add(struct qede_dev *edev) + static int qede_rdma_create_wq(struct qede_dev *edev) + { + INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list); ++ kref_init(&edev->rdma_info.refcnt); ++ init_completion(&edev->rdma_info.event_comp); ++ + edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq"); + if (!edev->rdma_info.rdma_wq) { + DP_NOTICE(edev, "qedr: Could not create workqueue\n"); +@@ -81,8 +84,23 @@ static void qede_rdma_cleanup_event(struct qede_dev *edev) + } + } + ++static void qede_rdma_complete_event(struct kref *ref) ++{ ++ struct qede_rdma_dev *rdma_dev = ++ container_of(ref, struct qede_rdma_dev, refcnt); ++ ++ /* no more events will be added after this */ ++ complete(&rdma_dev->event_comp); ++} ++ + static void qede_rdma_destroy_wq(struct qede_dev *edev) + { ++ /* Avoid race with add_event flow, make sure it finishes before ++ * we start accessing the list and cleaning up the work ++ */ ++ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event); ++ wait_for_completion(&edev->rdma_info.event_comp); ++ + qede_rdma_cleanup_event(edev); + destroy_workqueue(edev->rdma_info.rdma_wq); + } +@@ -287,15 +305,24 @@ static void qede_rdma_add_event(struct qede_dev *edev, + if (!edev->rdma_info.qedr_dev) + return; + ++ /* We don't want the cleanup flow to start while we're allocating and ++ * scheduling the work ++ */ ++ if (!kref_get_unless_zero(&edev->rdma_info.refcnt)) ++ return; /* already being destroyed */ ++ + event_node = qede_rdma_get_free_event_node(edev); + if (!event_node) +- return; ++ goto out; + + event_node->event = event; + event_node->ptr = edev; + + INIT_WORK(&event_node->work, qede_rdma_handle_event); + queue_work(edev->rdma_info.rdma_wq, &event_node->work); ++ ++out: ++ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event); + } + + void qede_rdma_dev_event_open(struct qede_dev *edev) +diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c +index 46fe1ae919a3..51ce3ea17fb3 100644 +--- a/drivers/net/phy/mdio-bcm-iproc.c ++++ b/drivers/net/phy/mdio-bcm-iproc.c +@@ -188,6 +188,23 @@ static int iproc_mdio_remove(struct platform_device *pdev) + return 0; + } + ++#ifdef CONFIG_PM_SLEEP ++int iproc_mdio_resume(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct iproc_mdio_priv *priv = platform_get_drvdata(pdev); ++ ++ /* restore the mii clock configuration */ ++ iproc_mdio_config_clk(priv->base); ++ ++ return 0; ++} ++ ++static const struct dev_pm_ops iproc_mdio_pm_ops = { ++ .resume = iproc_mdio_resume ++}; ++#endif /* CONFIG_PM_SLEEP */ ++ + static const struct of_device_id iproc_mdio_of_match[] = { + { .compatible = "brcm,iproc-mdio", }, + { /* sentinel */ }, +@@ -198,6 +215,9 @@ static struct platform_driver 
iproc_mdio_driver = { + .driver = { + .name = "iproc-mdio", + .of_match_table = iproc_mdio_of_match, ++#ifdef CONFIG_PM_SLEEP ++ .pm = &iproc_mdio_pm_ops, ++#endif + }, + .probe = iproc_mdio_probe, + .remove = iproc_mdio_remove, +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 3086211829a7..ba34f61d70de 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -1134,6 +1134,13 @@ static void tun_net_init(struct net_device *dev) + dev->max_mtu = MAX_MTU - dev->hard_header_len; + } + ++static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) ++{ ++ struct sock *sk = tfile->socket.sk; ++ ++ return (tun->dev->flags & IFF_UP) && sock_writeable(sk); ++} ++ + /* Character device part */ + + /* Poll */ +@@ -1156,10 +1163,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait) + if (!skb_array_empty(&tfile->tx_array)) + mask |= POLLIN | POLLRDNORM; + +- if (tun->dev->flags & IFF_UP && +- (sock_writeable(sk) || +- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && +- sock_writeable(sk)))) ++ /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to ++ * guarantee EPOLLOUT to be raised by either here or ++ * tun_sock_write_space(). Then process could get notification ++ * after it writes to a down device and meets -EIO. ++ */ ++ if (tun_sock_writeable(tun, tfile) || ++ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && ++ tun_sock_writeable(tun, tfile))) + mask |= POLLOUT | POLLWRNORM; + + if (tun->dev->reg_state != NETREG_REGISTERED) +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index db70d4c5778a..189715438328 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -63,7 +63,6 @@ enum qmi_wwan_flags { + + enum qmi_wwan_quirks { + QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */ +- QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */ + }; + + struct qmimux_hdr { +@@ -853,16 +852,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = { + .data = QMI_WWAN_QUIRK_DTR, + }; + +-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = { +- .description = "WWAN/QMI device", +- .flags = FLAG_WWAN | FLAG_SEND_ZLP, +- .bind = qmi_wwan_bind, +- .unbind = qmi_wwan_unbind, +- .manage_power = qmi_wwan_manage_power, +- .rx_fixup = qmi_wwan_rx_fixup, +- .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG, +-}; +- + #define HUAWEI_VENDOR_ID 0x12D1 + + /* map QMI/wwan function by a fixed interface number */ +@@ -883,14 +872,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = { + #define QMI_GOBI_DEVICE(vend, prod) \ + QMI_FIXED_INTF(vend, prod, 0) + +-/* Quectel does not use fixed interface numbers on at least some of their +- * devices. We need to check the number of endpoints to ensure that we bind to +- * the correct interface. ++/* Many devices have QMI and DIAG functions which are distinguishable ++ * from other vendor specific functions by class, subclass and ++ * protocol all being 0xff. The DIAG function has exactly 2 endpoints ++ * and is silently rejected when probed. ++ * ++ * This makes it possible to match dynamically numbered QMI functions ++ * as seen on e.g. many Quectel modems. 
+ */ +-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \ ++#define QMI_MATCH_FF_FF_FF(vend, prod) \ + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \ + USB_SUBCLASS_VENDOR_SPEC, 0xff), \ +- .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg ++ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr + + static const struct usb_device_id products[] = { + /* 1. CDC ECM like devices match on the control interface */ +@@ -996,10 +989,10 @@ static const struct usb_device_id products[] = { + USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), + .driver_info = (unsigned long)&qmi_wwan_info, + }, +- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ +- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ +- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ +- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ ++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ ++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ ++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ ++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ + + /* 3. Combined interface devices matching on interface number */ + {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ +@@ -1287,6 +1280,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ ++ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ +@@ -1378,7 +1372,6 @@ static int qmi_wwan_probe(struct usb_interface *intf, + { + struct usb_device_id *id = (struct usb_device_id *)prod; + struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; +- const struct driver_info *info; + + /* Workaround to enable dynamic IDs. This disables usbnet + * blacklisting functionality. Which, if required, can be +@@ -1414,12 +1407,8 @@ static int qmi_wwan_probe(struct usb_interface *intf, + * different. Ignore the current interface if the number of endpoints + * equals the number for the diag interface (two). 
+ */ +- info = (void *)id->driver_info; +- +- if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) { +- if (desc->bNumEndpoints == 2) +- return -ENODEV; +- } ++ if (desc->bNumEndpoints == 2) ++ return -ENODEV; + + return usbnet_probe(intf, id); + } +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +index dffa697d71e0..8a074a516fb2 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +@@ -3023,6 +3023,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, + spin_lock_init(&trans_pcie->reg_lock); + mutex_init(&trans_pcie->mutex); + init_waitqueue_head(&trans_pcie->ucode_write_waitq); ++ ++ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", ++ WQ_HIGHPRI | WQ_UNBOUND, 1); ++ if (!trans_pcie->rba.alloc_wq) { ++ ret = -ENOMEM; ++ goto out_free_trans; ++ } ++ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); ++ + trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); + if (!trans_pcie->tso_hdr_page) { + ret = -ENOMEM; +@@ -3195,10 +3204,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, + trans_pcie->inta_mask = CSR_INI_SET_MASK; + } + +- trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", +- WQ_HIGHPRI | WQ_UNBOUND, 1); +- INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); +- + #ifdef CONFIG_IWLWIFI_PCIE_RTPM + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; + #else +@@ -3211,6 +3216,8 @@ out_free_ict: + iwl_pcie_free_ict(trans); + out_no_pci: + free_percpu(trans_pcie->tso_hdr_page); ++ destroy_workqueue(trans_pcie->rba.alloc_wq); ++out_free_trans: + iwl_trans_free(trans); + return ERR_PTR(ret); + } +diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c +index b5340af9fa5e..80d20fb6f348 100644 +--- a/drivers/net/wireless/marvell/mwifiex/tdls.c ++++ b/drivers/net/wireless/marvell/mwifiex/tdls.c +@@ -897,7 +897,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, + u8 *peer, *pos, *end; + u8 i, action, basic; + u16 cap = 0; +- int ie_len = 0; ++ int ies_len = 0; + + if (len < (sizeof(struct ethhdr) + 3)) + return; +@@ -919,7 +919,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, + pos = buf + sizeof(struct ethhdr) + 4; + /* payload 1+ category 1 + action 1 + dialog 1 */ + cap = get_unaligned_le16(pos); +- ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN; ++ ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN; + pos += 2; + break; + +@@ -929,7 +929,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, + /* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/ + pos = buf + sizeof(struct ethhdr) + 6; + cap = get_unaligned_le16(pos); +- ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN; ++ ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN; + pos += 2; + break; + +@@ -937,7 +937,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, + if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN)) + return; + pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN; +- ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN; ++ ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN; + break; + default: + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n"); +@@ -950,33 +950,33 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, + + sta_ptr->tdls_cap.capab = cpu_to_le16(cap); + +- for (end = pos + 
ie_len; pos + 1 < end; pos += 2 + pos[1]) { +- if (pos + 2 + pos[1] > end) ++ for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) { ++ u8 ie_len = pos[1]; ++ ++ if (pos + 2 + ie_len > end) + break; + + switch (*pos) { + case WLAN_EID_SUPP_RATES: +- if (pos[1] > 32) ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates)) + return; +- sta_ptr->tdls_cap.rates_len = pos[1]; +- for (i = 0; i < pos[1]; i++) ++ sta_ptr->tdls_cap.rates_len = ie_len; ++ for (i = 0; i < ie_len; i++) + sta_ptr->tdls_cap.rates[i] = pos[i + 2]; + break; + + case WLAN_EID_EXT_SUPP_RATES: +- if (pos[1] > 32) ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates)) + return; + basic = sta_ptr->tdls_cap.rates_len; +- if (pos[1] > 32 - basic) ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic) + return; +- for (i = 0; i < pos[1]; i++) ++ for (i = 0; i < ie_len; i++) + sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2]; +- sta_ptr->tdls_cap.rates_len += pos[1]; ++ sta_ptr->tdls_cap.rates_len += ie_len; + break; + case WLAN_EID_HT_CAPABILITY: +- if (pos > end - sizeof(struct ieee80211_ht_cap) - 2) +- return; +- if (pos[1] != sizeof(struct ieee80211_ht_cap)) ++ if (ie_len != sizeof(struct ieee80211_ht_cap)) + return; + /* copy the ie's value into ht_capb*/ + memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2, +@@ -984,59 +984,45 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, + sta_ptr->is_11n_enabled = 1; + break; + case WLAN_EID_HT_OPERATION: +- if (pos > end - +- sizeof(struct ieee80211_ht_operation) - 2) +- return; +- if (pos[1] != sizeof(struct ieee80211_ht_operation)) ++ if (ie_len != sizeof(struct ieee80211_ht_operation)) + return; + /* copy the ie's value into ht_oper*/ + memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2, + sizeof(struct ieee80211_ht_operation)); + break; + case WLAN_EID_BSS_COEX_2040: +- if (pos > end - 3) +- return; +- if (pos[1] != 1) ++ if (ie_len != sizeof(pos[2])) + return; + sta_ptr->tdls_cap.coex_2040 = pos[2]; + break; + case WLAN_EID_EXT_CAPABILITY: +- if (pos > end - sizeof(struct ieee_types_header)) +- return; +- if (pos[1] < sizeof(struct ieee_types_header)) ++ if (ie_len < sizeof(struct ieee_types_header)) + return; +- if (pos[1] > 8) ++ if (ie_len > 8) + return; + memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos, + sizeof(struct ieee_types_header) + +- min_t(u8, pos[1], 8)); ++ min_t(u8, ie_len, 8)); + break; + case WLAN_EID_RSN: +- if (pos > end - sizeof(struct ieee_types_header)) ++ if (ie_len < sizeof(struct ieee_types_header)) + return; +- if (pos[1] < sizeof(struct ieee_types_header)) +- return; +- if (pos[1] > IEEE_MAX_IE_SIZE - ++ if (ie_len > IEEE_MAX_IE_SIZE - + sizeof(struct ieee_types_header)) + return; + memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos, + sizeof(struct ieee_types_header) + +- min_t(u8, pos[1], IEEE_MAX_IE_SIZE - ++ min_t(u8, ie_len, IEEE_MAX_IE_SIZE - + sizeof(struct ieee_types_header))); + break; + case WLAN_EID_QOS_CAPA: +- if (pos > end - 3) +- return; +- if (pos[1] != 1) ++ if (ie_len != sizeof(pos[2])) + return; + sta_ptr->tdls_cap.qos_info = pos[2]; + break; + case WLAN_EID_VHT_OPERATION: + if (priv->adapter->is_hw_11ac_capable) { +- if (pos > end - +- sizeof(struct ieee80211_vht_operation) - 2) +- return; +- if (pos[1] != ++ if (ie_len != + sizeof(struct ieee80211_vht_operation)) + return; + /* copy the ie's value into vhtoper*/ +@@ -1046,10 +1032,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, + break; + case WLAN_EID_VHT_CAPABILITY: + if (priv->adapter->is_hw_11ac_capable) { +- if (pos > end - +- sizeof(struct 
ieee80211_vht_cap) - 2) +- return; +- if (pos[1] != sizeof(struct ieee80211_vht_cap)) ++ if (ie_len != sizeof(struct ieee80211_vht_cap)) + return; + /* copy the ie's value into vhtcap*/ + memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2, +@@ -1059,9 +1042,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, + break; + case WLAN_EID_AID: + if (priv->adapter->is_hw_11ac_capable) { +- if (pos > end - 4) +- return; +- if (pos[1] != 2) ++ if (ie_len != sizeof(u16)) + return; + sta_ptr->tdls_cap.aid = + get_unaligned_le16((pos + 2)); +diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c +index 4b14740edb67..8ba5a6d6329e 100644 +--- a/drivers/nfc/pn544/i2c.c ++++ b/drivers/nfc/pn544/i2c.c +@@ -236,6 +236,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy) + + out: + gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); ++ usleep_range(10000, 15000); + } + + static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode) +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index f543b9932c83..a760c449f4a9 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -889,8 +889,8 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl, + static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, + void *buffer, size_t buflen, u32 *result) + { ++ union nvme_result res = { 0 }; + struct nvme_command c; +- union nvme_result res; + int ret; + + memset(&c, 0, sizeof(c)); +diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c +index 2a3f874a21d5..9cebff8e8d74 100644 +--- a/drivers/s390/cio/blacklist.c ++++ b/drivers/s390/cio/blacklist.c +@@ -303,8 +303,10 @@ static void * + cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) + { + struct ccwdev_iter *iter; ++ loff_t p = *offset; + +- if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) ++ (*offset)++; ++ if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) + return NULL; + iter = it; + if (iter->devno == __MAX_SUBCHANNEL) { +@@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) + return NULL; + } else + iter->devno++; +- (*offset)++; + return iter; + } + +diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c +index 411b4b03457b..899f36b59af7 100644 +--- a/drivers/tty/serial/8250/8250_exar.c ++++ b/drivers/tty/serial/8250/8250_exar.c +@@ -27,6 +27,14 @@ + + #include "8250.h" + ++#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052 ++#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d ++#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c ++#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8 ++#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2 ++#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db ++#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea ++ + #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002 + #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004 + #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a +@@ -562,6 +570,22 @@ static int __maybe_unused exar_resume(struct device *dev) + + static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume); + ++static const struct exar8250_board acces_com_2x = { ++ .num_ports = 2, ++ .setup = pci_xr17c154_setup, ++}; ++ ++static const struct exar8250_board acces_com_4x = { ++ .num_ports = 4, ++ .setup = pci_xr17c154_setup, ++}; ++ ++static const struct exar8250_board acces_com_8x = { ++ .num_ports = 8, ++ .setup = pci_xr17c154_setup, ++}; ++ ++ + static const struct exar8250_board pbn_fastcom335_2 = { + .num_ports = 2, + .setup = 
pci_fastcom335_setup, +@@ -632,6 +656,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = { + } + + static const struct pci_device_id exar_pci_tbl[] = { ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x), ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x), ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x), ++ EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x), ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x), ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x), ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x), ++ ++ + CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect), + CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect), + CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect), +diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c +index ed545a61413c..ac56a5131a9c 100644 +--- a/drivers/tty/serial/ar933x_uart.c ++++ b/drivers/tty/serial/ar933x_uart.c +@@ -289,6 +289,10 @@ static void ar933x_uart_set_termios(struct uart_port *port, + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_HOST_INT_EN); + ++ /* enable RX and TX ready overide */ ++ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, ++ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE); ++ + /* reenable the UART */ + ar933x_uart_rmw(up, AR933X_UART_CS_REG, + AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S, +@@ -421,6 +425,10 @@ static int ar933x_uart_startup(struct uart_port *port) + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, + AR933X_UART_CS_HOST_INT_EN); + ++ /* enable RX and TX ready overide */ ++ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, ++ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE); ++ + /* Enable RX interrupts */ + up->ier = AR933X_UART_INT_RX_VALID; + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c +index 401c983ec5f3..a10e4aa9e18e 100644 +--- a/drivers/tty/serial/mvebu-uart.c ++++ b/drivers/tty/serial/mvebu-uart.c +@@ -581,7 +581,7 @@ static int mvebu_uart_probe(struct platform_device *pdev) + + port->membase = devm_ioremap_resource(&pdev->dev, reg); + if (IS_ERR(port->membase)) +- return -PTR_ERR(port->membase); ++ return PTR_ERR(port->membase); + + data = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart_data), + GFP_KERNEL); +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c +index 377b3592384e..4c716ddd6599 100644 +--- a/drivers/tty/sysrq.c ++++ b/drivers/tty/sysrq.c +@@ -546,7 +546,6 @@ void __handle_sysrq(int key, bool check_mask) + */ + orig_log_level = console_loglevel; + console_loglevel = CONSOLE_LOGLEVEL_DEFAULT; +- pr_info("SysRq : "); + + op_p = __sysrq_get_key_op(key); + if (op_p) { +@@ -555,14 +554,15 @@ void __handle_sysrq(int key, bool check_mask) + * should not) and is the invoked operation enabled? 
+ */ + if (!check_mask || sysrq_on_mask(op_p->enable_mask)) { +- pr_cont("%s\n", op_p->action_msg); ++ pr_info("%s\n", op_p->action_msg); + console_loglevel = orig_log_level; + op_p->handler(key); + } else { +- pr_cont("This sysrq operation is disabled.\n"); ++ pr_info("This sysrq operation is disabled.\n"); ++ console_loglevel = orig_log_level; + } + } else { +- pr_cont("HELP : "); ++ pr_info("HELP : "); + /* Only print the help msg once per handler */ + for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) { + if (sysrq_key_table[i]) { +diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c +index b157f17d2be2..91ffe3f2b8a0 100644 +--- a/drivers/tty/vt/selection.c ++++ b/drivers/tty/vt/selection.c +@@ -14,6 +14,7 @@ + #include <linux/tty.h> + #include <linux/sched.h> + #include <linux/mm.h> ++#include <linux/mutex.h> + #include <linux/slab.h> + #include <linux/types.h> + +@@ -43,6 +44,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */ + static int sel_end; + static int sel_buffer_lth; + static char *sel_buffer; ++static DEFINE_MUTEX(sel_lock); + + /* clear_selection, highlight and highlight_pointer can be called + from interrupt (via scrollback/front) */ +@@ -155,14 +157,14 @@ static int store_utf8(u16 c, char *p) + * The entire selection process is managed under the console_lock. It's + * a lot under the lock but its hardly a performance path + */ +-int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty) ++static int __set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty) + { + struct vc_data *vc = vc_cons[fg_console].d; + int sel_mode, new_sel_start, new_sel_end, spc; + char *bp, *obp; + int i, ps, pe, multiplier; + u16 c; +- int mode; ++ int mode, ret = 0; + + poke_blanked_console(); + +@@ -323,7 +325,21 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t + } + } + sel_buffer_lth = bp - sel_buffer; +- return 0; ++ ++ return ret; ++} ++ ++int set_selection(const struct tiocl_selection __user *v, struct tty_struct *tty) ++{ ++ int ret; ++ ++ mutex_lock(&sel_lock); ++ console_lock(); ++ ret = __set_selection(v, tty); ++ console_unlock(); ++ mutex_unlock(&sel_lock); ++ ++ return ret; + } + + /* Insert the contents of the selection buffer into the +@@ -352,6 +368,7 @@ int paste_selection(struct tty_struct *tty) + tty_buffer_lock_exclusive(&vc->port); + + add_wait_queue(&vc->paste_wait, &wait); ++ mutex_lock(&sel_lock); + while (sel_buffer && sel_buffer_lth > pasted) { + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) { +@@ -359,7 +376,9 @@ int paste_selection(struct tty_struct *tty) + break; + } + if (tty_throttled(tty)) { ++ mutex_unlock(&sel_lock); + schedule(); ++ mutex_lock(&sel_lock); + continue; + } + __set_current_state(TASK_RUNNING); +@@ -368,6 +387,7 @@ int paste_selection(struct tty_struct *tty) + count); + pasted += count; + } ++ mutex_unlock(&sel_lock); + remove_wait_queue(&vc->paste_wait, &wait); + __set_current_state(TASK_RUNNING); + +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 06761fcedeff..826433af4bdd 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -2688,9 +2688,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) + switch (type) + { + case TIOCL_SETSEL: +- console_lock(); + ret = set_selection((struct tiocl_selection __user *)(p+1), tty); +- console_unlock(); + break; + case TIOCL_PASTESEL: + ret = paste_selection(tty); +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 
ff1be6a6841b..4391192bdd19 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -958,13 +958,17 @@ int usb_remove_device(struct usb_device *udev) + { + struct usb_hub *hub; + struct usb_interface *intf; ++ int ret; + + if (!udev->parent) /* Can't remove a root hub */ + return -EINVAL; + hub = usb_hub_to_struct_hub(udev->parent); + intf = to_usb_interface(hub->intfdev); + +- usb_autopm_get_interface(intf); ++ ret = usb_autopm_get_interface(intf); ++ if (ret < 0) ++ return ret; ++ + set_bit(udev->portnum, hub->removed_bits); + hub_port_logical_disconnect(hub, udev->portnum); + usb_autopm_put_interface(intf); +@@ -1818,7 +1822,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) + + if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) { + hub->quirk_disable_autosuspend = 1; +- usb_autopm_get_interface(intf); ++ usb_autopm_get_interface_no_resume(intf); + } + + if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) +diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c +index 460c855be0d0..53c1f6e604b1 100644 +--- a/drivers/usb/core/port.c ++++ b/drivers/usb/core/port.c +@@ -179,7 +179,10 @@ static int usb_port_runtime_resume(struct device *dev) + if (!port_dev->is_superspeed && peer) + pm_runtime_get_sync(&peer->dev); + +- usb_autopm_get_interface(intf); ++ retval = usb_autopm_get_interface(intf); ++ if (retval < 0) ++ return retval; ++ + retval = usb_hub_set_port_power(hdev, hub, port1, true); + msleep(hub_power_on_good_delay(hub)); + if (udev && !retval) { +@@ -232,7 +235,10 @@ static int usb_port_runtime_suspend(struct device *dev) + if (usb_port_block_power_off) + return -EBUSY; + +- usb_autopm_get_interface(intf); ++ retval = usb_autopm_get_interface(intf); ++ if (retval < 0) ++ return retval; ++ + retval = usb_hub_set_port_power(hdev, hub, port1, false); + usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION); + if (!port_dev->is_superspeed) +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index ad8307140df8..64c03e871f2d 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -86,6 +86,9 @@ static const struct usb_device_id usb_quirk_list[] = { + /* Logitech PTZ Pro Camera */ + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, + ++ /* Logitech Screen Share */ ++ { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM }, ++ + /* Logitech Quickcam Fusion */ + { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, + +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index 6e30b177aa22..5a4cf779b269 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -441,9 +441,13 @@ static u8 encode_bMaxPower(enum usb_device_speed speed, + if (!val) + return 0; + if (speed < USB_SPEED_SUPER) +- return DIV_ROUND_UP(val, 2); ++ return min(val, 500U) / 2; + else +- return DIV_ROUND_UP(val, 8); ++ /* ++ * USB 3.x supports up to 900mA, but since 900 isn't divisible ++ * by 8 the integral division will effectively cap to 896mA. ++ */ ++ return min(val, 900U) / 8; + } + + static int config_buf(struct usb_configuration *config, +@@ -841,6 +845,10 @@ static int set_config(struct usb_composite_dev *cdev, + + /* when we return, be sure our power usage is valid */ + power = c->MaxPower ? 
c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; ++ if (gadget->speed < USB_SPEED_SUPER) ++ power = min(power, 500U); ++ else ++ power = min(power, 900U); + done: + usb_gadget_vbus_draw(gadget, power); + if (result >= 0 && cdev->delayed_status) +@@ -2280,7 +2288,7 @@ void composite_resume(struct usb_gadget *gadget) + { + struct usb_composite_dev *cdev = get_gadget_data(gadget); + struct usb_function *f; +- u16 maxpower; ++ unsigned maxpower; + + /* REVISIT: should we have config level + * suspend/resume callbacks? +@@ -2294,10 +2302,14 @@ void composite_resume(struct usb_gadget *gadget) + f->resume(f); + } + +- maxpower = cdev->config->MaxPower; ++ maxpower = cdev->config->MaxPower ? ++ cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; ++ if (gadget->speed < USB_SPEED_SUPER) ++ maxpower = min(maxpower, 500U); ++ else ++ maxpower = min(maxpower, 900U); + +- usb_gadget_vbus_draw(gadget, maxpower ? +- maxpower : CONFIG_USB_GADGET_VBUS_DRAW); ++ usb_gadget_vbus_draw(gadget, maxpower); + } + + cdev->suspended = 0; +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index cdffbe999500..282396e8eec6 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -1078,18 +1078,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb) + { + struct ffs_io_data *io_data = kiocb->private; + struct ffs_epfile *epfile = kiocb->ki_filp->private_data; ++ unsigned long flags; + int value; + + ENTER(); + +- spin_lock_irq(&epfile->ffs->eps_lock); ++ spin_lock_irqsave(&epfile->ffs->eps_lock, flags); + + if (likely(io_data && io_data->ep && io_data->req)) + value = usb_ep_dequeue(io_data->ep, io_data->req); + else + value = -EINVAL; + +- spin_unlock_irq(&epfile->ffs->eps_lock); ++ spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags); + + return value; + } +diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c +index 520ace49f91d..942d2977797d 100644 +--- a/drivers/usb/gadget/function/u_serial.c ++++ b/drivers/usb/gadget/function/u_serial.c +@@ -715,8 +715,10 @@ static int gs_start_io(struct gs_port *port) + port->n_read = 0; + started = gs_start_rx(port); + +- /* unblock any pending writes into our circular buffer */ + if (started) { ++ gs_start_tx(port); ++ /* Unblock any pending writes into our circular buffer, in case ++ * we didn't in gs_start_tx() */ + tty_wakeup(port->port.tty); + } else { + gs_free_requests(ep, head, &port->read_allocated); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 868878f5b72b..97cf8e1fc07c 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1640,6 +1640,12 @@ static void handle_port_status(struct xhci_hcd *xhci, + if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3)) + hcd = xhci->shared_hcd; + ++ if (!hcd) { ++ xhci_dbg(xhci, "No hcd found for port %u event\n", port_id); ++ bogus_port_status = true; ++ goto cleanup; ++ } ++ + if (major_revision == 0) { + xhci_warn(xhci, "Event for port %u not in " + "Extended Capabilities, ignoring.\n", +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index fb69cb64f7d4..df8ee83c3f1a 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -1277,6 +1277,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, + USB_SC_RBC, USB_PR_BULK, NULL, + 0 ), + ++UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100, ++ "Samsung", ++ "Flash Drive FIT", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_MAX_SECTORS_64), ++ + /* aeb */ 
+ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, + "Feiya", +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c +index 4d11152e60c1..8fe07622ae59 100644 +--- a/drivers/vhost/net.c ++++ b/drivers/vhost/net.c +@@ -1025,11 +1025,7 @@ static int vhost_net_release(struct inode *inode, struct file *f) + + static struct socket *get_raw_socket(int fd) + { +- struct { +- struct sockaddr_ll sa; +- char buf[MAX_ADDR_LEN]; +- } uaddr; +- int uaddr_len = sizeof uaddr, r; ++ int r; + struct socket *sock = sockfd_lookup(fd, &r); + + if (!sock) +@@ -1041,12 +1037,7 @@ static struct socket *get_raw_socket(int fd) + goto err; + } + +- r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, +- &uaddr_len, 0); +- if (r) +- goto err; +- +- if (uaddr.sa.sll_family != AF_PACKET) { ++ if (sock->sk->sk_family != AF_PACKET) { + r = -EPFNOSUPPORT; + goto err; + } +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c +index a17ba1465815..ff6612a3ddc8 100644 +--- a/drivers/video/console/vgacon.c ++++ b/drivers/video/console/vgacon.c +@@ -1309,6 +1309,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font) + static int vgacon_resize(struct vc_data *c, unsigned int width, + unsigned int height, unsigned int user) + { ++ if ((width << 1) * height > vga_vram_size) ++ return -EINVAL; ++ + if (width % 2 || width > screen_info.orig_video_cols || + height > (screen_info.orig_video_lines * vga_default_font_height)/ + c->vc_font.height) +diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c +index 9083d3d922b0..79383ff62019 100644 +--- a/drivers/watchdog/da9062_wdt.c ++++ b/drivers/watchdog/da9062_wdt.c +@@ -126,13 +126,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd) + struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); + int ret; + +- ret = da9062_reset_watchdog_timer(wdt); +- if (ret) { +- dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n", +- ret); +- return ret; +- } +- + ret = regmap_update_bits(wdt->hw->regmap, + DA9062AA_CONTROL_D, + DA9062AA_TWDSCALE_MASK, +diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c +index 0da9943d405f..c310e841561c 100644 +--- a/drivers/watchdog/wdat_wdt.c ++++ b/drivers/watchdog/wdat_wdt.c +@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev) + + memset(&r, 0, sizeof(r)); + r.start = gas->address; +- r.end = r.start + gas->access_width - 1; ++ r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + r.flags = IORESOURCE_MEM; + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c +index b98436f5c7c7..73d428af97a9 100644 +--- a/fs/cifs/cifsacl.c ++++ b/fs/cifs/cifsacl.c +@@ -603,7 +603,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, + ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) + *pmode |= (S_IXUGO & (*pbits_to_set)); + +- cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode); ++ cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode); + return; + } + +@@ -632,7 +632,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use, + if (mode & S_IXUGO) + *pace_flags |= SET_FILE_EXEC_RIGHTS; + +- cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n", ++ cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n", + mode, *pace_flags); + return; + } +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 6e5ecf70996a..697edc92dff2 100644 +--- a/fs/cifs/connect.c 
++++ b/fs/cifs/connect.c +@@ -3521,7 +3521,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, + cifs_sb->mnt_gid = pvolume_info->linux_gid; + cifs_sb->mnt_file_mode = pvolume_info->file_mode; + cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; +- cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n", ++ cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n", + cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); + + cifs_sb->actimeo = pvolume_info->actimeo; +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index a35c14105906..bdce714e9448 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -1581,7 +1581,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode) + struct TCP_Server_Info *server; + char *full_path; + +- cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n", ++ cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n", + mode, inode); + + cifs_sb = CIFS_SB(inode->i_sb); +@@ -1998,6 +1998,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry) + struct inode *inode = d_inode(dentry); + struct super_block *sb = dentry->d_sb; + char *full_path = NULL; ++ int count = 0; + + if (inode == NULL) + return -ENOENT; +@@ -2019,15 +2020,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry) + full_path, inode, inode->i_count.counter, + dentry, cifs_get_time(dentry), jiffies); + ++again: + if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext) + rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); + else + rc = cifs_get_inode_info(&inode, full_path, NULL, sb, + xid, NULL); +- ++ if (rc == -EAGAIN && count++ < 10) ++ goto again; + out: + kfree(full_path); + free_xid(xid); ++ + return rc; + } + +diff --git a/fs/dax.c b/fs/dax.c +index ddb4981ae32e..34a55754164f 100644 +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -1057,6 +1057,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, + lockdep_assert_held(&inode->i_rwsem); + } + ++ if (iocb->ki_flags & IOCB_NOWAIT) ++ flags |= IOMAP_NOWAIT; ++ + while (iov_iter_count(iter)) { + ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, + iter, dax_iomap_actor); +diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c +index 3f3ec50bf773..b134315fb69d 100644 +--- a/fs/ecryptfs/keystore.c ++++ b/fs/ecryptfs/keystore.c +@@ -1285,7 +1285,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat, + printk(KERN_ERR "Enter w/ first byte != 0x%.2x\n", + ECRYPTFS_TAG_1_PACKET_TYPE); + rc = -EINVAL; +- goto out_free; ++ goto out; + } + /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or + * at end of function upon failure */ +@@ -1335,7 +1335,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat, + printk(KERN_WARNING "Tag 1 packet contains key larger " + "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES"); + rc = -EINVAL; +- goto out; ++ goto out_free; + } + memcpy((*new_auth_tok)->session_key.encrypted_key, + &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2))); +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index 70266a3355dc..fb38f20f869e 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -280,6 +280,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, + ext4_group_t ngroups = ext4_get_groups_count(sb); + struct ext4_group_desc *desc; + struct ext4_sb_info *sbi = EXT4_SB(sb); ++ struct buffer_head *bh_p; + + if (block_group >= ngroups) { + ext4_error(sb, "block_group >= groups_count - block_group = %u," +@@ -290,7 +291,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, + + group_desc = block_group >> 
EXT4_DESC_PER_BLOCK_BITS(sb); + offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); +- if (!sbi->s_group_desc[group_desc]) { ++ bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc); ++ /* ++ * sbi_array_rcu_deref returns with rcu unlocked, this is ok since ++ * the pointer being dereferenced won't be dereferenced again. By ++ * looking at the usage in add_new_gdb() the value isn't modified, ++ * just the pointer, and so it remains valid. ++ */ ++ if (!bh_p) { + ext4_error(sb, "Group descriptor not loaded - " + "block_group = %u, group_desc = %u, desc = %u", + block_group, group_desc, offset); +@@ -298,10 +306,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, + } + + desc = (struct ext4_group_desc *)( +- (__u8 *)sbi->s_group_desc[group_desc]->b_data + ++ (__u8 *)bh_p->b_data + + offset * EXT4_DESC_SIZE(sb)); + if (bh) +- *bh = sbi->s_group_desc[group_desc]; ++ *bh = bh_p; + return desc; + } + +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index b162f602c430..4aa0f8f7d9a0 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -1382,7 +1382,7 @@ struct ext4_sb_info { + loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */ + struct buffer_head * s_sbh; /* Buffer containing the super block */ + struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */ +- struct buffer_head **s_group_desc; ++ struct buffer_head * __rcu *s_group_desc; + unsigned int s_mount_opt; + unsigned int s_mount_opt2; + unsigned int s_mount_flags; +@@ -1442,7 +1442,7 @@ struct ext4_sb_info { + #endif + + /* for buddy allocator */ +- struct ext4_group_info ***s_group_info; ++ struct ext4_group_info ** __rcu *s_group_info; + struct inode *s_buddy_cache; + spinlock_t s_md_lock; + unsigned short *s_mb_offsets; +@@ -1492,7 +1492,7 @@ struct ext4_sb_info { + unsigned int s_extent_max_zeroout_kb; + + unsigned int s_log_groups_per_flex; +- struct flex_groups *s_flex_groups; ++ struct flex_groups * __rcu *s_flex_groups; + ext4_group_t s_flex_groups_allocated; + + /* workqueue for reserved extent conversions (buffered io) */ +@@ -1556,6 +1556,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) + ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); + } + ++/* ++ * Returns: sbi->field[index] ++ * Used to access an array element from the following sbi fields which require ++ * rcu protection to avoid dereferencing an invalid pointer due to reassignment ++ * - s_group_desc ++ * - s_group_info ++ * - s_flex_group ++ */ ++#define sbi_array_rcu_deref(sbi, field, index) \ ++({ \ ++ typeof(*((sbi)->field)) _v; \ ++ rcu_read_lock(); \ ++ _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \ ++ rcu_read_unlock(); \ ++ _v; \ ++}) ++ + /* + * Inode dynamic state flags + */ +@@ -2569,6 +2586,7 @@ extern int ext4_generic_delete_entry(handle_t *handle, + extern bool ext4_empty_dir(struct inode *inode); + + /* resize.c */ ++extern void ext4_kvfree_array_rcu(void *to_free); + extern int ext4_group_add(struct super_block *sb, + struct ext4_new_group_data *input); + extern int ext4_group_extend(struct super_block *sb, +@@ -2814,13 +2832,13 @@ static inline + struct ext4_group_info *ext4_get_group_info(struct super_block *sb, + ext4_group_t group) + { +- struct ext4_group_info ***grp_info; ++ struct ext4_group_info **grp_info; + long indexv, indexh; + BUG_ON(group >= EXT4_SB(sb)->s_groups_count); +- grp_info = EXT4_SB(sb)->s_group_info; + indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); + indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); +- return 
grp_info[indexv][indexh]; ++ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); ++ return grp_info[indexh]; + } + + /* +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index 2f46564d3fca..2a480c0ef1bc 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -333,11 +333,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) + + percpu_counter_inc(&sbi->s_freeinodes_counter); + if (sbi->s_log_groups_per_flex) { +- ext4_group_t f = ext4_flex_group(sbi, block_group); ++ struct flex_groups *fg; + +- atomic_inc(&sbi->s_flex_groups[f].free_inodes); ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, ++ ext4_flex_group(sbi, block_group)); ++ atomic_inc(&fg->free_inodes); + if (is_directory) +- atomic_dec(&sbi->s_flex_groups[f].used_dirs); ++ atomic_dec(&fg->used_dirs); + } + BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata"); + fatal = ext4_handle_dirty_metadata(handle, NULL, bh2); +@@ -378,12 +380,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, + int flex_size, struct orlov_stats *stats) + { + struct ext4_group_desc *desc; +- struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups; + + if (flex_size > 1) { +- stats->free_inodes = atomic_read(&flex_group[g].free_inodes); +- stats->free_clusters = atomic64_read(&flex_group[g].free_clusters); +- stats->used_dirs = atomic_read(&flex_group[g].used_dirs); ++ struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb), ++ s_flex_groups, g); ++ stats->free_inodes = atomic_read(&fg->free_inodes); ++ stats->free_clusters = atomic64_read(&fg->free_clusters); ++ stats->used_dirs = atomic_read(&fg->used_dirs); + return; + } + +@@ -1062,7 +1065,8 @@ got: + if (sbi->s_log_groups_per_flex) { + ext4_group_t f = ext4_flex_group(sbi, group); + +- atomic_inc(&sbi->s_flex_groups[f].used_dirs); ++ atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups, ++ f)->used_dirs); + } + } + if (ext4_has_group_desc_csum(sb)) { +@@ -1085,7 +1089,8 @@ got: + + if (sbi->s_log_groups_per_flex) { + flex_group = ext4_flex_group(sbi, group); +- atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes); ++ atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups, ++ flex_group)->free_inodes); + } + + inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 3ba9a4ae4eac..745a89d30a57 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -2389,7 +2389,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); + unsigned size; +- struct ext4_group_info ***new_groupinfo; ++ struct ext4_group_info ***old_groupinfo, ***new_groupinfo; + + size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> + EXT4_DESC_PER_BLOCK_BITS(sb); +@@ -2402,13 +2402,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) + ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); + return -ENOMEM; + } +- if (sbi->s_group_info) { +- memcpy(new_groupinfo, sbi->s_group_info, ++ rcu_read_lock(); ++ old_groupinfo = rcu_dereference(sbi->s_group_info); ++ if (old_groupinfo) ++ memcpy(new_groupinfo, old_groupinfo, + sbi->s_group_info_size * sizeof(*sbi->s_group_info)); +- kvfree(sbi->s_group_info); +- } +- sbi->s_group_info = new_groupinfo; ++ rcu_read_unlock(); ++ rcu_assign_pointer(sbi->s_group_info, new_groupinfo); + sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); ++ if (old_groupinfo) ++ ext4_kvfree_array_rcu(old_groupinfo); + ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", + sbi->s_group_info_size); + 
return 0; +@@ -2420,6 +2423,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, + { + int i; + int metalen = 0; ++ int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_group_info **meta_group_info; + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); +@@ -2438,12 +2442,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, + "for a buddy group"); + goto exit_meta_group_info; + } +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = +- meta_group_info; ++ rcu_read_lock(); ++ rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; ++ rcu_read_unlock(); + } + +- meta_group_info = +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; ++ meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); + i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); + + meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); +@@ -2491,8 +2495,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, + exit_group_info: + /* If a meta_group_info table has been allocated, release it now */ + if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { +- kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; ++ struct ext4_group_info ***group_info; ++ ++ rcu_read_lock(); ++ group_info = rcu_dereference(sbi->s_group_info); ++ kfree(group_info[idx]); ++ group_info[idx] = NULL; ++ rcu_read_unlock(); + } + exit_meta_group_info: + return -ENOMEM; +@@ -2505,6 +2514,7 @@ static int ext4_mb_init_backend(struct super_block *sb) + struct ext4_sb_info *sbi = EXT4_SB(sb); + int err; + struct ext4_group_desc *desc; ++ struct ext4_group_info ***group_info; + struct kmem_cache *cachep; + + err = ext4_mb_alloc_groupinfo(sb, ngroups); +@@ -2539,11 +2549,16 @@ err_freebuddy: + while (i-- > 0) + kmem_cache_free(cachep, ext4_get_group_info(sb, i)); + i = sbi->s_group_info_size; ++ rcu_read_lock(); ++ group_info = rcu_dereference(sbi->s_group_info); + while (i-- > 0) +- kfree(sbi->s_group_info[i]); ++ kfree(group_info[i]); ++ rcu_read_unlock(); + iput(sbi->s_buddy_cache); + err_freesgi: +- kvfree(sbi->s_group_info); ++ rcu_read_lock(); ++ kvfree(rcu_dereference(sbi->s_group_info)); ++ rcu_read_unlock(); + return -ENOMEM; + } + +@@ -2733,7 +2748,7 @@ int ext4_mb_release(struct super_block *sb) + ext4_group_t ngroups = ext4_get_groups_count(sb); + ext4_group_t i; + int num_meta_group_infos; +- struct ext4_group_info *grinfo; ++ struct ext4_group_info *grinfo, ***group_info; + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); + +@@ -2751,9 +2766,12 @@ int ext4_mb_release(struct super_block *sb) + num_meta_group_infos = (ngroups + + EXT4_DESC_PER_BLOCK(sb) - 1) >> + EXT4_DESC_PER_BLOCK_BITS(sb); ++ rcu_read_lock(); ++ group_info = rcu_dereference(sbi->s_group_info); + for (i = 0; i < num_meta_group_infos; i++) +- kfree(sbi->s_group_info[i]); +- kvfree(sbi->s_group_info); ++ kfree(group_info[i]); ++ kvfree(group_info); ++ rcu_read_unlock(); + } + kfree(sbi->s_mb_offsets); + kfree(sbi->s_mb_maxs); +@@ -3052,7 +3070,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, + ext4_group_t flex_group = ext4_flex_group(sbi, + ac->ac_b_ex.fe_group); + atomic64_sub(ac->ac_b_ex.fe_len, +- &sbi->s_flex_groups[flex_group].free_clusters); ++ &sbi_array_rcu_deref(sbi, s_flex_groups, ++ flex_group)->free_clusters); + } + + err = ext4_handle_dirty_metadata(handle, NULL, 
bitmap_bh); +@@ -4947,7 +4966,8 @@ do_more: + if (sbi->s_log_groups_per_flex) { + ext4_group_t flex_group = ext4_flex_group(sbi, block_group); + atomic64_add(count_clusters, +- &sbi->s_flex_groups[flex_group].free_clusters); ++ &sbi_array_rcu_deref(sbi, s_flex_groups, ++ flex_group)->free_clusters); + } + + if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) +@@ -5092,7 +5112,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, + if (sbi->s_log_groups_per_flex) { + ext4_group_t flex_group = ext4_flex_group(sbi, block_group); + atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), +- &sbi->s_flex_groups[flex_group].free_clusters); ++ &sbi_array_rcu_deref(sbi, s_flex_groups, ++ flex_group)->free_clusters); + } + + ext4_mb_unload_buddy(&e4b); +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index 4f7cd78d0364..d42f7471fd5b 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -17,6 +17,33 @@ + + #include "ext4_jbd2.h" + ++struct ext4_rcu_ptr { ++ struct rcu_head rcu; ++ void *ptr; ++}; ++ ++static void ext4_rcu_ptr_callback(struct rcu_head *head) ++{ ++ struct ext4_rcu_ptr *ptr; ++ ++ ptr = container_of(head, struct ext4_rcu_ptr, rcu); ++ kvfree(ptr->ptr); ++ kfree(ptr); ++} ++ ++void ext4_kvfree_array_rcu(void *to_free) ++{ ++ struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); ++ ++ if (ptr) { ++ ptr->ptr = to_free; ++ call_rcu(&ptr->rcu, ext4_rcu_ptr_callback); ++ return; ++ } ++ synchronize_rcu(); ++ kvfree(to_free); ++} ++ + int ext4_resize_begin(struct super_block *sb) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +@@ -545,8 +572,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb, + brelse(gdb); + goto out; + } +- memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data, +- gdb->b_size); ++ memcpy(gdb->b_data, sbi_array_rcu_deref(sbi, ++ s_group_desc, j)->b_data, gdb->b_size); + set_buffer_uptodate(gdb); + + err = ext4_handle_dirty_metadata(handle, NULL, gdb); +@@ -854,13 +881,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, + } + brelse(dind); + +- o_group_desc = EXT4_SB(sb)->s_group_desc; ++ rcu_read_lock(); ++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); + memcpy(n_group_desc, o_group_desc, + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); ++ rcu_read_unlock(); + n_group_desc[gdb_num] = gdb_bh; +- EXT4_SB(sb)->s_group_desc = n_group_desc; ++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); + EXT4_SB(sb)->s_gdb_count++; +- kvfree(o_group_desc); ++ ext4_kvfree_array_rcu(o_group_desc); + + le16_add_cpu(&es->s_reserved_gdt_blocks, -1); + err = ext4_handle_dirty_super(handle, sb); +@@ -904,9 +933,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb, + return err; + } + +- o_group_desc = EXT4_SB(sb)->s_group_desc; ++ rcu_read_lock(); ++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); + memcpy(n_group_desc, o_group_desc, + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); ++ rcu_read_unlock(); + n_group_desc[gdb_num] = gdb_bh; + + BUFFER_TRACE(gdb_bh, "get_write_access"); +@@ -917,9 +948,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb, + return err; + } + +- EXT4_SB(sb)->s_group_desc = n_group_desc; ++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); + EXT4_SB(sb)->s_gdb_count++; +- kvfree(o_group_desc); ++ ext4_kvfree_array_rcu(o_group_desc); + return err; + } + +@@ -1183,7 +1214,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, + * use non-sparse filesystems anymore. This is already checked above. 
+ */ + if (gdb_off) { +- gdb_bh = sbi->s_group_desc[gdb_num]; ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, ++ gdb_num); + BUFFER_TRACE(gdb_bh, "get_write_access"); + err = ext4_journal_get_write_access(handle, gdb_bh); + +@@ -1265,7 +1297,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, + /* + * get_write_access() has been called on gdb_bh by ext4_add_new_desc(). + */ +- gdb_bh = sbi->s_group_desc[gdb_num]; ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num); + /* Update group descriptor block for new group */ + gdp = (struct ext4_group_desc *)(gdb_bh->b_data + + gdb_off * EXT4_DESC_SIZE(sb)); +@@ -1393,11 +1425,14 @@ static void ext4_update_super(struct super_block *sb, + percpu_counter_read(&sbi->s_freeclusters_counter)); + if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) { + ext4_group_t flex_group; ++ struct flex_groups *fg; ++ + flex_group = ext4_flex_group(sbi, group_data[0].group); ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); + atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), +- &sbi->s_flex_groups[flex_group].free_clusters); ++ &fg->free_clusters); + atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, +- &sbi->s_flex_groups[flex_group].free_inodes); ++ &fg->free_inodes); + } + + /* +@@ -1492,7 +1527,8 @@ exit_journal: + for (; gdb_num <= gdb_num_end; gdb_num++) { + struct buffer_head *gdb_bh; + +- gdb_bh = sbi->s_group_desc[gdb_num]; ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, ++ gdb_num); + if (old_gdb == gdb_bh->b_blocknr) + continue; + update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data, +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 09b443709bca..f5646bcad770 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -900,6 +900,8 @@ static void ext4_put_super(struct super_block *sb) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_super_block *es = sbi->s_es; ++ struct buffer_head **group_desc; ++ struct flex_groups **flex_groups; + int aborted = 0; + int i, err; + +@@ -931,10 +933,18 @@ static void ext4_put_super(struct super_block *sb) + if (!sb_rdonly(sb)) + ext4_commit_super(sb, 1); + ++ rcu_read_lock(); ++ group_desc = rcu_dereference(sbi->s_group_desc); + for (i = 0; i < sbi->s_gdb_count; i++) +- brelse(sbi->s_group_desc[i]); +- kvfree(sbi->s_group_desc); +- kvfree(sbi->s_flex_groups); ++ brelse(group_desc[i]); ++ kvfree(group_desc); ++ flex_groups = rcu_dereference(sbi->s_flex_groups); ++ if (flex_groups) { ++ for (i = 0; i < sbi->s_flex_groups_allocated; i++) ++ kvfree(flex_groups[i]); ++ kvfree(flex_groups); ++ } ++ rcu_read_unlock(); + percpu_counter_destroy(&sbi->s_freeclusters_counter); + percpu_counter_destroy(&sbi->s_freeinodes_counter); + percpu_counter_destroy(&sbi->s_dirs_counter); +@@ -2227,8 +2237,8 @@ done: + int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +- struct flex_groups *new_groups; +- int size; ++ struct flex_groups **old_groups, **new_groups; ++ int size, i, j; + + if (!sbi->s_log_groups_per_flex) + return 0; +@@ -2237,22 +2247,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) + if (size <= sbi->s_flex_groups_allocated) + return 0; + +- size = roundup_pow_of_two(size * sizeof(struct flex_groups)); +- new_groups = kvzalloc(size, GFP_KERNEL); ++ new_groups = kvzalloc(roundup_pow_of_two(size * ++ sizeof(*sbi->s_flex_groups)), GFP_KERNEL); + if (!new_groups) { +- ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups", +- size / (int) sizeof(struct 
flex_groups)); ++ ext4_msg(sb, KERN_ERR, ++ "not enough memory for %d flex group pointers", size); + return -ENOMEM; + } +- +- if (sbi->s_flex_groups) { +- memcpy(new_groups, sbi->s_flex_groups, +- (sbi->s_flex_groups_allocated * +- sizeof(struct flex_groups))); +- kvfree(sbi->s_flex_groups); ++ for (i = sbi->s_flex_groups_allocated; i < size; i++) { ++ new_groups[i] = kvzalloc(roundup_pow_of_two( ++ sizeof(struct flex_groups)), ++ GFP_KERNEL); ++ if (!new_groups[i]) { ++ for (j = sbi->s_flex_groups_allocated; j < i; j++) ++ kvfree(new_groups[j]); ++ kvfree(new_groups); ++ ext4_msg(sb, KERN_ERR, ++ "not enough memory for %d flex groups", size); ++ return -ENOMEM; ++ } + } +- sbi->s_flex_groups = new_groups; +- sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups); ++ rcu_read_lock(); ++ old_groups = rcu_dereference(sbi->s_flex_groups); ++ if (old_groups) ++ memcpy(new_groups, old_groups, ++ (sbi->s_flex_groups_allocated * ++ sizeof(struct flex_groups *))); ++ rcu_read_unlock(); ++ rcu_assign_pointer(sbi->s_flex_groups, new_groups); ++ sbi->s_flex_groups_allocated = size; ++ if (old_groups) ++ ext4_kvfree_array_rcu(old_groups); + return 0; + } + +@@ -2260,6 +2285,7 @@ static int ext4_fill_flex_info(struct super_block *sb) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_group_desc *gdp = NULL; ++ struct flex_groups *fg; + ext4_group_t flex_group; + int i, err; + +@@ -2277,12 +2303,11 @@ static int ext4_fill_flex_info(struct super_block *sb) + gdp = ext4_get_group_desc(sb, i, NULL); + + flex_group = ext4_flex_group(sbi, i); +- atomic_add(ext4_free_inodes_count(sb, gdp), +- &sbi->s_flex_groups[flex_group].free_inodes); ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); ++ atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes); + atomic64_add(ext4_free_group_clusters(sb, gdp), +- &sbi->s_flex_groups[flex_group].free_clusters); +- atomic_add(ext4_used_dirs_count(sb, gdp), +- &sbi->s_flex_groups[flex_group].used_dirs); ++ &fg->free_clusters); ++ atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs); + } + + return 1; +@@ -3489,9 +3514,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + { + struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev); + char *orig_data = kstrdup(data, GFP_KERNEL); +- struct buffer_head *bh; ++ struct buffer_head *bh, **group_desc; + struct ext4_super_block *es = NULL; + struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); ++ struct flex_groups **flex_groups; + ext4_fsblk_t block; + ext4_fsblk_t sb_block = get_sb_block(&data); + ext4_fsblk_t logical_sb_block; +@@ -4104,9 +4130,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + goto failed_mount; + } + } +- sbi->s_group_desc = kvmalloc(db_count * ++ rcu_assign_pointer(sbi->s_group_desc, ++ kvmalloc_array(db_count, + sizeof(struct buffer_head *), +- GFP_KERNEL); ++ GFP_KERNEL)); + if (sbi->s_group_desc == NULL) { + ext4_msg(sb, KERN_ERR, "not enough memory"); + ret = -ENOMEM; +@@ -4122,14 +4149,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + } + + for (i = 0; i < db_count; i++) { ++ struct buffer_head *bh; ++ + block = descriptor_loc(sb, logical_sb_block, i); +- sbi->s_group_desc[i] = sb_bread_unmovable(sb, block); +- if (!sbi->s_group_desc[i]) { ++ bh = sb_bread_unmovable(sb, block); ++ if (!bh) { + ext4_msg(sb, KERN_ERR, + "can't read group descriptor %d", i); + db_count = i; + goto failed_mount2; + } ++ rcu_read_lock(); ++ rcu_dereference(sbi->s_group_desc)[i] = bh; 
++ rcu_read_unlock(); + } + sbi->s_gdb_count = db_count; + if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { +@@ -4484,8 +4516,14 @@ failed_mount7: + ext4_unregister_li_request(sb); + failed_mount6: + ext4_mb_release(sb); +- if (sbi->s_flex_groups) +- kvfree(sbi->s_flex_groups); ++ rcu_read_lock(); ++ flex_groups = rcu_dereference(sbi->s_flex_groups); ++ if (flex_groups) { ++ for (i = 0; i < sbi->s_flex_groups_allocated; i++) ++ kvfree(flex_groups[i]); ++ kvfree(flex_groups); ++ } ++ rcu_read_unlock(); + percpu_counter_destroy(&sbi->s_freeclusters_counter); + percpu_counter_destroy(&sbi->s_freeinodes_counter); + percpu_counter_destroy(&sbi->s_dirs_counter); +@@ -4521,9 +4559,12 @@ failed_mount3: + if (sbi->s_mmp_tsk) + kthread_stop(sbi->s_mmp_tsk); + failed_mount2: ++ rcu_read_lock(); ++ group_desc = rcu_dereference(sbi->s_group_desc); + for (i = 0; i < db_count; i++) +- brelse(sbi->s_group_desc[i]); +- kvfree(sbi->s_group_desc); ++ brelse(group_desc[i]); ++ kvfree(group_desc); ++ rcu_read_unlock(); + failed_mount: + if (sbi->s_chksum_driver) + crypto_free_shash(sbi->s_chksum_driver); +diff --git a/fs/fat/inode.c b/fs/fat/inode.c +index 3b40937b942a..1df023c4c2cc 100644 +--- a/fs/fat/inode.c ++++ b/fs/fat/inode.c +@@ -736,6 +736,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb) + return NULL; + + init_rwsem(&ei->truncate_lock); ++ /* Zeroing to allow iput() even if partial initialized inode. */ ++ ei->mmu_private = 0; ++ ei->i_start = 0; ++ ei->i_logstart = 0; ++ ei->i_attrs = 0; ++ ei->i_pos = 0; ++ + return &ei->vfs_inode; + } + +@@ -1366,16 +1373,6 @@ out: + return 0; + } + +-static void fat_dummy_inode_init(struct inode *inode) +-{ +- /* Initialize this dummy inode to work as no-op. */ +- MSDOS_I(inode)->mmu_private = 0; +- MSDOS_I(inode)->i_start = 0; +- MSDOS_I(inode)->i_logstart = 0; +- MSDOS_I(inode)->i_attrs = 0; +- MSDOS_I(inode)->i_pos = 0; +-} +- + static int fat_read_root(struct inode *inode) + { + struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); +@@ -1820,13 +1817,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, + fat_inode = new_inode(sb); + if (!fat_inode) + goto out_fail; +- fat_dummy_inode_init(fat_inode); + sbi->fat_inode = fat_inode; + + fsinfo_inode = new_inode(sb); + if (!fsinfo_inode) + goto out_fail; +- fat_dummy_inode_init(fsinfo_inode); + fsinfo_inode->i_ino = MSDOS_FSINFO_INO; + sbi->fsinfo_inode = fsinfo_inode; + insert_inode_hash(fsinfo_inode); +diff --git a/fs/namei.c b/fs/namei.c +index f421f8d80f4d..c5ebe33984f4 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -1382,7 +1382,7 @@ static int follow_dotdot_rcu(struct nameidata *nd) + nd->path.dentry = parent; + nd->seq = seq; + if (unlikely(!path_connected(&nd->path))) +- return -ENOENT; ++ return -ECHILD; + break; + } else { + struct mount *mnt = real_mount(nd->path.mnt); +diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h +index 4f077edb9b81..71fadbe77e21 100644 +--- a/include/acpi/actypes.h ++++ b/include/acpi/actypes.h +@@ -556,11 +556,12 @@ typedef u64 acpi_integer; + #define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8)) + + /* +- * Algorithm to obtain access bit width. ++ * Algorithm to obtain access bit or byte width. + * Can be used with access_width of struct acpi_generic_address and access_size of + * struct acpi_resource_generic_register. 
+ */ + #define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2)) ++#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1)) + + /******************************************************************************* + * +diff --git a/include/linux/bitops.h b/include/linux/bitops.h +index b767c7ad65c6..c51574fab0b0 100644 +--- a/include/linux/bitops.h ++++ b/include/linux/bitops.h +@@ -4,7 +4,8 @@ + #include <asm/types.h> + #include <linux/bits.h> + +-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) ++#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) ++#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) + + extern unsigned int __sw_hweight8(unsigned int w); + extern unsigned int __sw_hweight16(unsigned int w); +diff --git a/include/linux/hid.h b/include/linux/hid.h +index 3656a04d764b..ba1f67559831 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -477,7 +477,7 @@ struct hid_report_enum { + }; + + #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */ +-#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */ ++#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */ + #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */ + #define HID_OUTPUT_FIFO_SIZE 64 + +diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h +index 227dc0a84172..ddf916e5e57d 100644 +--- a/include/net/flow_dissector.h ++++ b/include/net/flow_dissector.h +@@ -5,6 +5,7 @@ + #include <linux/types.h> + #include <linux/in6.h> + #include <linux/siphash.h> ++#include <linux/string.h> + #include <uapi/linux/if_ether.h> + + /** +@@ -282,4 +283,12 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec + return ((char *)target_container) + flow_dissector->offset[key_id]; + } + ++static inline void ++flow_dissector_init_keys(struct flow_dissector_key_control *key_control, ++ struct flow_dissector_key_basic *key_basic) ++{ ++ memset(key_control, 0, sizeof(*key_control)); ++ memset(key_basic, 0, sizeof(*key_basic)); ++} ++ + #endif +diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h +index 5f72af35b3ed..ad22079125bf 100644 +--- a/include/uapi/linux/usb/charger.h ++++ b/include/uapi/linux/usb/charger.h +@@ -14,18 +14,18 @@ + * ACA (Accessory Charger Adapters) + */ + enum usb_charger_type { +- UNKNOWN_TYPE, +- SDP_TYPE, +- DCP_TYPE, +- CDP_TYPE, +- ACA_TYPE, ++ UNKNOWN_TYPE = 0, ++ SDP_TYPE = 1, ++ DCP_TYPE = 2, ++ CDP_TYPE = 3, ++ ACA_TYPE = 4, + }; + + /* USB charger state */ + enum usb_charger_state { +- USB_CHARGER_DEFAULT, +- USB_CHARGER_PRESENT, +- USB_CHARGER_ABSENT, ++ USB_CHARGER_DEFAULT = 0, ++ USB_CHARGER_PRESENT = 1, ++ USB_CHARGER_ABSENT = 2, + }; + + #endif /* _UAPI__LINUX_USB_CHARGER_H */ +diff --git a/kernel/audit.c b/kernel/audit.c +index d301276bca58..b21a8910f765 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -1067,13 +1067,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature + audit_log_end(ab); + } + +-static int audit_set_feature(struct sk_buff *skb) ++static int audit_set_feature(struct audit_features *uaf) + { +- struct audit_features *uaf; + int i; + + BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names)); +- uaf = nlmsg_data(nlmsg_hdr(skb)); + + /* if there is ever a version 2 we should handle that here */ + +@@ -1141,6 +1139,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + { + u32 seq; + void *data; ++ int data_len; + int err; + struct audit_buffer *ab; + u16 msg_type = 
nlh->nlmsg_type; +@@ -1154,6 +1153,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + + seq = nlh->nlmsg_seq; + data = nlmsg_data(nlh); ++ data_len = nlmsg_len(nlh); + + switch (msg_type) { + case AUDIT_GET: { +@@ -1177,7 +1177,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + struct audit_status s; + memset(&s, 0, sizeof(s)); + /* guard against past and future API changes */ +- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh))); ++ memcpy(&s, data, min_t(size_t, sizeof(s), data_len)); + if (s.mask & AUDIT_STATUS_ENABLED) { + err = audit_set_enabled(s.enabled); + if (err < 0) +@@ -1281,7 +1281,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + return err; + break; + case AUDIT_SET_FEATURE: +- err = audit_set_feature(skb); ++ if (data_len < sizeof(struct audit_features)) ++ return -EINVAL; ++ err = audit_set_feature(data); + if (err) + return err; + break; +@@ -1293,6 +1295,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + + err = audit_filter(msg_type, AUDIT_FILTER_USER); + if (err == 1) { /* match or error */ ++ char *str = data; ++ + err = 0; + if (msg_type == AUDIT_USER_TTY) { + err = tty_audit_push(); +@@ -1300,26 +1304,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + break; + } + audit_log_common_recv_msg(&ab, msg_type); +- if (msg_type != AUDIT_USER_TTY) ++ if (msg_type != AUDIT_USER_TTY) { ++ /* ensure NULL termination */ ++ str[data_len - 1] = '\0'; + audit_log_format(ab, " msg='%.*s'", + AUDIT_MESSAGE_TEXT_MAX, +- (char *)data); +- else { +- int size; +- ++ str); ++ } else { + audit_log_format(ab, " data="); +- size = nlmsg_len(nlh); +- if (size > 0 && +- ((unsigned char *)data)[size - 1] == '\0') +- size--; +- audit_log_n_untrustedstring(ab, data, size); ++ if (data_len > 0 && str[data_len - 1] == '\0') ++ data_len--; ++ audit_log_n_untrustedstring(ab, str, data_len); + } + audit_log_end(ab); + } + break; + case AUDIT_ADD_RULE: + case AUDIT_DEL_RULE: +- if (nlmsg_len(nlh) < sizeof(struct audit_rule_data)) ++ if (data_len < sizeof(struct audit_rule_data)) + return -EINVAL; + if (audit_enabled == AUDIT_LOCKED) { + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE); +@@ -1327,7 +1329,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + audit_log_end(ab); + return -EPERM; + } +- err = audit_rule_change(msg_type, seq, data, nlmsg_len(nlh)); ++ err = audit_rule_change(msg_type, seq, data, data_len); + break; + case AUDIT_LIST_RULES: + err = audit_list_rules_send(skb, seq); +@@ -1341,7 +1343,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + case AUDIT_MAKE_EQUIV: { + void *bufp = data; + u32 sizes[2]; +- size_t msglen = nlmsg_len(nlh); ++ size_t msglen = data_len; + char *old, *new; + + err = -EINVAL; +@@ -1417,7 +1419,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + + memset(&s, 0, sizeof(s)); + /* guard against past and future API changes */ +- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh))); ++ memcpy(&s, data, min_t(size_t, sizeof(s), data_len)); + /* check if new data is valid */ + if ((s.enabled != 0 && s.enabled != 1) || + (s.log_passwd != 0 && s.log_passwd != 1)) +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c +index 215c6e1ee026..16cf396ea738 100644 +--- a/kernel/auditfilter.c ++++ b/kernel/auditfilter.c +@@ -435,6 +435,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, + bufp = data->buf; + 
for (i = 0; i < data->field_count; i++) { + struct audit_field *f = &entry->rule.fields[i]; ++ u32 f_val; + + err = -EINVAL; + +@@ -443,12 +444,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, + goto exit_free; + + f->type = data->fields[i]; +- f->val = data->values[i]; ++ f_val = data->values[i]; + + /* Support legacy tests for a valid loginuid */ +- if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) { ++ if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) { + f->type = AUDIT_LOGINUID_SET; +- f->val = 0; ++ f_val = 0; + entry->rule.pflags |= AUDIT_LOGINUID_LEGACY; + } + +@@ -464,7 +465,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, + case AUDIT_SUID: + case AUDIT_FSUID: + case AUDIT_OBJ_UID: +- f->uid = make_kuid(current_user_ns(), f->val); ++ f->uid = make_kuid(current_user_ns(), f_val); + if (!uid_valid(f->uid)) + goto exit_free; + break; +@@ -473,12 +474,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, + case AUDIT_SGID: + case AUDIT_FSGID: + case AUDIT_OBJ_GID: +- f->gid = make_kgid(current_user_ns(), f->val); ++ f->gid = make_kgid(current_user_ns(), f_val); + if (!gid_valid(f->gid)) + goto exit_free; + break; + case AUDIT_SESSIONID: + case AUDIT_ARCH: ++ f->val = f_val; + entry->rule.arch_f = f; + break; + case AUDIT_SUBJ_USER: +@@ -491,11 +493,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, + case AUDIT_OBJ_TYPE: + case AUDIT_OBJ_LEV_LOW: + case AUDIT_OBJ_LEV_HIGH: +- str = audit_unpack_string(&bufp, &remain, f->val); +- if (IS_ERR(str)) ++ str = audit_unpack_string(&bufp, &remain, f_val); ++ if (IS_ERR(str)) { ++ err = PTR_ERR(str); + goto exit_free; +- entry->rule.buflen += f->val; +- ++ } ++ entry->rule.buflen += f_val; ++ f->lsm_str = str; + err = security_audit_rule_init(f->type, f->op, str, + (void **)&f->lsm_rule); + /* Keep currently invalid fields around in case they +@@ -504,68 +508,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, + pr_warn("audit rule for LSM \'%s\' is invalid\n", + str); + err = 0; +- } +- if (err) { +- kfree(str); ++ } else if (err) + goto exit_free; +- } else +- f->lsm_str = str; + break; + case AUDIT_WATCH: +- str = audit_unpack_string(&bufp, &remain, f->val); +- if (IS_ERR(str)) ++ str = audit_unpack_string(&bufp, &remain, f_val); ++ if (IS_ERR(str)) { ++ err = PTR_ERR(str); + goto exit_free; +- entry->rule.buflen += f->val; +- +- err = audit_to_watch(&entry->rule, str, f->val, f->op); ++ } ++ err = audit_to_watch(&entry->rule, str, f_val, f->op); + if (err) { + kfree(str); + goto exit_free; + } ++ entry->rule.buflen += f_val; + break; + case AUDIT_DIR: +- str = audit_unpack_string(&bufp, &remain, f->val); +- if (IS_ERR(str)) ++ str = audit_unpack_string(&bufp, &remain, f_val); ++ if (IS_ERR(str)) { ++ err = PTR_ERR(str); + goto exit_free; +- entry->rule.buflen += f->val; +- ++ } + err = audit_make_tree(&entry->rule, str, f->op); + kfree(str); + if (err) + goto exit_free; ++ entry->rule.buflen += f_val; + break; + case AUDIT_INODE: ++ f->val = f_val; + err = audit_to_inode(&entry->rule, f); + if (err) + goto exit_free; + break; + case AUDIT_FILTERKEY: +- if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN) ++ if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN) + goto exit_free; +- str = audit_unpack_string(&bufp, &remain, f->val); +- if (IS_ERR(str)) ++ str = audit_unpack_string(&bufp, &remain, f_val); ++ if (IS_ERR(str)) { ++ err = PTR_ERR(str); + 
goto exit_free; +- entry->rule.buflen += f->val; ++ } ++ entry->rule.buflen += f_val; + entry->rule.filterkey = str; + break; + case AUDIT_EXE: +- if (entry->rule.exe || f->val > PATH_MAX) ++ if (entry->rule.exe || f_val > PATH_MAX) + goto exit_free; +- str = audit_unpack_string(&bufp, &remain, f->val); ++ str = audit_unpack_string(&bufp, &remain, f_val); + if (IS_ERR(str)) { + err = PTR_ERR(str); + goto exit_free; + } +- entry->rule.buflen += f->val; +- +- audit_mark = audit_alloc_mark(&entry->rule, str, f->val); ++ audit_mark = audit_alloc_mark(&entry->rule, str, f_val); + if (IS_ERR(audit_mark)) { + kfree(str); + err = PTR_ERR(audit_mark); + goto exit_free; + } ++ entry->rule.buflen += f_val; + entry->rule.exe = audit_mark; + break; ++ default: ++ f->val = f_val; ++ break; + } + } + +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index d0fe20a5475f..66f1818d4762 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -523,6 +523,8 @@ static void do_unoptimize_kprobes(void) + arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); + /* Loop free_list for disarming */ + list_for_each_entry_safe(op, tmp, &freeing_list, list) { ++ /* Switching from detour code to origin */ ++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; + /* Disarm probes if marked disabled */ + if (kprobe_disabled(&op->kp)) + arch_disarm_kprobe(&op->kp); +@@ -623,6 +625,18 @@ void wait_for_kprobe_optimizer(void) + mutex_unlock(&kprobe_mutex); + } + ++static bool optprobe_queued_unopt(struct optimized_kprobe *op) ++{ ++ struct optimized_kprobe *_op; ++ ++ list_for_each_entry(_op, &unoptimizing_list, list) { ++ if (op == _op) ++ return true; ++ } ++ ++ return false; ++} ++ + /* Optimize kprobe if p is ready to be optimized */ + static void optimize_kprobe(struct kprobe *p) + { +@@ -644,17 +658,21 @@ static void optimize_kprobe(struct kprobe *p) + return; + + /* Check if it is already optimized. */ +- if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) ++ if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) { ++ if (optprobe_queued_unopt(op)) { ++ /* This is under unoptimizing. Just dequeue the probe */ ++ list_del_init(&op->list); ++ } + return; ++ } + op->kp.flags |= KPROBE_FLAG_OPTIMIZED; + +- if (!list_empty(&op->list)) +- /* This is under unoptimizing. Just dequeue the probe */ +- list_del_init(&op->list); +- else { +- list_add(&op->list, &optimizing_list); +- kick_kprobe_optimizer(); +- } ++ /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */ ++ if (WARN_ON_ONCE(!list_empty(&op->list))) ++ return; ++ ++ list_add(&op->list, &optimizing_list); ++ kick_kprobe_optimizer(); + } + + /* Short cut to direct unoptimizing */ +@@ -662,6 +680,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op) + { + lockdep_assert_cpus_held(); + arch_unoptimize_kprobe(op); ++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; + if (kprobe_disabled(&op->kp)) + arch_disarm_kprobe(&op->kp); + } +@@ -675,31 +694,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force) + return; /* This is not an optprobe nor optimized */ + + op = container_of(p, struct optimized_kprobe, kp); +- if (!kprobe_optimized(p)) { +- /* Unoptimized or unoptimizing case */ +- if (force && !list_empty(&op->list)) { +- /* +- * Only if this is unoptimizing kprobe and forced, +- * forcibly unoptimize it. 
(No need to unoptimize +- * unoptimized kprobe again :) +- */ +- list_del_init(&op->list); +- force_unoptimize_kprobe(op); +- } ++ if (!kprobe_optimized(p)) + return; +- } + +- op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; + if (!list_empty(&op->list)) { +- /* Dequeue from the optimization queue */ +- list_del_init(&op->list); ++ if (optprobe_queued_unopt(op)) { ++ /* Queued in unoptimizing queue */ ++ if (force) { ++ /* ++ * Forcibly unoptimize the kprobe here, and queue it ++ * in the freeing list for release afterwards. ++ */ ++ force_unoptimize_kprobe(op); ++ list_move(&op->list, &freeing_list); ++ } ++ } else { ++ /* Dequeue from the optimizing queue */ ++ list_del_init(&op->list); ++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; ++ } + return; + } ++ + /* Optimized kprobe case */ +- if (force) ++ if (force) { + /* Forcibly update the code: this is a special case */ + force_unoptimize_kprobe(op); +- else { ++ } else { + list_add(&op->list, &unoptimizing_list); + kick_kprobe_optimizer(); + } +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index c456c2b06277..207d7c35214f 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1543,6 +1543,7 @@ static __init int init_trace_selftests(void) + + pr_info("Running postponed tracer tests:\n"); + ++ tracing_selftest_running = true; + list_for_each_entry_safe(p, n, &postponed_selftests, list) { + ret = run_tracer_selftest(p->type); + /* If the test fails, then warn and remove from available_tracers */ +@@ -1561,6 +1562,7 @@ static __init int init_trace_selftests(void) + list_del(&p->list); + kfree(p); + } ++ tracing_selftest_running = false; + + out: + mutex_unlock(&trace_types_lock); +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 92915cc87549..283963032eff 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -163,16 +163,13 @@ static ssize_t enabled_store(struct kobject *kobj, + { + ssize_t ret = count; + +- if (!memcmp("always", buf, +- min(sizeof("always")-1, count))) { ++ if (sysfs_streq(buf, "always")) { + clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); +- } else if (!memcmp("madvise", buf, +- min(sizeof("madvise")-1, count))) { ++ } else if (sysfs_streq(buf, "madvise")) { + clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); +- } else if (!memcmp("never", buf, +- min(sizeof("never")-1, count))) { ++ } else if (sysfs_streq(buf, "never")) { + clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); + } else +@@ -236,32 +233,27 @@ static ssize_t defrag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) + { +- if (!memcmp("always", buf, +- min(sizeof("always")-1, count))) { ++ if (sysfs_streq(buf, "always")) { + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); +- } else if (!memcmp("defer+madvise", buf, +- min(sizeof("defer+madvise")-1, count))) { ++ } else if (sysfs_streq(buf, "defer+madvise")) { + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); + 
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); +- } else if (!memcmp("defer", buf, +- min(sizeof("defer")-1, count))) { ++ } else if (sysfs_streq(buf, "defer")) { + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); +- } else if (!memcmp("madvise", buf, +- min(sizeof("madvise")-1, count))) { ++ } else if (sysfs_streq(buf, "madvise")) { + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); +- } else if (!memcmp("never", buf, +- min(sizeof("never")-1, count))) { ++ } else if (sysfs_streq(buf, "never")) { + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); +@@ -2561,7 +2553,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) + unsigned long flags; + pgoff_t end; + +- VM_BUG_ON_PAGE(is_huge_zero_page(page), page); ++ VM_BUG_ON_PAGE(is_huge_zero_page(head), head); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(!PageCompound(page), page); + +diff --git a/mm/mprotect.c b/mm/mprotect.c +index 60864e19421e..18ecbd744978 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -148,6 +148,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, + return pages; + } + ++/* ++ * Used when setting automatic NUMA hinting protection where it is ++ * critical that a numa hinting PMD is not confused with a bad PMD. ++ */ ++static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) ++{ ++ pmd_t pmdval = pmd_read_atomic(pmd); ++ ++ /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */ ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++ barrier(); ++#endif ++ ++ if (pmd_none(pmdval)) ++ return 1; ++ if (pmd_trans_huge(pmdval)) ++ return 0; ++ if (unlikely(pmd_bad(pmdval))) { ++ pmd_clear_bad(pmd); ++ return 1; ++ } ++ ++ return 0; ++} ++ + static inline unsigned long change_pmd_range(struct vm_area_struct *vma, + pud_t *pud, unsigned long addr, unsigned long end, + pgprot_t newprot, int dirty_accountable, int prot_numa) +@@ -164,8 +189,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, + unsigned long this_pages; + + next = pmd_addr_end(addr, end); +- if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) +- && pmd_none_or_clear_bad(pmd)) ++ ++ /* ++ * Automatic NUMA balancing walks the tables with mmap_sem ++ * held for read. It's possible a parallel update to occur ++ * between pmd_trans_huge() and a pmd_none_or_clear_bad() ++ * check leading to a false positive and clearing. ++ * Hence, it's necessary to atomically read the PMD value ++ * for all the checks. 
++ */ ++ if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) && ++ pmd_none_or_clear_bad_unless_trans_huge(pmd)) + goto next; + + /* invoke the mmu notifier if the pmd is populated */ +diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c +index 9a6d97c1d810..9bb321df0869 100644 +--- a/net/core/fib_rules.c ++++ b/net/core/fib_rules.c +@@ -799,7 +799,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, + + frh = nlmsg_data(nlh); + frh->family = ops->family; +- frh->table = rule->table; ++ frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT; + if (nla_put_u32(skb, FRA_TABLE, rule->table)) + goto nla_put_failure; + if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen)) +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index e5308d7cbd75..d43abeb1e415 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -893,8 +893,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, + found++; + break; + } +- if (rt_can_ecmp) +- fallback_ins = fallback_ins ?: ins; ++ fallback_ins = fallback_ins ?: ins; + goto next_iter; + } + +@@ -934,7 +933,9 @@ next_iter: + } + + if (fallback_ins && !found) { +- /* No ECMP-able route found, replace first non-ECMP one */ ++ /* No matching route with same ecmp-able-ness found, replace ++ * first matching route ++ */ + ins = fallback_ins; + iter = *ins; + found++; +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index b81522bcf223..a4079ed56803 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -3283,6 +3283,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, + */ + cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | + NLM_F_REPLACE); ++ cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; + nhn++; + } + +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index 81f120466c38..cd3cdd1a0b57 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -944,16 +944,22 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, + elem_parse_failed = true; + break; + case WLAN_EID_VHT_OPERATION: +- if (elen >= sizeof(struct ieee80211_vht_operation)) ++ if (elen >= sizeof(struct ieee80211_vht_operation)) { + elems->vht_operation = (void *)pos; +- else +- elem_parse_failed = true; ++ if (calc_crc) ++ crc = crc32_be(crc, pos - 2, elen + 2); ++ break; ++ } ++ elem_parse_failed = true; + break; + case WLAN_EID_OPMODE_NOTIF: +- if (elen > 0) ++ if (elen > 0) { + elems->opmode_notif = pos; +- else +- elem_parse_failed = true; ++ if (calc_crc) ++ crc = crc32_be(crc, pos - 2, elen + 2); ++ break; ++ } ++ elem_parse_failed = true; + break; + case WLAN_EID_MESH_ID: + elems->mesh_id = pos; +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index 2e65271bed01..a79f5a89cab1 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -543,6 +543,18 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, + net_eq(net, nf_ct_net(ct)); + } + ++static inline bool ++nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2) ++{ ++ return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple, ++ &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) && ++ nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple, ++ &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) && ++ nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) && ++ nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) && ++ net_eq(nf_ct_net(ct1), nf_ct_net(ct2)); ++} ++ + /* caller must hold rcu readlock and none of the nf_conntrack_locks */ + static 
void nf_ct_gc_expired(struct nf_conn *ct) + { +@@ -736,19 +748,21 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, + /* This is the conntrack entry already in hashes that won race. */ + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); + const struct nf_conntrack_l4proto *l4proto; ++ enum ip_conntrack_info oldinfo; ++ struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); + + l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + if (l4proto->allow_clash && +- ((ct->status & IPS_NAT_DONE_MASK) == 0) && + !nf_ct_is_dying(ct) && + atomic_inc_not_zero(&ct->ct_general.use)) { +- enum ip_conntrack_info oldinfo; +- struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); +- +- nf_ct_acct_merge(ct, ctinfo, loser_ct); +- nf_conntrack_put(&loser_ct->ct_general); +- nf_ct_set(skb, ct, oldinfo); +- return NF_ACCEPT; ++ if (((ct->status & IPS_NAT_DONE_MASK) == 0) || ++ nf_ct_match(ct, loser_ct)) { ++ nf_ct_acct_merge(ct, ctinfo, loser_ct); ++ nf_conntrack_put(&loser_ct->ct_general); ++ nf_ct_set(skb, ct, oldinfo); ++ return NF_ACCEPT; ++ } ++ nf_ct_put(ct); + } + NF_CT_STAT_INC(net, drop); + return NF_DROP; +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index c67abda5d639..07924559cb10 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -997,7 +997,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, + if (nlk->netlink_bind && groups) { + int group; + +- for (group = 0; group < nlk->ngroups; group++) { ++ /* nl_groups is a u32, so cap the maximum groups we can bind */ ++ for (group = 0; group < BITS_PER_TYPE(u32); group++) { + if (!test_bit(group, &groups)) + continue; + err = nlk->netlink_bind(net, group + 1); +@@ -1016,7 +1017,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, + netlink_insert(sk, nladdr->nl_pid) : + netlink_autobind(sock); + if (err) { +- netlink_undo_bind(nlk->ngroups, groups, sk); ++ netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk); + goto unlock; + } + } +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c +index 80a5a6d503c8..8974bd25c71e 100644 +--- a/net/sched/cls_flower.c ++++ b/net/sched/cls_flower.c +@@ -160,6 +160,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, + if (!atomic_read(&head->ht.nelems)) + return -1; + ++ flow_dissector_init_keys(&skb_key.control, &skb_key.basic); + fl_clear_masked_range(&skb_key, &head->mask); + + info = skb_tunnel_info(skb); +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index a2e058127ef7..ba29d782af30 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -182,6 +182,16 @@ static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk, + return true; + } + ++/* Check for format error in an ABORT chunk */ ++static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk) ++{ ++ struct sctp_errhdr *err; ++ ++ sctp_walk_errors(err, chunk->chunk_hdr); ++ ++ return (void *)err == (void *)chunk->chunk_end; ++} ++ + /********************************************************** + * These are the state functions for handling chunk events. 
+ **********************************************************/ +@@ -2202,6 +2212,9 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort( + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + ++ if (!sctp_err_chunk_valid(chunk)) ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); ++ + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); + } + +@@ -2245,6 +2258,9 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort( + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + ++ if (!sctp_err_chunk_valid(chunk)) ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); ++ + /* Stop the T2-shutdown timer. */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); +@@ -2512,6 +2528,9 @@ enum sctp_disposition sctp_sf_do_9_1_abort( + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + ++ if (!sctp_err_chunk_valid(chunk)) ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); ++ + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); + } + +@@ -2529,16 +2548,8 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort( + + /* See if we have an error cause code in the chunk. */ + len = ntohs(chunk->chunk_hdr->length); +- if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) { +- struct sctp_errhdr *err; +- +- sctp_walk_errors(err, chunk->chunk_hdr); +- if ((void *)err != (void *)chunk->chunk_end) +- return sctp_sf_pdiscard(net, ep, asoc, type, arg, +- commands); +- ++ if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) + error = ((struct sctp_errhdr *)chunk->skb->data)->cause; +- } + + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); + /* ASSOC_FAILED will DELETE_TCB. 
*/ +diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c +index a9c0f368db5d..24e18405cdb4 100644 +--- a/net/wireless/ethtool.c ++++ b/net/wireless/ethtool.c +@@ -7,9 +7,13 @@ + void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) + { + struct wireless_dev *wdev = dev->ieee80211_ptr; ++ struct device *pdev = wiphy_dev(wdev->wiphy); + +- strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name, +- sizeof(info->driver)); ++ if (pdev->driver) ++ strlcpy(info->driver, pdev->driver->name, ++ sizeof(info->driver)); ++ else ++ strlcpy(info->driver, "N/A", sizeof(info->driver)); + + strlcpy(info->version, init_utsname()->release, sizeof(info->version)); + +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index df8c5312f26a..b248578aeb7b 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -321,6 +321,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { + [NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 }, + [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG }, + [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, ++ [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 }, + [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, + [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, + [NL80211_ATTR_PID] = { .type = NLA_U32 }, +diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c +index 68feae262476..940bdc30753d 100644 +--- a/sound/soc/codecs/pcm512x.c ++++ b/sound/soc/codecs/pcm512x.c +@@ -1438,13 +1438,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) + } + + pcm512x->sclk = devm_clk_get(dev, NULL); +- if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) { ++ ret = -EPROBE_DEFER; ++ goto err; ++ } + if (!IS_ERR(pcm512x->sclk)) { + ret = clk_prepare_enable(pcm512x->sclk); + if (ret != 0) { + dev_err(dev, "Failed to enable SCLK: %d\n", ret); +- return ret; ++ goto err; + } + } + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index 1987f78ea91e..71c6bbf37b6c 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -42,8 +42,8 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf, + int i; + ssize_t ret = 0; + +- for (i = 0; i < max_pin; i++) +- ret += snprintf(buf + size, MOD_BUF - size, ++ for (i = 0; i < max_pin; i++) { ++ ret += scnprintf(buf + size, MOD_BUF - size, + "%s %d\n\tModule %d\n\tInstance %d\n\t" + "In-used %s\n\tType %s\n" + "\tState %d\n\tIndex %d\n", +@@ -53,13 +53,15 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf, + m_pin[i].in_use ? "Used" : "Unused", + m_pin[i].is_dynamic ? 
"Dynamic" : "Static", + m_pin[i].pin_state, i); ++ size += ret; ++ } + return ret; + } + + static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf, + ssize_t size, bool direction) + { +- return snprintf(buf + size, MOD_BUF - size, ++ return scnprintf(buf + size, MOD_BUF - size, + "%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t" + "Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t" + "Sample Type %d\n\tCh Map %#x\n", +@@ -81,16 +83,16 @@ static ssize_t module_read(struct file *file, char __user *user_buf, + if (!buf) + return -ENOMEM; + +- ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n" ++ ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n" + "\tInstance id %d\n\tPvt_id %d\n", mconfig->guid, + mconfig->id.module_id, mconfig->id.instance_id, + mconfig->id.pvt_id); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "Resources:\n\tMCPS %#x\n\tIBS %#x\n\tOBS %#x\t\n", + mconfig->mcps, mconfig->ibs, mconfig->obs); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "Module data:\n\tCore %d\n\tIn queue %d\n\t" + "Out queue %d\n\tType %s\n", + mconfig->core_id, mconfig->max_in_queue, +@@ -100,38 +102,38 @@ static ssize_t module_read(struct file *file, char __user *user_buf, + ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true); + ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "Fixup:\n\tParams %#x\n\tConverter %#x\n", + mconfig->params_fixup, mconfig->converter); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n", + mconfig->dev_type, mconfig->vbus_id, + mconfig->hw_conn_type, mconfig->time_slot); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t" + "Pages %#x\n", mconfig->pipe->ppl_id, + mconfig->pipe->pipe_priority, mconfig->pipe->conn_type, + mconfig->pipe->memory_pages); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n", + mconfig->pipe->p_params->host_dma_id, + mconfig->pipe->p_params->link_dma_id); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n", + mconfig->pipe->p_params->ch, + mconfig->pipe->p_params->s_freq, + mconfig->pipe->p_params->s_fmt); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "\tLink %#x\n\tStream %#x\n", + mconfig->pipe->p_params->linktype, + mconfig->pipe->p_params->stream); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "\tState %d\n\tPassthru %s\n", + mconfig->pipe->state, + mconfig->pipe->passthru ? 
"true" : "false"); +@@ -141,7 +143,7 @@ static ssize_t module_read(struct file *file, char __user *user_buf, + ret += skl_print_pins(mconfig->m_out_pin, buf, + mconfig->max_out_queue, ret, false); + +- ret += snprintf(buf + ret, MOD_BUF - ret, ++ ret += scnprintf(buf + ret, MOD_BUF - ret, + "Other:\n\tDomain %d\n\tHomogenous Input %s\n\t" + "Homogenous Output %s\n\tIn Queue Mask %d\n\t" + "Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t" +@@ -199,7 +201,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf, + __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2); + + for (offset = 0; offset < FW_REG_SIZE; offset += 16) { +- ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset); ++ ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset); + hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4, + tmp + ret, FW_REG_BUF - ret, 0); + ret += strlen(tmp + ret); +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 104d5f487c7d..fb2fef166672 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -4481,7 +4481,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm) + continue; + if (w->power) { + dapm_seq_insert(w, &down_list, false); +- w->power = 0; ++ w->new_power = 0; + powerdown = 1; + } + } +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 89f772ed4705..e75822dd9930 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -2957,16 +2957,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, + ssize_t offset = 0; + + /* FE state */ +- offset += snprintf(buf + offset, size - offset, ++ offset += scnprintf(buf + offset, size - offset, + "[%s - %s]\n", fe->dai_link->name, + stream ? "Capture" : "Playback"); + +- offset += snprintf(buf + offset, size - offset, "State: %s\n", ++ offset += scnprintf(buf + offset, size - offset, "State: %s\n", + dpcm_state_string(fe->dpcm[stream].state)); + + if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) && + (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP)) +- offset += snprintf(buf + offset, size - offset, ++ offset += scnprintf(buf + offset, size - offset, + "Hardware Params: " + "Format = %s, Channels = %d, Rate = %d\n", + snd_pcm_format_name(params_format(params)), +@@ -2974,10 +2974,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, + params_rate(params)); + + /* BEs state */ +- offset += snprintf(buf + offset, size - offset, "Backends:\n"); ++ offset += scnprintf(buf + offset, size - offset, "Backends:\n"); + + if (list_empty(&fe->dpcm[stream].be_clients)) { +- offset += snprintf(buf + offset, size - offset, ++ offset += scnprintf(buf + offset, size - offset, + " No active DSP links\n"); + goto out; + } +@@ -2986,16 +2986,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, + struct snd_soc_pcm_runtime *be = dpcm->be; + params = &dpcm->hw_params; + +- offset += snprintf(buf + offset, size - offset, ++ offset += scnprintf(buf + offset, size - offset, + "- %s\n", be->dai_link->name); + +- offset += snprintf(buf + offset, size - offset, ++ offset += scnprintf(buf + offset, size - offset, + " State: %s\n", + dpcm_state_string(be->dpcm[stream].state)); + + if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) && + (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP)) +- offset += snprintf(buf + offset, size - offset, ++ offset += scnprintf(buf + offset, size - offset, + " Hardware Params: " + "Format = %s, Channels = %d, Rate = %d\n", + 
snd_pcm_format_name(params_format(params)), +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c +index 72301bcad3bd..1a912f72bddd 100644 +--- a/sound/soc/soc-topology.c ++++ b/sound/soc/soc-topology.c +@@ -2177,8 +2177,11 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg, + } + + ret = soc_tplg_link_config(tplg, _link); +- if (ret < 0) ++ if (ret < 0) { ++ if (!abi_match) ++ kfree(_link); + return ret; ++ } + + /* offset by version-specific struct size and + * real priv data size +@@ -2330,7 +2333,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg, + { + struct snd_soc_tplg_manifest *manifest, *_manifest; + bool abi_match; +- int err; ++ int ret = 0; + + if (tplg->pass != SOC_TPLG_PASS_MANIFEST) + return 0; +@@ -2343,19 +2346,19 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg, + _manifest = manifest; + } else { + abi_match = false; +- err = manifest_new_ver(tplg, manifest, &_manifest); +- if (err < 0) +- return err; ++ ret = manifest_new_ver(tplg, manifest, &_manifest); ++ if (ret < 0) ++ return ret; + } + + /* pass control to component driver for optional further init */ + if (tplg->comp && tplg->ops && tplg->ops->manifest) +- return tplg->ops->manifest(tplg->comp, _manifest); ++ ret = tplg->ops->manifest(tplg->comp, _manifest); + + if (!abi_match) /* free the duplicated one */ + kfree(_manifest); + +- return 0; ++ return ret; + } + + /* validate header magic, size and type */ +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c +index 628ad5f7eddb..49a87fb64156 100644 +--- a/tools/perf/ui/browsers/hists.c ++++ b/tools/perf/ui/browsers/hists.c +@@ -3142,6 +3142,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, + + continue; + } ++ actions->ms.map = map; + top = pstack__peek(browser->pstack); + if (top == &browser->hists->dso_filter) { + /* +diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk +index 5bef05d6ba39..c9be64dc681d 100644 +--- a/tools/testing/selftests/lib.mk ++++ b/tools/testing/selftests/lib.mk +@@ -54,17 +54,20 @@ else + $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS)) + endif + ++define INSTALL_SINGLE_RULE ++ $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH)) ++ $(if $(INSTALL_LIST),@echo rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/) ++ $(if $(INSTALL_LIST),@rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/) ++endef ++ + define INSTALL_RULE +- @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ +- mkdir -p ${INSTALL_PATH}; \ +- echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ +- rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ +- fi +- @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \ +- mkdir -p ${INSTALL_PATH}; \ +- echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \ +- rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \ +- fi ++ $(eval INSTALL_LIST = $(TEST_PROGS)) $(INSTALL_SINGLE_RULE) ++ $(eval INSTALL_LIST = $(TEST_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE) ++ $(eval INSTALL_LIST = $(TEST_FILES)) $(INSTALL_SINGLE_RULE) ++ $(eval INSTALL_LIST = $(TEST_GEN_PROGS)) $(INSTALL_SINGLE_RULE) ++ $(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE) ++ $(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE) ++ $(eval INSTALL_LIST = 
$(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE) + endef + + install: all +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 745ee09083dd..71f77ae6c2a6 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -2027,12 +2027,12 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + if (slots->generation != ghc->generation) + __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len); + +- if (unlikely(!ghc->memslot)) +- return kvm_write_guest(kvm, gpa, data, len); +- + if (kvm_is_error_hva(ghc->hva)) + return -EFAULT; + ++ if (unlikely(!ghc->memslot)) ++ return kvm_write_guest(kvm, gpa, data, len); ++ + r = __copy_to_user((void __user *)ghc->hva + offset, data, len); + if (r) + return -EFAULT; +@@ -2060,12 +2060,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + if (slots->generation != ghc->generation) + __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len); + +- if (unlikely(!ghc->memslot)) +- return kvm_read_guest(kvm, ghc->gpa, data, len); +- + if (kvm_is_error_hva(ghc->hva)) + return -EFAULT; + ++ if (unlikely(!ghc->memslot)) ++ return kvm_read_guest(kvm, ghc->gpa, data, len); ++ + r = __copy_from_user(data, (void __user *)ghc->hva, len); + if (r) + return -EFAULT; |